From 64abef62842678ce312c4014231e2463b0accbc9 Mon Sep 17 00:00:00 2001 From: Amy Roberts <22614925+amyeroberts@users.noreply.github.com> Date: Thu, 19 Jan 2023 16:22:59 +0000 Subject: [PATCH 01/15] Update imports and test fetcher --- tests/models/beit/test_image_processing_beit.py | 5 ++--- tests/models/blip/test_image_processing_blip.py | 6 +++--- .../chinese_clip/test_image_processing_chinese_clip.py | 6 +++--- tests/models/clip/test_image_processing_clip.py | 6 +++--- .../test_image_processing_conditional_detr.py | 5 ++--- tests/models/convnext/test_image_processing_convnext.py | 5 ++--- .../test_image_processing_deformable_detr.py | 5 ++--- tests/models/deit/test_image_processing_deit.py | 5 ++--- tests/models/detr/test_image_processing_detr.py | 5 ++--- tests/models/donut/test_image_processing_donut.py | 5 ++--- tests/models/dpt/test_image_processing_dpt.py | 5 ++--- tests/models/flava/test_image_processing_flava.py | 5 ++--- tests/models/glpn/test_image_processing_glpn.py | 5 ++--- tests/models/imagegpt/test_image_processing_imagegpt.py | 4 ++-- tests/models/layoutlmv2/test_image_processing_layoutlmv2.py | 5 ++--- tests/models/layoutlmv3/test_image_processing_layoutlmv3.py | 5 ++--- tests/models/levit/test_image_processing_levit.py | 5 ++--- tests/models/maskformer/test_image_processing_maskformer.py | 5 ++--- .../mobilenet_v1/test_image_processing_mobilenet_v1.py | 5 ++--- .../mobilenet_v2/test_image_processing_mobilenet_v2.py | 5 ++--- tests/models/mobilevit/test_image_processing_mobilevit.py | 5 ++--- tests/models/oneformer/test_image_processing_oneformer.py | 5 ++--- tests/models/owlvit/test_image_processing_owlvit.py | 5 ++--- tests/models/poolformer/test_image_processing_poolformer.py | 5 ++--- tests/models/segformer/test_image_processing_segformer.py | 5 ++--- tests/models/swin2sr/test_image_processing_swin2sr.py | 4 ++-- tests/models/videomae/test_image_processing_videomae.py | 5 ++--- tests/models/vilt/test_image_processing_vilt.py | 5 ++--- 
tests/models/vit/test_image_processing_vit.py | 5 ++--- tests/models/yolos/test_image_processing_yolos.py | 5 ++--- utils/tests_fetcher.py | 1 + 31 files changed, 64 insertions(+), 88 deletions(-) diff --git a/tests/models/beit/test_image_processing_beit.py b/tests/models/beit/test_image_processing_beit.py index b499f008457b..ad6b2ae72331 100644 --- a/tests/models/beit/test_image_processing_beit.py +++ b/tests/models/beit/test_image_processing_beit.py @@ -22,8 +22,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -105,7 +104,7 @@ def prepare_semantic_batch_inputs(): @require_torch @require_vision -class BeitFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class BeitFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): feature_extraction_class = BeitFeatureExtractor if is_vision_available() else None diff --git a/tests/models/blip/test_image_processing_blip.py b/tests/models/blip/test_image_processing_blip.py index ea31038b14ab..1becb12a2243 100644 --- a/tests/models/blip/test_image_processing_blip.py +++ b/tests/models/blip/test_image_processing_blip.py @@ -21,7 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin +from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): @@ -109,7 +109,7 @@ def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False): @require_torch @require_vision -class 
BlipImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class BlipImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): feature_extraction_class = BlipImageProcessor if is_vision_available() else None @@ -231,7 +231,7 @@ def test_call_pytorch(self): @require_torch @require_vision -class BlipImageProcessingTestFourChannels(FeatureExtractionSavingTestMixin, unittest.TestCase): +class BlipImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase): feature_extraction_class = BlipImageProcessor if is_vision_available() else None diff --git a/tests/models/chinese_clip/test_image_processing_chinese_clip.py b/tests/models/chinese_clip/test_image_processing_chinese_clip.py index 616dfa3ffc7a..9b16f8525ff3 100644 --- a/tests/models/chinese_clip/test_image_processing_chinese_clip.py +++ b/tests/models/chinese_clip/test_image_processing_chinese_clip.py @@ -21,7 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin +from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): @@ -113,7 +113,7 @@ def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False): @require_torch @require_vision -class ChineseCLIPFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class ChineseCLIPFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): feature_extraction_class = ChineseCLIPFeatureExtractor if is_vision_available() else None @@ -246,7 +246,7 @@ def test_call_pytorch(self): @require_torch @require_vision -class ChineseCLIPFeatureExtractionTestFourChannels(FeatureExtractionSavingTestMixin, unittest.TestCase): +class ChineseCLIPFeatureExtractionTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase): feature_extraction_class = 
ChineseCLIPFeatureExtractor if is_vision_available() else None diff --git a/tests/models/clip/test_image_processing_clip.py b/tests/models/clip/test_image_processing_clip.py index 8f29b63bbb55..0dd2d7e7646e 100644 --- a/tests/models/clip/test_image_processing_clip.py +++ b/tests/models/clip/test_image_processing_clip.py @@ -21,7 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin +from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): @@ -113,7 +113,7 @@ def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False): @require_torch @require_vision -class CLIPFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class CLIPFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): feature_extraction_class = CLIPFeatureExtractor if is_vision_available() else None @@ -246,7 +246,7 @@ def test_call_pytorch(self): @require_torch @require_vision -class CLIPFeatureExtractionTestFourChannels(FeatureExtractionSavingTestMixin, unittest.TestCase): +class CLIPFeatureExtractionTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase): feature_extraction_class = CLIPFeatureExtractor if is_vision_available() else None diff --git a/tests/models/conditional_detr/test_image_processing_conditional_detr.py b/tests/models/conditional_detr/test_image_processing_conditional_detr.py index b4e6f46d3e9e..038e5d52e118 100644 --- a/tests/models/conditional_detr/test_image_processing_conditional_detr.py +++ b/tests/models/conditional_detr/test_image_processing_conditional_detr.py @@ -23,8 +23,7 @@ from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import 
FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -115,7 +114,7 @@ def get_expected_values(self, image_inputs, batched=False): @require_torch @require_vision -class ConditionalDetrFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class ConditionalDetrFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): feature_extraction_class = ConditionalDetrFeatureExtractor if is_vision_available() else None diff --git a/tests/models/convnext/test_image_processing_convnext.py b/tests/models/convnext/test_image_processing_convnext.py index 4fd62fc51d19..053189701210 100644 --- a/tests/models/convnext/test_image_processing_convnext.py +++ b/tests/models/convnext/test_image_processing_convnext.py @@ -21,8 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -77,7 +76,7 @@ def prepare_feat_extract_dict(self): @require_torch @require_vision -class ConvNextFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class ConvNextFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): feature_extraction_class = ConvNextFeatureExtractor if is_vision_available() else None diff --git a/tests/models/deformable_detr/test_image_processing_deformable_detr.py b/tests/models/deformable_detr/test_image_processing_deformable_detr.py index bc6368953949..f582d5553995 100644 --- a/tests/models/deformable_detr/test_image_processing_deformable_detr.py +++ 
b/tests/models/deformable_detr/test_image_processing_deformable_detr.py @@ -23,8 +23,7 @@ from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -115,7 +114,7 @@ def get_expected_values(self, image_inputs, batched=False): @require_torch @require_vision -class DeformableDetrFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class DeformableDetrFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): feature_extraction_class = DeformableDetrFeatureExtractor if is_vision_available() else None diff --git a/tests/models/deit/test_image_processing_deit.py b/tests/models/deit/test_image_processing_deit.py index db1e42f77109..18508df5d45c 100644 --- a/tests/models/deit/test_image_processing_deit.py +++ b/tests/models/deit/test_image_processing_deit.py @@ -21,8 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -82,7 +81,7 @@ def prepare_feat_extract_dict(self): @require_torch @require_vision -class DeiTFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class DeiTFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): feature_extraction_class = DeiTFeatureExtractor if is_vision_available() else None test_cast_dtype = True diff --git 
a/tests/models/detr/test_image_processing_detr.py b/tests/models/detr/test_image_processing_detr.py index 253ffb7c2972..1da7ae101240 100644 --- a/tests/models/detr/test_image_processing_detr.py +++ b/tests/models/detr/test_image_processing_detr.py @@ -23,8 +23,7 @@ from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -115,7 +114,7 @@ def get_expected_values(self, image_inputs, batched=False): @require_torch @require_vision -class DetrFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class DetrFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): feature_extraction_class = DetrFeatureExtractor if is_vision_available() else None diff --git a/tests/models/donut/test_image_processing_donut.py b/tests/models/donut/test_image_processing_donut.py index 550d166e460d..81dc4389a1f0 100644 --- a/tests/models/donut/test_image_processing_donut.py +++ b/tests/models/donut/test_image_processing_donut.py @@ -21,8 +21,7 @@ from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -82,7 +81,7 @@ def prepare_feat_extract_dict(self): @require_torch @require_vision -class DonutFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class DonutFeatureExtractionTest(ImageProcessingSavingTestMixin, 
unittest.TestCase): feature_extraction_class = DonutFeatureExtractor if is_vision_available() else None diff --git a/tests/models/dpt/test_image_processing_dpt.py b/tests/models/dpt/test_image_processing_dpt.py index 0bbeb173e597..e17abce74ae6 100644 --- a/tests/models/dpt/test_image_processing_dpt.py +++ b/tests/models/dpt/test_image_processing_dpt.py @@ -21,8 +21,7 @@ from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -74,7 +73,7 @@ def prepare_feat_extract_dict(self): @require_torch @require_vision -class DPTFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class DPTFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): feature_extraction_class = DPTFeatureExtractor if is_vision_available() else None diff --git a/tests/models/flava/test_image_processing_flava.py b/tests/models/flava/test_image_processing_flava.py index 28718748200d..939520e780ba 100644 --- a/tests/models/flava/test_image_processing_flava.py +++ b/tests/models/flava/test_image_processing_flava.py @@ -21,8 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -160,7 +159,7 @@ def get_expected_codebook_image_size(self): @require_torch @require_vision -class FlavaFeatureExtractionTest(FeatureExtractionSavingTestMixin, 
unittest.TestCase): +class FlavaFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): feature_extraction_class = FlavaFeatureExtractor if is_vision_available() else None maxDiff = None diff --git a/tests/models/glpn/test_image_processing_glpn.py b/tests/models/glpn/test_image_processing_glpn.py index 31e527761771..bbbd54269fc9 100644 --- a/tests/models/glpn/test_image_processing_glpn.py +++ b/tests/models/glpn/test_image_processing_glpn.py @@ -21,8 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -67,7 +66,7 @@ def prepare_feat_extract_dict(self): @require_torch @require_vision -class GLPNFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class GLPNFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): feature_extraction_class = GLPNFeatureExtractor if is_vision_available() else None diff --git a/tests/models/imagegpt/test_image_processing_imagegpt.py b/tests/models/imagegpt/test_image_processing_imagegpt.py index 465a6015a39a..2ef41cfc5df8 100644 --- a/tests/models/imagegpt/test_image_processing_imagegpt.py +++ b/tests/models/imagegpt/test_image_processing_imagegpt.py @@ -25,7 +25,7 @@ from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin +from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): @@ -78,7 +78,7 @@ def prepare_feat_extract_dict(self): @require_torch @require_vision -class 
ImageGPTFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class ImageGPTFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): feature_extraction_class = ImageGPTFeatureExtractor if is_vision_available() else None diff --git a/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py b/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py index 4423d33376e4..112d40cca383 100644 --- a/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py +++ b/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py @@ -21,8 +21,7 @@ from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -64,7 +63,7 @@ def prepare_feat_extract_dict(self): @require_torch @require_pytesseract -class LayoutLMv2FeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class LayoutLMv2FeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): feature_extraction_class = LayoutLMv2FeatureExtractor if is_pytesseract_available() else None diff --git a/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py b/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py index 829fc8d79dde..6d9f4fd58b6f 100644 --- a/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py +++ b/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py @@ -21,8 +21,7 @@ from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from 
...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -64,7 +63,7 @@ def prepare_feat_extract_dict(self): @require_torch @require_pytesseract -class LayoutLMv3FeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class LayoutLMv3FeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): feature_extraction_class = LayoutLMv3FeatureExtractor if is_pytesseract_available() else None diff --git a/tests/models/levit/test_image_processing_levit.py b/tests/models/levit/test_image_processing_levit.py index 76f3c66e1ade..912bb9deb545 100644 --- a/tests/models/levit/test_image_processing_levit.py +++ b/tests/models/levit/test_image_processing_levit.py @@ -21,8 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -81,7 +80,7 @@ def prepare_feat_extract_dict(self): @require_torch @require_vision -class LevitFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class LevitFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): feature_extraction_class = LevitFeatureExtractor if is_vision_available() else None diff --git a/tests/models/maskformer/test_image_processing_maskformer.py b/tests/models/maskformer/test_image_processing_maskformer.py index f8ddf8c9dc03..ccad43d4185a 100644 --- a/tests/models/maskformer/test_image_processing_maskformer.py +++ b/tests/models/maskformer/test_image_processing_maskformer.py @@ -23,8 +23,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from 
...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -131,7 +130,7 @@ def get_fake_maskformer_outputs(self): @require_torch @require_vision -class MaskFormerFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class MaskFormerFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): feature_extraction_class = MaskFormerFeatureExtractor if (is_vision_available() and is_torch_available()) else None diff --git a/tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py b/tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py index 383f91c554f8..c0b6f2979405 100644 --- a/tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py +++ b/tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py @@ -21,8 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -72,7 +71,7 @@ def prepare_feat_extract_dict(self): @require_torch @require_vision -class MobileNetV1FeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class MobileNetV1FeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): feature_extraction_class = MobileNetV1FeatureExtractor if is_vision_available() else None diff --git a/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py b/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py index e207932e38e0..aa35b62383da 100644 --- 
a/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py +++ b/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py @@ -21,8 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -72,7 +71,7 @@ def prepare_feat_extract_dict(self): @require_torch @require_vision -class MobileNetV2FeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class MobileNetV2FeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): feature_extraction_class = MobileNetV2FeatureExtractor if is_vision_available() else None diff --git a/tests/models/mobilevit/test_image_processing_mobilevit.py b/tests/models/mobilevit/test_image_processing_mobilevit.py index a22fc2c1d541..ce3d07a3ad23 100644 --- a/tests/models/mobilevit/test_image_processing_mobilevit.py +++ b/tests/models/mobilevit/test_image_processing_mobilevit.py @@ -21,8 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -75,7 +74,7 @@ def prepare_feat_extract_dict(self): @require_torch @require_vision -class MobileViTFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class MobileViTFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): feature_extraction_class = MobileViTFeatureExtractor if is_vision_available() 
else None diff --git a/tests/models/oneformer/test_image_processing_oneformer.py b/tests/models/oneformer/test_image_processing_oneformer.py index 79c6d82c3f42..8faa441b5184 100644 --- a/tests/models/oneformer/test_image_processing_oneformer.py +++ b/tests/models/oneformer/test_image_processing_oneformer.py @@ -23,8 +23,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -156,7 +155,7 @@ def get_fake_oneformer_outputs(self): @require_torch @require_vision -class OneFormerImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None # only for test_feat_extracttion_common.test_feat_extract_to_json_string feature_extraction_class = image_processing_class diff --git a/tests/models/owlvit/test_image_processing_owlvit.py b/tests/models/owlvit/test_image_processing_owlvit.py index bf2cd8d666d2..77e3ebc52049 100644 --- a/tests/models/owlvit/test_image_processing_owlvit.py +++ b/tests/models/owlvit/test_image_processing_owlvit.py @@ -21,8 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -82,7 +81,7 @@ def prepare_feat_extract_dict(self): 
@require_torch @require_vision -class OwlViTFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class OwlViTFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): feature_extraction_class = OwlViTFeatureExtractor if is_vision_available() else None diff --git a/tests/models/poolformer/test_image_processing_poolformer.py b/tests/models/poolformer/test_image_processing_poolformer.py index 47e583a3211a..7f999c9c13f3 100644 --- a/tests/models/poolformer/test_image_processing_poolformer.py +++ b/tests/models/poolformer/test_image_processing_poolformer.py @@ -20,8 +20,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -78,7 +77,7 @@ def prepare_feat_extract_dict(self): @require_torch @require_vision -class PoolFormerFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class PoolFormerFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): feature_extraction_class = PoolFormerFeatureExtractor if is_vision_available() else None diff --git a/tests/models/segformer/test_image_processing_segformer.py b/tests/models/segformer/test_image_processing_segformer.py index a104fc2f4835..de0c2d2ac203 100644 --- a/tests/models/segformer/test_image_processing_segformer.py +++ b/tests/models/segformer/test_image_processing_segformer.py @@ -22,8 +22,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from 
...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -96,7 +95,7 @@ def prepare_semantic_batch_inputs(): @require_torch @require_vision -class SegformerFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class SegformerFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): feature_extraction_class = SegformerFeatureExtractor if is_vision_available() else None diff --git a/tests/models/swin2sr/test_image_processing_swin2sr.py b/tests/models/swin2sr/test_image_processing_swin2sr.py index 393a44ecface..488f55714ec2 100644 --- a/tests/models/swin2sr/test_image_processing_swin2sr.py +++ b/tests/models/swin2sr/test_image_processing_swin2sr.py @@ -21,7 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin +from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): @@ -100,7 +100,7 @@ def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False): @require_torch @require_vision -class Swin2SRImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class Swin2SRImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): feature_extraction_class = Swin2SRImageProcessor if is_vision_available() else None diff --git a/tests/models/videomae/test_image_processing_videomae.py b/tests/models/videomae/test_image_processing_videomae.py index 025c39ef97f8..98a60ac39e56 100644 --- a/tests/models/videomae/test_image_processing_videomae.py +++ b/tests/models/videomae/test_image_processing_videomae.py @@ -21,8 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import 
FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_video_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): @@ -81,7 +80,7 @@ def prepare_feat_extract_dict(self): @require_torch @require_vision -class VideoMAEFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class VideoMAEFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): feature_extraction_class = VideoMAEFeatureExtractor if is_vision_available() else None diff --git a/tests/models/vilt/test_image_processing_vilt.py b/tests/models/vilt/test_image_processing_vilt.py index 5d7be90a7475..2c75ddee06d9 100644 --- a/tests/models/vilt/test_image_processing_vilt.py +++ b/tests/models/vilt/test_image_processing_vilt.py @@ -21,8 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -117,7 +116,7 @@ def get_expected_values(self, image_inputs, batched=False): @require_torch @require_vision -class ViltFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class ViltFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): feature_extraction_class = ViltFeatureExtractor if is_vision_available() else None diff --git a/tests/models/vit/test_image_processing_vit.py b/tests/models/vit/test_image_processing_vit.py index a0db60887e40..f358e2a39e03 100644 --- a/tests/models/vit/test_image_processing_vit.py +++ b/tests/models/vit/test_image_processing_vit.py @@ -21,8 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import 
is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -74,7 +73,7 @@ def prepare_feat_extract_dict(self): @require_torch @require_vision -class ViTFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class ViTFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): feature_extraction_class = ViTFeatureExtractor if is_vision_available() else None diff --git a/tests/models/yolos/test_image_processing_yolos.py b/tests/models/yolos/test_image_processing_yolos.py index 4e22baa4d668..2c82ac6ecbe7 100644 --- a/tests/models/yolos/test_image_processing_yolos.py +++ b/tests/models/yolos/test_image_processing_yolos.py @@ -23,8 +23,7 @@ from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -115,7 +114,7 @@ def get_expected_values(self, image_inputs, batched=False): @require_torch @require_vision -class YolosFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class YolosFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): feature_extraction_class = YolosFeatureExtractor if is_vision_available() else None diff --git a/utils/tests_fetcher.py b/utils/tests_fetcher.py index d388c11361e7..c9c9dfaaba50 100644 --- a/utils/tests_fetcher.py +++ b/utils/tests_fetcher.py @@ -355,6 +355,7 @@ def create_reverse_dependency_map(): "file_utils.py": ["utils/test_file_utils.py", 
"utils/test_model_output.py"], "image_processing_utils.py": ["test_image_processing_common.py", "utils/test_image_processing_utils.py"], "image_transforms.py": "test_image_transforms.py", + "image_processing_utils.py": ["test_image_processing_common.py", "utils/test_image_processing_utils.py"], "utils/generic.py": ["utils/test_file_utils.py", "utils/test_model_output.py", "utils/test_generic.py"], "utils/hub.py": "utils/test_hub_utils.py", "modelcard.py": "utils/test_model_card.py", From f56be09e131de7f41da98fd7bcd7146de3e27e51 Mon Sep 17 00:00:00 2001 From: Amy Roberts <22614925+amyeroberts@users.noreply.github.com> Date: Thu, 19 Jan 2023 17:01:13 +0000 Subject: [PATCH 02/15] Revert but keep test fetcher update --- .../models/beit/test_image_processing_beit.py | 5 +++-- .../models/blip/test_image_processing_blip.py | 6 +++--- .../test_image_processing_chinese_clip.py | 6 +++--- .../models/clip/test_image_processing_clip.py | 6 +++--- .../test_image_processing_conditional_detr.py | 5 +++-- .../test_image_processing_convnext.py | 5 +++-- .../test_image_processing_deformable_detr.py | 5 +++-- .../models/deit/test_image_processing_deit.py | 5 +++-- .../models/detr/test_image_processing_detr.py | 5 +++-- .../donut/test_image_processing_donut.py | 5 +++-- tests/models/dpt/test_image_processing_dpt.py | 5 +++-- .../flava/test_image_processing_flava.py | 5 +++-- .../models/glpn/test_image_processing_glpn.py | 5 +++-- .../test_image_processing_imagegpt.py | 4 ++-- .../test_image_processing_layoutlmv2.py | 5 +++-- .../test_image_processing_layoutlmv3.py | 5 +++-- .../levit/test_image_processing_levit.py | 5 +++-- .../test_image_processing_maskformer.py | 5 +++-- .../test_image_processing_mobilenet_v1.py | 5 +++-- .../test_image_processing_mobilenet_v2.py | 5 +++-- .../test_image_processing_mobilevit.py | 5 +++-- .../test_image_processing_oneformer.py | 4 ++-- .../owlvit/test_image_processing_owlvit.py | 5 +++-- .../test_image_processing_poolformer.py | 5 +++-- 
.../test_image_processing_segformer.py | 5 +++-- .../swin2sr/test_image_processing_swin2sr.py | 4 ++-- .../test_image_processing_videomae.py | 5 +++-- .../models/vilt/test_image_processing_vilt.py | 5 +++-- tests/models/vit/test_image_processing_vit.py | 5 +++-- .../yolos/test_image_processing_yolos.py | 5 +++-- tests/test_feature_extraction_common.py | 19 ++++++++++++++++++- utils/tests_fetcher.py | 1 - 32 files changed, 105 insertions(+), 65 deletions(-) diff --git a/tests/models/beit/test_image_processing_beit.py b/tests/models/beit/test_image_processing_beit.py index ad6b2ae72331..b499f008457b 100644 --- a/tests/models/beit/test_image_processing_beit.py +++ b/tests/models/beit/test_image_processing_beit.py @@ -22,7 +22,8 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs +from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin +from ...test_image_processing_common import prepare_image_inputs if is_torch_available(): @@ -104,7 +105,7 @@ def prepare_semantic_batch_inputs(): @require_torch @require_vision -class BeitFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): +class BeitFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = BeitFeatureExtractor if is_vision_available() else None diff --git a/tests/models/blip/test_image_processing_blip.py b/tests/models/blip/test_image_processing_blip.py index 1becb12a2243..ea31038b14ab 100644 --- a/tests/models/blip/test_image_processing_blip.py +++ b/tests/models/blip/test_image_processing_blip.py @@ -21,7 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_image_processing_common import ImageProcessingSavingTestMixin +from 
...test_feature_extraction_common import FeatureExtractionSavingTestMixin if is_torch_available(): @@ -109,7 +109,7 @@ def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False): @require_torch @require_vision -class BlipImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): +class BlipImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = BlipImageProcessor if is_vision_available() else None @@ -231,7 +231,7 @@ def test_call_pytorch(self): @require_torch @require_vision -class BlipImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase): +class BlipImageProcessingTestFourChannels(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = BlipImageProcessor if is_vision_available() else None diff --git a/tests/models/chinese_clip/test_image_processing_chinese_clip.py b/tests/models/chinese_clip/test_image_processing_chinese_clip.py index 9b16f8525ff3..616dfa3ffc7a 100644 --- a/tests/models/chinese_clip/test_image_processing_chinese_clip.py +++ b/tests/models/chinese_clip/test_image_processing_chinese_clip.py @@ -21,7 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_image_processing_common import ImageProcessingSavingTestMixin +from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin if is_torch_available(): @@ -113,7 +113,7 @@ def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False): @require_torch @require_vision -class ChineseCLIPFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): +class ChineseCLIPFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = ChineseCLIPFeatureExtractor if is_vision_available() else None @@ -246,7 +246,7 @@ def test_call_pytorch(self): @require_torch @require_vision -class 
ChineseCLIPFeatureExtractionTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase): +class ChineseCLIPFeatureExtractionTestFourChannels(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = ChineseCLIPFeatureExtractor if is_vision_available() else None diff --git a/tests/models/clip/test_image_processing_clip.py b/tests/models/clip/test_image_processing_clip.py index 0dd2d7e7646e..8f29b63bbb55 100644 --- a/tests/models/clip/test_image_processing_clip.py +++ b/tests/models/clip/test_image_processing_clip.py @@ -21,7 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_image_processing_common import ImageProcessingSavingTestMixin +from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin if is_torch_available(): @@ -113,7 +113,7 @@ def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False): @require_torch @require_vision -class CLIPFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): +class CLIPFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = CLIPFeatureExtractor if is_vision_available() else None @@ -246,7 +246,7 @@ def test_call_pytorch(self): @require_torch @require_vision -class CLIPFeatureExtractionTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase): +class CLIPFeatureExtractionTestFourChannels(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = CLIPFeatureExtractor if is_vision_available() else None diff --git a/tests/models/conditional_detr/test_image_processing_conditional_detr.py b/tests/models/conditional_detr/test_image_processing_conditional_detr.py index 038e5d52e118..b4e6f46d3e9e 100644 --- a/tests/models/conditional_detr/test_image_processing_conditional_detr.py +++ b/tests/models/conditional_detr/test_image_processing_conditional_detr.py @@ 
-23,7 +23,8 @@ from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available -from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs +from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin +from ...test_image_processing_common import prepare_image_inputs if is_torch_available(): @@ -114,7 +115,7 @@ def get_expected_values(self, image_inputs, batched=False): @require_torch @require_vision -class ConditionalDetrFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): +class ConditionalDetrFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = ConditionalDetrFeatureExtractor if is_vision_available() else None diff --git a/tests/models/convnext/test_image_processing_convnext.py b/tests/models/convnext/test_image_processing_convnext.py index 053189701210..4fd62fc51d19 100644 --- a/tests/models/convnext/test_image_processing_convnext.py +++ b/tests/models/convnext/test_image_processing_convnext.py @@ -21,7 +21,8 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs +from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin +from ...test_image_processing_common import prepare_image_inputs if is_torch_available(): @@ -76,7 +77,7 @@ def prepare_feat_extract_dict(self): @require_torch @require_vision -class ConvNextFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): +class ConvNextFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = ConvNextFeatureExtractor if is_vision_available() else None diff --git a/tests/models/deformable_detr/test_image_processing_deformable_detr.py 
b/tests/models/deformable_detr/test_image_processing_deformable_detr.py index f582d5553995..bc6368953949 100644 --- a/tests/models/deformable_detr/test_image_processing_deformable_detr.py +++ b/tests/models/deformable_detr/test_image_processing_deformable_detr.py @@ -23,7 +23,8 @@ from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available -from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs +from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin +from ...test_image_processing_common import prepare_image_inputs if is_torch_available(): @@ -114,7 +115,7 @@ def get_expected_values(self, image_inputs, batched=False): @require_torch @require_vision -class DeformableDetrFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): +class DeformableDetrFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = DeformableDetrFeatureExtractor if is_vision_available() else None diff --git a/tests/models/deit/test_image_processing_deit.py b/tests/models/deit/test_image_processing_deit.py index 18508df5d45c..db1e42f77109 100644 --- a/tests/models/deit/test_image_processing_deit.py +++ b/tests/models/deit/test_image_processing_deit.py @@ -21,7 +21,8 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs +from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin +from ...test_image_processing_common import prepare_image_inputs if is_torch_available(): @@ -81,7 +82,7 @@ def prepare_feat_extract_dict(self): @require_torch @require_vision -class DeiTFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): +class 
DeiTFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = DeiTFeatureExtractor if is_vision_available() else None test_cast_dtype = True diff --git a/tests/models/detr/test_image_processing_detr.py b/tests/models/detr/test_image_processing_detr.py index 1da7ae101240..253ffb7c2972 100644 --- a/tests/models/detr/test_image_processing_detr.py +++ b/tests/models/detr/test_image_processing_detr.py @@ -23,7 +23,8 @@ from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available -from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs +from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin +from ...test_image_processing_common import prepare_image_inputs if is_torch_available(): @@ -114,7 +115,7 @@ def get_expected_values(self, image_inputs, batched=False): @require_torch @require_vision -class DetrFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): +class DetrFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = DetrFeatureExtractor if is_vision_available() else None diff --git a/tests/models/donut/test_image_processing_donut.py b/tests/models/donut/test_image_processing_donut.py index 81dc4389a1f0..550d166e460d 100644 --- a/tests/models/donut/test_image_processing_donut.py +++ b/tests/models/donut/test_image_processing_donut.py @@ -21,7 +21,8 @@ from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs +from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin +from ...test_image_processing_common import prepare_image_inputs if is_torch_available(): @@ -81,7 +82,7 @@ def 
prepare_feat_extract_dict(self): @require_torch @require_vision -class DonutFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): +class DonutFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = DonutFeatureExtractor if is_vision_available() else None diff --git a/tests/models/dpt/test_image_processing_dpt.py b/tests/models/dpt/test_image_processing_dpt.py index e17abce74ae6..0bbeb173e597 100644 --- a/tests/models/dpt/test_image_processing_dpt.py +++ b/tests/models/dpt/test_image_processing_dpt.py @@ -21,7 +21,8 @@ from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision -from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs +from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin +from ...test_image_processing_common import prepare_image_inputs if is_torch_available(): @@ -73,7 +74,7 @@ def prepare_feat_extract_dict(self): @require_torch @require_vision -class DPTFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): +class DPTFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = DPTFeatureExtractor if is_vision_available() else None diff --git a/tests/models/flava/test_image_processing_flava.py b/tests/models/flava/test_image_processing_flava.py index 939520e780ba..28718748200d 100644 --- a/tests/models/flava/test_image_processing_flava.py +++ b/tests/models/flava/test_image_processing_flava.py @@ -21,7 +21,8 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs +from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin +from ...test_image_processing_common import 
prepare_image_inputs if is_torch_available(): @@ -159,7 +160,7 @@ def get_expected_codebook_image_size(self): @require_torch @require_vision -class FlavaFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): +class FlavaFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = FlavaFeatureExtractor if is_vision_available() else None maxDiff = None diff --git a/tests/models/glpn/test_image_processing_glpn.py b/tests/models/glpn/test_image_processing_glpn.py index bbbd54269fc9..31e527761771 100644 --- a/tests/models/glpn/test_image_processing_glpn.py +++ b/tests/models/glpn/test_image_processing_glpn.py @@ -21,7 +21,8 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs +from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin +from ...test_image_processing_common import prepare_image_inputs if is_torch_available(): @@ -66,7 +67,7 @@ def prepare_feat_extract_dict(self): @require_torch @require_vision -class GLPNFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): +class GLPNFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = GLPNFeatureExtractor if is_vision_available() else None diff --git a/tests/models/imagegpt/test_image_processing_imagegpt.py b/tests/models/imagegpt/test_image_processing_imagegpt.py index 2ef41cfc5df8..465a6015a39a 100644 --- a/tests/models/imagegpt/test_image_processing_imagegpt.py +++ b/tests/models/imagegpt/test_image_processing_imagegpt.py @@ -25,7 +25,7 @@ from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available -from ...test_image_processing_common import ImageProcessingSavingTestMixin +from 
...test_feature_extraction_common import FeatureExtractionSavingTestMixin if is_torch_available(): @@ -78,7 +78,7 @@ def prepare_feat_extract_dict(self): @require_torch @require_vision -class ImageGPTFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): +class ImageGPTFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = ImageGPTFeatureExtractor if is_vision_available() else None diff --git a/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py b/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py index 112d40cca383..4423d33376e4 100644 --- a/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py +++ b/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py @@ -21,7 +21,8 @@ from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available -from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs +from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin +from ...test_image_processing_common import prepare_image_inputs if is_torch_available(): @@ -63,7 +64,7 @@ def prepare_feat_extract_dict(self): @require_torch @require_pytesseract -class LayoutLMv2FeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): +class LayoutLMv2FeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = LayoutLMv2FeatureExtractor if is_pytesseract_available() else None diff --git a/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py b/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py index 6d9f4fd58b6f..829fc8d79dde 100644 --- a/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py +++ b/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py @@ -21,7 +21,8 @@ from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import 
is_pytesseract_available, is_torch_available -from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs +from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin +from ...test_image_processing_common import prepare_image_inputs if is_torch_available(): @@ -63,7 +64,7 @@ def prepare_feat_extract_dict(self): @require_torch @require_pytesseract -class LayoutLMv3FeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): +class LayoutLMv3FeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = LayoutLMv3FeatureExtractor if is_pytesseract_available() else None diff --git a/tests/models/levit/test_image_processing_levit.py b/tests/models/levit/test_image_processing_levit.py index 912bb9deb545..76f3c66e1ade 100644 --- a/tests/models/levit/test_image_processing_levit.py +++ b/tests/models/levit/test_image_processing_levit.py @@ -21,7 +21,8 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs +from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin +from ...test_image_processing_common import prepare_image_inputs if is_torch_available(): @@ -80,7 +81,7 @@ def prepare_feat_extract_dict(self): @require_torch @require_vision -class LevitFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): +class LevitFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = LevitFeatureExtractor if is_vision_available() else None diff --git a/tests/models/maskformer/test_image_processing_maskformer.py b/tests/models/maskformer/test_image_processing_maskformer.py index ccad43d4185a..f8ddf8c9dc03 100644 --- a/tests/models/maskformer/test_image_processing_maskformer.py +++ 
b/tests/models/maskformer/test_image_processing_maskformer.py @@ -23,7 +23,8 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs +from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin +from ...test_image_processing_common import prepare_image_inputs if is_torch_available(): @@ -130,7 +131,7 @@ def get_fake_maskformer_outputs(self): @require_torch @require_vision -class MaskFormerFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): +class MaskFormerFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = MaskFormerFeatureExtractor if (is_vision_available() and is_torch_available()) else None diff --git a/tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py b/tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py index c0b6f2979405..383f91c554f8 100644 --- a/tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py +++ b/tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py @@ -21,7 +21,8 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs +from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin +from ...test_image_processing_common import prepare_image_inputs if is_torch_available(): @@ -71,7 +72,7 @@ def prepare_feat_extract_dict(self): @require_torch @require_vision -class MobileNetV1FeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): +class MobileNetV1FeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = MobileNetV1FeatureExtractor if is_vision_available() else None diff 
--git a/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py b/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py index aa35b62383da..e207932e38e0 100644 --- a/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py +++ b/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py @@ -21,7 +21,8 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs +from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin +from ...test_image_processing_common import prepare_image_inputs if is_torch_available(): @@ -71,7 +72,7 @@ def prepare_feat_extract_dict(self): @require_torch @require_vision -class MobileNetV2FeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): +class MobileNetV2FeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = MobileNetV2FeatureExtractor if is_vision_available() else None diff --git a/tests/models/mobilevit/test_image_processing_mobilevit.py b/tests/models/mobilevit/test_image_processing_mobilevit.py index ce3d07a3ad23..a22fc2c1d541 100644 --- a/tests/models/mobilevit/test_image_processing_mobilevit.py +++ b/tests/models/mobilevit/test_image_processing_mobilevit.py @@ -21,7 +21,8 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs +from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin +from ...test_image_processing_common import prepare_image_inputs if is_torch_available(): @@ -74,7 +75,7 @@ def prepare_feat_extract_dict(self): @require_torch @require_vision -class MobileViTFeatureExtractionTest(ImageProcessingSavingTestMixin, 
unittest.TestCase): +class MobileViTFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = MobileViTFeatureExtractor if is_vision_available() else None diff --git a/tests/models/oneformer/test_image_processing_oneformer.py b/tests/models/oneformer/test_image_processing_oneformer.py index 8faa441b5184..f34ae080cf96 100644 --- a/tests/models/oneformer/test_image_processing_oneformer.py +++ b/tests/models/oneformer/test_image_processing_oneformer.py @@ -23,7 +23,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs +from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -155,7 +155,7 @@ def get_fake_oneformer_outputs(self): @require_torch @require_vision -class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): +class OneFormerImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None # only for test_feat_extraction_common.test_feat_extract_to_json_string feature_extraction_class = image_processing_class diff --git a/tests/models/owlvit/test_image_processing_owlvit.py b/tests/models/owlvit/test_image_processing_owlvit.py index 77e3ebc52049..bf2cd8d666d2 100644 --- a/tests/models/owlvit/test_image_processing_owlvit.py +++ b/tests/models/owlvit/test_image_processing_owlvit.py @@ -21,7 +21,8 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs +from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin +from
...test_image_processing_common import prepare_image_inputs if is_torch_available(): @@ -81,7 +82,7 @@ def prepare_feat_extract_dict(self): @require_torch @require_vision -class OwlViTFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): +class OwlViTFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = OwlViTFeatureExtractor if is_vision_available() else None diff --git a/tests/models/poolformer/test_image_processing_poolformer.py b/tests/models/poolformer/test_image_processing_poolformer.py index 7f999c9c13f3..47e583a3211a 100644 --- a/tests/models/poolformer/test_image_processing_poolformer.py +++ b/tests/models/poolformer/test_image_processing_poolformer.py @@ -20,7 +20,8 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs +from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin +from ...test_image_processing_common import prepare_image_inputs if is_torch_available(): @@ -77,7 +78,7 @@ def prepare_feat_extract_dict(self): @require_torch @require_vision -class PoolFormerFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): +class PoolFormerFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = PoolFormerFeatureExtractor if is_vision_available() else None diff --git a/tests/models/segformer/test_image_processing_segformer.py b/tests/models/segformer/test_image_processing_segformer.py index de0c2d2ac203..a104fc2f4835 100644 --- a/tests/models/segformer/test_image_processing_segformer.py +++ b/tests/models/segformer/test_image_processing_segformer.py @@ -22,7 +22,8 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from 
...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs +from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin +from ...test_image_processing_common import prepare_image_inputs if is_torch_available(): @@ -95,7 +96,7 @@ def prepare_semantic_batch_inputs(): @require_torch @require_vision -class SegformerFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): +class SegformerFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = SegformerFeatureExtractor if is_vision_available() else None diff --git a/tests/models/swin2sr/test_image_processing_swin2sr.py b/tests/models/swin2sr/test_image_processing_swin2sr.py index 488f55714ec2..393a44ecface 100644 --- a/tests/models/swin2sr/test_image_processing_swin2sr.py +++ b/tests/models/swin2sr/test_image_processing_swin2sr.py @@ -21,7 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_image_processing_common import ImageProcessingSavingTestMixin +from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin if is_torch_available(): @@ -100,7 +100,7 @@ def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False): @require_torch @require_vision -class Swin2SRImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): +class Swin2SRImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = Swin2SRImageProcessor if is_vision_available() else None diff --git a/tests/models/videomae/test_image_processing_videomae.py b/tests/models/videomae/test_image_processing_videomae.py index 98a60ac39e56..025c39ef97f8 100644 --- a/tests/models/videomae/test_image_processing_videomae.py +++ b/tests/models/videomae/test_image_processing_videomae.py @@ -21,7 +21,8 @@ from transformers.testing_utils import require_torch, 
require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs +from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin +from ...test_image_processing_common import prepare_video_inputs if is_torch_available(): @@ -80,7 +81,7 @@ def prepare_feat_extract_dict(self): @require_torch @require_vision -class VideoMAEFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): +class VideoMAEFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = VideoMAEFeatureExtractor if is_vision_available() else None diff --git a/tests/models/vilt/test_image_processing_vilt.py b/tests/models/vilt/test_image_processing_vilt.py index 2c75ddee06d9..5d7be90a7475 100644 --- a/tests/models/vilt/test_image_processing_vilt.py +++ b/tests/models/vilt/test_image_processing_vilt.py @@ -21,7 +21,8 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs +from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin +from ...test_image_processing_common import prepare_image_inputs if is_torch_available(): @@ -116,7 +117,7 @@ def get_expected_values(self, image_inputs, batched=False): @require_torch @require_vision -class ViltFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): +class ViltFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = ViltFeatureExtractor if is_vision_available() else None diff --git a/tests/models/vit/test_image_processing_vit.py b/tests/models/vit/test_image_processing_vit.py index f358e2a39e03..a0db60887e40 100644 --- a/tests/models/vit/test_image_processing_vit.py +++ 
b/tests/models/vit/test_image_processing_vit.py @@ -21,7 +21,8 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs +from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin +from ...test_image_processing_common import prepare_image_inputs if is_torch_available(): @@ -73,7 +74,7 @@ def prepare_feat_extract_dict(self): @require_torch @require_vision -class ViTFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): +class ViTFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = ViTFeatureExtractor if is_vision_available() else None diff --git a/tests/models/yolos/test_image_processing_yolos.py b/tests/models/yolos/test_image_processing_yolos.py index 2c82ac6ecbe7..4e22baa4d668 100644 --- a/tests/models/yolos/test_image_processing_yolos.py +++ b/tests/models/yolos/test_image_processing_yolos.py @@ -23,7 +23,8 @@ from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available -from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs +from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin +from ...test_image_processing_common import prepare_image_inputs if is_torch_available(): @@ -114,7 +115,7 @@ def get_expected_values(self, image_inputs, batched=False): @require_torch @require_vision -class YolosFeatureExtractionTest(ImageProcessingSavingTestMixin, unittest.TestCase): +class YolosFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): feature_extraction_class = YolosFeatureExtractor if is_vision_available() else None diff --git a/tests/test_feature_extraction_common.py b/tests/test_feature_extraction_common.py index 
5c60cf58ac25..98f143506bc8 100644 --- a/tests/test_feature_extraction_common.py +++ b/tests/test_feature_extraction_common.py @@ -25,7 +25,16 @@ from huggingface_hub import HfFolder, delete_repo, set_access_token from requests.exceptions import HTTPError from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor -from transformers.testing_utils import TOKEN, USER, check_json_file_has_correct_format, get_tests_dir, is_staging_test +from transformers.testing_utils import ( + TOKEN, + USER, + check_json_file_has_correct_format, + get_tests_dir, + is_staging_test, + require_torch, + require_vision, +) +from transformers.utils import is_torch_available, is_vision_available sys.path.append(str(Path(__file__).parent.parent / "utils")) @@ -33,6 +42,14 @@ from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 +if is_torch_available(): + import numpy as np + import torch + +if is_vision_available(): + from PIL import Image + + SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures") diff --git a/utils/tests_fetcher.py b/utils/tests_fetcher.py index c9c9dfaaba50..d388c11361e7 100644 --- a/utils/tests_fetcher.py +++ b/utils/tests_fetcher.py @@ -355,7 +355,6 @@ def create_reverse_dependency_map(): "file_utils.py": ["utils/test_file_utils.py", "utils/test_model_output.py"], "image_processing_utils.py": ["test_image_processing_common.py", "utils/test_image_processing_utils.py"], "image_transforms.py": "test_image_transforms.py", - "image_processing_utils.py": ["test_image_processing_common.py", "utils/test_image_processing_utils.py"], "utils/generic.py": ["utils/test_file_utils.py", "utils/test_model_output.py", "utils/test_generic.py"], "utils/hub.py": "utils/test_hub_utils.py", "modelcard.py": "utils/test_model_card.py", From 2f5bb776d8dcc9c9530c9ce8cd3af0f875b0fa52 Mon Sep 17 00:00:00 2001 From: Amy Roberts <22614925+amyeroberts@users.noreply.github.com> Date: Fri, 20 Jan 2023 14:43:12 +0000 Subject: [PATCH 03/15] Fix imports 
--- tests/test_feature_extraction_common.py | 19 +------------------ 1 file changed, 1 insertion(+), 18 deletions(-) diff --git a/tests/test_feature_extraction_common.py b/tests/test_feature_extraction_common.py index 98f143506bc8..5c60cf58ac25 100644 --- a/tests/test_feature_extraction_common.py +++ b/tests/test_feature_extraction_common.py @@ -25,16 +25,7 @@ from huggingface_hub import HfFolder, delete_repo, set_access_token from requests.exceptions import HTTPError from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor -from transformers.testing_utils import ( - TOKEN, - USER, - check_json_file_has_correct_format, - get_tests_dir, - is_staging_test, - require_torch, - require_vision, -) -from transformers.utils import is_torch_available, is_vision_available +from transformers.testing_utils import TOKEN, USER, check_json_file_has_correct_format, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) @@ -42,14 +33,6 @@ from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 -if is_torch_available(): - import numpy as np - import torch - -if is_vision_available(): - from PIL import Image - - SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures") From 96f7d57ce86cdcbe8e894d9ca3cb3aef4e45b99a Mon Sep 17 00:00:00 2001 From: Amy Roberts <22614925+amyeroberts@users.noreply.github.com> Date: Fri, 20 Jan 2023 15:34:22 +0000 Subject: [PATCH 04/15] Fix all imports --- tests/models/oneformer/test_image_processing_oneformer.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/models/oneformer/test_image_processing_oneformer.py b/tests/models/oneformer/test_image_processing_oneformer.py index f34ae080cf96..79c6d82c3f42 100644 --- a/tests/models/oneformer/test_image_processing_oneformer.py +++ b/tests/models/oneformer/test_image_processing_oneformer.py @@ -23,7 +23,8 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils 
import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin, prepare_image_inputs +from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin +from ...test_image_processing_common import prepare_image_inputs if is_torch_available(): From 608cc1498632dcf71f659d98e46046b811fc59cd Mon Sep 17 00:00:00 2001 From: Amy Roberts <22614925+amyeroberts@users.noreply.github.com> Date: Fri, 9 Dec 2022 17:54:03 +0000 Subject: [PATCH 05/15] Replace fe with ip names --- .../models/beit/test_image_processing_beit.py | 174 ++++++++--------- .../test_image_processing_chinese_clip.py | 158 ++++++++-------- .../models/clip/test_image_processing_clip.py | 158 ++++++++-------- .../test_image_processing_conditional_detr.py | 110 +++++------ .../test_image_processing_convnext.py | 104 +++++----- .../test_image_processing_deformable_detr.py | 116 ++++++------ .../models/deit/test_image_processing_deit.py | 106 +++++------ .../models/detr/test_image_processing_detr.py | 118 ++++++------ .../donut/test_image_processing_donut.py | 108 +++++------ tests/models/dpt/test_image_processing_dpt.py | 100 +++++----- .../flava/test_image_processing_flava.py | 150 +++++++-------- .../models/glpn/test_image_processing_glpn.py | 68 +++---- .../test_image_processing_imagegpt.py | 87 ++++----- .../test_image_processing_layoutlmv2.py | 104 +++++----- .../test_image_processing_layoutlmv3.py | 104 +++++----- .../levit/test_image_processing_levit.py | 104 +++++----- .../test_image_processing_maskformer.py | 178 +++++++++--------- .../test_image_processing_mobilenet_v1.py | 98 +++++----- .../test_image_processing_mobilenet_v2.py | 107 +++++------ .../test_image_processing_mobilevit.py | 100 +++++----- .../owlvit/test_image_processing_owlvit.py | 108 +++++------ .../test_image_processing_poolformer.py | 104 +++++----- .../test_image_processing_segformer.py | 172 ++++++++--------- .../test_image_processing_videomae.py | 116 
++++++------ .../models/vilt/test_image_processing_vilt.py | 106 +++++------ tests/models/vit/test_image_processing_vit.py | 100 +++++----- .../yolos/test_image_processing_yolos.py | 110 +++++------ 27 files changed, 1576 insertions(+), 1592 deletions(-) diff --git a/tests/models/beit/test_image_processing_beit.py b/tests/models/beit/test_image_processing_beit.py index b499f008457b..d5844afa498b 100644 --- a/tests/models/beit/test_image_processing_beit.py +++ b/tests/models/beit/test_image_processing_beit.py @@ -32,10 +32,10 @@ if is_vision_available(): from PIL import Image - from transformers import BeitFeatureExtractor + from transformers import BeitImageProcessor -class BeitFeatureExtractionTester(unittest.TestCase): +class BeitImageProcessingTester(unittest.TestCase): def __init__( self, parent, @@ -70,7 +70,7 @@ def __init__( self.image_std = image_std self.do_reduce_labels = do_reduce_labels - def prepare_feat_extract_dict(self): + def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, @@ -107,24 +107,24 @@ def prepare_semantic_batch_inputs(): @require_vision class BeitFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): - feature_extraction_class = BeitFeatureExtractor if is_vision_available() else None + image_processing_class = BeitImageProcessor if is_vision_available() else None def setUp(self): - self.feature_extract_tester = BeitFeatureExtractionTester(self) + self.image_processor_tester = BeitImageProcessingTester(self) @property - def feat_extract_dict(self): - return self.feature_extract_tester.prepare_feat_extract_dict() - - def test_feat_extract_properties(self): - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) - self.assertTrue(hasattr(feature_extractor, "do_resize")) - self.assertTrue(hasattr(feature_extractor, "size")) - self.assertTrue(hasattr(feature_extractor, "do_center_crop")) - self.assertTrue(hasattr(feature_extractor, "center_crop")) - 
self.assertTrue(hasattr(feature_extractor, "do_normalize")) - self.assertTrue(hasattr(feature_extractor, "image_mean")) - self.assertTrue(hasattr(feature_extractor, "image_std")) + def image_processor_dict(self): + return self.image_processor_tester.prepare_image_processor_dict() + + def test_image_processor_properties(self): + image_processing = self.image_processing_class(**self.image_processor_dict) + self.assertTrue(hasattr(image_processing, "do_resize")) + self.assertTrue(hasattr(image_processing, "size")) + self.assertTrue(hasattr(image_processing, "do_center_crop")) + self.assertTrue(hasattr(image_processing, "center_crop")) + self.assertTrue(hasattr(image_processing, "do_normalize")) + self.assertTrue(hasattr(image_processing, "image_mean")) + self.assertTrue(hasattr(image_processing, "image_std")) def test_feat_extract_from_dict_with_kwargs(self): feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) @@ -143,128 +143,128 @@ def test_batch_feature(self): pass def test_call_pil(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.num_channels, + 
self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) def test_call_numpy(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values 
self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) def test_call_pytorch(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.batch_size, + 
self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) def test_call_segmentation_maps(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) maps = [] for image in image_inputs: self.assertIsInstance(image, torch.Tensor) maps.append(torch.zeros(image.shape[-2:]).long()) # Test not batched input - encoding = feature_extractor(image_inputs[0], maps[0], return_tensors="pt") + encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt") self.assertEqual( encoding["pixel_values"].shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) self.assertEqual( encoding["labels"].shape, ( 1, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) self.assertEqual(encoding["labels"].dtype, torch.long) @@ -272,22 +272,22 @@ def test_call_segmentation_maps(self): self.assertTrue(encoding["labels"].max().item() <= 255) # Test batched - encoding = feature_extractor(image_inputs, maps, return_tensors="pt") + encoding = image_processing(image_inputs, maps, return_tensors="pt") self.assertEqual( encoding["pixel_values"].shape, ( - self.feature_extract_tester.batch_size, - 
self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) self.assertEqual( encoding["labels"].shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) self.assertEqual(encoding["labels"].dtype, torch.long) @@ -297,22 +297,22 @@ def test_call_segmentation_maps(self): # Test not batched input (PIL images) image, segmentation_map = prepare_semantic_single_inputs() - encoding = feature_extractor(image, segmentation_map, return_tensors="pt") + encoding = image_processing(image, segmentation_map, return_tensors="pt") self.assertEqual( encoding["pixel_values"].shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) self.assertEqual( encoding["labels"].shape, ( 1, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) self.assertEqual(encoding["labels"].dtype, torch.long) @@ -322,22 +322,22 @@ def test_call_segmentation_maps(self): # Test batched input (PIL images) images, segmentation_maps = prepare_semantic_batch_inputs() - encoding = feature_extractor(images, segmentation_maps, return_tensors="pt") + encoding = image_processing(images, segmentation_maps, 
return_tensors="pt") self.assertEqual( encoding["pixel_values"].shape, ( 2, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) self.assertEqual( encoding["labels"].shape, ( 2, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) self.assertEqual(encoding["labels"].dtype, torch.long) @@ -345,16 +345,16 @@ def test_call_segmentation_maps(self): self.assertTrue(encoding["labels"].max().item() <= 255) def test_reduce_labels(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 image, map = prepare_semantic_single_inputs() - encoding = feature_extractor(image, map, return_tensors="pt") + encoding = image_processing(image, map, return_tensors="pt") self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 150) - feature_extractor.reduce_labels = True - encoding = feature_extractor(image, map, return_tensors="pt") + image_processing.reduce_labels = True + encoding = image_processing(image, map, return_tensors="pt") self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 255) diff --git a/tests/models/chinese_clip/test_image_processing_chinese_clip.py b/tests/models/chinese_clip/test_image_processing_chinese_clip.py index 616dfa3ffc7a..217d037597ee 100644 --- a/tests/models/chinese_clip/test_image_processing_chinese_clip.py 
+++ b/tests/models/chinese_clip/test_image_processing_chinese_clip.py @@ -30,10 +30,10 @@ if is_vision_available(): from PIL import Image - from transformers import ChineseCLIPFeatureExtractor + from transformers import ChineseCLIPImageProcessor -class ChineseCLIPFeatureExtractionTester(unittest.TestCase): +class ChineseCLIPImageProcessingTester(unittest.TestCase): def __init__( self, parent, @@ -68,7 +68,7 @@ def __init__( self.image_std = image_std self.do_convert_rgb = do_convert_rgb - def prepare_feat_extract_dict(self): + def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, @@ -115,25 +115,25 @@ def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False): @require_vision class ChineseCLIPFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): - feature_extraction_class = ChineseCLIPFeatureExtractor if is_vision_available() else None + image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None def setUp(self): - self.feature_extract_tester = ChineseCLIPFeatureExtractionTester(self, do_center_crop=True) + self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True) @property - def feat_extract_dict(self): - return self.feature_extract_tester.prepare_feat_extract_dict() - - def test_feat_extract_properties(self): - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) - self.assertTrue(hasattr(feature_extractor, "do_resize")) - self.assertTrue(hasattr(feature_extractor, "size")) - self.assertTrue(hasattr(feature_extractor, "do_center_crop")) - self.assertTrue(hasattr(feature_extractor, "center_crop")) - self.assertTrue(hasattr(feature_extractor, "do_normalize")) - self.assertTrue(hasattr(feature_extractor, "image_mean")) - self.assertTrue(hasattr(feature_extractor, "image_std")) - self.assertTrue(hasattr(feature_extractor, "do_convert_rgb")) + def image_processor_dict(self): + return 
self.image_processor_tester.prepare_image_processor_dict() + + def test_image_processor_properties(self): + image_processing = self.image_processing_class(**self.image_processor_dict) + self.assertTrue(hasattr(image_processing, "do_resize")) + self.assertTrue(hasattr(image_processing, "size")) + self.assertTrue(hasattr(image_processing, "do_center_crop")) + self.assertTrue(hasattr(image_processing, "center_crop")) + self.assertTrue(hasattr(image_processing, "do_normalize")) + self.assertTrue(hasattr(image_processing, "image_mean")) + self.assertTrue(hasattr(image_processing, "image_std")) + self.assertTrue(hasattr(image_processing, "do_convert_rgb")) def test_feat_extract_from_dict_with_kwargs(self): feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) @@ -148,98 +148,98 @@ def test_batch_feature(self): pass def test_call_pil(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images - image_inputs = self.feature_extract_tester.prepare_inputs(equal_resolution=False) + image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, 
return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) def test_call_numpy(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors - image_inputs = self.feature_extract_tester.prepare_inputs(equal_resolution=False, numpify=True) + image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - 
self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) def test_call_pytorch(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors - image_inputs = self.feature_extract_tester.prepare_inputs(equal_resolution=False, torchify=True) + image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) @@ -248,58 +248,58 @@ def test_call_pytorch(self): 
@require_vision class ChineseCLIPFeatureExtractionTestFourChannels(FeatureExtractionSavingTestMixin, unittest.TestCase): - feature_extraction_class = ChineseCLIPFeatureExtractor if is_vision_available() else None + image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None def setUp(self): - self.feature_extract_tester = ChineseCLIPFeatureExtractionTester(self, num_channels=4, do_center_crop=True) + self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True) self.expected_encoded_image_num_channels = 3 @property - def feat_extract_dict(self): - return self.feature_extract_tester.prepare_feat_extract_dict() - - def test_feat_extract_properties(self): - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) - self.assertTrue(hasattr(feature_extractor, "do_resize")) - self.assertTrue(hasattr(feature_extractor, "size")) - self.assertTrue(hasattr(feature_extractor, "do_center_crop")) - self.assertTrue(hasattr(feature_extractor, "center_crop")) - self.assertTrue(hasattr(feature_extractor, "do_normalize")) - self.assertTrue(hasattr(feature_extractor, "image_mean")) - self.assertTrue(hasattr(feature_extractor, "image_std")) - self.assertTrue(hasattr(feature_extractor, "do_convert_rgb")) + def image_processor_dict(self): + return self.image_processor_tester.prepare_image_processor_dict() + + def test_image_processor_properties(self): + image_processing = self.image_processing_class(**self.image_processor_dict) + self.assertTrue(hasattr(image_processing, "do_resize")) + self.assertTrue(hasattr(image_processing, "size")) + self.assertTrue(hasattr(image_processing, "do_center_crop")) + self.assertTrue(hasattr(image_processing, "center_crop")) + self.assertTrue(hasattr(image_processing, "do_normalize")) + self.assertTrue(hasattr(image_processing, "image_mean")) + self.assertTrue(hasattr(image_processing, "image_std")) + self.assertTrue(hasattr(image_processing, "do_convert_rgb")) def 
test_batch_feature(self): pass def test_call_pil_four_channels(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images - image_inputs = self.feature_extract_tester.prepare_inputs(equal_resolution=False) + image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.expected_encoded_image_num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, + self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) diff --git a/tests/models/clip/test_image_processing_clip.py b/tests/models/clip/test_image_processing_clip.py index 8f29b63bbb55..ef1212a2d952 100644 --- a/tests/models/clip/test_image_processing_clip.py +++ b/tests/models/clip/test_image_processing_clip.py @@ -30,10 +30,10 @@ if is_vision_available(): from PIL import Image - from transformers import CLIPFeatureExtractor + from 
transformers import CLIPImageProcessor -class CLIPFeatureExtractionTester(unittest.TestCase): +class CLIPImageProcessingTester(unittest.TestCase): def __init__( self, parent, @@ -68,7 +68,7 @@ def __init__( self.image_std = image_std self.do_convert_rgb = do_convert_rgb - def prepare_feat_extract_dict(self): + def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, @@ -115,25 +115,25 @@ def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False): @require_vision class CLIPFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): - feature_extraction_class = CLIPFeatureExtractor if is_vision_available() else None + image_processing_class = CLIPImageProcessor if is_vision_available() else None def setUp(self): - self.feature_extract_tester = CLIPFeatureExtractionTester(self) + self.image_processor_tester = CLIPImageProcessingTester(self) @property - def feat_extract_dict(self): - return self.feature_extract_tester.prepare_feat_extract_dict() - - def test_feat_extract_properties(self): - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) - self.assertTrue(hasattr(feature_extractor, "do_resize")) - self.assertTrue(hasattr(feature_extractor, "size")) - self.assertTrue(hasattr(feature_extractor, "do_center_crop")) - self.assertTrue(hasattr(feature_extractor, "center_crop")) - self.assertTrue(hasattr(feature_extractor, "do_normalize")) - self.assertTrue(hasattr(feature_extractor, "image_mean")) - self.assertTrue(hasattr(feature_extractor, "image_std")) - self.assertTrue(hasattr(feature_extractor, "do_convert_rgb")) + def image_processor_dict(self): + return self.image_processor_tester.prepare_image_processor_dict() + + def test_image_processor_properties(self): + image_processing = self.image_processing_class(**self.image_processor_dict) + self.assertTrue(hasattr(image_processing, "do_resize")) + self.assertTrue(hasattr(image_processing, "size")) + 
self.assertTrue(hasattr(image_processing, "do_center_crop")) + self.assertTrue(hasattr(image_processing, "center_crop")) + self.assertTrue(hasattr(image_processing, "do_normalize")) + self.assertTrue(hasattr(image_processing, "image_mean")) + self.assertTrue(hasattr(image_processing, "image_std")) + self.assertTrue(hasattr(image_processing, "do_convert_rgb")) def test_feat_extract_from_dict_with_kwargs(self): feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) @@ -148,98 +148,98 @@ def test_batch_feature(self): pass def test_call_pil(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images - image_inputs = self.feature_extract_tester.prepare_inputs(equal_resolution=False) + image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - 
self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) def test_call_numpy(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors - image_inputs = self.feature_extract_tester.prepare_inputs(equal_resolution=False, numpify=True) + image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) def test_call_pytorch(self): - # Initialize feature_extractor - 
feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors - image_inputs = self.feature_extract_tester.prepare_inputs(equal_resolution=False, torchify=True) + image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) @@ -248,58 +248,58 @@ def test_call_pytorch(self): @require_vision class CLIPFeatureExtractionTestFourChannels(FeatureExtractionSavingTestMixin, unittest.TestCase): - feature_extraction_class = CLIPFeatureExtractor if is_vision_available() else None + image_processing_class = CLIPImageProcessor if is_vision_available() else None def setUp(self): - 
self.feature_extract_tester = CLIPFeatureExtractionTester(self, num_channels=4) + self.image_processor_tester = CLIPImageProcessingTester(self, num_channels=4) self.expected_encoded_image_num_channels = 3 @property - def feat_extract_dict(self): - return self.feature_extract_tester.prepare_feat_extract_dict() - - def test_feat_extract_properties(self): - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) - self.assertTrue(hasattr(feature_extractor, "do_resize")) - self.assertTrue(hasattr(feature_extractor, "size")) - self.assertTrue(hasattr(feature_extractor, "do_center_crop")) - self.assertTrue(hasattr(feature_extractor, "center_crop")) - self.assertTrue(hasattr(feature_extractor, "do_normalize")) - self.assertTrue(hasattr(feature_extractor, "image_mean")) - self.assertTrue(hasattr(feature_extractor, "image_std")) - self.assertTrue(hasattr(feature_extractor, "do_convert_rgb")) + def image_processor_dict(self): + return self.image_processor_tester.prepare_image_processor_dict() + + def test_image_processor_properties(self): + image_processing = self.image_processing_class(**self.image_processor_dict) + self.assertTrue(hasattr(image_processing, "do_resize")) + self.assertTrue(hasattr(image_processing, "size")) + self.assertTrue(hasattr(image_processing, "do_center_crop")) + self.assertTrue(hasattr(image_processing, "center_crop")) + self.assertTrue(hasattr(image_processing, "do_normalize")) + self.assertTrue(hasattr(image_processing, "image_mean")) + self.assertTrue(hasattr(image_processing, "image_std")) + self.assertTrue(hasattr(image_processing, "do_convert_rgb")) def test_batch_feature(self): pass def test_call_pil_four_channels(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images - image_inputs = 
self.feature_extract_tester.prepare_inputs(equal_resolution=False) + image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.expected_encoded_image_num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, + self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) diff --git a/tests/models/conditional_detr/test_image_processing_conditional_detr.py b/tests/models/conditional_detr/test_image_processing_conditional_detr.py index b4e6f46d3e9e..b194f7ba7c4b 100644 --- a/tests/models/conditional_detr/test_image_processing_conditional_detr.py +++ b/tests/models/conditional_detr/test_image_processing_conditional_detr.py @@ -33,10 +33,10 @@ if is_vision_available(): from PIL import Image - from transformers import ConditionalDetrFeatureExtractor + from transformers import ConditionalDetrImageProcessor -class ConditionalDetrFeatureExtractionTester(unittest.TestCase): +class ConditionalDetrImageProcessingTester(unittest.TestCase): def __init__( self, parent, @@ -69,7 +69,7 @@ def 
__init__( self.rescale_factor = rescale_factor self.do_pad = do_pad - def prepare_feat_extract_dict(self): + def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, @@ -83,7 +83,7 @@ def prepare_feat_extract_dict(self): def get_expected_values(self, image_inputs, batched=False): """ - This function computes the expected height and width when providing images to ConditionalDetrFeatureExtractor, + This function computes the expected height and width when providing images to ConditionalDetrImageProcessor, assuming do_resize is set to True with a scalar size. """ if not batched: @@ -117,22 +117,22 @@ def get_expected_values(self, image_inputs, batched=False): @require_vision class ConditionalDetrFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): - feature_extraction_class = ConditionalDetrFeatureExtractor if is_vision_available() else None + image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None def setUp(self): - self.feature_extract_tester = ConditionalDetrFeatureExtractionTester(self) + self.image_processor_tester = ConditionalDetrImageProcessingTester(self) @property - def feat_extract_dict(self): - return self.feature_extract_tester.prepare_feat_extract_dict() + def image_processor_dict(self): + return self.image_processor_tester.prepare_image_processor_dict() - def test_feat_extract_properties(self): - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) - self.assertTrue(hasattr(feature_extractor, "image_mean")) - self.assertTrue(hasattr(feature_extractor, "image_std")) - self.assertTrue(hasattr(feature_extractor, "do_normalize")) - self.assertTrue(hasattr(feature_extractor, "do_resize")) - self.assertTrue(hasattr(feature_extractor, "size")) + def test_image_processor_properties(self): + image_processing = self.image_processing_class(**self.image_processor_dict) + self.assertTrue(hasattr(image_processing, "image_mean")) + 
self.assertTrue(hasattr(image_processing, "image_std")) + self.assertTrue(hasattr(image_processing, "do_normalize")) + self.assertTrue(hasattr(image_processing, "do_resize")) + self.assertTrue(hasattr(image_processing, "size")) def test_feat_extract_from_dict_with_kwargs(self): feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) @@ -149,115 +149,115 @@ def test_batch_feature(self): pass def test_call_pil(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs) self.assertEqual( encoded_images.shape, - (1, self.feature_extract_tester.num_channels, expected_height, expected_width), + (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs, batched=True) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True) - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - 
self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def test_call_numpy(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs) self.assertEqual( encoded_images.shape, - (1, self.feature_extract_tester.num_channels, expected_height, expected_width), + (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs, batched=True) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True) self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, 
expected_height, expected_width, ), ) def test_call_pytorch(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs) self.assertEqual( encoded_images.shape, - (1, self.feature_extract_tester.num_channels, expected_height, expected_width), + (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs, batched=True) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True) self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def test_equivalence_pad_and_create_pixel_mask(self): - # Initialize feature_extractors - feature_extractor_1 = 
self.feature_extraction_class(**self.feat_extract_dict) - feature_extractor_2 = self.feature_extraction_class(do_resize=False, do_normalize=False, do_rescale=False) + # Initialize image_processings + image_processing_1 = self.image_processing_class(**self.image_processor_dict) + image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False) # create random PyTorch tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test whether the method "pad_and_return_pixel_mask" and calling the feature extractor return the same tensors - encoded_images_with_method = feature_extractor_1.pad_and_create_pixel_mask(image_inputs, return_tensors="pt") - encoded_images = feature_extractor_2(image_inputs, return_tensors="pt") + encoded_images_with_method = image_processing_1.pad_and_create_pixel_mask(image_inputs, return_tensors="pt") + encoded_images = image_processing_2(image_inputs, return_tensors="pt") self.assertTrue( torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4) @@ -276,8 +276,8 @@ def test_call_pytorch_with_coco_detection_annotations(self): target = {"image_id": 39769, "annotations": target} # encode them - feature_extractor = ConditionalDetrFeatureExtractor.from_pretrained("microsoft/conditional-detr-resnet-50") - encoding = feature_extractor(images=image, annotations=target, return_tensors="pt") + image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50") + encoding = image_processing(images=image, annotations=target, return_tensors="pt") # verify pixel values expected_shape = torch.Size([1, 3, 800, 1066]) @@ -322,8 +322,8 @@ def test_call_pytorch_with_coco_panoptic_annotations(self): masks_path = 
pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic") # encode them - feature_extractor = ConditionalDetrFeatureExtractor(format="coco_panoptic") - encoding = feature_extractor(images=image, annotations=target, masks_path=masks_path, return_tensors="pt") + image_processing = ConditionalDetrImageProcessor(format="coco_panoptic") + encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt") # verify pixel values expected_shape = torch.Size([1, 3, 800, 1066]) diff --git a/tests/models/convnext/test_image_processing_convnext.py b/tests/models/convnext/test_image_processing_convnext.py index 4fd62fc51d19..db0f9276655d 100644 --- a/tests/models/convnext/test_image_processing_convnext.py +++ b/tests/models/convnext/test_image_processing_convnext.py @@ -31,10 +31,10 @@ if is_vision_available(): from PIL import Image - from transformers import ConvNextFeatureExtractor + from transformers import ConvNextImageProcessor -class ConvNextFeatureExtractionTester(unittest.TestCase): +class ConvNextImageProcessingTester(unittest.TestCase): def __init__( self, parent, @@ -64,7 +64,7 @@ def __init__( self.image_mean = image_mean self.image_std = image_std - def prepare_feat_extract_dict(self): + def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, @@ -79,23 +79,23 @@ def prepare_feat_extract_dict(self): @require_vision class ConvNextFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): - feature_extraction_class = ConvNextFeatureExtractor if is_vision_available() else None + image_processing_class = ConvNextImageProcessor if is_vision_available() else None def setUp(self): - self.feature_extract_tester = ConvNextFeatureExtractionTester(self) + self.image_processor_tester = ConvNextImageProcessingTester(self) @property - def feat_extract_dict(self): - return self.feature_extract_tester.prepare_feat_extract_dict() - - def 
test_feat_extract_properties(self): - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) - self.assertTrue(hasattr(feature_extractor, "do_resize")) - self.assertTrue(hasattr(feature_extractor, "size")) - self.assertTrue(hasattr(feature_extractor, "crop_pct")) - self.assertTrue(hasattr(feature_extractor, "do_normalize")) - self.assertTrue(hasattr(feature_extractor, "image_mean")) - self.assertTrue(hasattr(feature_extractor, "image_std")) + def image_processor_dict(self): + return self.image_processor_tester.prepare_image_processor_dict() + + def test_image_processor_properties(self): + image_processing = self.image_processing_class(**self.image_processor_dict) + self.assertTrue(hasattr(image_processing, "do_resize")) + self.assertTrue(hasattr(image_processing, "size")) + self.assertTrue(hasattr(image_processing, "crop_pct")) + self.assertTrue(hasattr(image_processing, "do_normalize")) + self.assertTrue(hasattr(image_processing, "image_mean")) + self.assertTrue(hasattr(image_processing, "image_std")) def test_feat_extract_from_dict_with_kwargs(self): feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) @@ -108,97 +108,97 @@ def test_batch_feature(self): pass def test_call_pil(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, 
- self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["shortest_edge"], - self.feature_extract_tester.size["shortest_edge"], + self.image_processor_tester.num_channels, + self.image_processor_tester.size["shortest_edge"], + self.image_processor_tester.size["shortest_edge"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["shortest_edge"], - self.feature_extract_tester.size["shortest_edge"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.size["shortest_edge"], + self.image_processor_tester.size["shortest_edge"], ), ) def test_call_numpy(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["shortest_edge"], - self.feature_extract_tester.size["shortest_edge"], + self.image_processor_tester.num_channels, + self.image_processor_tester.size["shortest_edge"], + 
self.image_processor_tester.size["shortest_edge"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["shortest_edge"], - self.feature_extract_tester.size["shortest_edge"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.size["shortest_edge"], + self.image_processor_tester.size["shortest_edge"], ), ) def test_call_pytorch(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["shortest_edge"], - self.feature_extract_tester.size["shortest_edge"], + self.image_processor_tester.num_channels, + self.image_processor_tester.size["shortest_edge"], + self.image_processor_tester.size["shortest_edge"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( 
encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["shortest_edge"], - self.feature_extract_tester.size["shortest_edge"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.size["shortest_edge"], + self.image_processor_tester.size["shortest_edge"], ), ) diff --git a/tests/models/deformable_detr/test_image_processing_deformable_detr.py b/tests/models/deformable_detr/test_image_processing_deformable_detr.py index bc6368953949..70463ded6e12 100644 --- a/tests/models/deformable_detr/test_image_processing_deformable_detr.py +++ b/tests/models/deformable_detr/test_image_processing_deformable_detr.py @@ -33,10 +33,10 @@ if is_vision_available(): from PIL import Image - from transformers import DeformableDetrFeatureExtractor + from transformers import DeformableDetrImageProcessor -class DeformableDetrFeatureExtractionTester(unittest.TestCase): +class DeformableDetrImageProcessingTester(unittest.TestCase): def __init__( self, parent, @@ -69,7 +69,7 @@ def __init__( self.rescale_factor = rescale_factor self.do_pad = do_pad - def prepare_feat_extract_dict(self): + def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, @@ -83,7 +83,7 @@ def prepare_feat_extract_dict(self): def get_expected_values(self, image_inputs, batched=False): """ - This function computes the expected height and width when providing images to DeformableDetrFeatureExtractor, + This function computes the expected height and width when providing images to DeformableDetrImageProcessor, assuming do_resize is set to True with a scalar size. 
""" if not batched: @@ -117,24 +117,24 @@ def get_expected_values(self, image_inputs, batched=False): @require_vision class DeformableDetrFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): - feature_extraction_class = DeformableDetrFeatureExtractor if is_vision_available() else None + image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None def setUp(self): - self.feature_extract_tester = DeformableDetrFeatureExtractionTester(self) + self.image_processor_tester = DeformableDetrImageProcessingTester(self) @property - def feat_extract_dict(self): - return self.feature_extract_tester.prepare_feat_extract_dict() - - def test_feat_extract_properties(self): - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) - self.assertTrue(hasattr(feature_extractor, "image_mean")) - self.assertTrue(hasattr(feature_extractor, "image_std")) - self.assertTrue(hasattr(feature_extractor, "do_normalize")) - self.assertTrue(hasattr(feature_extractor, "do_resize")) - self.assertTrue(hasattr(feature_extractor, "do_rescale")) - self.assertTrue(hasattr(feature_extractor, "do_pad")) - self.assertTrue(hasattr(feature_extractor, "size")) + def image_processor_dict(self): + return self.image_processor_tester.prepare_image_processor_dict() + + def test_image_processor_properties(self): + image_processing = self.image_processing_class(**self.image_processor_dict) + self.assertTrue(hasattr(image_processing, "image_mean")) + self.assertTrue(hasattr(image_processing, "image_std")) + self.assertTrue(hasattr(image_processing, "do_normalize")) + self.assertTrue(hasattr(image_processing, "do_resize")) + self.assertTrue(hasattr(image_processing, "do_rescale")) + self.assertTrue(hasattr(image_processing, "do_pad")) + self.assertTrue(hasattr(image_processing, "size")) def test_feat_extract_from_dict_with_kwargs(self): feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) @@ -151,116 +151,116 @@ def 
test_batch_feature(self): pass def test_call_pil(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs) self.assertEqual( encoded_images.shape, - (1, self.feature_extract_tester.num_channels, expected_height, expected_width), + (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs, batched=True) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True) - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def test_call_numpy(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + 
image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs) self.assertEqual( encoded_images.shape, - (1, self.feature_extract_tester.num_channels, expected_height, expected_width), + (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs, batched=True) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True) self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def test_call_pytorch(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, 
equal_resolution=False, torchify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs) self.assertEqual( encoded_images.shape, - (1, self.feature_extract_tester.num_channels, expected_height, expected_width), + (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs, batched=True) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True) self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def test_equivalence_pad_and_create_pixel_mask(self): - # Initialize feature_extractors - feature_extractor_1 = self.feature_extraction_class(**self.feat_extract_dict) - feature_extractor_2 = self.feature_extraction_class(do_resize=False, do_normalize=False, do_rescale=False) + # Initialize image_processings + image_processing_1 = self.image_processing_class(**self.image_processor_dict) + image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False) # create random PyTorch 
tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test whether the method "pad_and_return_pixel_mask" and calling the feature extractor return the same tensors - encoded_images_with_method = feature_extractor_1.pad_and_create_pixel_mask(image_inputs, return_tensors="pt") - encoded_images = feature_extractor_2(image_inputs, return_tensors="pt") + encoded_images_with_method = image_processing_1.pad_and_create_pixel_mask(image_inputs, return_tensors="pt") + encoded_images = image_processing_2(image_inputs, return_tensors="pt") self.assertTrue( torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4) @@ -279,8 +279,8 @@ def test_call_pytorch_with_coco_detection_annotations(self): target = {"image_id": 39769, "annotations": target} # encode them - feature_extractor = DeformableDetrFeatureExtractor() - encoding = feature_extractor(images=image, annotations=target, return_tensors="pt") + image_processing = DeformableDetrImageProcessor() + encoding = image_processing(images=image, annotations=target, return_tensors="pt") # verify pixel values expected_shape = torch.Size([1, 3, 800, 1066]) @@ -325,8 +325,8 @@ def test_call_pytorch_with_coco_panoptic_annotations(self): masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic") # encode them - feature_extractor = DeformableDetrFeatureExtractor(format="coco_panoptic") - encoding = feature_extractor(images=image, annotations=target, masks_path=masks_path, return_tensors="pt") + image_processing = DeformableDetrImageProcessor(format="coco_panoptic") + encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt") # verify pixel values expected_shape = torch.Size([1, 3, 800, 1066]) diff 
--git a/tests/models/deit/test_image_processing_deit.py b/tests/models/deit/test_image_processing_deit.py index db1e42f77109..dcf6ab6d5548 100644 --- a/tests/models/deit/test_image_processing_deit.py +++ b/tests/models/deit/test_image_processing_deit.py @@ -31,10 +31,10 @@ if is_vision_available(): from PIL import Image - from transformers import DeiTFeatureExtractor + from transformers import DeiTImageProcessor -class DeiTFeatureExtractionTester(unittest.TestCase): +class DeiTImageProcessingTester(unittest.TestCase): def __init__( self, parent, @@ -68,7 +68,7 @@ def __init__( self.image_mean = image_mean self.image_std = image_std - def prepare_feat_extract_dict(self): + def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, @@ -84,25 +84,25 @@ def prepare_feat_extract_dict(self): @require_vision class DeiTFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): - feature_extraction_class = DeiTFeatureExtractor if is_vision_available() else None + image_processing_class = DeiTImageProcessor if is_vision_available() else None test_cast_dtype = True def setUp(self): - self.feature_extract_tester = DeiTFeatureExtractionTester(self) + self.image_processor_tester = DeiTImageProcessingTester(self) @property - def feat_extract_dict(self): - return self.feature_extract_tester.prepare_feat_extract_dict() - - def test_feat_extract_properties(self): - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) - self.assertTrue(hasattr(feature_extractor, "do_resize")) - self.assertTrue(hasattr(feature_extractor, "size")) - self.assertTrue(hasattr(feature_extractor, "do_center_crop")) - self.assertTrue(hasattr(feature_extractor, "center_crop")) - self.assertTrue(hasattr(feature_extractor, "do_normalize")) - self.assertTrue(hasattr(feature_extractor, "image_mean")) - self.assertTrue(hasattr(feature_extractor, "image_std")) + def image_processor_dict(self): + return 
self.image_processor_tester.prepare_image_processor_dict() + + def test_image_processor_properties(self): + image_processing = self.image_processing_class(**self.image_processor_dict) + self.assertTrue(hasattr(image_processing, "do_resize")) + self.assertTrue(hasattr(image_processing, "size")) + self.assertTrue(hasattr(image_processing, "do_center_crop")) + self.assertTrue(hasattr(image_processing, "center_crop")) + self.assertTrue(hasattr(image_processing, "do_normalize")) + self.assertTrue(hasattr(image_processing, "image_mean")) + self.assertTrue(hasattr(image_processing, "image_std")) def test_feat_extract_from_dict_with_kwargs(self): feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) @@ -117,97 +117,97 @@ def test_batch_feature(self): pass def test_call_pil(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = 
image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) def test_call_numpy(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + 
self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) def test_call_pytorch(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) diff --git a/tests/models/detr/test_image_processing_detr.py 
b/tests/models/detr/test_image_processing_detr.py index 253ffb7c2972..617982c7ee58 100644 --- a/tests/models/detr/test_image_processing_detr.py +++ b/tests/models/detr/test_image_processing_detr.py @@ -33,10 +33,10 @@ if is_vision_available(): from PIL import Image - from transformers import DetrFeatureExtractor + from transformers import DetrImageProcessor -class DetrFeatureExtractionTester(unittest.TestCase): +class DetrImageProcessingTester(unittest.TestCase): def __init__( self, parent, @@ -69,7 +69,7 @@ def __init__( self.image_std = image_std self.do_pad = do_pad - def prepare_feat_extract_dict(self): + def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, @@ -83,7 +83,7 @@ def prepare_feat_extract_dict(self): def get_expected_values(self, image_inputs, batched=False): """ - This function computes the expected height and width when providing images to DetrFeatureExtractor, + This function computes the expected height and width when providing images to DetrImageProcessor, assuming do_resize is set to True with a scalar size. 
""" if not batched: @@ -117,25 +117,25 @@ def get_expected_values(self, image_inputs, batched=False): @require_vision class DetrFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): - feature_extraction_class = DetrFeatureExtractor if is_vision_available() else None + image_processing_class = DetrImageProcessor if is_vision_available() else None def setUp(self): - self.feature_extract_tester = DetrFeatureExtractionTester(self) + self.image_processor_tester = DetrImageProcessingTester(self) @property - def feat_extract_dict(self): - return self.feature_extract_tester.prepare_feat_extract_dict() - - def test_feat_extract_properties(self): - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) - self.assertTrue(hasattr(feature_extractor, "image_mean")) - self.assertTrue(hasattr(feature_extractor, "image_std")) - self.assertTrue(hasattr(feature_extractor, "do_normalize")) - self.assertTrue(hasattr(feature_extractor, "do_rescale")) - self.assertTrue(hasattr(feature_extractor, "rescale_factor")) - self.assertTrue(hasattr(feature_extractor, "do_resize")) - self.assertTrue(hasattr(feature_extractor, "size")) - self.assertTrue(hasattr(feature_extractor, "do_pad")) + def image_processor_dict(self): + return self.image_processor_tester.prepare_image_processor_dict() + + def test_image_processor_properties(self): + image_processing = self.image_processing_class(**self.image_processor_dict) + self.assertTrue(hasattr(image_processing, "image_mean")) + self.assertTrue(hasattr(image_processing, "image_std")) + self.assertTrue(hasattr(image_processing, "do_normalize")) + self.assertTrue(hasattr(image_processing, "do_rescale")) + self.assertTrue(hasattr(image_processing, "rescale_factor")) + self.assertTrue(hasattr(image_processing, "do_resize")) + self.assertTrue(hasattr(image_processing, "size")) + self.assertTrue(hasattr(image_processing, "do_pad")) def test_feat_extract_from_dict_with_kwargs(self): feature_extractor = 
self.feature_extraction_class.from_dict(self.feat_extract_dict) @@ -152,115 +152,115 @@ def test_batch_feature(self): pass def test_call_pil(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs) self.assertEqual( encoded_images.shape, - (1, self.feature_extract_tester.num_channels, expected_height, expected_width), + (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs, batched=True) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True) - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def test_call_numpy(self): - # Initialize feature_extractor - feature_extractor = 
self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs) self.assertEqual( encoded_images.shape, - (1, self.feature_extract_tester.num_channels, expected_height, expected_width), + (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs, batched=True) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True) self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def test_call_pytorch(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create 
random PyTorch tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs) self.assertEqual( encoded_images.shape, - (1, self.feature_extract_tester.num_channels, expected_height, expected_width), + (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs, batched=True) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True) self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def test_equivalence_pad_and_create_pixel_mask(self): - # Initialize feature_extractors - feature_extractor_1 = self.feature_extraction_class(**self.feat_extract_dict) - feature_extractor_2 = self.feature_extraction_class(do_resize=False, do_normalize=False, do_rescale=False) + # Initialize image_processings + image_processing_1 = self.image_processing_class(**self.image_processor_dict) + image_processing_2 = 
self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False) # create random PyTorch tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test whether the method "pad_and_return_pixel_mask" and calling the feature extractor return the same tensors - encoded_images_with_method = feature_extractor_1.pad_and_create_pixel_mask(image_inputs, return_tensors="pt") - encoded_images = feature_extractor_2(image_inputs, return_tensors="pt") + encoded_images_with_method = image_processing_1.pad_and_create_pixel_mask(image_inputs, return_tensors="pt") + encoded_images = image_processing_2(image_inputs, return_tensors="pt") self.assertTrue( torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4) @@ -279,8 +279,8 @@ def test_call_pytorch_with_coco_detection_annotations(self): target = {"image_id": 39769, "annotations": target} # encode them - feature_extractor = DetrFeatureExtractor.from_pretrained("facebook/detr-resnet-50") - encoding = feature_extractor(images=image, annotations=target, return_tensors="pt") + image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50") + encoding = image_processing(images=image, annotations=target, return_tensors="pt") # verify pixel values expected_shape = torch.Size([1, 3, 800, 1066]) @@ -325,8 +325,8 @@ def test_call_pytorch_with_coco_panoptic_annotations(self): masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic") # encode them - feature_extractor = DetrFeatureExtractor.from_pretrained("facebook/detr-resnet-50-panoptic") - encoding = feature_extractor(images=image, annotations=target, masks_path=masks_path, return_tensors="pt") + image_processing = 
DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic") + encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt") # verify pixel values expected_shape = torch.Size([1, 3, 800, 1066]) diff --git a/tests/models/donut/test_image_processing_donut.py b/tests/models/donut/test_image_processing_donut.py index 550d166e460d..a78686a2ee56 100644 --- a/tests/models/donut/test_image_processing_donut.py +++ b/tests/models/donut/test_image_processing_donut.py @@ -31,10 +31,10 @@ if is_vision_available(): from PIL import Image - from transformers import DonutFeatureExtractor + from transformers import DonutImageProcessor -class DonutFeatureExtractionTester(unittest.TestCase): +class DonutImageProcessingTester(unittest.TestCase): def __init__( self, parent, @@ -67,7 +67,7 @@ def __init__( self.image_mean = image_mean self.image_std = image_std - def prepare_feat_extract_dict(self): + def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, @@ -84,25 +84,25 @@ def prepare_feat_extract_dict(self): @require_vision class DonutFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): - feature_extraction_class = DonutFeatureExtractor if is_vision_available() else None + image_processing_class = DonutImageProcessor if is_vision_available() else None def setUp(self): - self.feature_extract_tester = DonutFeatureExtractionTester(self) + self.image_processor_tester = DonutImageProcessingTester(self) @property - def feat_extract_dict(self): - return self.feature_extract_tester.prepare_feat_extract_dict() - - def test_feat_extract_properties(self): - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) - self.assertTrue(hasattr(feature_extractor, "do_resize")) - self.assertTrue(hasattr(feature_extractor, "size")) - self.assertTrue(hasattr(feature_extractor, "do_thumbnail")) - self.assertTrue(hasattr(feature_extractor, "do_align_long_axis")) - 
self.assertTrue(hasattr(feature_extractor, "do_pad")) - self.assertTrue(hasattr(feature_extractor, "do_normalize")) - self.assertTrue(hasattr(feature_extractor, "image_mean")) - self.assertTrue(hasattr(feature_extractor, "image_std")) + def image_processor_dict(self): + return self.image_processor_tester.prepare_image_processor_dict() + + def test_image_processor_properties(self): + image_processing = self.image_processing_class(**self.image_processor_dict) + self.assertTrue(hasattr(image_processing, "do_resize")) + self.assertTrue(hasattr(image_processing, "size")) + self.assertTrue(hasattr(image_processing, "do_thumbnail")) + self.assertTrue(hasattr(image_processing, "do_align_long_axis")) + self.assertTrue(hasattr(image_processing, "do_pad")) + self.assertTrue(hasattr(image_processing, "do_normalize")) + self.assertTrue(hasattr(image_processing, "image_mean")) + self.assertTrue(hasattr(image_processing, "image_std")) def test_feat_extract_from_dict_with_kwargs(self): feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) @@ -120,99 +120,99 @@ def test_batch_feature(self): @is_flaky() def test_call_pil(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - 
self.feature_extract_tester.size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) @is_flaky() def test_call_numpy(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = 
image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) @is_flaky() def test_call_pytorch(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + 
self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) diff --git a/tests/models/dpt/test_image_processing_dpt.py b/tests/models/dpt/test_image_processing_dpt.py index 0bbeb173e597..ead762241d58 100644 --- a/tests/models/dpt/test_image_processing_dpt.py +++ b/tests/models/dpt/test_image_processing_dpt.py @@ -31,10 +31,10 @@ if is_vision_available(): from PIL import Image - from transformers import DPTFeatureExtractor + from transformers import DPTImageProcessor -class DPTFeatureExtractionTester(unittest.TestCase): +class DPTImageProcessingTester(unittest.TestCase): def __init__( self, parent, @@ -62,7 +62,7 @@ def __init__( self.image_mean = image_mean self.image_std = image_std - def prepare_feat_extract_dict(self): + def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, @@ -76,22 +76,22 @@ def prepare_feat_extract_dict(self): @require_vision class DPTFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): - feature_extraction_class = DPTFeatureExtractor if is_vision_available() else None + image_processing_class = DPTImageProcessor if is_vision_available() else None def setUp(self): - self.feature_extract_tester = DPTFeatureExtractionTester(self) + self.image_processor_tester = DPTImageProcessingTester(self) @property - def feat_extract_dict(self): - return self.feature_extract_tester.prepare_feat_extract_dict() + def image_processor_dict(self): + return self.image_processor_tester.prepare_image_processor_dict() - def test_feat_extract_properties(self): - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) - self.assertTrue(hasattr(feature_extractor, "image_mean")) - self.assertTrue(hasattr(feature_extractor, "image_std")) - self.assertTrue(hasattr(feature_extractor, "do_normalize")) - self.assertTrue(hasattr(feature_extractor, "do_resize")) - 
self.assertTrue(hasattr(feature_extractor, "size")) + def test_image_processor_properties(self): + image_processing = self.image_processing_class(**self.image_processor_dict) + self.assertTrue(hasattr(image_processing, "image_mean")) + self.assertTrue(hasattr(image_processing, "image_std")) + self.assertTrue(hasattr(image_processing, "do_normalize")) + self.assertTrue(hasattr(image_processing, "do_resize")) + self.assertTrue(hasattr(image_processing, "size")) def test_feat_extract_from_dict_with_kwargs(self): feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) @@ -101,97 +101,97 @@ def test_feat_extract_from_dict_with_kwargs(self): self.assertEqual(feature_extractor.size, {"height": 42, "width": 42}) def test_call_pil(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( 
encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) def test_call_numpy(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + 
self.image_processor_tester.size["width"], ), ) def test_call_pytorch(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) diff --git a/tests/models/flava/test_image_processing_flava.py b/tests/models/flava/test_image_processing_flava.py index 28718748200d..0e0a5617a740 100644 --- a/tests/models/flava/test_image_processing_flava.py +++ b/tests/models/flava/test_image_processing_flava.py @@ -31,7 +31,7 
@@ if is_vision_available(): import PIL - from transformers import FlavaFeatureExtractor + from transformers import FlavaImageProcessor from transformers.image_utils import PILImageResampling from transformers.models.flava.image_processing_flava import ( FLAVA_CODEBOOK_MEAN, @@ -43,7 +43,7 @@ FLAVA_IMAGE_MEAN = FLAVA_IMAGE_STD = FLAVA_CODEBOOK_MEAN = FLAVA_CODEBOOK_STD = None -class FlavaFeatureExtractionTester(unittest.TestCase): +class FlavaImageProcessingTester(unittest.TestCase): def __init__( self, parent, @@ -115,7 +115,7 @@ def __init__( self.codebook_image_mean = codebook_image_mean self.codebook_image_std = codebook_image_std - def prepare_feat_extract_dict(self): + def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, @@ -162,37 +162,37 @@ def get_expected_codebook_image_size(self): @require_vision class FlavaFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): - feature_extraction_class = FlavaFeatureExtractor if is_vision_available() else None + image_processing_class = FlavaImageProcessor if is_vision_available() else None maxDiff = None def setUp(self): - self.feature_extract_tester = FlavaFeatureExtractionTester(self) + self.image_processor_tester = FlavaImageProcessingTester(self) @property - def feat_extract_dict(self): - return self.feature_extract_tester.prepare_feat_extract_dict() - - def test_feat_extract_properties(self): - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) - self.assertTrue(hasattr(feature_extractor, "image_mean")) - self.assertTrue(hasattr(feature_extractor, "image_std")) - self.assertTrue(hasattr(feature_extractor, "do_normalize")) - self.assertTrue(hasattr(feature_extractor, "do_resize")) - self.assertTrue(hasattr(feature_extractor, "resample")) - self.assertTrue(hasattr(feature_extractor, "crop_size")) - self.assertTrue(hasattr(feature_extractor, "do_center_crop")) - self.assertTrue(hasattr(feature_extractor, 
"do_rescale")) - self.assertTrue(hasattr(feature_extractor, "rescale_factor")) - self.assertTrue(hasattr(feature_extractor, "masking_generator")) - self.assertTrue(hasattr(feature_extractor, "codebook_do_resize")) - self.assertTrue(hasattr(feature_extractor, "codebook_size")) - self.assertTrue(hasattr(feature_extractor, "codebook_resample")) - self.assertTrue(hasattr(feature_extractor, "codebook_do_center_crop")) - self.assertTrue(hasattr(feature_extractor, "codebook_crop_size")) - self.assertTrue(hasattr(feature_extractor, "codebook_do_map_pixels")) - self.assertTrue(hasattr(feature_extractor, "codebook_do_normalize")) - self.assertTrue(hasattr(feature_extractor, "codebook_image_mean")) - self.assertTrue(hasattr(feature_extractor, "codebook_image_std")) + def image_processor_dict(self): + return self.image_processor_tester.prepare_image_processor_dict() + + def test_image_processor_properties(self): + image_processing = self.image_processing_class(**self.image_processor_dict) + self.assertTrue(hasattr(image_processing, "image_mean")) + self.assertTrue(hasattr(image_processing, "image_std")) + self.assertTrue(hasattr(image_processing, "do_normalize")) + self.assertTrue(hasattr(image_processing, "do_resize")) + self.assertTrue(hasattr(image_processing, "resample")) + self.assertTrue(hasattr(image_processing, "crop_size")) + self.assertTrue(hasattr(image_processing, "do_center_crop")) + self.assertTrue(hasattr(image_processing, "do_rescale")) + self.assertTrue(hasattr(image_processing, "rescale_factor")) + self.assertTrue(hasattr(image_processing, "masking_generator")) + self.assertTrue(hasattr(image_processing, "codebook_do_resize")) + self.assertTrue(hasattr(image_processing, "codebook_size")) + self.assertTrue(hasattr(image_processing, "codebook_resample")) + self.assertTrue(hasattr(image_processing, "codebook_do_center_crop")) + self.assertTrue(hasattr(image_processing, "codebook_crop_size")) + self.assertTrue(hasattr(image_processing, "codebook_do_map_pixels")) 
+ self.assertTrue(hasattr(image_processing, "codebook_do_normalize")) + self.assertTrue(hasattr(image_processing, "codebook_image_mean")) + self.assertTrue(hasattr(image_processing, "codebook_image_std")) def test_feat_extract_from_dict_with_kwargs(self): feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) @@ -213,29 +213,29 @@ def test_batch_feature(self): pass def test_call_pil(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, PIL.Image.Image) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt") + encoded_images = image_processing(image_inputs[0], return_tensors="pt") # Test no bool masked pos self.assertFalse("bool_masked_pos" in encoded_images) - expected_height, expected_width = self.feature_extract_tester.get_expected_image_size() + expected_height, expected_width = self.image_processor_tester.get_expected_image_size() self.assertEqual( encoded_images.pixel_values.shape, - (1, self.feature_extract_tester.num_channels, expected_height, expected_width), + (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt") - expected_height, expected_width = self.feature_extract_tester.get_expected_image_size() + encoded_images = image_processing(image_inputs, return_tensors="pt") + expected_height, expected_width = self.image_processor_tester.get_expected_image_size() # Test no bool masked pos self.assertFalse("bool_masked_pos" in encoded_images) @@ 
-243,86 +243,86 @@ def test_call_pil(self): self.assertEqual( encoded_images.pixel_values.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def _test_call_framework(self, instance_class, prepare_kwargs): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, **prepare_kwargs) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, **prepare_kwargs) for image in image_inputs: self.assertIsInstance(image, instance_class) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt") + encoded_images = image_processing(image_inputs[0], return_tensors="pt") - expected_height, expected_width = self.feature_extract_tester.get_expected_image_size() + expected_height, expected_width = self.image_processor_tester.get_expected_image_size() self.assertEqual( encoded_images.pixel_values.shape, - (1, self.feature_extract_tester.num_channels, expected_height, expected_width), + (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) - encoded_images = feature_extractor(image_inputs, return_image_mask=True, return_tensors="pt") + encoded_images = image_processing(image_inputs, return_image_mask=True, return_tensors="pt") - expected_height, expected_width = self.feature_extract_tester.get_expected_image_size() + expected_height, expected_width = self.image_processor_tester.get_expected_image_size() self.assertEqual( encoded_images.pixel_values.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, + 
self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, expected_height, expected_width, ), ) - expected_height, expected_width = self.feature_extract_tester.get_expected_mask_size() + expected_height, expected_width = self.image_processor_tester.get_expected_mask_size() self.assertEqual( encoded_images.bool_masked_pos.shape, ( - self.feature_extract_tester.batch_size, + self.image_processor_tester.batch_size, expected_height, expected_width, ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values - expected_height, expected_width = self.feature_extract_tester.get_expected_image_size() + expected_height, expected_width = self.image_processor_tester.get_expected_image_size() self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, expected_height, expected_width, ), ) # Test masking - encoded_images = feature_extractor(image_inputs, return_image_mask=True, return_tensors="pt") + encoded_images = image_processing(image_inputs, return_image_mask=True, return_tensors="pt") - expected_height, expected_width = self.feature_extract_tester.get_expected_image_size() + expected_height, expected_width = self.image_processor_tester.get_expected_image_size() self.assertEqual( encoded_images.pixel_values.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, expected_height, expected_width, ), ) - expected_height, expected_width = self.feature_extract_tester.get_expected_mask_size() + expected_height, expected_width = self.image_processor_tester.get_expected_mask_size() self.assertEqual( encoded_images.bool_masked_pos.shape, ( - 
self.feature_extract_tester.batch_size, + self.image_processor_tester.batch_size, expected_height, expected_width, ), @@ -335,39 +335,39 @@ def test_call_pytorch(self): self._test_call_framework(torch.Tensor, prepare_kwargs={"torchify": True}) def test_masking(self): - # Initialize feature_extractor + # Initialize image_processing random.seed(1234) - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + image_processing = self.image_processing_class(**self.image_processor_dict) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_image_mask=True, return_tensors="pt") + encoded_images = image_processing(image_inputs[0], return_image_mask=True, return_tensors="pt") self.assertEqual(encoded_images.bool_masked_pos.sum().item(), 75) def test_codebook_pixels(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, PIL.Image.Image) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_codebook_pixels=True, return_tensors="pt") - expected_height, expected_width = self.feature_extract_tester.get_expected_codebook_image_size() + encoded_images = image_processing(image_inputs[0], return_codebook_pixels=True, return_tensors="pt") + expected_height, expected_width = self.image_processor_tester.get_expected_codebook_image_size() self.assertEqual( 
encoded_images.codebook_pixel_values.shape, - (1, self.feature_extract_tester.num_channels, expected_height, expected_width), + (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_codebook_pixels=True, return_tensors="pt") - expected_height, expected_width = self.feature_extract_tester.get_expected_codebook_image_size() + encoded_images = image_processing(image_inputs, return_codebook_pixels=True, return_tensors="pt") + expected_height, expected_width = self.image_processor_tester.get_expected_codebook_image_size() self.assertEqual( encoded_images.codebook_pixel_values.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, expected_height, expected_width, ), diff --git a/tests/models/glpn/test_image_processing_glpn.py b/tests/models/glpn/test_image_processing_glpn.py index 31e527761771..07c63ef4fbe0 100644 --- a/tests/models/glpn/test_image_processing_glpn.py +++ b/tests/models/glpn/test_image_processing_glpn.py @@ -31,10 +31,10 @@ if is_vision_available(): from PIL import Image - from transformers import GLPNFeatureExtractor + from transformers import GLPNImageProcessor -class GLPNFeatureExtractionTester(unittest.TestCase): +class GLPNImageProcessingTester(unittest.TestCase): def __init__( self, parent, @@ -57,7 +57,7 @@ def __init__( self.size_divisor = size_divisor self.do_rescale = do_rescale - def prepare_feat_extract_dict(self): + def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size_divisor": self.size_divisor, @@ -69,60 +69,60 @@ def prepare_feat_extract_dict(self): @require_vision class GLPNFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): - feature_extraction_class = GLPNFeatureExtractor if is_vision_available() else None + image_processing_class = GLPNImageProcessor if 
is_vision_available() else None def setUp(self): - self.feature_extract_tester = GLPNFeatureExtractionTester(self) + self.image_processor_tester = GLPNImageProcessingTester(self) @property - def feat_extract_dict(self): - return self.feature_extract_tester.prepare_feat_extract_dict() + def image_processor_dict(self): + return self.image_processor_tester.prepare_image_processor_dict() - def test_feat_extract_properties(self): - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) - self.assertTrue(hasattr(feature_extractor, "do_resize")) - self.assertTrue(hasattr(feature_extractor, "size_divisor")) - self.assertTrue(hasattr(feature_extractor, "resample")) - self.assertTrue(hasattr(feature_extractor, "do_rescale")) + def test_image_processor_properties(self): + image_processing = self.image_processing_class(**self.image_processor_dict) + self.assertTrue(hasattr(image_processing, "do_resize")) + self.assertTrue(hasattr(image_processing, "size_divisor")) + self.assertTrue(hasattr(image_processing, "resample")) + self.assertTrue(hasattr(image_processing, "do_rescale")) def test_batch_feature(self): pass def test_call_pil(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) - # Test not batched input (GLPNFeatureExtractor doesn't support batching) - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values - self.assertTrue(encoded_images.shape[-1] % self.feature_extract_tester.size_divisor == 0) - self.assertTrue(encoded_images.shape[-2] % 
self.feature_extract_tester.size_divisor == 0) + # Test not batched input (GLPNImageProcessor doesn't support batching) + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values + self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0) + self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0) def test_call_numpy(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) - # Test not batched input (GLPNFeatureExtractor doesn't support batching) - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values - self.assertTrue(encoded_images.shape[-1] % self.feature_extract_tester.size_divisor == 0) - self.assertTrue(encoded_images.shape[-2] % self.feature_extract_tester.size_divisor == 0) + # Test not batched input (GLPNImageProcessor doesn't support batching) + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values + self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0) + self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0) def test_call_pytorch(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, 
equal_resolution=False, torchify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) - # Test not batched input (GLPNFeatureExtractor doesn't support batching) - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values - self.assertTrue(encoded_images.shape[-1] % self.feature_extract_tester.size_divisor == 0) - self.assertTrue(encoded_images.shape[-2] % self.feature_extract_tester.size_divisor == 0) + # Test not batched input (GLPNImageProcessor doesn't support batching) + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values + self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0) + self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0) diff --git a/tests/models/imagegpt/test_image_processing_imagegpt.py b/tests/models/imagegpt/test_image_processing_imagegpt.py index 465a6015a39a..b3c916241641 100644 --- a/tests/models/imagegpt/test_image_processing_imagegpt.py +++ b/tests/models/imagegpt/test_image_processing_imagegpt.py @@ -34,10 +34,10 @@ if is_vision_available(): from PIL import Image - from transformers import ImageGPTFeatureExtractor + from transformers import ImageGPTImageProcessor -class ImageGPTFeatureExtractionTester(unittest.TestCase): +class ImageGPTImageProcessingTester(unittest.TestCase): def __init__( self, parent, @@ -61,7 +61,7 @@ def __init__( self.size = size self.do_normalize = do_normalize - def prepare_feat_extract_dict(self): + def prepare_image_processor_dict(self): return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( @@ -80,66 +80,59 @@ def prepare_feat_extract_dict(self): @require_vision class ImageGPTFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): - feature_extraction_class = ImageGPTFeatureExtractor if 
is_vision_available() else None + image_processing_class = ImageGPTImageProcessor if is_vision_available() else None def setUp(self): - self.feature_extract_tester = ImageGPTFeatureExtractionTester(self) + self.image_processor_tester = ImageGPTImageProcessingTester(self) @property - def feat_extract_dict(self): - return self.feature_extract_tester.prepare_feat_extract_dict() - - def test_feat_extract_properties(self): - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) - self.assertTrue(hasattr(feature_extractor, "clusters")) - self.assertTrue(hasattr(feature_extractor, "do_resize")) - self.assertTrue(hasattr(feature_extractor, "size")) - self.assertTrue(hasattr(feature_extractor, "do_normalize")) - - def test_feat_extract_from_dict_with_kwargs(self): - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) - self.assertEqual(feature_extractor.size, {"height": 18, "width": 18}) - - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict, size=42) - self.assertEqual(feature_extractor.size, {"height": 42, "width": 42}) - - def test_feat_extract_to_json_string(self): - feat_extract = self.feature_extraction_class(**self.feat_extract_dict) - obj = json.loads(feat_extract.to_json_string()) - for key, value in self.feat_extract_dict.items(): + def image_processor_dict(self): + return self.image_processor_tester.prepare_image_processor_dict() + + def test_image_processor_properties(self): + image_processing = self.image_processing_class(**self.image_processor_dict) + self.assertTrue(hasattr(image_processing, "clusters")) + self.assertTrue(hasattr(image_processing, "do_resize")) + self.assertTrue(hasattr(image_processing, "size")) + self.assertTrue(hasattr(image_processing, "do_normalize")) + + def test_image_processor_to_json_string(self): + image_processor = self.image_processing_class(**self.image_processor_dict) + obj = json.loads(image_processor.to_json_string()) + for key, value in 
self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(value, obj[key])) else: self.assertEqual(obj[key], value) - def test_feat_extract_to_json_file(self): - feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) + def test_image_processor_to_json_file(self): + image_processor_first = self.image_processing_class(**self.image_processor_dict) with tempfile.TemporaryDirectory() as tmpdirname: - json_file_path = os.path.join(tmpdirname, "feat_extract.json") - feat_extract_first.to_json_file(json_file_path) - feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path).to_dict() + json_file_path = os.path.join(tmpdirname, "image_processor.json") + image_processor_first.to_json_file(json_file_path) + image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict() - feat_extract_first = feat_extract_first.to_dict() - for key, value in feat_extract_first.items(): + image_processor_first = image_processor_first.to_dict() + for key, value in image_processor_first.items(): if key == "clusters": - self.assertTrue(np.array_equal(value, feat_extract_second[key])) + self.assertTrue(np.array_equal(value, image_processor_second[key])) else: - self.assertEqual(feat_extract_first[key], value) + self.assertEqual(image_processor_first[key], value) - def test_feat_extract_from_and_save_pretrained(self): - feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) + def test_image_processor_from_and_save_pretrained(self): + image_processor_first = self.image_processing_class(**self.image_processor_dict) with tempfile.TemporaryDirectory() as tmpdirname: - feat_extract_first.save_pretrained(tmpdirname) - feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname).to_dict() + image_processor_first.save_pretrained(tmpdirname) + image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict() - feat_extract_first = 
feat_extract_first.to_dict() - for key, value in feat_extract_first.items(): + image_processor_first = image_processor_first.to_dict() + for key, value in image_processor_first.items(): if key == "clusters": - self.assertTrue(np.array_equal(value, feat_extract_second[key])) + self.assertTrue(np.array_equal(value, image_processor_second[key])) else: - self.assertEqual(feat_extract_first[key], value) + self.assertEqual(image_processor_first[key], value) @unittest.skip("ImageGPT requires clusters at initialization") def test_init_without_params(self): @@ -159,15 +152,15 @@ def prepare_images(): @require_vision @require_torch -class ImageGPTFeatureExtractorIntegrationTest(unittest.TestCase): +class ImageGPTImageProcessorIntegrationTest(unittest.TestCase): @slow def test_image(self): - feature_extractor = ImageGPTFeatureExtractor.from_pretrained("openai/imagegpt-small") + image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small") images = prepare_images() # test non-batched - encoding = feature_extractor(images[0], return_tensors="pt") + encoding = image_processing(images[0], return_tensors="pt") self.assertIsInstance(encoding.input_ids, torch.LongTensor) self.assertEqual(encoding.input_ids.shape, (1, 1024)) @@ -176,7 +169,7 @@ def test_image(self): self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_slice) # test batched - encoding = feature_extractor(images, return_tensors="pt") + encoding = image_processing(images, return_tensors="pt") self.assertIsInstance(encoding.input_ids, torch.LongTensor) self.assertEqual(encoding.input_ids.shape, (2, 1024)) diff --git a/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py b/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py index 4423d33376e4..d786203361ea 100644 --- a/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py +++ b/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py @@ -31,10 +31,10 @@ if is_pytesseract_available(): from PIL import Image - from 
transformers import LayoutLMv2FeatureExtractor + from transformers import LayoutLMv2ImageProcessor -class LayoutLMv2FeatureExtractionTester(unittest.TestCase): +class LayoutLMv2ImageProcessingTester(unittest.TestCase): def __init__( self, parent, @@ -58,7 +58,7 @@ def __init__( self.size = size self.apply_ocr = apply_ocr - def prepare_feat_extract_dict(self): + def prepare_image_processor_dict(self): return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @@ -66,20 +66,20 @@ def prepare_feat_extract_dict(self): @require_pytesseract class LayoutLMv2FeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): - feature_extraction_class = LayoutLMv2FeatureExtractor if is_pytesseract_available() else None + image_processing_class = LayoutLMv2ImageProcessor if is_pytesseract_available() else None def setUp(self): - self.feature_extract_tester = LayoutLMv2FeatureExtractionTester(self) + self.image_processor_tester = LayoutLMv2ImageProcessingTester(self) @property - def feat_extract_dict(self): - return self.feature_extract_tester.prepare_feat_extract_dict() + def image_processor_dict(self): + return self.image_processor_tester.prepare_image_processor_dict() - def test_feat_extract_properties(self): - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) - self.assertTrue(hasattr(feature_extractor, "do_resize")) - self.assertTrue(hasattr(feature_extractor, "size")) - self.assertTrue(hasattr(feature_extractor, "apply_ocr")) + def test_image_processor_properties(self): + image_processing = self.image_processing_class(**self.image_processor_dict) + self.assertTrue(hasattr(image_processing, "do_resize")) + self.assertTrue(hasattr(image_processing, "size")) + self.assertTrue(hasattr(image_processing, "apply_ocr")) def test_feat_extract_from_dict_with_kwargs(self): feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) @@ -92,22 +92,22 @@ def test_batch_feature(self): pass def 
test_call_pil(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input - encoding = feature_extractor(image_inputs[0], return_tensors="pt") + encoding = image_processing(image_inputs[0], return_tensors="pt") self.assertEqual( encoding.pixel_values.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) @@ -115,84 +115,84 @@ def test_call_pil(self): self.assertIsInstance(encoding.boxes, list) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) def test_call_numpy(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors - image_inputs = 
prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) def test_call_pytorch(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input - encoded_images = 
feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) def test_layoutlmv2_integration_test(self): # with apply_OCR = True - feature_extractor = LayoutLMv2FeatureExtractor() + image_processing = LayoutLMv2ImageProcessor() from datasets import load_dataset @@ -200,7 +200,7 @@ def test_layoutlmv2_integration_test(self): image = Image.open(ds[0]["file"]).convert("RGB") - encoding = feature_extractor(image, return_tensors="pt") + encoding = image_processing(image, return_tensors="pt") self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224)) self.assertEqual(len(encoding.words), len(encoding.boxes)) @@ -215,8 +215,8 @@ def test_layoutlmv2_integration_test(self): self.assertListEqual(encoding.boxes, expected_boxes) # with apply_OCR = False - feature_extractor = LayoutLMv2FeatureExtractor(apply_ocr=False) + image_processing = LayoutLMv2ImageProcessor(apply_ocr=False) - encoding = feature_extractor(image, return_tensors="pt") + encoding = image_processing(image, 
return_tensors="pt") self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224)) diff --git a/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py b/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py index 829fc8d79dde..7268be38de07 100644 --- a/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py +++ b/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py @@ -31,10 +31,10 @@ if is_pytesseract_available(): from PIL import Image - from transformers import LayoutLMv3FeatureExtractor + from transformers import LayoutLMv3ImageProcessor -class LayoutLMv3FeatureExtractionTester(unittest.TestCase): +class LayoutLMv3ImageProcessingTester(unittest.TestCase): def __init__( self, parent, @@ -58,7 +58,7 @@ def __init__( self.size = size self.apply_ocr = apply_ocr - def prepare_feat_extract_dict(self): + def prepare_image_processor_dict(self): return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @@ -66,20 +66,20 @@ def prepare_feat_extract_dict(self): @require_pytesseract class LayoutLMv3FeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): - feature_extraction_class = LayoutLMv3FeatureExtractor if is_pytesseract_available() else None + image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None def setUp(self): - self.feature_extract_tester = LayoutLMv3FeatureExtractionTester(self) + self.image_processor_tester = LayoutLMv3ImageProcessingTester(self) @property - def feat_extract_dict(self): - return self.feature_extract_tester.prepare_feat_extract_dict() + def image_processor_dict(self): + return self.image_processor_tester.prepare_image_processor_dict() - def test_feat_extract_properties(self): - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) - self.assertTrue(hasattr(feature_extractor, "do_resize")) - self.assertTrue(hasattr(feature_extractor, "size")) - self.assertTrue(hasattr(feature_extractor, "apply_ocr")) + def 
test_image_processor_properties(self): + image_processing = self.image_processing_class(**self.image_processor_dict) + self.assertTrue(hasattr(image_processing, "do_resize")) + self.assertTrue(hasattr(image_processing, "size")) + self.assertTrue(hasattr(image_processing, "apply_ocr")) def test_feat_extract_from_dict_with_kwargs(self): feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) @@ -92,22 +92,22 @@ def test_batch_feature(self): pass def test_call_pil(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input - encoding = feature_extractor(image_inputs[0], return_tensors="pt") + encoding = image_processing(image_inputs[0], return_tensors="pt") self.assertEqual( encoding.pixel_values.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) @@ -115,84 +115,84 @@ def test_call_pil(self): self.assertIsInstance(encoding.boxes, list) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + 
self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) def test_call_numpy(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) def test_call_pytorch(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize 
image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) def test_LayoutLMv3_integration_test(self): # with apply_OCR = True - feature_extractor = LayoutLMv3FeatureExtractor() + image_processing = LayoutLMv3ImageProcessor() from datasets import load_dataset @@ -200,7 +200,7 @@ def test_LayoutLMv3_integration_test(self): image = Image.open(ds[0]["file"]).convert("RGB") - encoding = feature_extractor(image, return_tensors="pt") + encoding = image_processing(image, return_tensors="pt") self.assertEqual(encoding.pixel_values.shape, (1, 3, 
224, 224)) self.assertEqual(len(encoding.words), len(encoding.boxes)) @@ -215,8 +215,8 @@ def test_LayoutLMv3_integration_test(self): self.assertListEqual(encoding.boxes, expected_boxes) # with apply_OCR = False - feature_extractor = LayoutLMv3FeatureExtractor(apply_ocr=False) + image_processing = LayoutLMv3ImageProcessor(apply_ocr=False) - encoding = feature_extractor(image, return_tensors="pt") + encoding = image_processing(image, return_tensors="pt") self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224)) diff --git a/tests/models/levit/test_image_processing_levit.py b/tests/models/levit/test_image_processing_levit.py index 76f3c66e1ade..5a2f02d7a2c7 100644 --- a/tests/models/levit/test_image_processing_levit.py +++ b/tests/models/levit/test_image_processing_levit.py @@ -31,10 +31,10 @@ if is_vision_available(): from PIL import Image - from transformers import LevitFeatureExtractor + from transformers import LevitImageProcessor -class LevitFeatureExtractionTester(unittest.TestCase): +class LevitImageProcessingTester(unittest.TestCase): def __init__( self, parent, @@ -67,7 +67,7 @@ def __init__( self.image_mean = image_mean self.image_std = image_std - def prepare_feat_extract_dict(self): + def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, @@ -83,23 +83,23 @@ def prepare_feat_extract_dict(self): @require_vision class LevitFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): - feature_extraction_class = LevitFeatureExtractor if is_vision_available() else None + image_processing_class = LevitImageProcessor if is_vision_available() else None def setUp(self): - self.feature_extract_tester = LevitFeatureExtractionTester(self) + self.image_processor_tester = LevitImageProcessingTester(self) @property - def feat_extract_dict(self): - return self.feature_extract_tester.prepare_feat_extract_dict() - - def test_feat_extract_properties(self): - feature_extractor = 
self.feature_extraction_class(**self.feat_extract_dict) - self.assertTrue(hasattr(feature_extractor, "image_mean")) - self.assertTrue(hasattr(feature_extractor, "image_std")) - self.assertTrue(hasattr(feature_extractor, "do_normalize")) - self.assertTrue(hasattr(feature_extractor, "do_resize")) - self.assertTrue(hasattr(feature_extractor, "do_center_crop")) - self.assertTrue(hasattr(feature_extractor, "size")) + def image_processor_dict(self): + return self.image_processor_tester.prepare_image_processor_dict() + + def test_image_processor_properties(self): + image_processing = self.image_processing_class(**self.image_processor_dict) + self.assertTrue(hasattr(image_processing, "image_mean")) + self.assertTrue(hasattr(image_processing, "image_std")) + self.assertTrue(hasattr(image_processing, "do_normalize")) + self.assertTrue(hasattr(image_processing, "do_resize")) + self.assertTrue(hasattr(image_processing, "do_center_crop")) + self.assertTrue(hasattr(image_processing, "size")) def test_feat_extract_from_dict_with_kwargs(self): feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) @@ -114,97 +114,97 @@ def test_batch_feature(self): pass def test_call_pil(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - 
self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) def test_call_numpy(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) # Test batched - encoded_images = 
feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) def test_call_pytorch(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - 
self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) diff --git a/tests/models/maskformer/test_image_processing_maskformer.py b/tests/models/maskformer/test_image_processing_maskformer.py index f8ddf8c9dc03..1c455692f16d 100644 --- a/tests/models/maskformer/test_image_processing_maskformer.py +++ b/tests/models/maskformer/test_image_processing_maskformer.py @@ -31,7 +31,7 @@ import torch if is_vision_available(): - from transformers import MaskFormerFeatureExtractor + from transformers import MaskFormerImageProcessor from transformers.models.maskformer.image_processing_maskformer import binary_mask_to_rle from transformers.models.maskformer.modeling_maskformer import MaskFormerForInstanceSegmentationOutput @@ -39,7 +39,7 @@ from PIL import Image -class MaskFormerFeatureExtractionTester(unittest.TestCase): +class MaskFormerImageProcessingTester(unittest.TestCase): def __init__( self, parent, @@ -77,7 +77,7 @@ def __init__( self.reduce_labels = reduce_labels self.ignore_index = ignore_index - def prepare_feat_extract_dict(self): + def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, @@ -92,7 +92,7 @@ def prepare_feat_extract_dict(self): def get_expected_values(self, image_inputs, batched=False): """ - This function computes the expected height and width when providing images to MaskFormerFeatureExtractor, + This function computes the expected height and width when providing images to MaskFormerImageProcessor, assuming do_resize is set to True with a scalar size. 
""" if not batched: @@ -133,25 +133,25 @@ def get_fake_maskformer_outputs(self): @require_vision class MaskFormerFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): - feature_extraction_class = MaskFormerFeatureExtractor if (is_vision_available() and is_torch_available()) else None + image_processing_class = MaskFormerImageProcessor if (is_vision_available() and is_torch_available()) else None def setUp(self): - self.feature_extract_tester = MaskFormerFeatureExtractionTester(self) + self.image_processor_tester = MaskFormerImageProcessingTester(self) @property - def feat_extract_dict(self): - return self.feature_extract_tester.prepare_feat_extract_dict() - - def test_feat_extract_properties(self): - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) - self.assertTrue(hasattr(feature_extractor, "image_mean")) - self.assertTrue(hasattr(feature_extractor, "image_std")) - self.assertTrue(hasattr(feature_extractor, "do_normalize")) - self.assertTrue(hasattr(feature_extractor, "do_resize")) - self.assertTrue(hasattr(feature_extractor, "size")) - self.assertTrue(hasattr(feature_extractor, "max_size")) - self.assertTrue(hasattr(feature_extractor, "ignore_index")) - self.assertTrue(hasattr(feature_extractor, "num_labels")) + def image_processor_dict(self): + return self.image_processor_tester.prepare_image_processor_dict() + + def test_image_processor_properties(self): + image_processing = self.image_processing_class(**self.image_processor_dict) + self.assertTrue(hasattr(image_processing, "image_mean")) + self.assertTrue(hasattr(image_processing, "image_std")) + self.assertTrue(hasattr(image_processing, "do_normalize")) + self.assertTrue(hasattr(image_processing, "do_resize")) + self.assertTrue(hasattr(image_processing, "size")) + self.assertTrue(hasattr(image_processing, "max_size")) + self.assertTrue(hasattr(image_processing, "ignore_index")) + self.assertTrue(hasattr(image_processing, "num_labels")) def 
test_feat_extract_from_dict_with_kwargs(self): feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) @@ -168,117 +168,117 @@ def test_batch_feature(self): pass def test_call_pil(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs) self.assertEqual( encoded_images.shape, - (1, self.feature_extract_tester.num_channels, expected_height, expected_width), + (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs, batched=True) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True) - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def 
test_call_numpy(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs) self.assertEqual( encoded_images.shape, - (1, self.feature_extract_tester.num_channels, expected_height, expected_width), + (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs, batched=True) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True) self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def test_call_pytorch(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + 
image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs) self.assertEqual( encoded_images.shape, - (1, self.feature_extract_tester.num_channels, expected_height, expected_width), + (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs, batched=True) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True) self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def test_equivalence_pad_and_create_pixel_mask(self): - # Initialize feature_extractors - feature_extractor_1 = self.feature_extraction_class(**self.feat_extract_dict) - feature_extractor_2 = self.feature_extraction_class( - do_resize=False, do_normalize=False, do_rescale=False, num_labels=self.feature_extract_tester.num_classes + # Initialize 
image_processings + image_processing_1 = self.image_processing_class(**self.image_processor_dict) + image_processing_2 = self.image_processing_class( + do_resize=False, do_normalize=False, do_rescale=False, num_labels=self.image_processor_tester.num_classes ) # create random PyTorch tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test whether the method "pad_and_return_pixel_mask" and calling the feature extractor return the same tensors - encoded_images_with_method = feature_extractor_1.encode_inputs(image_inputs, return_tensors="pt") - encoded_images = feature_extractor_2(image_inputs, return_tensors="pt") + encoded_images_with_method = image_processing_1.encode_inputs(image_inputs, return_tensors="pt") + encoded_images = image_processing_2(image_inputs, return_tensors="pt") self.assertTrue( torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4) @@ -287,15 +287,15 @@ def test_equivalence_pad_and_create_pixel_mask(self): torch.allclose(encoded_images_with_method["pixel_mask"], encoded_images["pixel_mask"], atol=1e-4) ) - def comm_get_feature_extractor_inputs( + def comm_get_image_processing_inputs( self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np" ): - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + image_processing = self.image_processing_class(**self.image_processor_dict) # prepare image and target - num_labels = self.feature_extract_tester.num_labels + num_labels = self.image_processor_tester.num_labels annotations = None instance_id_to_semantic_id = None - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False) + image_inputs = prepare_image_inputs(self.image_processor_tester, 
equal_resolution=False) if with_segmentation_maps: high = num_labels if is_instance_map: @@ -309,7 +309,7 @@ def comm_get_feature_extractor_inputs( if segmentation_type == "pil": annotations = [Image.fromarray(annotation) for annotation in annotations] - inputs = feature_extractor( + inputs = image_processing( image_inputs, annotations, return_tensors="pt", @@ -326,10 +326,10 @@ def test_with_size_divisor(self): size_divisors = [8, 16, 32] weird_input_sizes = [(407, 802), (582, 1094)] for size_divisor in size_divisors: - feat_extract_dict = {**self.feat_extract_dict, **{"size_divisor": size_divisor}} - feature_extractor = self.feature_extraction_class(**feat_extract_dict) + image_processor_dict = {**self.image_processor_dict, **{"size_divisor": size_divisor}} + image_processing = self.image_processing_class(**image_processor_dict) for weird_input_size in weird_input_sizes: - inputs = feature_extractor([np.ones((3, *weird_input_size))], return_tensors="pt") + inputs = image_processing([np.ones((3, *weird_input_size))], return_tensors="pt") pixel_values = inputs["pixel_values"] # check if divisible self.assertTrue((pixel_values.shape[-1] % size_divisor) == 0) @@ -337,7 +337,7 @@ def test_with_size_divisor(self): def test_call_with_segmentation_maps(self): def common(is_instance_map=False, segmentation_type=None): - inputs = self.comm_get_feature_extractor_inputs( + inputs = self.comm_get_image_processing_inputs( with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type ) @@ -390,10 +390,10 @@ def get_instance_segmentation_and_mapping(annotation): instance_seg2, inst2class2 = get_instance_segmentation_and_mapping(annotation2) # create a feature extractor - feature_extractor = MaskFormerFeatureExtractor(reduce_labels=True, ignore_index=255, size=(512, 512)) + image_processing = MaskFormerImageProcessor(reduce_labels=True, ignore_index=255, size=(512, 512)) # prepare the images and annotations - inputs = feature_extractor( + 
inputs = image_processing( [image1, image2], [instance_seg1, instance_seg2], instance_id_to_semantic_id=[inst2class1, inst2class2], @@ -433,10 +433,10 @@ def test_integration_semantic_segmentation(self): ) # create a feature extractor - feature_extractor = MaskFormerFeatureExtractor(reduce_labels=True, ignore_index=255, size=(512, 512)) + image_processing = MaskFormerImageProcessor(reduce_labels=True, ignore_index=255, size=(512, 512)) # prepare the images and annotations - inputs = feature_extractor( + inputs = image_processing( [image1, image2], [annotation1, annotation2], return_tensors="pt", @@ -490,11 +490,11 @@ def create_panoptic_map(annotation, segments_info): panoptic_map2, inst2class2 = create_panoptic_map(annotation2, segments_info2) # create a feature extractor - feature_extractor = MaskFormerFeatureExtractor(ignore_index=0, do_resize=False) + image_processing = MaskFormerImageProcessor(ignore_index=0, do_resize=False) # prepare the images and annotations pixel_values_list = [np.moveaxis(np.array(image1), -1, 0), np.moveaxis(np.array(image2), -1, 0)] - inputs = feature_extractor.encode_inputs( + inputs = image_processing.encode_inputs( pixel_values_list, [panoptic_map1, panoptic_map2], instance_id_to_semantic_id=[inst2class1, inst2class2], @@ -535,17 +535,17 @@ def test_binary_mask_to_rle(self): self.assertEqual(rle[1], 45) def test_post_process_segmentation(self): - fature_extractor = self.feature_extraction_class(num_labels=self.feature_extract_tester.num_classes) - outputs = self.feature_extract_tester.get_fake_maskformer_outputs() + fature_extractor = self.image_processing_class(num_labels=self.image_processor_tester.num_classes) + outputs = self.image_processor_tester.get_fake_maskformer_outputs() segmentation = fature_extractor.post_process_segmentation(outputs) self.assertEqual( segmentation.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_classes, - self.feature_extract_tester.height, - 
self.feature_extract_tester.width, + self.image_processor_tester.batch_size, + self.image_processor_tester.num_classes, + self.image_processor_tester.height, + self.image_processor_tester.width, ), ) @@ -554,41 +554,41 @@ def test_post_process_segmentation(self): self.assertEqual( segmentation.shape, - (self.feature_extract_tester.batch_size, self.feature_extract_tester.num_classes, *target_size), + (self.image_processor_tester.batch_size, self.image_processor_tester.num_classes, *target_size), ) def test_post_process_semantic_segmentation(self): - fature_extractor = self.feature_extraction_class(num_labels=self.feature_extract_tester.num_classes) - outputs = self.feature_extract_tester.get_fake_maskformer_outputs() + fature_extractor = self.image_processing_class(num_labels=self.image_processor_tester.num_classes) + outputs = self.image_processor_tester.get_fake_maskformer_outputs() segmentation = fature_extractor.post_process_semantic_segmentation(outputs) - self.assertEqual(len(segmentation), self.feature_extract_tester.batch_size) + self.assertEqual(len(segmentation), self.image_processor_tester.batch_size) self.assertEqual( segmentation[0].shape, ( - self.feature_extract_tester.height, - self.feature_extract_tester.width, + self.image_processor_tester.height, + self.image_processor_tester.width, ), ) - target_sizes = [(1, 4) for i in range(self.feature_extract_tester.batch_size)] + target_sizes = [(1, 4) for i in range(self.image_processor_tester.batch_size)] segmentation = fature_extractor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes) self.assertEqual(segmentation[0].shape, target_sizes[0]) def test_post_process_panoptic_segmentation(self): - feature_extractor = self.feature_extraction_class(num_labels=self.feature_extract_tester.num_classes) - outputs = self.feature_extract_tester.get_fake_maskformer_outputs() - segmentation = feature_extractor.post_process_panoptic_segmentation(outputs, threshold=0) + image_processing = 
self.image_processing_class(num_labels=self.image_processor_tester.num_classes) + outputs = self.image_processor_tester.get_fake_maskformer_outputs() + segmentation = image_processing.post_process_panoptic_segmentation(outputs, threshold=0) - self.assertTrue(len(segmentation) == self.feature_extract_tester.batch_size) + self.assertTrue(len(segmentation) == self.image_processor_tester.batch_size) for el in segmentation: self.assertTrue("segmentation" in el) self.assertTrue("segments_info" in el) self.assertEqual(type(el["segments_info"]), list) self.assertEqual( - el["segmentation"].shape, (self.feature_extract_tester.height, self.feature_extract_tester.width) + el["segmentation"].shape, (self.image_processor_tester.height, self.image_processor_tester.width) ) def test_post_process_label_fusing(self): diff --git a/tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py b/tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py index 383f91c554f8..c2e19d2d5472 100644 --- a/tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py +++ b/tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py @@ -31,10 +31,10 @@ if is_vision_available(): from PIL import Image - from transformers import MobileNetV1FeatureExtractor + from transformers import MobileNetV1ImageProcessor -class MobileNetV1FeatureExtractionTester(unittest.TestCase): +class MobileNetV1ImageProcessingTester(unittest.TestCase): def __init__( self, parent, @@ -61,7 +61,7 @@ def __init__( self.do_center_crop = do_center_crop self.crop_size = crop_size - def prepare_feat_extract_dict(self): + def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, @@ -74,21 +74,21 @@ def prepare_feat_extract_dict(self): @require_vision class MobileNetV1FeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): - feature_extraction_class = MobileNetV1FeatureExtractor if is_vision_available() else None + image_processing_class = 
MobileNetV1ImageProcessor if is_vision_available() else None def setUp(self): - self.feature_extract_tester = MobileNetV1FeatureExtractionTester(self) + self.image_processor_tester = MobileNetV1ImageProcessingTester(self) @property - def feat_extract_dict(self): - return self.feature_extract_tester.prepare_feat_extract_dict() + def image_processor_dict(self): + return self.image_processor_tester.prepare_image_processor_dict() - def test_feat_extract_properties(self): - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) - self.assertTrue(hasattr(feature_extractor, "do_resize")) - self.assertTrue(hasattr(feature_extractor, "size")) - self.assertTrue(hasattr(feature_extractor, "do_center_crop")) - self.assertTrue(hasattr(feature_extractor, "center_crop")) + def test_image_processor_properties(self): + image_processing = self.image_processing_class(**self.image_processor_dict) + self.assertTrue(hasattr(image_processing, "do_resize")) + self.assertTrue(hasattr(image_processing, "size")) + self.assertTrue(hasattr(image_processing, "do_center_crop")) + self.assertTrue(hasattr(image_processing, "center_crop")) def test_feat_extract_from_dict_with_kwargs(self): feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) @@ -103,97 +103,97 @@ def test_batch_feature(self): pass def test_call_pil(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = 
image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) def test_call_numpy(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + 
self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) def test_call_pytorch(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = 
image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) diff --git a/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py b/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py index e207932e38e0..425a715cdc8b 100644 --- a/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py +++ b/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py @@ -31,10 +31,10 @@ if is_vision_available(): from PIL import Image - from transformers import MobileNetV2FeatureExtractor + from transformers import MobileNetV2ImageProcessor -class MobileNetV2FeatureExtractionTester(unittest.TestCase): +class MobileNetV2ImageProcessingTester(unittest.TestCase): def __init__( self, parent, @@ -61,7 +61,7 @@ def __init__( self.do_center_crop = do_center_crop self.crop_size = crop_size - def prepare_feat_extract_dict(self): + def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, @@ -74,126 +74,117 @@ def prepare_feat_extract_dict(self): @require_vision class MobileNetV2FeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): - feature_extraction_class = MobileNetV2FeatureExtractor if is_vision_available() else None + image_processing_class = MobileNetV2ImageProcessor if is_vision_available() else None def setUp(self): - self.feature_extract_tester = MobileNetV2FeatureExtractionTester(self) + self.image_processor_tester = MobileNetV2ImageProcessingTester(self) @property - def feat_extract_dict(self): - return self.feature_extract_tester.prepare_feat_extract_dict() + def 
image_processor_dict(self): + return self.image_processor_tester.prepare_image_processor_dict() - def test_feat_extract_properties(self): - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) - self.assertTrue(hasattr(feature_extractor, "do_resize")) - self.assertTrue(hasattr(feature_extractor, "size")) - self.assertTrue(hasattr(feature_extractor, "do_center_crop")) - self.assertTrue(hasattr(feature_extractor, "crop_size")) - - def test_feat_extract_from_dict_with_kwargs(self): - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) - self.assertEqual(feature_extractor.size, {"shortest_edge": 20}) - self.assertEqual(feature_extractor.crop_size, {"height": 18, "width": 18}) - - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict, size=42, crop_size=84) - self.assertEqual(feature_extractor.size, {"shortest_edge": 42}) - self.assertEqual(feature_extractor.crop_size, {"height": 84, "width": 84}) + def test_image_processor_properties(self): + image_processing = self.image_processing_class(**self.image_processor_dict) + self.assertTrue(hasattr(image_processing, "do_resize")) + self.assertTrue(hasattr(image_processing, "size")) + self.assertTrue(hasattr(image_processing, "do_center_crop")) + self.assertTrue(hasattr(image_processing, "center_crop")) def test_batch_feature(self): pass def test_call_pil(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], 
return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) def test_call_numpy(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - 
self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) def test_call_pytorch(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, 
return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) diff --git a/tests/models/mobilevit/test_image_processing_mobilevit.py b/tests/models/mobilevit/test_image_processing_mobilevit.py index a22fc2c1d541..1b3dd2ef2e6b 100644 --- a/tests/models/mobilevit/test_image_processing_mobilevit.py +++ b/tests/models/mobilevit/test_image_processing_mobilevit.py @@ -31,10 +31,10 @@ if is_vision_available(): from PIL import Image - from transformers import MobileViTFeatureExtractor + from transformers import MobileViTImageProcessor -class MobileViTFeatureExtractionTester(unittest.TestCase): +class MobileViTImageProcessingTester(unittest.TestCase): def __init__( self, parent, @@ -63,7 +63,7 @@ def __init__( self.crop_size = crop_size self.do_flip_channel_order = do_flip_channel_order - def prepare_feat_extract_dict(self): + def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, @@ -77,22 +77,22 @@ def prepare_feat_extract_dict(self): @require_vision class MobileViTFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): - feature_extraction_class = MobileViTFeatureExtractor if is_vision_available() else None + image_processing_class = MobileViTImageProcessor if is_vision_available() else None def setUp(self): - self.feature_extract_tester = MobileViTFeatureExtractionTester(self) + self.image_processor_tester = MobileViTImageProcessingTester(self) @property - def feat_extract_dict(self): - return 
self.feature_extract_tester.prepare_feat_extract_dict() + def image_processor_dict(self): + return self.image_processor_tester.prepare_image_processor_dict() - def test_feat_extract_properties(self): - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) - self.assertTrue(hasattr(feature_extractor, "do_resize")) - self.assertTrue(hasattr(feature_extractor, "size")) - self.assertTrue(hasattr(feature_extractor, "do_center_crop")) - self.assertTrue(hasattr(feature_extractor, "center_crop")) - self.assertTrue(hasattr(feature_extractor, "do_flip_channel_order")) + def test_image_processor_properties(self): + image_processing = self.image_processing_class(**self.image_processor_dict) + self.assertTrue(hasattr(image_processing, "do_resize")) + self.assertTrue(hasattr(image_processing, "size")) + self.assertTrue(hasattr(image_processing, "do_center_crop")) + self.assertTrue(hasattr(image_processing, "center_crop")) + self.assertTrue(hasattr(image_processing, "do_flip_channel_order")) def test_feat_extract_from_dict_with_kwargs(self): feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) @@ -107,97 +107,97 @@ def test_batch_feature(self): pass def test_call_pil(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - 
self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) def test_call_numpy(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) # 
Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) def test_call_pytorch(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - 
self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) diff --git a/tests/models/owlvit/test_image_processing_owlvit.py b/tests/models/owlvit/test_image_processing_owlvit.py index bf2cd8d666d2..395ad32d942c 100644 --- a/tests/models/owlvit/test_image_processing_owlvit.py +++ b/tests/models/owlvit/test_image_processing_owlvit.py @@ -31,10 +31,10 @@ if is_vision_available(): from PIL import Image - from transformers import OwlViTFeatureExtractor + from transformers import OwlViTImageProcessor -class OwlViTFeatureExtractionTester(unittest.TestCase): +class OwlViTImageProcessingTester(unittest.TestCase): def __init__( self, parent, @@ -67,7 +67,7 @@ def __init__( self.image_std = image_std self.do_convert_rgb = do_convert_rgb - def prepare_feat_extract_dict(self): + def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, @@ -84,25 +84,25 @@ def prepare_feat_extract_dict(self): @require_vision class OwlViTFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): - feature_extraction_class = OwlViTFeatureExtractor if is_vision_available() else None + image_processing_class = OwlViTImageProcessor if is_vision_available() else None def setUp(self): - self.feature_extract_tester = OwlViTFeatureExtractionTester(self) + self.image_processor_tester = OwlViTImageProcessingTester(self) @property - def feat_extract_dict(self): - return self.feature_extract_tester.prepare_feat_extract_dict() - - def test_feat_extract_properties(self): - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) - self.assertTrue(hasattr(feature_extractor, "do_resize")) - self.assertTrue(hasattr(feature_extractor, "size")) - 
self.assertTrue(hasattr(feature_extractor, "do_center_crop")) - self.assertTrue(hasattr(feature_extractor, "center_crop")) - self.assertTrue(hasattr(feature_extractor, "do_normalize")) - self.assertTrue(hasattr(feature_extractor, "image_mean")) - self.assertTrue(hasattr(feature_extractor, "image_std")) - self.assertTrue(hasattr(feature_extractor, "do_convert_rgb")) + def image_processor_dict(self): + return self.image_processor_tester.prepare_image_processor_dict() + + def test_image_processor_properties(self): + image_processing = self.image_processing_class(**self.image_processor_dict) + self.assertTrue(hasattr(image_processing, "do_resize")) + self.assertTrue(hasattr(image_processing, "size")) + self.assertTrue(hasattr(image_processing, "do_center_crop")) + self.assertTrue(hasattr(image_processing, "center_crop")) + self.assertTrue(hasattr(image_processing, "do_normalize")) + self.assertTrue(hasattr(image_processing, "image_mean")) + self.assertTrue(hasattr(image_processing, "image_std")) + self.assertTrue(hasattr(image_processing, "do_convert_rgb")) def test_feat_extract_from_dict_with_kwargs(self): feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) @@ -114,98 +114,98 @@ def test_feat_extract_from_dict_with_kwargs(self): self.assertEqual(feature_extractor.crop_size, {"height": 84, "width": 84}) def test_call_pil(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + 
encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) def test_call_numpy(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + 
self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) def test_call_pytorch(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = 
image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) diff --git a/tests/models/poolformer/test_image_processing_poolformer.py b/tests/models/poolformer/test_image_processing_poolformer.py index 47e583a3211a..1ac97c7309ac 100644 --- a/tests/models/poolformer/test_image_processing_poolformer.py +++ b/tests/models/poolformer/test_image_processing_poolformer.py @@ -30,10 +30,10 @@ if is_vision_available(): from PIL import Image - from transformers import PoolFormerFeatureExtractor + from transformers import PoolFormerImageProcessor -class PoolFormerFeatureExtractionTester(unittest.TestCase): +class PoolFormerImageProcessingTester(unittest.TestCase): def __init__( self, parent, @@ -64,7 +64,7 @@ def __init__( self.image_mean = image_mean self.image_std = image_std - def prepare_feat_extract_dict(self): + def prepare_image_processor_dict(self): return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, @@ -80,23 +80,23 @@ def prepare_feat_extract_dict(self): @require_vision class PoolFormerFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): - feature_extraction_class = PoolFormerFeatureExtractor if is_vision_available() else None + image_processing_class = PoolFormerImageProcessor if is_vision_available() else None def setUp(self): - self.feature_extract_tester = PoolFormerFeatureExtractionTester(self) + self.image_processor_tester = PoolFormerImageProcessingTester(self) @property - def feat_extract_dict(self): - return self.feature_extract_tester.prepare_feat_extract_dict() - - def 
test_feat_extract_properties(self): - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) - self.assertTrue(hasattr(feature_extractor, "do_resize_and_center_crop")) - self.assertTrue(hasattr(feature_extractor, "size")) - self.assertTrue(hasattr(feature_extractor, "crop_pct")) - self.assertTrue(hasattr(feature_extractor, "do_normalize")) - self.assertTrue(hasattr(feature_extractor, "image_mean")) - self.assertTrue(hasattr(feature_extractor, "image_std")) + def image_processor_dict(self): + return self.image_processor_tester.prepare_image_processor_dict() + + def test_image_processor_properties(self): + image_processing = self.image_processing_class(**self.image_processor_dict) + self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop")) + self.assertTrue(hasattr(image_processing, "size")) + self.assertTrue(hasattr(image_processing, "crop_pct")) + self.assertTrue(hasattr(image_processing, "do_normalize")) + self.assertTrue(hasattr(image_processing, "image_mean")) + self.assertTrue(hasattr(image_processing, "image_std")) def test_feat_extract_from_dict_with_kwargs(self): feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) @@ -111,98 +111,98 @@ def test_batch_feature(self): pass def test_call_pil(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values 
self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) def test_call_numpy(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + 
self.image_processor_tester.crop_size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) def test_call_pytorch(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - 
self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) diff --git a/tests/models/segformer/test_image_processing_segformer.py b/tests/models/segformer/test_image_processing_segformer.py index a104fc2f4835..437a585a7849 100644 --- a/tests/models/segformer/test_image_processing_segformer.py +++ b/tests/models/segformer/test_image_processing_segformer.py @@ -32,10 +32,10 @@ if is_vision_available(): from PIL import Image - from transformers import SegformerFeatureExtractor + from transformers import SegformerImageProcessor -class SegformerFeatureExtractionTester(unittest.TestCase): +class SegformerImageProcessingTester(unittest.TestCase): def __init__( self, parent, @@ -63,7 +63,7 @@ def __init__( self.image_std = image_std self.do_reduce_labels = do_reduce_labels - def prepare_feat_extract_dict(self): + def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, @@ -98,23 +98,23 @@ def prepare_semantic_batch_inputs(): @require_vision class SegformerFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): - feature_extraction_class = SegformerFeatureExtractor if is_vision_available() else None + image_processing_class = SegformerImageProcessor if is_vision_available() else None def setUp(self): - self.feature_extract_tester = SegformerFeatureExtractionTester(self) + self.image_processor_tester = SegformerImageProcessingTester(self) @property - def feat_extract_dict(self): - return self.feature_extract_tester.prepare_feat_extract_dict() - - def test_feat_extract_properties(self): - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) - 
self.assertTrue(hasattr(feature_extractor, "do_resize")) - self.assertTrue(hasattr(feature_extractor, "size")) - self.assertTrue(hasattr(feature_extractor, "do_normalize")) - self.assertTrue(hasattr(feature_extractor, "image_mean")) - self.assertTrue(hasattr(feature_extractor, "image_std")) - self.assertTrue(hasattr(feature_extractor, "do_reduce_labels")) + def image_processor_dict(self): + return self.image_processor_tester.prepare_image_processor_dict() + + def test_image_processor_properties(self): + image_processing = self.image_processing_class(**self.image_processor_dict) + self.assertTrue(hasattr(image_processing, "do_resize")) + self.assertTrue(hasattr(image_processing, "size")) + self.assertTrue(hasattr(image_processing, "do_normalize")) + self.assertTrue(hasattr(image_processing, "image_mean")) + self.assertTrue(hasattr(image_processing, "image_std")) + self.assertTrue(hasattr(image_processing, "do_reduce_labels")) def test_feat_extract_from_dict_with_kwargs(self): feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) @@ -131,128 +131,128 @@ def test_batch_feature(self): pass def test_call_pil(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - 
self.feature_extract_tester.size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) def test_call_numpy(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, 
return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) def test_call_pytorch(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.batch_size, + 
self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) def test_call_segmentation_maps(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) maps = [] for image in image_inputs: self.assertIsInstance(image, torch.Tensor) maps.append(torch.zeros(image.shape[-2:]).long()) # Test not batched input - encoding = feature_extractor(image_inputs[0], maps[0], return_tensors="pt") + encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt") self.assertEqual( encoding["pixel_values"].shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) self.assertEqual( encoding["labels"].shape, ( 1, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) self.assertEqual(encoding["labels"].dtype, torch.long) @@ -260,22 +260,22 @@ def test_call_segmentation_maps(self): self.assertTrue(encoding["labels"].max().item() <= 255) # Test batched - encoding = feature_extractor(image_inputs, maps, return_tensors="pt") + encoding = image_processing(image_inputs, maps, return_tensors="pt") self.assertEqual( encoding["pixel_values"].shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - 
self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) self.assertEqual( encoding["labels"].shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) self.assertEqual(encoding["labels"].dtype, torch.long) @@ -285,22 +285,22 @@ def test_call_segmentation_maps(self): # Test not batched input (PIL images) image, segmentation_map = prepare_semantic_single_inputs() - encoding = feature_extractor(image, segmentation_map, return_tensors="pt") + encoding = image_processing(image, segmentation_map, return_tensors="pt") self.assertEqual( encoding["pixel_values"].shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) self.assertEqual( encoding["labels"].shape, ( 1, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) self.assertEqual(encoding["labels"].dtype, torch.long) @@ -310,22 +310,22 @@ def test_call_segmentation_maps(self): # Test batched input (PIL images) images, segmentation_maps = prepare_semantic_batch_inputs() - encoding = feature_extractor(images, segmentation_maps, return_tensors="pt") + encoding = image_processing(images, segmentation_maps, return_tensors="pt") self.assertEqual( encoding["pixel_values"].shape, ( 2, - self.feature_extract_tester.num_channels, - 
self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) self.assertEqual( encoding["labels"].shape, ( 2, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) self.assertEqual(encoding["labels"].dtype, torch.long) @@ -333,16 +333,16 @@ def test_call_segmentation_maps(self): self.assertTrue(encoding["labels"].max().item() <= 255) def test_reduce_labels(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 image, map = prepare_semantic_single_inputs() - encoding = feature_extractor(image, map, return_tensors="pt") + encoding = image_processing(image, map, return_tensors="pt") self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 150) - feature_extractor.reduce_labels = True - encoding = feature_extractor(image, map, return_tensors="pt") + image_processing.reduce_labels = True + encoding = image_processing(image, map, return_tensors="pt") self.assertTrue(encoding["labels"].min().item() >= 0) self.assertTrue(encoding["labels"].max().item() <= 255) diff --git a/tests/models/videomae/test_image_processing_videomae.py b/tests/models/videomae/test_image_processing_videomae.py index 025c39ef97f8..0a6a004208c6 100644 --- a/tests/models/videomae/test_image_processing_videomae.py +++ b/tests/models/videomae/test_image_processing_videomae.py @@ -31,10 +31,10 @@ if is_vision_available(): from PIL import Image - from transformers import VideoMAEFeatureExtractor + 
from transformers import VideoMAEImageProcessor -class VideoMAEFeatureExtractionTester(unittest.TestCase): +class VideoMAEImageProcessingTester(unittest.TestCase): def __init__( self, parent, @@ -68,7 +68,7 @@ def __init__( self.image_std = image_std self.crop_size = crop_size - def prepare_feat_extract_dict(self): + def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, @@ -83,23 +83,23 @@ def prepare_feat_extract_dict(self): @require_vision class VideoMAEFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): - feature_extraction_class = VideoMAEFeatureExtractor if is_vision_available() else None + image_processing_class = VideoMAEImageProcessor if is_vision_available() else None def setUp(self): - self.feature_extract_tester = VideoMAEFeatureExtractionTester(self) + self.image_processor_tester = VideoMAEImageProcessingTester(self) @property - def feat_extract_dict(self): - return self.feature_extract_tester.prepare_feat_extract_dict() - - def test_feat_extract_properties(self): - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) - self.assertTrue(hasattr(feature_extractor, "image_mean")) - self.assertTrue(hasattr(feature_extractor, "image_std")) - self.assertTrue(hasattr(feature_extractor, "do_normalize")) - self.assertTrue(hasattr(feature_extractor, "do_resize")) - self.assertTrue(hasattr(feature_extractor, "do_center_crop")) - self.assertTrue(hasattr(feature_extractor, "size")) + def image_processor_dict(self): + return self.image_processor_tester.prepare_image_processor_dict() + + def test_image_processor_properties(self): + image_processing = self.image_processing_class(**self.image_processor_dict) + self.assertTrue(hasattr(image_processing, "image_mean")) + self.assertTrue(hasattr(image_processing, "image_std")) + self.assertTrue(hasattr(image_processing, "do_normalize")) + self.assertTrue(hasattr(image_processing, "do_resize")) + 
self.assertTrue(hasattr(image_processing, "do_center_crop")) + self.assertTrue(hasattr(image_processing, "size")) def test_feat_extract_from_dict_with_kwargs(self): feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) @@ -114,106 +114,106 @@ def test_batch_feature(self): pass def test_call_pil(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL videos - video_inputs = prepare_video_inputs(self.feature_extract_tester, equal_resolution=False) + video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], Image.Image) # Test not batched input - encoded_videos = feature_extractor(video_inputs[0], return_tensors="pt").pixel_values + encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( 1, - self.feature_extract_tester.num_frames, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.num_frames, + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) # Test batched - encoded_videos = feature_extractor(video_inputs, return_tensors="pt").pixel_values + encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_frames, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.batch_size, + 
self.image_processor_tester.num_frames, + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) def test_call_numpy(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors - video_inputs = prepare_video_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True) + video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], np.ndarray) # Test not batched input - encoded_videos = feature_extractor(video_inputs[0], return_tensors="pt").pixel_values + encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( 1, - self.feature_extract_tester.num_frames, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.num_frames, + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) # Test batched - encoded_videos = feature_extractor(video_inputs, return_tensors="pt").pixel_values + encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_frames, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_frames, + self.image_processor_tester.num_channels, + 
self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) def test_call_pytorch(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors - video_inputs = prepare_video_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], torch.Tensor) # Test not batched input - encoded_videos = feature_extractor(video_inputs[0], return_tensors="pt").pixel_values + encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( 1, - self.feature_extract_tester.num_frames, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.num_frames, + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + self.image_processor_tester.crop_size["width"], ), ) # Test batched - encoded_videos = feature_extractor(video_inputs, return_tensors="pt").pixel_values + encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_frames, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.crop_size["height"], - self.feature_extract_tester.crop_size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_frames, + self.image_processor_tester.num_channels, + self.image_processor_tester.crop_size["height"], + 
self.image_processor_tester.crop_size["width"], ), ) diff --git a/tests/models/vilt/test_image_processing_vilt.py b/tests/models/vilt/test_image_processing_vilt.py index 5d7be90a7475..86e2e980e684 100644 --- a/tests/models/vilt/test_image_processing_vilt.py +++ b/tests/models/vilt/test_image_processing_vilt.py @@ -31,10 +31,10 @@ if is_vision_available(): from PIL import Image - from transformers import ViltFeatureExtractor + from transformers import ViltImageProcessor -class ViltFeatureExtractionTester(unittest.TestCase): +class ViltImageProcessingTester(unittest.TestCase): def __init__( self, parent, @@ -64,7 +64,7 @@ def __init__( self.image_mean = image_mean self.image_std = image_std - def prepare_feat_extract_dict(self): + def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, @@ -76,7 +76,7 @@ def prepare_feat_extract_dict(self): def get_expected_values(self, image_inputs, batched=False): """ - This function computes the expected height and width when providing images to ViltFeatureExtractor, + This function computes the expected height and width when providing images to ViltImageProcessor, assuming do_resize is set to True with a scalar size and size_divisor. 
""" if not batched: @@ -119,23 +119,23 @@ def get_expected_values(self, image_inputs, batched=False): @require_vision class ViltFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): - feature_extraction_class = ViltFeatureExtractor if is_vision_available() else None + image_processing_class = ViltImageProcessor if is_vision_available() else None def setUp(self): - self.feature_extract_tester = ViltFeatureExtractionTester(self) + self.image_processor_tester = ViltImageProcessingTester(self) @property - def feat_extract_dict(self): - return self.feature_extract_tester.prepare_feat_extract_dict() - - def test_feat_extract_properties(self): - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) - self.assertTrue(hasattr(feature_extractor, "image_mean")) - self.assertTrue(hasattr(feature_extractor, "image_std")) - self.assertTrue(hasattr(feature_extractor, "do_normalize")) - self.assertTrue(hasattr(feature_extractor, "do_resize")) - self.assertTrue(hasattr(feature_extractor, "size")) - self.assertTrue(hasattr(feature_extractor, "size_divisor")) + def image_processor_dict(self): + return self.image_processor_tester.prepare_image_processor_dict() + + def test_image_processor_properties(self): + image_processing = self.image_processing_class(**self.image_processor_dict) + self.assertTrue(hasattr(image_processing, "image_mean")) + self.assertTrue(hasattr(image_processing, "image_std")) + self.assertTrue(hasattr(image_processing, "do_normalize")) + self.assertTrue(hasattr(image_processing, "do_resize")) + self.assertTrue(hasattr(image_processing, "size")) + self.assertTrue(hasattr(image_processing, "size_divisor")) def test_feat_extract_from_dict_with_kwargs(self): feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) @@ -148,110 +148,110 @@ def test_batch_feature(self): pass def test_call_pil(self): - # Initialize feature_extractor - feature_extractor = 
self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs) self.assertEqual( encoded_images.shape, - (1, self.feature_extract_tester.num_channels, expected_height, expected_width), + (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs, batched=True) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True) self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def test_call_numpy(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors - image_inputs 
= prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs) self.assertEqual( encoded_images.shape, - (1, self.feature_extract_tester.num_channels, expected_height, expected_width), + (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs, batched=True) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True) self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def test_call_pytorch(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, 
equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs) self.assertEqual( encoded_images.shape, - (1, self.feature_extract_tester.num_channels, expected_height, expected_width), + (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs, batched=True) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True) self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def test_equivalence_pad_and_create_pixel_mask(self): - # Initialize feature_extractors - feature_extractor_1 = self.feature_extraction_class(**self.feat_extract_dict) - feature_extractor_2 = self.feature_extraction_class(do_resize=False, do_normalize=False, do_rescale=False) + # Initialize image_processings + image_processing_1 = self.image_processing_class(**self.image_processor_dict) + image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False) # create random PyTorch tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, 
torchify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test whether the method "pad_and_return_pixel_mask" and calling the feature extractor return the same tensors - encoded_images_with_method = feature_extractor_1.pad_and_create_pixel_mask(image_inputs, return_tensors="pt") - encoded_images = feature_extractor_2(image_inputs, return_tensors="pt") + encoded_images_with_method = image_processing_1.pad_and_create_pixel_mask(image_inputs, return_tensors="pt") + encoded_images = image_processing_2(image_inputs, return_tensors="pt") self.assertTrue( torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4) diff --git a/tests/models/vit/test_image_processing_vit.py b/tests/models/vit/test_image_processing_vit.py index a0db60887e40..1fc46c7cf922 100644 --- a/tests/models/vit/test_image_processing_vit.py +++ b/tests/models/vit/test_image_processing_vit.py @@ -31,10 +31,10 @@ if is_vision_available(): from PIL import Image - from transformers import ViTFeatureExtractor + from transformers import ViTImageProcessor -class ViTFeatureExtractionTester(unittest.TestCase): +class ViTImageProcessingTester(unittest.TestCase): def __init__( self, parent, @@ -62,7 +62,7 @@ def __init__( self.image_mean = image_mean self.image_std = image_std - def prepare_feat_extract_dict(self): + def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, @@ -76,22 +76,22 @@ def prepare_feat_extract_dict(self): @require_vision class ViTFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): - feature_extraction_class = ViTFeatureExtractor if is_vision_available() else None + image_processing_class = ViTImageProcessor if is_vision_available() else None def setUp(self): - self.feature_extract_tester = ViTFeatureExtractionTester(self) + 
self.image_processor_tester = ViTImageProcessingTester(self) @property - def feat_extract_dict(self): - return self.feature_extract_tester.prepare_feat_extract_dict() + def image_processor_dict(self): + return self.image_processor_tester.prepare_image_processor_dict() - def test_feat_extract_properties(self): - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) - self.assertTrue(hasattr(feature_extractor, "image_mean")) - self.assertTrue(hasattr(feature_extractor, "image_std")) - self.assertTrue(hasattr(feature_extractor, "do_normalize")) - self.assertTrue(hasattr(feature_extractor, "do_resize")) - self.assertTrue(hasattr(feature_extractor, "size")) + def test_image_processor_properties(self): + image_processing = self.image_processing_class(**self.image_processor_dict) + self.assertTrue(hasattr(image_processing, "image_mean")) + self.assertTrue(hasattr(image_processing, "image_std")) + self.assertTrue(hasattr(image_processing, "do_normalize")) + self.assertTrue(hasattr(image_processing, "do_resize")) + self.assertTrue(hasattr(image_processing, "size")) def test_feat_extract_from_dict_with_kwargs(self): feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) @@ -104,97 +104,97 @@ def test_batch_feature(self): pass def test_call_pil(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], 
return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) def test_call_numpy(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + 
self.image_processor_tester.size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) def test_call_pytorch(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - 
self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) diff --git a/tests/models/yolos/test_image_processing_yolos.py b/tests/models/yolos/test_image_processing_yolos.py index 4e22baa4d668..27d4657904cc 100644 --- a/tests/models/yolos/test_image_processing_yolos.py +++ b/tests/models/yolos/test_image_processing_yolos.py @@ -33,10 +33,10 @@ if is_vision_available(): from PIL import Image - from transformers import YolosFeatureExtractor + from transformers import YolosImageProcessor -class YolosFeatureExtractionTester(unittest.TestCase): +class YolosImageProcessingTester(unittest.TestCase): def __init__( self, parent, @@ -69,7 +69,7 @@ def __init__( self.rescale_factor = rescale_factor self.do_pad = do_pad - def prepare_feat_extract_dict(self): + def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, @@ -83,7 +83,7 @@ def prepare_feat_extract_dict(self): def get_expected_values(self, image_inputs, batched=False): """ - This function computes the expected height and width when providing images to YolosFeatureExtractor, + This function computes the expected height and width when providing images to YolosImageProcessor, assuming do_resize is set to True with a scalar size. 
""" if not batched: @@ -117,22 +117,22 @@ def get_expected_values(self, image_inputs, batched=False): @require_vision class YolosFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): - feature_extraction_class = YolosFeatureExtractor if is_vision_available() else None + image_processing_class = YolosImageProcessor if is_vision_available() else None def setUp(self): - self.feature_extract_tester = YolosFeatureExtractionTester(self) + self.image_processor_tester = YolosImageProcessingTester(self) @property - def feat_extract_dict(self): - return self.feature_extract_tester.prepare_feat_extract_dict() + def image_processor_dict(self): + return self.image_processor_tester.prepare_image_processor_dict() - def test_feat_extract_properties(self): - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) - self.assertTrue(hasattr(feature_extractor, "image_mean")) - self.assertTrue(hasattr(feature_extractor, "image_std")) - self.assertTrue(hasattr(feature_extractor, "do_normalize")) - self.assertTrue(hasattr(feature_extractor, "do_resize")) - self.assertTrue(hasattr(feature_extractor, "size")) + def test_image_processor_properties(self): + image_processing = self.image_processing_class(**self.image_processor_dict) + self.assertTrue(hasattr(image_processing, "image_mean")) + self.assertTrue(hasattr(image_processing, "image_std")) + self.assertTrue(hasattr(image_processing, "do_normalize")) + self.assertTrue(hasattr(image_processing, "do_resize")) + self.assertTrue(hasattr(image_processing, "size")) def test_feat_extract_from_dict_with_kwargs(self): feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) @@ -149,115 +149,115 @@ def test_batch_feature(self): pass def test_call_pil(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # 
create random PIL images - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs) self.assertEqual( encoded_images.shape, - (1, self.feature_extract_tester.num_channels, expected_height, expected_width), + (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs, batched=True) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True) - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def test_call_numpy(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, 
equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs) self.assertEqual( encoded_images.shape, - (1, self.feature_extract_tester.num_channels, expected_height, expected_width), + (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs, batched=True) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True) self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def test_call_pytorch(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processing + image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input - encoded_images = 
feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs) self.assertEqual( encoded_images.shape, - (1, self.feature_extract_tester.num_channels, expected_height, expected_width), + (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values - expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs, batched=True) + expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True) self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def test_equivalence_padding(self): - # Initialize feature_extractors - feature_extractor_1 = self.feature_extraction_class(**self.feat_extract_dict) - feature_extractor_2 = self.feature_extraction_class(do_resize=False, do_normalize=False, do_rescale=False) + # Initialize image_processings + image_processing_1 = self.image_processing_class(**self.image_processor_dict) + image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False) # create random PyTorch tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, 
torch.Tensor) # Test whether the method "pad" and calling the feature extractor return the same tensors - encoded_images_with_method = feature_extractor_1.pad(image_inputs, return_tensors="pt") - encoded_images = feature_extractor_2(image_inputs, return_tensors="pt") + encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt") + encoded_images = image_processing_2(image_inputs, return_tensors="pt") self.assertTrue( torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4) @@ -273,8 +273,8 @@ def test_call_pytorch_with_coco_detection_annotations(self): target = {"image_id": 39769, "annotations": target} # encode them - feature_extractor = YolosFeatureExtractor.from_pretrained("hustvl/yolos-small") - encoding = feature_extractor(images=image, annotations=target, return_tensors="pt") + image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small") + encoding = image_processing(images=image, annotations=target, return_tensors="pt") # verify pixel values expected_shape = torch.Size([1, 3, 800, 1066]) @@ -319,8 +319,8 @@ def test_call_pytorch_with_coco_panoptic_annotations(self): masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic") # encode them - feature_extractor = YolosFeatureExtractor(format="coco_panoptic") - encoding = feature_extractor(images=image, annotations=target, masks_path=masks_path, return_tensors="pt") + image_processing = YolosImageProcessor(format="coco_panoptic") + encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt") # verify pixel values expected_shape = torch.Size([1, 3, 800, 1066]) From 04fc9521b55c1df94b43bcab60bf4b67130c6500 Mon Sep 17 00:00:00 2001 From: bofeng huang Date: Sat, 31 Dec 2022 07:13:39 +0100 Subject: [PATCH 06/15] Add generate kwargs to `AutomaticSpeechRecognitionPipeline` (#20952) * Add generate kwargs to AutomaticSpeechRecognitionPipeline * Add test for generation 
kwargs From 84a64109a1c93be7f801b76fb9aff668ca8aafc8 Mon Sep 17 00:00:00 2001 From: amyeroberts <22614925+amyeroberts@users.noreply.github.com> Date: Wed, 4 Jan 2023 14:29:48 +0000 Subject: [PATCH 07/15] Update image processor parameters if creating with kwargs (#20866) * Update parameters if creating with kwargs * Shallow copy to prevent mutating input * Pass all args in constructor dict - warnings in init * Fix typo --- .../test_image_processing_imagegpt.py | 15 +++++++++---- .../test_image_processing_mobilenet_v2.py | 21 +++++++++++++------ 2 files changed, 26 insertions(+), 10 deletions(-) diff --git a/tests/models/imagegpt/test_image_processing_imagegpt.py b/tests/models/imagegpt/test_image_processing_imagegpt.py index b3c916241641..5ecf8f90a9a4 100644 --- a/tests/models/imagegpt/test_image_processing_imagegpt.py +++ b/tests/models/imagegpt/test_image_processing_imagegpt.py @@ -96,10 +96,17 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_normalize")) - def test_image_processor_to_json_string(self): - image_processor = self.image_processing_class(**self.image_processor_dict) - obj = json.loads(image_processor.to_json_string()) - for key, value in self.image_processor_dict.items(): + def test_feat_extract_from_dict_with_kwargs(self): + feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) + self.assertEqual(feature_extractor.size, {"height": 18, "width": 18}) + + feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict, size=42) + self.assertEqual(feature_extractor.size, {"height": 42, "width": 42}) + + def test_feat_extract_to_json_string(self): + feat_extract = self.feature_extraction_class(**self.feat_extract_dict) + obj = json.loads(feat_extract.to_json_string()) + for key, value in self.feat_extract_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(value, obj[key])) else: diff --git 
a/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py b/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py index 425a715cdc8b..70874da299fb 100644 --- a/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py +++ b/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py @@ -83,12 +83,21 @@ def setUp(self): def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() - def test_image_processor_properties(self): - image_processing = self.image_processing_class(**self.image_processor_dict) - self.assertTrue(hasattr(image_processing, "do_resize")) - self.assertTrue(hasattr(image_processing, "size")) - self.assertTrue(hasattr(image_processing, "do_center_crop")) - self.assertTrue(hasattr(image_processing, "center_crop")) + def test_feat_extract_properties(self): + feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + self.assertTrue(hasattr(feature_extractor, "do_resize")) + self.assertTrue(hasattr(feature_extractor, "size")) + self.assertTrue(hasattr(feature_extractor, "do_center_crop")) + self.assertTrue(hasattr(feature_extractor, "crop_size")) + + def test_feat_extract_from_dict_with_kwargs(self): + feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) + self.assertEqual(feature_extractor.size, {"shortest_edge": 20}) + self.assertEqual(feature_extractor.crop_size, {"height": 18, "width": 18}) + + feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict, size=42, crop_size=84) + self.assertEqual(feature_extractor.size, {"shortest_edge": 42}) + self.assertEqual(feature_extractor.crop_size, {"height": 84, "width": 84}) def test_batch_feature(self): pass From 4619ab04320465feca0c829b3bf623b5140f4024 Mon Sep 17 00:00:00 2001 From: Amy Roberts <22614925+amyeroberts@users.noreply.github.com> Date: Thu, 15 Dec 2022 16:52:58 +0000 Subject: [PATCH 08/15] Rename tester class --- 
tests/models/beit/test_image_processing_beit.py | 2 +- .../models/chinese_clip/test_image_processing_chinese_clip.py | 4 ++-- tests/models/clip/test_image_processing_clip.py | 4 ++-- .../test_image_processing_conditional_detr.py | 2 +- tests/models/convnext/test_image_processing_convnext.py | 2 +- .../deformable_detr/test_image_processing_deformable_detr.py | 2 +- tests/models/deit/test_image_processing_deit.py | 2 +- tests/models/detr/test_image_processing_detr.py | 2 +- tests/models/donut/test_image_processing_donut.py | 2 +- tests/models/dpt/test_image_processing_dpt.py | 2 +- tests/models/flava/test_image_processing_flava.py | 2 +- tests/models/glpn/test_image_processing_glpn.py | 2 +- tests/models/imagegpt/test_image_processing_imagegpt.py | 2 +- tests/models/layoutlmv2/test_image_processing_layoutlmv2.py | 2 +- tests/models/layoutlmv3/test_image_processing_layoutlmv3.py | 2 +- tests/models/levit/test_image_processing_levit.py | 2 +- tests/models/maskformer/test_image_processing_maskformer.py | 2 +- .../models/mobilenet_v1/test_image_processing_mobilenet_v1.py | 2 +- .../models/mobilenet_v2/test_image_processing_mobilenet_v2.py | 2 +- tests/models/mobilevit/test_image_processing_mobilevit.py | 2 +- tests/models/owlvit/test_image_processing_owlvit.py | 2 +- tests/models/poolformer/test_image_processing_poolformer.py | 2 +- tests/models/segformer/test_image_processing_segformer.py | 2 +- tests/models/videomae/test_image_processing_videomae.py | 2 +- tests/models/vilt/test_image_processing_vilt.py | 2 +- tests/models/vit/test_image_processing_vit.py | 2 +- tests/models/yolos/test_image_processing_yolos.py | 2 +- 27 files changed, 29 insertions(+), 29 deletions(-) diff --git a/tests/models/beit/test_image_processing_beit.py b/tests/models/beit/test_image_processing_beit.py index d5844afa498b..0d7df5bd32b7 100644 --- a/tests/models/beit/test_image_processing_beit.py +++ b/tests/models/beit/test_image_processing_beit.py @@ -105,7 +105,7 @@ def 
prepare_semantic_batch_inputs(): @require_torch @require_vision -class BeitFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class BeitImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): image_processing_class = BeitImageProcessor if is_vision_available() else None diff --git a/tests/models/chinese_clip/test_image_processing_chinese_clip.py b/tests/models/chinese_clip/test_image_processing_chinese_clip.py index 217d037597ee..9c22d9e8a010 100644 --- a/tests/models/chinese_clip/test_image_processing_chinese_clip.py +++ b/tests/models/chinese_clip/test_image_processing_chinese_clip.py @@ -113,7 +113,7 @@ def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False): @require_torch @require_vision -class ChineseCLIPFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class ChineseCLIPImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None @@ -246,7 +246,7 @@ def test_call_pytorch(self): @require_torch @require_vision -class ChineseCLIPFeatureExtractionTestFourChannels(FeatureExtractionSavingTestMixin, unittest.TestCase): +class ChineseCLIPImageProcessingTestFourChannels(FeatureExtractionSavingTestMixin, unittest.TestCase): image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None diff --git a/tests/models/clip/test_image_processing_clip.py b/tests/models/clip/test_image_processing_clip.py index ef1212a2d952..3fa1b1eba5b7 100644 --- a/tests/models/clip/test_image_processing_clip.py +++ b/tests/models/clip/test_image_processing_clip.py @@ -113,7 +113,7 @@ def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False): @require_torch @require_vision -class CLIPFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class CLIPImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): 
image_processing_class = CLIPImageProcessor if is_vision_available() else None @@ -246,7 +246,7 @@ def test_call_pytorch(self): @require_torch @require_vision -class CLIPFeatureExtractionTestFourChannels(FeatureExtractionSavingTestMixin, unittest.TestCase): +class CLIPImageProcessingTestFourChannels(FeatureExtractionSavingTestMixin, unittest.TestCase): image_processing_class = CLIPImageProcessor if is_vision_available() else None diff --git a/tests/models/conditional_detr/test_image_processing_conditional_detr.py b/tests/models/conditional_detr/test_image_processing_conditional_detr.py index b194f7ba7c4b..9867f8829c3d 100644 --- a/tests/models/conditional_detr/test_image_processing_conditional_detr.py +++ b/tests/models/conditional_detr/test_image_processing_conditional_detr.py @@ -115,7 +115,7 @@ def get_expected_values(self, image_inputs, batched=False): @require_torch @require_vision -class ConditionalDetrFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class ConditionalDetrImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None diff --git a/tests/models/convnext/test_image_processing_convnext.py b/tests/models/convnext/test_image_processing_convnext.py index db0f9276655d..f63f61604d0a 100644 --- a/tests/models/convnext/test_image_processing_convnext.py +++ b/tests/models/convnext/test_image_processing_convnext.py @@ -77,7 +77,7 @@ def prepare_image_processor_dict(self): @require_torch @require_vision -class ConvNextFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class ConvNextImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): image_processing_class = ConvNextImageProcessor if is_vision_available() else None diff --git a/tests/models/deformable_detr/test_image_processing_deformable_detr.py b/tests/models/deformable_detr/test_image_processing_deformable_detr.py index 
70463ded6e12..60a1965fc343 100644 --- a/tests/models/deformable_detr/test_image_processing_deformable_detr.py +++ b/tests/models/deformable_detr/test_image_processing_deformable_detr.py @@ -115,7 +115,7 @@ def get_expected_values(self, image_inputs, batched=False): @require_torch @require_vision -class DeformableDetrFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class DeformableDetrImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None diff --git a/tests/models/deit/test_image_processing_deit.py b/tests/models/deit/test_image_processing_deit.py index dcf6ab6d5548..441960d9b288 100644 --- a/tests/models/deit/test_image_processing_deit.py +++ b/tests/models/deit/test_image_processing_deit.py @@ -82,7 +82,7 @@ def prepare_image_processor_dict(self): @require_torch @require_vision -class DeiTFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class DeiTImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): image_processing_class = DeiTImageProcessor if is_vision_available() else None test_cast_dtype = True diff --git a/tests/models/detr/test_image_processing_detr.py b/tests/models/detr/test_image_processing_detr.py index 617982c7ee58..678addf48e22 100644 --- a/tests/models/detr/test_image_processing_detr.py +++ b/tests/models/detr/test_image_processing_detr.py @@ -115,7 +115,7 @@ def get_expected_values(self, image_inputs, batched=False): @require_torch @require_vision -class DetrFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class DetrImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): image_processing_class = DetrImageProcessor if is_vision_available() else None diff --git a/tests/models/donut/test_image_processing_donut.py b/tests/models/donut/test_image_processing_donut.py index a78686a2ee56..9b2fd36a2d9c 100644 --- 
a/tests/models/donut/test_image_processing_donut.py +++ b/tests/models/donut/test_image_processing_donut.py @@ -82,7 +82,7 @@ def prepare_image_processor_dict(self): @require_torch @require_vision -class DonutFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class DonutImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): image_processing_class = DonutImageProcessor if is_vision_available() else None diff --git a/tests/models/dpt/test_image_processing_dpt.py b/tests/models/dpt/test_image_processing_dpt.py index ead762241d58..85a5b1560ecc 100644 --- a/tests/models/dpt/test_image_processing_dpt.py +++ b/tests/models/dpt/test_image_processing_dpt.py @@ -74,7 +74,7 @@ def prepare_image_processor_dict(self): @require_torch @require_vision -class DPTFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class DPTImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): image_processing_class = DPTImageProcessor if is_vision_available() else None diff --git a/tests/models/flava/test_image_processing_flava.py b/tests/models/flava/test_image_processing_flava.py index 0e0a5617a740..21373f068298 100644 --- a/tests/models/flava/test_image_processing_flava.py +++ b/tests/models/flava/test_image_processing_flava.py @@ -160,7 +160,7 @@ def get_expected_codebook_image_size(self): @require_torch @require_vision -class FlavaFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class FlavaImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): image_processing_class = FlavaImageProcessor if is_vision_available() else None maxDiff = None diff --git a/tests/models/glpn/test_image_processing_glpn.py b/tests/models/glpn/test_image_processing_glpn.py index 07c63ef4fbe0..ac726d348096 100644 --- a/tests/models/glpn/test_image_processing_glpn.py +++ b/tests/models/glpn/test_image_processing_glpn.py @@ -67,7 +67,7 @@ def prepare_image_processor_dict(self): 
@require_torch @require_vision -class GLPNFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class GLPNImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): image_processing_class = GLPNImageProcessor if is_vision_available() else None diff --git a/tests/models/imagegpt/test_image_processing_imagegpt.py b/tests/models/imagegpt/test_image_processing_imagegpt.py index 5ecf8f90a9a4..784f436d0c82 100644 --- a/tests/models/imagegpt/test_image_processing_imagegpt.py +++ b/tests/models/imagegpt/test_image_processing_imagegpt.py @@ -78,7 +78,7 @@ def prepare_image_processor_dict(self): @require_torch @require_vision -class ImageGPTFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class ImageGPTImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): image_processing_class = ImageGPTImageProcessor if is_vision_available() else None diff --git a/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py b/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py index d786203361ea..eec085678952 100644 --- a/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py +++ b/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py @@ -64,7 +64,7 @@ def prepare_image_processor_dict(self): @require_torch @require_pytesseract -class LayoutLMv2FeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class LayoutLMv2ImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): image_processing_class = LayoutLMv2ImageProcessor if is_pytesseract_available() else None diff --git a/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py b/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py index 7268be38de07..c818c03d231e 100644 --- a/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py +++ b/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py @@ -64,7 +64,7 @@ def prepare_image_processor_dict(self): @require_torch 
@require_pytesseract -class LayoutLMv3FeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class LayoutLMv3ImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None diff --git a/tests/models/levit/test_image_processing_levit.py b/tests/models/levit/test_image_processing_levit.py index 5a2f02d7a2c7..eb05d93a2793 100644 --- a/tests/models/levit/test_image_processing_levit.py +++ b/tests/models/levit/test_image_processing_levit.py @@ -81,7 +81,7 @@ def prepare_image_processor_dict(self): @require_torch @require_vision -class LevitFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class LevitImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): image_processing_class = LevitImageProcessor if is_vision_available() else None diff --git a/tests/models/maskformer/test_image_processing_maskformer.py b/tests/models/maskformer/test_image_processing_maskformer.py index 1c455692f16d..cd55a3aba92c 100644 --- a/tests/models/maskformer/test_image_processing_maskformer.py +++ b/tests/models/maskformer/test_image_processing_maskformer.py @@ -131,7 +131,7 @@ def get_fake_maskformer_outputs(self): @require_torch @require_vision -class MaskFormerFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class MaskFormerImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): image_processing_class = MaskFormerImageProcessor if (is_vision_available() and is_torch_available()) else None diff --git a/tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py b/tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py index c2e19d2d5472..e244a5bf7695 100644 --- a/tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py +++ b/tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py @@ -72,7 +72,7 @@ def prepare_image_processor_dict(self): @require_torch 
@require_vision -class MobileNetV1FeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class MobileNetV1ImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None diff --git a/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py b/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py index 70874da299fb..df0a63d4fb5f 100644 --- a/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py +++ b/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py @@ -72,7 +72,7 @@ def prepare_image_processor_dict(self): @require_torch @require_vision -class MobileNetV2FeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class MobileNetV2ImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): image_processing_class = MobileNetV2ImageProcessor if is_vision_available() else None diff --git a/tests/models/mobilevit/test_image_processing_mobilevit.py b/tests/models/mobilevit/test_image_processing_mobilevit.py index 1b3dd2ef2e6b..c94c9a56e88e 100644 --- a/tests/models/mobilevit/test_image_processing_mobilevit.py +++ b/tests/models/mobilevit/test_image_processing_mobilevit.py @@ -75,7 +75,7 @@ def prepare_image_processor_dict(self): @require_torch @require_vision -class MobileViTFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class MobileViTImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): image_processing_class = MobileViTImageProcessor if is_vision_available() else None diff --git a/tests/models/owlvit/test_image_processing_owlvit.py b/tests/models/owlvit/test_image_processing_owlvit.py index 395ad32d942c..9cda128e390c 100644 --- a/tests/models/owlvit/test_image_processing_owlvit.py +++ b/tests/models/owlvit/test_image_processing_owlvit.py @@ -82,7 +82,7 @@ def prepare_image_processor_dict(self): @require_torch @require_vision -class 
OwlViTFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class OwlViTImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): image_processing_class = OwlViTImageProcessor if is_vision_available() else None diff --git a/tests/models/poolformer/test_image_processing_poolformer.py b/tests/models/poolformer/test_image_processing_poolformer.py index 1ac97c7309ac..3fc836cf018b 100644 --- a/tests/models/poolformer/test_image_processing_poolformer.py +++ b/tests/models/poolformer/test_image_processing_poolformer.py @@ -78,7 +78,7 @@ def prepare_image_processor_dict(self): @require_torch @require_vision -class PoolFormerFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class PoolFormerImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): image_processing_class = PoolFormerImageProcessor if is_vision_available() else None diff --git a/tests/models/segformer/test_image_processing_segformer.py b/tests/models/segformer/test_image_processing_segformer.py index 437a585a7849..b99237cfa4e2 100644 --- a/tests/models/segformer/test_image_processing_segformer.py +++ b/tests/models/segformer/test_image_processing_segformer.py @@ -96,7 +96,7 @@ def prepare_semantic_batch_inputs(): @require_torch @require_vision -class SegformerFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class SegformerImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): image_processing_class = SegformerImageProcessor if is_vision_available() else None diff --git a/tests/models/videomae/test_image_processing_videomae.py b/tests/models/videomae/test_image_processing_videomae.py index 0a6a004208c6..fe1d35a1a5a8 100644 --- a/tests/models/videomae/test_image_processing_videomae.py +++ b/tests/models/videomae/test_image_processing_videomae.py @@ -81,7 +81,7 @@ def prepare_image_processor_dict(self): @require_torch @require_vision -class 
VideoMAEFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class VideoMAEImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): image_processing_class = VideoMAEImageProcessor if is_vision_available() else None diff --git a/tests/models/vilt/test_image_processing_vilt.py b/tests/models/vilt/test_image_processing_vilt.py index 86e2e980e684..58119716332e 100644 --- a/tests/models/vilt/test_image_processing_vilt.py +++ b/tests/models/vilt/test_image_processing_vilt.py @@ -117,7 +117,7 @@ def get_expected_values(self, image_inputs, batched=False): @require_torch @require_vision -class ViltFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class ViltImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): image_processing_class = ViltImageProcessor if is_vision_available() else None diff --git a/tests/models/vit/test_image_processing_vit.py b/tests/models/vit/test_image_processing_vit.py index 1fc46c7cf922..8c9326d29537 100644 --- a/tests/models/vit/test_image_processing_vit.py +++ b/tests/models/vit/test_image_processing_vit.py @@ -74,7 +74,7 @@ def prepare_image_processor_dict(self): @require_torch @require_vision -class ViTFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class ViTImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): image_processing_class = ViTImageProcessor if is_vision_available() else None diff --git a/tests/models/yolos/test_image_processing_yolos.py b/tests/models/yolos/test_image_processing_yolos.py index 27d4657904cc..e0019074766b 100644 --- a/tests/models/yolos/test_image_processing_yolos.py +++ b/tests/models/yolos/test_image_processing_yolos.py @@ -115,7 +115,7 @@ def get_expected_values(self, image_inputs, batched=False): @require_torch @require_vision -class YolosFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class YolosImageProcessingTest(FeatureExtractionSavingTestMixin, 
unittest.TestCase): image_processing_class = YolosImageProcessor if is_vision_available() else None From a7be30e5cd755b31b372022a2a9502904018d44e Mon Sep 17 00:00:00 2001 From: Amy Roberts <22614925+amyeroberts@users.noreply.github.com> Date: Wed, 4 Jan 2023 18:35:13 +0100 Subject: [PATCH 09/15] Rebase and tidy up --- .../models/beit/test_image_processing_beit.py | 22 +-- .../models/blip/test_image_processing_blip.py | 146 +++++++++--------- .../test_image_processing_chinese_clip.py | 16 +- .../models/clip/test_image_processing_clip.py | 16 +- .../test_image_processing_conditional_detr.py | 18 +-- .../test_image_processing_convnext.py | 10 +- .../test_image_processing_deformable_detr.py | 18 +-- .../models/deit/test_image_processing_deit.py | 16 +- .../models/detr/test_image_processing_detr.py | 18 +-- .../donut/test_image_processing_donut.py | 14 +- tests/models/dpt/test_image_processing_dpt.py | 10 +- .../flava/test_image_processing_flava.py | 26 ++-- .../test_image_processing_imagegpt.py | 18 +-- .../test_image_processing_layoutlmv2.py | 10 +- .../test_image_processing_layoutlmv3.py | 10 +- .../levit/test_image_processing_levit.py | 16 +- .../test_image_processing_maskformer.py | 32 ++-- .../test_image_processing_mobilenet_v1.py | 16 +- .../test_image_processing_mobilenet_v2.py | 30 ++-- .../test_image_processing_mobilevit.py | 16 +- .../owlvit/test_image_processing_owlvit.py | 16 +- .../test_image_processing_poolformer.py | 16 +- .../test_image_processing_segformer.py | 16 +- .../swin2sr/test_image_processing_swin2sr.py | 54 +++---- .../test_image_processing_videomae.py | 16 +- .../models/vilt/test_image_processing_vilt.py | 12 +- tests/models/vit/test_image_processing_vit.py | 10 +- .../yolos/test_image_processing_yolos.py | 18 +-- 28 files changed, 318 insertions(+), 318 deletions(-) diff --git a/tests/models/beit/test_image_processing_beit.py b/tests/models/beit/test_image_processing_beit.py index 0d7df5bd32b7..06ba688bb359 100644 --- 
a/tests/models/beit/test_image_processing_beit.py +++ b/tests/models/beit/test_image_processing_beit.py @@ -126,18 +126,18 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) - def test_feat_extract_from_dict_with_kwargs(self): - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) - self.assertEqual(feature_extractor.size, {"height": 20, "width": 20}) - self.assertEqual(feature_extractor.crop_size, {"height": 18, "width": 18}) - self.assertEqual(feature_extractor.do_reduce_labels, False) - - feature_extractor = self.feature_extraction_class.from_dict( - self.feat_extract_dict, size=42, crop_size=84, reduce_labels=True + def test_image_processor_from_dict_with_kwargs(self): + image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + self.assertEqual(image_processor.size, {"height": 20, "width": 20}) + self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) + self.assertEqual(image_processor.do_reduce_labels, False) + + image_processor = self.image_processing_class.from_dict( + self.image_proc_dict, size=42, crop_size=84, reduce_labels=True ) - self.assertEqual(feature_extractor.size, {"height": 42, "width": 42}) - self.assertEqual(feature_extractor.crop_size, {"height": 84, "width": 84}) - self.assertEqual(feature_extractor.do_reduce_labels, True) + self.assertEqual(image_processor.size, {"height": 42, "width": 42}) + self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) + self.assertEqual(image_processor.do_reduce_labels, True) def test_batch_feature(self): pass diff --git a/tests/models/blip/test_image_processing_blip.py b/tests/models/blip/test_image_processing_blip.py index ea31038b14ab..ce1298768c34 100644 --- a/tests/models/blip/test_image_processing_blip.py +++ b/tests/models/blip/test_image_processing_blip.py @@ -65,7 +65,7 @@ def __init__( self.do_pad = do_pad 
self.do_convert_rgb = do_convert_rgb - def prepare_feat_extract_dict(self): + def prepare_image_proc_dict(self): return { "do_resize": self.do_resize, "size": self.size, @@ -111,120 +111,120 @@ def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False): @require_vision class BlipImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): - feature_extraction_class = BlipImageProcessor if is_vision_available() else None + image_processing_class = BlipImageProcessor if is_vision_available() else None def setUp(self): - self.feature_extract_tester = BlipImageProcessingTester(self) + self.image_processor_tester = BlipImageProcessingTester(self) @property - def feat_extract_dict(self): - return self.feature_extract_tester.prepare_feat_extract_dict() - - def test_feat_extract_properties(self): - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) - self.assertTrue(hasattr(feature_extractor, "do_resize")) - self.assertTrue(hasattr(feature_extractor, "size")) - self.assertTrue(hasattr(feature_extractor, "do_normalize")) - self.assertTrue(hasattr(feature_extractor, "image_mean")) - self.assertTrue(hasattr(feature_extractor, "image_std")) - self.assertTrue(hasattr(feature_extractor, "do_convert_rgb")) + def image_proc_dict(self): + return self.image_processor_tester.prepare_image_proc_dict() + + def test_image_processor_properties(self): + image_processor = self.image_processing_class(**self.image_proc_dict) + self.assertTrue(hasattr(image_processor, "do_resize")) + self.assertTrue(hasattr(image_processor, "size")) + self.assertTrue(hasattr(image_processor, "do_normalize")) + self.assertTrue(hasattr(image_processor, "image_mean")) + self.assertTrue(hasattr(image_processor, "image_std")) + self.assertTrue(hasattr(image_processor, "do_convert_rgb")) def test_batch_feature(self): pass def test_call_pil(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + 
# Initialize image_processor + image_processor = self.image_processing_class(**self.image_proc_dict) # create random PIL images - image_inputs = self.feature_extract_tester.prepare_inputs(equal_resolution=False) + image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) def test_call_numpy(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processor + image_processor = self.image_processing_class(**self.image_proc_dict) # create random numpy tensors - image_inputs = self.feature_extract_tester.prepare_inputs(equal_resolution=False, numpify=True) + image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, 
np.ndarray) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) def test_call_pytorch(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processor + image_processor = self.image_processing_class(**self.image_proc_dict) # create random PyTorch tensors - image_inputs = self.feature_extract_tester.prepare_inputs(equal_resolution=False, torchify=True) + image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - 
self.feature_extract_tester.size["width"], + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.batch_size, + self.image_processor_tester.num_channels, + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) @@ -233,56 +233,56 @@ def test_call_pytorch(self): @require_vision class BlipImageProcessingTestFourChannels(FeatureExtractionSavingTestMixin, unittest.TestCase): - feature_extraction_class = BlipImageProcessor if is_vision_available() else None + image_processing_class = BlipImageProcessor if is_vision_available() else None def setUp(self): - self.feature_extract_tester = BlipImageProcessingTester(self, num_channels=4) + self.image_processor_tester = BlipImageProcessingTester(self, num_channels=4) self.expected_encoded_image_num_channels = 3 @property - def feat_extract_dict(self): - return self.feature_extract_tester.prepare_feat_extract_dict() - - def test_feat_extract_properties(self): - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) - self.assertTrue(hasattr(feature_extractor, "do_resize")) - self.assertTrue(hasattr(feature_extractor, "size")) - self.assertTrue(hasattr(feature_extractor, "do_normalize")) - self.assertTrue(hasattr(feature_extractor, "image_mean")) - self.assertTrue(hasattr(feature_extractor, "image_std")) - self.assertTrue(hasattr(feature_extractor, "do_convert_rgb")) + def image_proc_dict(self): + return self.image_processor_tester.prepare_image_proc_dict() + + 
def test_image_processor_properties(self): + image_processor = self.image_processing_class(**self.image_proc_dict) + self.assertTrue(hasattr(image_processor, "do_resize")) + self.assertTrue(hasattr(image_processor, "size")) + self.assertTrue(hasattr(image_processor, "do_normalize")) + self.assertTrue(hasattr(image_processor, "image_mean")) + self.assertTrue(hasattr(image_processor, "image_std")) + self.assertTrue(hasattr(image_processor, "do_convert_rgb")) def test_batch_feature(self): pass def test_call_pil_four_channels(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processor + image_processor = self.image_processing_class(**self.image_proc_dict) # create random PIL images - image_inputs = self.feature_extract_tester.prepare_inputs(equal_resolution=False) + image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.expected_encoded_image_num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.size["height"], + self.image_processor_tester.size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, + self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_processor_tester.size["height"], + 
self.image_processor_tester.size["width"], ), ) diff --git a/tests/models/chinese_clip/test_image_processing_chinese_clip.py b/tests/models/chinese_clip/test_image_processing_chinese_clip.py index 9c22d9e8a010..d1a6bbc9eb32 100644 --- a/tests/models/chinese_clip/test_image_processing_chinese_clip.py +++ b/tests/models/chinese_clip/test_image_processing_chinese_clip.py @@ -135,14 +135,14 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_convert_rgb")) - def test_feat_extract_from_dict_with_kwargs(self): - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) - self.assertEqual(feature_extractor.size, {"height": 224, "width": 224}) - self.assertEqual(feature_extractor.crop_size, {"height": 18, "width": 18}) - - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict, size=42, crop_size=84) - self.assertEqual(feature_extractor.size, {"shortest_edge": 42}) - self.assertEqual(feature_extractor.crop_size, {"height": 84, "width": 84}) + def test_image_processor_from_dict_with_kwargs(self): + image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + self.assertEqual(image_processor.size, {"height": 224, "width": 224}) + self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) + + image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42, crop_size=84) + self.assertEqual(image_processor.size, {"shortest_edge": 42}) + self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) def test_batch_feature(self): pass diff --git a/tests/models/clip/test_image_processing_clip.py b/tests/models/clip/test_image_processing_clip.py index 3fa1b1eba5b7..d2d210d2faa6 100644 --- a/tests/models/clip/test_image_processing_clip.py +++ b/tests/models/clip/test_image_processing_clip.py @@ -135,14 +135,14 @@ def test_image_processor_properties(self): 
self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_convert_rgb")) - def test_feat_extract_from_dict_with_kwargs(self): - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) - self.assertEqual(feature_extractor.size, {"shortest_edge": 20}) - self.assertEqual(feature_extractor.crop_size, {"height": 18, "width": 18}) - - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict, size=42, crop_size=84) - self.assertEqual(feature_extractor.size, {"shortest_edge": 42}) - self.assertEqual(feature_extractor.crop_size, {"height": 84, "width": 84}) + def test_image_processor_from_dict_with_kwargs(self): + image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + self.assertEqual(image_processor.size, {"shortest_edge": 20}) + self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) + + image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42, crop_size=84) + self.assertEqual(image_processor.size, {"shortest_edge": 42}) + self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) def test_batch_feature(self): pass diff --git a/tests/models/conditional_detr/test_image_processing_conditional_detr.py b/tests/models/conditional_detr/test_image_processing_conditional_detr.py index 9867f8829c3d..9efd73211bfd 100644 --- a/tests/models/conditional_detr/test_image_processing_conditional_detr.py +++ b/tests/models/conditional_detr/test_image_processing_conditional_detr.py @@ -134,16 +134,16 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) - def test_feat_extract_from_dict_with_kwargs(self): - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) - self.assertEqual(feature_extractor.size, {"shortest_edge": 18, "longest_edge": 1333}) - 
self.assertEqual(feature_extractor.do_pad, True) + def test_image_processor_from_dict_with_kwargs(self): + image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333}) + self.assertEqual(image_processor.do_pad, True) - feature_extractor = self.feature_extraction_class.from_dict( - self.feat_extract_dict, size=42, max_size=84, pad_and_return_pixel_mask=False + image_processor = self.image_processing_class.from_dict( + self.image_proc_dict, size=42, max_size=84, pad_and_return_pixel_mask=False ) - self.assertEqual(feature_extractor.size, {"shortest_edge": 42, "longest_edge": 84}) - self.assertEqual(feature_extractor.do_pad, False) + self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84}) + self.assertEqual(image_processor.do_pad, False) def test_batch_feature(self): pass @@ -255,7 +255,7 @@ def test_equivalence_pad_and_create_pixel_mask(self): for image in image_inputs: self.assertIsInstance(image, torch.Tensor) - # Test whether the method "pad_and_return_pixel_mask" and calling the feature extractor return the same tensors + # Test whether the method "pad_and_return_pixel_mask" and calling the image processor return the same tensors encoded_images_with_method = image_processing_1.pad_and_create_pixel_mask(image_inputs, return_tensors="pt") encoded_images = image_processing_2(image_inputs, return_tensors="pt") diff --git a/tests/models/convnext/test_image_processing_convnext.py b/tests/models/convnext/test_image_processing_convnext.py index f63f61604d0a..0aecf2d20d25 100644 --- a/tests/models/convnext/test_image_processing_convnext.py +++ b/tests/models/convnext/test_image_processing_convnext.py @@ -97,12 +97,12 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) - def test_feat_extract_from_dict_with_kwargs(self): - feature_extractor 
= self.feature_extraction_class.from_dict(self.feat_extract_dict) - self.assertEqual(feature_extractor.size, {"shortest_edge": 20}) + def test_image_processor_from_dict_with_kwargs(self): + image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + self.assertEqual(image_processor.size, {"shortest_edge": 20}) - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict, size=42) - self.assertEqual(feature_extractor.size, {"shortest_edge": 42}) + image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42) + self.assertEqual(image_processor.size, {"shortest_edge": 42}) def test_batch_feature(self): pass diff --git a/tests/models/deformable_detr/test_image_processing_deformable_detr.py b/tests/models/deformable_detr/test_image_processing_deformable_detr.py index 60a1965fc343..0d2068b37037 100644 --- a/tests/models/deformable_detr/test_image_processing_deformable_detr.py +++ b/tests/models/deformable_detr/test_image_processing_deformable_detr.py @@ -136,16 +136,16 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "do_pad")) self.assertTrue(hasattr(image_processing, "size")) - def test_feat_extract_from_dict_with_kwargs(self): - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) - self.assertEqual(feature_extractor.size, {"shortest_edge": 18, "longest_edge": 1333}) - self.assertEqual(feature_extractor.do_pad, True) + def test_image_processor_from_dict_with_kwargs(self): + image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333}) + self.assertEqual(image_processor.do_pad, True) - feature_extractor = self.feature_extraction_class.from_dict( - self.feat_extract_dict, size=42, max_size=84, pad_and_return_pixel_mask=False + image_processor = self.image_processing_class.from_dict( + self.image_proc_dict, size=42, max_size=84, 
pad_and_return_pixel_mask=False ) - self.assertEqual(feature_extractor.size, {"shortest_edge": 42, "longest_edge": 84}) - self.assertEqual(feature_extractor.do_pad, False) + self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84}) + self.assertEqual(image_processor.do_pad, False) def test_batch_feature(self): pass @@ -258,7 +258,7 @@ def test_equivalence_pad_and_create_pixel_mask(self): for image in image_inputs: self.assertIsInstance(image, torch.Tensor) - # Test whether the method "pad_and_return_pixel_mask" and calling the feature extractor return the same tensors + # Test whether the method "pad_and_return_pixel_mask" and calling the image processor return the same tensors encoded_images_with_method = image_processing_1.pad_and_create_pixel_mask(image_inputs, return_tensors="pt") encoded_images = image_processing_2(image_inputs, return_tensors="pt") diff --git a/tests/models/deit/test_image_processing_deit.py b/tests/models/deit/test_image_processing_deit.py index 441960d9b288..933c781f3e63 100644 --- a/tests/models/deit/test_image_processing_deit.py +++ b/tests/models/deit/test_image_processing_deit.py @@ -104,14 +104,14 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) - def test_feat_extract_from_dict_with_kwargs(self): - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) - self.assertEqual(feature_extractor.size, {"height": 20, "width": 20}) - self.assertEqual(feature_extractor.crop_size, {"height": 18, "width": 18}) - - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict, size=42, crop_size=84) - self.assertEqual(feature_extractor.size, {"height": 42, "width": 42}) - self.assertEqual(feature_extractor.crop_size, {"height": 84, "width": 84}) + def test_image_processor_from_dict_with_kwargs(self): + image_processor = 
self.image_processing_class.from_dict(self.image_proc_dict) + self.assertEqual(image_processor.size, {"height": 20, "width": 20}) + self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) + + image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42, crop_size=84) + self.assertEqual(image_processor.size, {"height": 42, "width": 42}) + self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) def test_batch_feature(self): pass diff --git a/tests/models/detr/test_image_processing_detr.py b/tests/models/detr/test_image_processing_detr.py index 678addf48e22..019d5aa99f44 100644 --- a/tests/models/detr/test_image_processing_detr.py +++ b/tests/models/detr/test_image_processing_detr.py @@ -137,16 +137,16 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_pad")) - def test_feat_extract_from_dict_with_kwargs(self): - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) - self.assertEqual(feature_extractor.size, {"shortest_edge": 18, "longest_edge": 1333}) - self.assertEqual(feature_extractor.do_pad, True) + def test_image_processor_from_dict_with_kwargs(self): + image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333}) + self.assertEqual(image_processor.do_pad, True) - feature_extractor = self.feature_extraction_class.from_dict( - self.feat_extract_dict, size=42, max_size=84, pad_and_return_pixel_mask=False + image_processor = self.image_processing_class.from_dict( + self.image_proc_dict, size=42, max_size=84, pad_and_return_pixel_mask=False ) - self.assertEqual(feature_extractor.size, {"shortest_edge": 42, "longest_edge": 84}) - self.assertEqual(feature_extractor.do_pad, False) + self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84}) + 
self.assertEqual(image_processor.do_pad, False) def test_batch_feature(self): pass @@ -258,7 +258,7 @@ def test_equivalence_pad_and_create_pixel_mask(self): for image in image_inputs: self.assertIsInstance(image, torch.Tensor) - # Test whether the method "pad_and_return_pixel_mask" and calling the feature extractor return the same tensors + # Test whether the method "pad_and_return_pixel_mask" and calling the image processor return the same tensors encoded_images_with_method = image_processing_1.pad_and_create_pixel_mask(image_inputs, return_tensors="pt") encoded_images = image_processing_2(image_inputs, return_tensors="pt") diff --git a/tests/models/donut/test_image_processing_donut.py b/tests/models/donut/test_image_processing_donut.py index 9b2fd36a2d9c..ab96204a3660 100644 --- a/tests/models/donut/test_image_processing_donut.py +++ b/tests/models/donut/test_image_processing_donut.py @@ -104,16 +104,16 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) - def test_feat_extract_from_dict_with_kwargs(self): - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) - self.assertEqual(feature_extractor.size, {"height": 18, "width": 20}) + def test_image_processor_from_dict_with_kwargs(self): + image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + self.assertEqual(image_processor.size, {"height": 18, "width": 20}) - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict, size=42) - self.assertEqual(feature_extractor.size, {"height": 42, "width": 42}) + image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42) + self.assertEqual(image_processor.size, {"height": 42, "width": 42}) # Previous config had dimensions in (width, height) order - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict, size=(42, 84)) - 
self.assertEqual(feature_extractor.size, {"height": 84, "width": 42}) + image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=(42, 84)) + self.assertEqual(image_processor.size, {"height": 84, "width": 42}) def test_batch_feature(self): pass diff --git a/tests/models/dpt/test_image_processing_dpt.py b/tests/models/dpt/test_image_processing_dpt.py index 85a5b1560ecc..ab0c84eb9e91 100644 --- a/tests/models/dpt/test_image_processing_dpt.py +++ b/tests/models/dpt/test_image_processing_dpt.py @@ -93,12 +93,12 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) - def test_feat_extract_from_dict_with_kwargs(self): - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) - self.assertEqual(feature_extractor.size, {"height": 18, "width": 18}) + def test_image_processor_from_dict_with_kwargs(self): + image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + self.assertEqual(image_processor.size, {"height": 18, "width": 18}) - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict, size=42) - self.assertEqual(feature_extractor.size, {"height": 42, "width": 42}) + image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42) + self.assertEqual(image_processor.size, {"height": 42, "width": 42}) def test_call_pil(self): # Initialize image_processing diff --git a/tests/models/flava/test_image_processing_flava.py b/tests/models/flava/test_image_processing_flava.py index 21373f068298..4b6210301916 100644 --- a/tests/models/flava/test_image_processing_flava.py +++ b/tests/models/flava/test_image_processing_flava.py @@ -194,20 +194,20 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "codebook_image_mean")) self.assertTrue(hasattr(image_processing, "codebook_image_std")) - def 
test_feat_extract_from_dict_with_kwargs(self): - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) - self.assertEqual(feature_extractor.size, {"height": 224, "width": 224}) - self.assertEqual(feature_extractor.crop_size, {"height": 224, "width": 224}) - self.assertEqual(feature_extractor.codebook_size, {"height": 112, "width": 112}) - self.assertEqual(feature_extractor.codebook_crop_size, {"height": 112, "width": 112}) - - feature_extractor = self.feature_extraction_class.from_dict( - self.feat_extract_dict, size=42, crop_size=84, codebook_size=33, codebook_crop_size=66 + def test_image_processor_from_dict_with_kwargs(self): + image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + self.assertEqual(image_processor.size, {"height": 224, "width": 224}) + self.assertEqual(image_processor.crop_size, {"height": 224, "width": 224}) + self.assertEqual(image_processor.codebook_size, {"height": 112, "width": 112}) + self.assertEqual(image_processor.codebook_crop_size, {"height": 112, "width": 112}) + + image_processor = self.image_processing_class.from_dict( + self.image_proc_dict, size=42, crop_size=84, codebook_size=33, codebook_crop_size=66 ) - self.assertEqual(feature_extractor.size, {"height": 42, "width": 42}) - self.assertEqual(feature_extractor.crop_size, {"height": 84, "width": 84}) - self.assertEqual(feature_extractor.codebook_size, {"height": 33, "width": 33}) - self.assertEqual(feature_extractor.codebook_crop_size, {"height": 66, "width": 66}) + self.assertEqual(image_processor.size, {"height": 42, "width": 42}) + self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) + self.assertEqual(image_processor.codebook_size, {"height": 33, "width": 33}) + self.assertEqual(image_processor.codebook_crop_size, {"height": 66, "width": 66}) def test_batch_feature(self): pass diff --git a/tests/models/imagegpt/test_image_processing_imagegpt.py b/tests/models/imagegpt/test_image_processing_imagegpt.py 
index 784f436d0c82..1ca38424d311 100644 --- a/tests/models/imagegpt/test_image_processing_imagegpt.py +++ b/tests/models/imagegpt/test_image_processing_imagegpt.py @@ -96,17 +96,17 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_normalize")) - def test_feat_extract_from_dict_with_kwargs(self): - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) - self.assertEqual(feature_extractor.size, {"height": 18, "width": 18}) + def test_image_processor_from_dict_with_kwargs(self): + image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + self.assertEqual(image_processor.size, {"height": 18, "width": 18}) - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict, size=42) - self.assertEqual(feature_extractor.size, {"height": 42, "width": 42}) + image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42) + self.assertEqual(image_processor.size, {"height": 42, "width": 42}) - def test_feat_extract_to_json_string(self): - feat_extract = self.feature_extraction_class(**self.feat_extract_dict) - obj = json.loads(feat_extract.to_json_string()) - for key, value in self.feat_extract_dict.items(): + def test_image_processor_to_json_string(self): + image_processor = self.image_processing_class(**self.image_proc_dict) + obj = json.loads(image_processor.to_json_string()) + for key, value in self.image_proc_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(value, obj[key])) else: diff --git a/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py b/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py index eec085678952..f700b0a4d23b 100644 --- a/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py +++ b/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py @@ -81,12 +81,12 @@ def test_image_processor_properties(self): 
self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "apply_ocr")) - def test_feat_extract_from_dict_with_kwargs(self): - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) - self.assertEqual(feature_extractor.size, {"height": 18, "width": 18}) + def test_image_processor_from_dict_with_kwargs(self): + image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + self.assertEqual(image_processor.size, {"height": 18, "width": 18}) - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict, size=42) - self.assertEqual(feature_extractor.size, {"height": 42, "width": 42}) + image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42) + self.assertEqual(image_processor.size, {"height": 42, "width": 42}) def test_batch_feature(self): pass diff --git a/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py b/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py index c818c03d231e..67e1f39978b6 100644 --- a/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py +++ b/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py @@ -81,12 +81,12 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "apply_ocr")) - def test_feat_extract_from_dict_with_kwargs(self): - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) - self.assertEqual(feature_extractor.size, {"height": 18, "width": 18}) + def test_image_processor_from_dict_with_kwargs(self): + image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + self.assertEqual(image_processor.size, {"height": 18, "width": 18}) - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict, size=42) - self.assertEqual(feature_extractor.size, {"height": 42, "width": 42}) + image_processor = 
self.image_processing_class.from_dict(self.image_proc_dict, size=42) + self.assertEqual(image_processor.size, {"height": 42, "width": 42}) def test_batch_feature(self): pass diff --git a/tests/models/levit/test_image_processing_levit.py b/tests/models/levit/test_image_processing_levit.py index eb05d93a2793..c64a917142e3 100644 --- a/tests/models/levit/test_image_processing_levit.py +++ b/tests/models/levit/test_image_processing_levit.py @@ -101,14 +101,14 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "do_center_crop")) self.assertTrue(hasattr(image_processing, "size")) - def test_feat_extract_from_dict_with_kwargs(self): - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) - self.assertEqual(feature_extractor.size, {"shortest_edge": 18}) - self.assertEqual(feature_extractor.crop_size, {"height": 18, "width": 18}) - - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict, size=42, crop_size=84) - self.assertEqual(feature_extractor.size, {"shortest_edge": 42}) - self.assertEqual(feature_extractor.crop_size, {"height": 84, "width": 84}) + def test_image_processor_from_dict_with_kwargs(self): + image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + self.assertEqual(image_processor.size, {"shortest_edge": 18}) + self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) + + image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42, crop_size=84) + self.assertEqual(image_processor.size, {"shortest_edge": 42}) + self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) def test_batch_feature(self): pass diff --git a/tests/models/maskformer/test_image_processing_maskformer.py b/tests/models/maskformer/test_image_processing_maskformer.py index cd55a3aba92c..e480c1f128e2 100644 --- a/tests/models/maskformer/test_image_processing_maskformer.py +++ 
b/tests/models/maskformer/test_image_processing_maskformer.py @@ -153,16 +153,16 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "ignore_index")) self.assertTrue(hasattr(image_processing, "num_labels")) - def test_feat_extract_from_dict_with_kwargs(self): - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) - self.assertEqual(feature_extractor.size, {"shortest_edge": 32, "longest_edge": 1333}) - self.assertEqual(feature_extractor.size_divisor, 0) + def test_image_processor_from_dict_with_kwargs(self): + image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + self.assertEqual(image_processor.size, {"shortest_edge": 32, "longest_edge": 1333}) + self.assertEqual(image_processor.size_divisor, 0) - feature_extractor = self.feature_extraction_class.from_dict( - self.feat_extract_dict, size=42, max_size=84, size_divisibility=8 + image_processor = self.image_processing_class.from_dict( + self.image_proc_dict, size=42, max_size=84, size_divisibility=8 ) - self.assertEqual(feature_extractor.size, {"shortest_edge": 42, "longest_edge": 84}) - self.assertEqual(feature_extractor.size_divisor, 8) + self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84}) + self.assertEqual(image_processor.size_divisor, 8) def test_batch_feature(self): pass @@ -276,7 +276,7 @@ def test_equivalence_pad_and_create_pixel_mask(self): for image in image_inputs: self.assertIsInstance(image, torch.Tensor) - # Test whether the method "pad_and_return_pixel_mask" and calling the feature extractor return the same tensors + # Test whether the method "pad_and_return_pixel_mask" and calling the image processor return the same tensors encoded_images_with_method = image_processing_1.encode_inputs(image_inputs, return_tensors="pt") encoded_images = image_processing_2(image_inputs, return_tensors="pt") @@ -389,7 +389,7 @@ def get_instance_segmentation_and_mapping(annotation): instance_seg1, 
inst2class1 = get_instance_segmentation_and_mapping(annotation1) instance_seg2, inst2class2 = get_instance_segmentation_and_mapping(annotation2) - # create a feature extractor + # create an image processor image_processing = MaskFormerImageProcessor(reduce_labels=True, ignore_index=255, size=(512, 512)) # prepare the images and annotations @@ -432,7 +432,7 @@ def test_integration_semantic_segmentation(self): hf_hub_download(repo_id=repo_id, filename="semantic_segmentation_annotation_2.png", repo_type="dataset") ) - # create a feature extractor + # create an image processor image_processing = MaskFormerImageProcessor(reduce_labels=True, ignore_index=255, size=(512, 512)) # prepare the images and annotations @@ -489,7 +489,7 @@ def create_panoptic_map(annotation, segments_info): panoptic_map1, inst2class1 = create_panoptic_map(annotation1, segments_info1) panoptic_map2, inst2class2 = create_panoptic_map(annotation2, segments_info2) - # create a feature extractor + # create an image processor image_processing = MaskFormerImageProcessor(ignore_index=0, do_resize=False) # prepare the images and annotations @@ -592,15 +592,15 @@ def test_post_process_panoptic_segmentation(self): ) def test_post_process_label_fusing(self): - feature_extractor = self.feature_extraction_class(num_labels=self.feature_extract_tester.num_classes) - outputs = self.feature_extract_tester.get_fake_maskformer_outputs() + image_processor = self.image_processing_class(num_labels=self.image_processor_tester.num_classes) + outputs = self.image_processor_tester.get_fake_maskformer_outputs() - segmentation = feature_extractor.post_process_panoptic_segmentation( + segmentation = image_processor.post_process_panoptic_segmentation( outputs, threshold=0, mask_threshold=0, overlap_mask_area_threshold=0 ) unfused_segments = [el["segments_info"] for el in segmentation] - fused_segmentation = feature_extractor.post_process_panoptic_segmentation( + fused_segmentation =
image_processor.post_process_panoptic_segmentation( outputs, threshold=0, mask_threshold=0, overlap_mask_area_threshold=0, label_ids_to_fuse={1} ) fused_segments = [el["segments_info"] for el in fused_segmentation] diff --git a/tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py b/tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py index e244a5bf7695..c40920607a82 100644 --- a/tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py +++ b/tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py @@ -90,14 +90,14 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "do_center_crop")) self.assertTrue(hasattr(image_processing, "center_crop")) - def test_feat_extract_from_dict_with_kwargs(self): - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) - self.assertEqual(feature_extractor.size, {"shortest_edge": 20}) - self.assertEqual(feature_extractor.crop_size, {"height": 18, "width": 18}) - - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict, size=42, crop_size=84) - self.assertEqual(feature_extractor.size, {"shortest_edge": 42}) - self.assertEqual(feature_extractor.crop_size, {"height": 84, "width": 84}) + def test_image_processor_from_dict_with_kwargs(self): + image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + self.assertEqual(image_processor.size, {"shortest_edge": 20}) + self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) + + image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42, crop_size=84) + self.assertEqual(image_processor.size, {"shortest_edge": 42}) + self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) def test_batch_feature(self): pass diff --git a/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py b/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py index df0a63d4fb5f..57d6ae6e7635 100644 --- 
a/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py +++ b/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py @@ -83,21 +83,21 @@ def setUp(self): def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() - def test_feat_extract_properties(self): - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) - self.assertTrue(hasattr(feature_extractor, "do_resize")) - self.assertTrue(hasattr(feature_extractor, "size")) - self.assertTrue(hasattr(feature_extractor, "do_center_crop")) - self.assertTrue(hasattr(feature_extractor, "crop_size")) - - def test_feat_extract_from_dict_with_kwargs(self): - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) - self.assertEqual(feature_extractor.size, {"shortest_edge": 20}) - self.assertEqual(feature_extractor.crop_size, {"height": 18, "width": 18}) - - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict, size=42, crop_size=84) - self.assertEqual(feature_extractor.size, {"shortest_edge": 42}) - self.assertEqual(feature_extractor.crop_size, {"height": 84, "width": 84}) + def test_image_processor_properties(self): + image_processor = self.image_processing_class(**self.image_proc_dict) + self.assertTrue(hasattr(image_processor, "do_resize")) + self.assertTrue(hasattr(image_processor, "size")) + self.assertTrue(hasattr(image_processor, "do_center_crop")) + self.assertTrue(hasattr(image_processor, "crop_size")) + + def test_image_processor_from_dict_with_kwargs(self): + image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + self.assertEqual(image_processor.size, {"shortest_edge": 20}) + self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) + + image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42, crop_size=84) + self.assertEqual(image_processor.size, {"shortest_edge": 42}) + 
self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) def test_batch_feature(self): pass diff --git a/tests/models/mobilevit/test_image_processing_mobilevit.py b/tests/models/mobilevit/test_image_processing_mobilevit.py index c94c9a56e88e..ed58278b94db 100644 --- a/tests/models/mobilevit/test_image_processing_mobilevit.py +++ b/tests/models/mobilevit/test_image_processing_mobilevit.py @@ -94,14 +94,14 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "center_crop")) self.assertTrue(hasattr(image_processing, "do_flip_channel_order")) - def test_feat_extract_from_dict_with_kwargs(self): - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) - self.assertEqual(feature_extractor.size, {"shortest_edge": 20}) - self.assertEqual(feature_extractor.crop_size, {"height": 18, "width": 18}) - - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict, size=42, crop_size=84) - self.assertEqual(feature_extractor.size, {"shortest_edge": 42}) - self.assertEqual(feature_extractor.crop_size, {"height": 84, "width": 84}) + def test_image_processor_from_dict_with_kwargs(self): + image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + self.assertEqual(image_processor.size, {"shortest_edge": 20}) + self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) + + image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42, crop_size=84) + self.assertEqual(image_processor.size, {"shortest_edge": 42}) + self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) def test_batch_feature(self): pass diff --git a/tests/models/owlvit/test_image_processing_owlvit.py b/tests/models/owlvit/test_image_processing_owlvit.py index 9cda128e390c..540369b0ba84 100644 --- a/tests/models/owlvit/test_image_processing_owlvit.py +++ b/tests/models/owlvit/test_image_processing_owlvit.py @@ -104,14 +104,14 @@ def 
test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_convert_rgb")) - def test_feat_extract_from_dict_with_kwargs(self): - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) - self.assertEqual(feature_extractor.size, {"height": 18, "width": 18}) - self.assertEqual(feature_extractor.crop_size, {"height": 18, "width": 18}) - - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict, size=42, crop_size=84) - self.assertEqual(feature_extractor.size, {"height": 42, "width": 42}) - self.assertEqual(feature_extractor.crop_size, {"height": 84, "width": 84}) + def test_image_processor_from_dict_with_kwargs(self): + image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + self.assertEqual(image_processor.size, {"height": 18, "width": 18}) + self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) + + image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42, crop_size=84) + self.assertEqual(image_processor.size, {"height": 42, "width": 42}) + self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) def test_call_pil(self): # Initialize image_processing diff --git a/tests/models/poolformer/test_image_processing_poolformer.py b/tests/models/poolformer/test_image_processing_poolformer.py index 3fc836cf018b..43690a64c3f2 100644 --- a/tests/models/poolformer/test_image_processing_poolformer.py +++ b/tests/models/poolformer/test_image_processing_poolformer.py @@ -98,14 +98,14 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) - def test_feat_extract_from_dict_with_kwargs(self): - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) - self.assertEqual(feature_extractor.size, {"shortest_edge": 30}) - 
self.assertEqual(feature_extractor.crop_size, {"height": 30, "width": 30}) - - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict, size=42, crop_size=84) - self.assertEqual(feature_extractor.size, {"shortest_edge": 42}) - self.assertEqual(feature_extractor.crop_size, {"height": 84, "width": 84}) + def test_image_processor_from_dict_with_kwargs(self): + image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + self.assertEqual(image_processor.size, {"shortest_edge": 30}) + self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30}) + + image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42, crop_size=84) + self.assertEqual(image_processor.size, {"shortest_edge": 42}) + self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) def test_batch_feature(self): pass diff --git a/tests/models/segformer/test_image_processing_segformer.py b/tests/models/segformer/test_image_processing_segformer.py index b99237cfa4e2..7c83f4b05435 100644 --- a/tests/models/segformer/test_image_processing_segformer.py +++ b/tests/models/segformer/test_image_processing_segformer.py @@ -116,16 +116,16 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_reduce_labels")) - def test_feat_extract_from_dict_with_kwargs(self): - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) - self.assertEqual(feature_extractor.size, {"height": 30, "width": 30}) - self.assertEqual(feature_extractor.do_reduce_labels, False) + def test_image_processor_from_dict_with_kwargs(self): + image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + self.assertEqual(image_processor.size, {"height": 30, "width": 30}) + self.assertEqual(image_processor.do_reduce_labels, False) - feature_extractor = self.feature_extraction_class.from_dict( - self.feat_extract_dict, 
size=42, reduce_labels=True + image_processor = self.image_processing_class.from_dict( + self.image_proc_dict, size=42, reduce_labels=True ) - self.assertEqual(feature_extractor.size, {"height": 42, "width": 42}) - self.assertEqual(feature_extractor.do_reduce_labels, True) + self.assertEqual(image_processor.size, {"height": 42, "width": 42}) + self.assertEqual(image_processor.do_reduce_labels, True) def test_batch_feature(self): pass diff --git a/tests/models/swin2sr/test_image_processing_swin2sr.py b/tests/models/swin2sr/test_image_processing_swin2sr.py index 393a44ecface..267fc7570423 100644 --- a/tests/models/swin2sr/test_image_processing_swin2sr.py +++ b/tests/models/swin2sr/test_image_processing_swin2sr.py @@ -59,7 +59,7 @@ def __init__( self.do_pad = do_pad self.pad_size = pad_size - def prepare_feat_extract_dict(self): + def prepare_image_proc_dict(self): return { "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, @@ -102,91 +102,91 @@ def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False): @require_vision class Swin2SRImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): - feature_extraction_class = Swin2SRImageProcessor if is_vision_available() else None + image_processing_class = Swin2SRImageProcessor if is_vision_available() else None def setUp(self): - self.feature_extract_tester = Swin2SRImageProcessingTester(self) + self.image_processor_tester = Swin2SRImageProcessingTester(self) @property - def feat_extract_dict(self): - return self.feature_extract_tester.prepare_feat_extract_dict() + def image_proc_dict(self): + return self.image_processor_tester.prepare_image_proc_dict() - def test_feat_extract_properties(self): - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) - self.assertTrue(hasattr(feature_extractor, "do_rescale")) - self.assertTrue(hasattr(feature_extractor, "rescale_factor")) - self.assertTrue(hasattr(feature_extractor, "do_pad")) - 
self.assertTrue(hasattr(feature_extractor, "pad_size")) + def test_image_processor_properties(self): + image_processor = self.image_processing_class(**self.image_proc_dict) + self.assertTrue(hasattr(image_processor, "do_rescale")) + self.assertTrue(hasattr(image_processor, "rescale_factor")) + self.assertTrue(hasattr(image_processor, "do_pad")) + self.assertTrue(hasattr(image_processor, "pad_size")) def test_batch_feature(self): pass def calculate_expected_size(self, image): old_height, old_width = get_image_size(image) - size = self.feature_extract_tester.pad_size + size = self.image_processor_tester.pad_size pad_height = (old_height // size + 1) * size - old_height pad_width = (old_width // size + 1) * size - old_width return old_height + pad_height, old_width + pad_width def test_call_pil(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processor + image_processor = self.image_processing_class(**self.image_proc_dict) # create random PIL images - image_inputs = self.feature_extract_tester.prepare_inputs(equal_resolution=False) + image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values expected_height, expected_width = self.calculate_expected_size(np.array(image_inputs[0])) self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, + self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def test_call_numpy(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processor + image_processor = self.image_processing_class(**self.image_proc_dict) # create 
random numpy tensors - image_inputs = self.feature_extract_tester.prepare_inputs(equal_resolution=False, numpify=True) + image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values expected_height, expected_width = self.calculate_expected_size(image_inputs[0]) self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, + self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def test_call_pytorch(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processor + image_processor = self.image_processing_class(**self.image_proc_dict) # create random PyTorch tensors - image_inputs = self.feature_extract_tester.prepare_inputs(equal_resolution=False, torchify=True) + image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values expected_height, expected_width = self.calculate_expected_size(image_inputs[0]) self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, + self.image_processor_tester.num_channels, expected_height, expected_width, ), diff --git a/tests/models/videomae/test_image_processing_videomae.py b/tests/models/videomae/test_image_processing_videomae.py index fe1d35a1a5a8..f7b8df931a2c 100644 --- a/tests/models/videomae/test_image_processing_videomae.py +++ 
b/tests/models/videomae/test_image_processing_videomae.py @@ -101,14 +101,14 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "do_center_crop")) self.assertTrue(hasattr(image_processing, "size")) - def test_feat_extract_from_dict_with_kwargs(self): - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) - self.assertEqual(feature_extractor.size, {"shortest_edge": 18}) - self.assertEqual(feature_extractor.crop_size, {"height": 18, "width": 18}) - - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict, size=42, crop_size=84) - self.assertEqual(feature_extractor.size, {"shortest_edge": 42}) - self.assertEqual(feature_extractor.crop_size, {"height": 84, "width": 84}) + def test_image_processor_from_dict_with_kwargs(self): + image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + self.assertEqual(image_processor.size, {"shortest_edge": 18}) + self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) + + image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42, crop_size=84) + self.assertEqual(image_processor.size, {"shortest_edge": 42}) + self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) def test_batch_feature(self): pass diff --git a/tests/models/vilt/test_image_processing_vilt.py b/tests/models/vilt/test_image_processing_vilt.py index 58119716332e..ab99896f875f 100644 --- a/tests/models/vilt/test_image_processing_vilt.py +++ b/tests/models/vilt/test_image_processing_vilt.py @@ -137,12 +137,12 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "size_divisor")) - def test_feat_extract_from_dict_with_kwargs(self): - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) - self.assertEqual(feature_extractor.size, {"shortest_edge": 30}) + def 
test_image_processor_from_dict_with_kwargs(self): + image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + self.assertEqual(image_processor.size, {"shortest_edge": 30}) - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict, size=42) - self.assertEqual(feature_extractor.size, {"shortest_edge": 42}) + image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42) + self.assertEqual(image_processor.size, {"shortest_edge": 42}) def test_batch_feature(self): pass @@ -249,7 +249,7 @@ def test_equivalence_pad_and_create_pixel_mask(self): for image in image_inputs: self.assertIsInstance(image, torch.Tensor) - # Test whether the method "pad_and_return_pixel_mask" and calling the feature extractor return the same tensors + # Test whether the method "pad_and_return_pixel_mask" and calling the image processor return the same tensors encoded_images_with_method = image_processing_1.pad_and_create_pixel_mask(image_inputs, return_tensors="pt") encoded_images = image_processing_2(image_inputs, return_tensors="pt") diff --git a/tests/models/vit/test_image_processing_vit.py b/tests/models/vit/test_image_processing_vit.py index 8c9326d29537..fabb539dca93 100644 --- a/tests/models/vit/test_image_processing_vit.py +++ b/tests/models/vit/test_image_processing_vit.py @@ -93,12 +93,12 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) - def test_feat_extract_from_dict_with_kwargs(self): - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) - self.assertEqual(feature_extractor.size, {"height": 18, "width": 18}) + def test_image_processor_from_dict_with_kwargs(self): + image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + self.assertEqual(image_processor.size, {"height": 18, "width": 18}) - feature_extractor = 
self.feature_extraction_class.from_dict(self.feat_extract_dict, size=42) - self.assertEqual(feature_extractor.size, {"height": 42, "width": 42}) + image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42) + self.assertEqual(image_processor.size, {"height": 42, "width": 42}) def test_batch_feature(self): pass diff --git a/tests/models/yolos/test_image_processing_yolos.py b/tests/models/yolos/test_image_processing_yolos.py index e0019074766b..8eb0dfb07ae4 100644 --- a/tests/models/yolos/test_image_processing_yolos.py +++ b/tests/models/yolos/test_image_processing_yolos.py @@ -134,16 +134,16 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) - def test_feat_extract_from_dict_with_kwargs(self): - feature_extractor = self.feature_extraction_class.from_dict(self.feat_extract_dict) - self.assertEqual(feature_extractor.size, {"shortest_edge": 18, "longest_edge": 1333}) - self.assertEqual(feature_extractor.do_pad, True) + def test_image_processor_from_dict_with_kwargs(self): + image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333}) + self.assertEqual(image_processor.do_pad, True) - feature_extractor = self.feature_extraction_class.from_dict( - self.feat_extract_dict, size=42, max_size=84, pad_and_return_pixel_mask=False + image_processor = self.image_processing_class.from_dict( + self.image_proc_dict, size=42, max_size=84, pad_and_return_pixel_mask=False ) - self.assertEqual(feature_extractor.size, {"shortest_edge": 42, "longest_edge": 84}) - self.assertEqual(feature_extractor.do_pad, False) + self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84}) + self.assertEqual(image_processor.do_pad, False) def test_batch_feature(self): pass @@ -255,7 +255,7 @@ def test_equivalence_padding(self): for image in image_inputs: 
self.assertIsInstance(image, torch.Tensor) - # Test whether the method "pad" and calling the feature extractor return the same tensors + # Test whether the method "pad" and calling the image processor return the same tensors encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt") encoded_images = image_processing_2(image_inputs, return_tensors="pt") From 11b1aced0b63ee167dd3fa09ffb6153e28bc3926 Mon Sep 17 00:00:00 2001 From: Amy Roberts <22614925+amyeroberts@users.noreply.github.com> Date: Thu, 19 Jan 2023 15:59:04 +0000 Subject: [PATCH 10/15] Fixup --- tests/models/segformer/test_image_processing_segformer.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/models/segformer/test_image_processing_segformer.py b/tests/models/segformer/test_image_processing_segformer.py index 7c83f4b05435..5b3b564a1d27 100644 --- a/tests/models/segformer/test_image_processing_segformer.py +++ b/tests/models/segformer/test_image_processing_segformer.py @@ -121,9 +121,7 @@ def test_image_processor_from_dict_with_kwargs(self): self.assertEqual(image_processor.size, {"height": 30, "width": 30}) self.assertEqual(image_processor.do_reduce_labels, False) - image_processor = self.image_processing_class.from_dict( - self.image_proc_dict, size=42, reduce_labels=True - ) + image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42, reduce_labels=True) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) self.assertEqual(image_processor.do_reduce_labels, True) From 73aa8a25675e13916983b4f0df8ceaa3441276e6 Mon Sep 17 00:00:00 2001 From: Amy Roberts <22614925+amyeroberts@users.noreply.github.com> Date: Fri, 20 Jan 2023 17:21:45 +0000 Subject: [PATCH 11/15] Use ImageProcessingSavingTestMixin --- tests/models/beit/test_image_processing_beit.py | 5 ++--- tests/models/blip/test_image_processing_blip.py | 6 +++--- .../chinese_clip/test_image_processing_chinese_clip.py | 6 +++--- 
tests/models/clip/test_image_processing_clip.py | 6 +++--- .../test_image_processing_conditional_detr.py | 5 ++--- tests/models/convnext/test_image_processing_convnext.py | 5 ++--- .../test_image_processing_deformable_detr.py | 5 ++--- tests/models/deit/test_image_processing_deit.py | 5 ++--- tests/models/detr/test_image_processing_detr.py | 5 ++--- tests/models/donut/test_image_processing_donut.py | 5 ++--- tests/models/dpt/test_image_processing_dpt.py | 5 ++--- .../test_image_processing_efficientformer.py | 5 ++--- tests/models/flava/test_image_processing_flava.py | 5 ++--- tests/models/glpn/test_image_processing_glpn.py | 5 ++--- tests/models/imagegpt/test_image_processing_imagegpt.py | 4 ++-- tests/models/layoutlmv2/test_image_processing_layoutlmv2.py | 5 ++--- tests/models/layoutlmv3/test_image_processing_layoutlmv3.py | 5 ++--- tests/models/levit/test_image_processing_levit.py | 5 ++--- tests/models/maskformer/test_image_processing_maskformer.py | 5 ++--- .../mobilenet_v1/test_image_processing_mobilenet_v1.py | 5 ++--- .../mobilenet_v2/test_image_processing_mobilenet_v2.py | 5 ++--- tests/models/mobilevit/test_image_processing_mobilevit.py | 5 ++--- tests/models/oneformer/test_image_processing_oneformer.py | 5 ++--- tests/models/owlvit/test_image_processing_owlvit.py | 5 ++--- tests/models/poolformer/test_image_processing_poolformer.py | 5 ++--- tests/models/segformer/test_image_processing_segformer.py | 5 ++--- tests/models/swin2sr/test_image_processing_swin2sr.py | 4 ++-- tests/models/videomae/test_image_processing_videomae.py | 5 ++--- tests/models/vilt/test_image_processing_vilt.py | 5 ++--- tests/models/vit/test_image_processing_vit.py | 5 ++--- tests/models/yolos/test_image_processing_yolos.py | 5 ++--- 31 files changed, 65 insertions(+), 91 deletions(-) diff --git a/tests/models/beit/test_image_processing_beit.py b/tests/models/beit/test_image_processing_beit.py index 06ba688bb359..b7debae12e86 100644 --- 
a/tests/models/beit/test_image_processing_beit.py +++ b/tests/models/beit/test_image_processing_beit.py @@ -22,8 +22,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -105,7 +104,7 @@ def prepare_semantic_batch_inputs(): @require_torch @require_vision -class BeitImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = BeitImageProcessor if is_vision_available() else None diff --git a/tests/models/blip/test_image_processing_blip.py b/tests/models/blip/test_image_processing_blip.py index ce1298768c34..8c8b25438818 100644 --- a/tests/models/blip/test_image_processing_blip.py +++ b/tests/models/blip/test_image_processing_blip.py @@ -21,7 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin +from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): @@ -109,7 +109,7 @@ def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False): @require_torch @require_vision -class BlipImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class BlipImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = BlipImageProcessor if is_vision_available() else None @@ -231,7 +231,7 @@ def test_call_pytorch(self): @require_torch @require_vision -class BlipImageProcessingTestFourChannels(FeatureExtractionSavingTestMixin, 
unittest.TestCase): +class BlipImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = BlipImageProcessor if is_vision_available() else None diff --git a/tests/models/chinese_clip/test_image_processing_chinese_clip.py b/tests/models/chinese_clip/test_image_processing_chinese_clip.py index d1a6bbc9eb32..e7a0410bdb94 100644 --- a/tests/models/chinese_clip/test_image_processing_chinese_clip.py +++ b/tests/models/chinese_clip/test_image_processing_chinese_clip.py @@ -21,7 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin +from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): @@ -113,7 +113,7 @@ def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False): @require_torch @require_vision -class ChineseCLIPImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None @@ -246,7 +246,7 @@ def test_call_pytorch(self): @require_torch @require_vision -class ChineseCLIPImageProcessingTestFourChannels(FeatureExtractionSavingTestMixin, unittest.TestCase): +class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None diff --git a/tests/models/clip/test_image_processing_clip.py b/tests/models/clip/test_image_processing_clip.py index d2d210d2faa6..c3fb83c488b0 100644 --- a/tests/models/clip/test_image_processing_clip.py +++ b/tests/models/clip/test_image_processing_clip.py @@ -21,7 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils 
import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin +from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): @@ -113,7 +113,7 @@ def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False): @require_torch @require_vision -class CLIPImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class CLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = CLIPImageProcessor if is_vision_available() else None @@ -246,7 +246,7 @@ def test_call_pytorch(self): @require_torch @require_vision -class CLIPImageProcessingTestFourChannels(FeatureExtractionSavingTestMixin, unittest.TestCase): +class CLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = CLIPImageProcessor if is_vision_available() else None diff --git a/tests/models/conditional_detr/test_image_processing_conditional_detr.py b/tests/models/conditional_detr/test_image_processing_conditional_detr.py index 9efd73211bfd..153a7c4b0af6 100644 --- a/tests/models/conditional_detr/test_image_processing_conditional_detr.py +++ b/tests/models/conditional_detr/test_image_processing_conditional_detr.py @@ -23,8 +23,7 @@ from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -115,7 +114,7 @@ def get_expected_values(self, image_inputs, batched=False): @require_torch @require_vision -class ConditionalDetrImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class 
ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None diff --git a/tests/models/convnext/test_image_processing_convnext.py b/tests/models/convnext/test_image_processing_convnext.py index 0aecf2d20d25..55039924833f 100644 --- a/tests/models/convnext/test_image_processing_convnext.py +++ b/tests/models/convnext/test_image_processing_convnext.py @@ -21,8 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -77,7 +76,7 @@ def prepare_image_processor_dict(self): @require_torch @require_vision -class ConvNextImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class ConvNextImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = ConvNextImageProcessor if is_vision_available() else None diff --git a/tests/models/deformable_detr/test_image_processing_deformable_detr.py b/tests/models/deformable_detr/test_image_processing_deformable_detr.py index 0d2068b37037..9435fe207d38 100644 --- a/tests/models/deformable_detr/test_image_processing_deformable_detr.py +++ b/tests/models/deformable_detr/test_image_processing_deformable_detr.py @@ -23,8 +23,7 @@ from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs 
if is_torch_available(): @@ -115,7 +114,7 @@ def get_expected_values(self, image_inputs, batched=False): @require_torch @require_vision -class DeformableDetrImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None diff --git a/tests/models/deit/test_image_processing_deit.py b/tests/models/deit/test_image_processing_deit.py index 933c781f3e63..382d57707df0 100644 --- a/tests/models/deit/test_image_processing_deit.py +++ b/tests/models/deit/test_image_processing_deit.py @@ -21,8 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -82,7 +81,7 @@ def prepare_image_processor_dict(self): @require_torch @require_vision -class DeiTImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class DeiTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = DeiTImageProcessor if is_vision_available() else None test_cast_dtype = True diff --git a/tests/models/detr/test_image_processing_detr.py b/tests/models/detr/test_image_processing_detr.py index 019d5aa99f44..00f97a3b5d08 100644 --- a/tests/models/detr/test_image_processing_detr.py +++ b/tests/models/detr/test_image_processing_detr.py @@ -23,8 +23,7 @@ from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common 
import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -115,7 +114,7 @@ def get_expected_values(self, image_inputs, batched=False): @require_torch @require_vision -class DetrImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = DetrImageProcessor if is_vision_available() else None diff --git a/tests/models/donut/test_image_processing_donut.py b/tests/models/donut/test_image_processing_donut.py index ab96204a3660..b551b491023f 100644 --- a/tests/models/donut/test_image_processing_donut.py +++ b/tests/models/donut/test_image_processing_donut.py @@ -21,8 +21,7 @@ from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -82,7 +81,7 @@ def prepare_image_processor_dict(self): @require_torch @require_vision -class DonutImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = DonutImageProcessor if is_vision_available() else None diff --git a/tests/models/dpt/test_image_processing_dpt.py b/tests/models/dpt/test_image_processing_dpt.py index ab0c84eb9e91..c457a940b6d7 100644 --- a/tests/models/dpt/test_image_processing_dpt.py +++ b/tests/models/dpt/test_image_processing_dpt.py @@ -21,8 +21,7 @@ from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision -from 
...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -74,7 +73,7 @@ def prepare_image_processor_dict(self): @require_torch @require_vision -class DPTImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = DPTImageProcessor if is_vision_available() else None diff --git a/tests/models/efficientformer/test_image_processing_efficientformer.py b/tests/models/efficientformer/test_image_processing_efficientformer.py index 0a5255056461..cae206717007 100644 --- a/tests/models/efficientformer/test_image_processing_efficientformer.py +++ b/tests/models/efficientformer/test_image_processing_efficientformer.py @@ -21,8 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -74,7 +73,7 @@ def prepare_feat_extract_dict(self): @require_torch @require_vision -class EfficientFormerImageProcessorTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase): feature_extraction_class = ViTFeatureExtractor if is_vision_available() else None diff --git a/tests/models/flava/test_image_processing_flava.py b/tests/models/flava/test_image_processing_flava.py index 4b6210301916..03a09dc98f61 100644 --- a/tests/models/flava/test_image_processing_flava.py +++ b/tests/models/flava/test_image_processing_flava.py @@ 
-21,8 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -160,7 +159,7 @@ def get_expected_codebook_image_size(self): @require_torch @require_vision -class FlavaImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class FlavaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = FlavaImageProcessor if is_vision_available() else None maxDiff = None diff --git a/tests/models/glpn/test_image_processing_glpn.py b/tests/models/glpn/test_image_processing_glpn.py index ac726d348096..cddc80d9ae2b 100644 --- a/tests/models/glpn/test_image_processing_glpn.py +++ b/tests/models/glpn/test_image_processing_glpn.py @@ -21,8 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -67,7 +66,7 @@ def prepare_image_processor_dict(self): @require_torch @require_vision -class GLPNImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = GLPNImageProcessor if is_vision_available() else None diff --git a/tests/models/imagegpt/test_image_processing_imagegpt.py b/tests/models/imagegpt/test_image_processing_imagegpt.py index 1ca38424d311..80236ee3af49 100644 --- 
a/tests/models/imagegpt/test_image_processing_imagegpt.py +++ b/tests/models/imagegpt/test_image_processing_imagegpt.py @@ -25,7 +25,7 @@ from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin +from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): @@ -78,7 +78,7 @@ def prepare_image_processor_dict(self): @require_torch @require_vision -class ImageGPTImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = ImageGPTImageProcessor if is_vision_available() else None diff --git a/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py b/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py index f700b0a4d23b..86cb306afa62 100644 --- a/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py +++ b/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py @@ -21,8 +21,7 @@ from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -64,7 +63,7 @@ def prepare_image_processor_dict(self): @require_torch @require_pytesseract -class LayoutLMv2ImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class LayoutLMv2ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = LayoutLMv2ImageProcessor if is_pytesseract_available() else None diff --git a/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py 
b/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py index 67e1f39978b6..0c36406ad91d 100644 --- a/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py +++ b/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py @@ -21,8 +21,7 @@ from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -64,7 +63,7 @@ def prepare_image_processor_dict(self): @require_torch @require_pytesseract -class LayoutLMv3ImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None diff --git a/tests/models/levit/test_image_processing_levit.py b/tests/models/levit/test_image_processing_levit.py index c64a917142e3..76899ff43c44 100644 --- a/tests/models/levit/test_image_processing_levit.py +++ b/tests/models/levit/test_image_processing_levit.py @@ -21,8 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -81,7 +80,7 @@ def prepare_image_processor_dict(self): @require_torch @require_vision -class LevitImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): 
image_processing_class = LevitImageProcessor if is_vision_available() else None diff --git a/tests/models/maskformer/test_image_processing_maskformer.py b/tests/models/maskformer/test_image_processing_maskformer.py index e480c1f128e2..614be1809278 100644 --- a/tests/models/maskformer/test_image_processing_maskformer.py +++ b/tests/models/maskformer/test_image_processing_maskformer.py @@ -23,8 +23,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -131,7 +130,7 @@ def get_fake_maskformer_outputs(self): @require_torch @require_vision -class MaskFormerImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class MaskFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = MaskFormerImageProcessor if (is_vision_available() and is_torch_available()) else None diff --git a/tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py b/tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py index c40920607a82..3373709077e9 100644 --- a/tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py +++ b/tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py @@ -21,8 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -72,7 +71,7 @@ def 
prepare_image_processor_dict(self): @require_torch @require_vision -class MobileNetV1ImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None diff --git a/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py b/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py index 57d6ae6e7635..8b978f6527cc 100644 --- a/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py +++ b/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py @@ -21,8 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -72,7 +71,7 @@ def prepare_image_processor_dict(self): @require_torch @require_vision -class MobileNetV2ImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class MobileNetV2ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = MobileNetV2ImageProcessor if is_vision_available() else None diff --git a/tests/models/mobilevit/test_image_processing_mobilevit.py b/tests/models/mobilevit/test_image_processing_mobilevit.py index ed58278b94db..a282f14a5c3f 100644 --- a/tests/models/mobilevit/test_image_processing_mobilevit.py +++ b/tests/models/mobilevit/test_image_processing_mobilevit.py @@ -21,8 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from 
...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -75,7 +74,7 @@ def prepare_image_processor_dict(self): @require_torch @require_vision -class MobileViTImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = MobileViTImageProcessor if is_vision_available() else None diff --git a/tests/models/oneformer/test_image_processing_oneformer.py b/tests/models/oneformer/test_image_processing_oneformer.py index 79c6d82c3f42..8faa441b5184 100644 --- a/tests/models/oneformer/test_image_processing_oneformer.py +++ b/tests/models/oneformer/test_image_processing_oneformer.py @@ -23,8 +23,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -156,7 +155,7 @@ def get_fake_oneformer_outputs(self): @require_torch @require_vision -class OneFormerImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None # only for test_feat_extracttion_common.test_feat_extract_to_json_string feature_extraction_class = image_processing_class diff --git a/tests/models/owlvit/test_image_processing_owlvit.py b/tests/models/owlvit/test_image_processing_owlvit.py index 540369b0ba84..edb2b2d765da 100644 --- a/tests/models/owlvit/test_image_processing_owlvit.py +++ 
b/tests/models/owlvit/test_image_processing_owlvit.py @@ -21,8 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -82,7 +81,7 @@ def prepare_image_processor_dict(self): @require_torch @require_vision -class OwlViTImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class OwlViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = OwlViTImageProcessor if is_vision_available() else None diff --git a/tests/models/poolformer/test_image_processing_poolformer.py b/tests/models/poolformer/test_image_processing_poolformer.py index 43690a64c3f2..bc18c76b59f4 100644 --- a/tests/models/poolformer/test_image_processing_poolformer.py +++ b/tests/models/poolformer/test_image_processing_poolformer.py @@ -20,8 +20,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -78,7 +77,7 @@ def prepare_image_processor_dict(self): @require_torch @require_vision -class PoolFormerImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = PoolFormerImageProcessor if is_vision_available() else None diff --git a/tests/models/segformer/test_image_processing_segformer.py 
b/tests/models/segformer/test_image_processing_segformer.py index 5b3b564a1d27..7b27bf503d5e 100644 --- a/tests/models/segformer/test_image_processing_segformer.py +++ b/tests/models/segformer/test_image_processing_segformer.py @@ -22,8 +22,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -96,7 +95,7 @@ def prepare_semantic_batch_inputs(): @require_torch @require_vision -class SegformerImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class SegformerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = SegformerImageProcessor if is_vision_available() else None diff --git a/tests/models/swin2sr/test_image_processing_swin2sr.py b/tests/models/swin2sr/test_image_processing_swin2sr.py index 267fc7570423..5db0e960df96 100644 --- a/tests/models/swin2sr/test_image_processing_swin2sr.py +++ b/tests/models/swin2sr/test_image_processing_swin2sr.py @@ -21,7 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin +from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): @@ -100,7 +100,7 @@ def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False): @require_torch @require_vision -class Swin2SRImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class Swin2SRImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = Swin2SRImageProcessor if 
is_vision_available() else None diff --git a/tests/models/videomae/test_image_processing_videomae.py b/tests/models/videomae/test_image_processing_videomae.py index f7b8df931a2c..56b41d8b7fde 100644 --- a/tests/models/videomae/test_image_processing_videomae.py +++ b/tests/models/videomae/test_image_processing_videomae.py @@ -21,8 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_video_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): @@ -81,7 +80,7 @@ def prepare_image_processor_dict(self): @require_torch @require_vision -class VideoMAEImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class VideoMAEImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = VideoMAEImageProcessor if is_vision_available() else None diff --git a/tests/models/vilt/test_image_processing_vilt.py b/tests/models/vilt/test_image_processing_vilt.py index ab99896f875f..551cc95d1c68 100644 --- a/tests/models/vilt/test_image_processing_vilt.py +++ b/tests/models/vilt/test_image_processing_vilt.py @@ -21,8 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -117,7 +116,7 @@ def get_expected_values(self, image_inputs, batched=False): @require_torch @require_vision -class ViltImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class 
ViltImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = ViltImageProcessor if is_vision_available() else None diff --git a/tests/models/vit/test_image_processing_vit.py b/tests/models/vit/test_image_processing_vit.py index fabb539dca93..8b296623cb68 100644 --- a/tests/models/vit/test_image_processing_vit.py +++ b/tests/models/vit/test_image_processing_vit.py @@ -21,8 +21,7 @@ from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -74,7 +73,7 @@ def prepare_image_processor_dict(self): @require_torch @require_vision -class ViTImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class ViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = ViTImageProcessor if is_vision_available() else None diff --git a/tests/models/yolos/test_image_processing_yolos.py b/tests/models/yolos/test_image_processing_yolos.py index 8eb0dfb07ae4..06ed90e1c169 100644 --- a/tests/models/yolos/test_image_processing_yolos.py +++ b/tests/models/yolos/test_image_processing_yolos.py @@ -23,8 +23,7 @@ from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available -from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin -from ...test_image_processing_common import prepare_image_inputs +from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): @@ -115,7 +114,7 @@ def get_expected_values(self, image_inputs, batched=False): @require_torch @require_vision -class 
YolosImageProcessingTest(FeatureExtractionSavingTestMixin, unittest.TestCase): +class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = YolosImageProcessor if is_vision_available() else None From e2e3906e589f8111bcdc347e39518f15946f61f8 Mon Sep 17 00:00:00 2001 From: Amy Roberts <22614925+amyeroberts@users.noreply.github.com> Date: Mon, 23 Jan 2023 15:51:10 +0000 Subject: [PATCH 12/15] Update property ref in tests --- tests/models/layoutlmv2/test_image_processing_layoutlmv2.py | 4 ++-- tests/models/layoutlmv3/test_image_processing_layoutlmv3.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py b/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py index 86cb306afa62..d2eae1d8df36 100644 --- a/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py +++ b/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py @@ -81,10 +81,10 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "apply_ocr")) def test_image_processor_from_dict_with_kwargs(self): - image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 18, "width": 18}) - image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) def test_batch_feature(self): diff --git a/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py b/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py index 0c36406ad91d..c61d52b65a90 100644 --- a/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py +++ b/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py @@ -81,10 +81,10 @@ def 
test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "apply_ocr")) def test_image_processor_from_dict_with_kwargs(self): - image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 18, "width": 18}) - image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) def test_batch_feature(self): From 6901a62407728ff27abeb50f22e770be02b788dd Mon Sep 17 00:00:00 2001 From: Amy Roberts <22614925+amyeroberts@users.noreply.github.com> Date: Mon, 23 Jan 2023 16:01:17 +0000 Subject: [PATCH 13/15] Update property ref in tests --- .../models/beit/test_image_processing_beit.py | 4 ++-- .../models/blip/test_image_processing_blip.py | 22 +++++++++---------- .../test_image_processing_chinese_clip.py | 4 ++-- .../models/clip/test_image_processing_clip.py | 4 ++-- .../test_image_processing_conditional_detr.py | 4 ++-- .../test_image_processing_convnext.py | 4 ++-- .../test_image_processing_deformable_detr.py | 4 ++-- .../models/deit/test_image_processing_deit.py | 4 ++-- .../models/detr/test_image_processing_detr.py | 4 ++-- .../donut/test_image_processing_donut.py | 6 ++--- tests/models/dpt/test_image_processing_dpt.py | 4 ++-- .../flava/test_image_processing_flava.py | 4 ++-- .../test_image_processing_imagegpt.py | 8 +++---- .../levit/test_image_processing_levit.py | 4 ++-- .../test_image_processing_maskformer.py | 4 ++-- .../test_image_processing_mobilenet_v1.py | 4 ++-- .../test_image_processing_mobilenet_v2.py | 6 ++--- .../test_image_processing_mobilevit.py | 4 ++-- .../owlvit/test_image_processing_owlvit.py | 4 ++-- .../test_image_processing_poolformer.py | 4 ++-- .../test_image_processing_segformer.py | 4 ++-- 
.../swin2sr/test_image_processing_swin2sr.py | 14 ++++++------ .../test_image_processing_videomae.py | 4 ++-- .../models/vilt/test_image_processing_vilt.py | 4 ++-- tests/models/vit/test_image_processing_vit.py | 4 ++-- .../yolos/test_image_processing_yolos.py | 4 ++-- 26 files changed, 70 insertions(+), 70 deletions(-) diff --git a/tests/models/beit/test_image_processing_beit.py b/tests/models/beit/test_image_processing_beit.py index b7debae12e86..95348bfe6390 100644 --- a/tests/models/beit/test_image_processing_beit.py +++ b/tests/models/beit/test_image_processing_beit.py @@ -126,13 +126,13 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "image_std")) def test_image_processor_from_dict_with_kwargs(self): - image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 20, "width": 20}) self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) self.assertEqual(image_processor.do_reduce_labels, False) image_processor = self.image_processing_class.from_dict( - self.image_proc_dict, size=42, crop_size=84, reduce_labels=True + self.image_processor_dict, size=42, crop_size=84, reduce_labels=True ) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) diff --git a/tests/models/blip/test_image_processing_blip.py b/tests/models/blip/test_image_processing_blip.py index 8c8b25438818..f3f2ab21dfd4 100644 --- a/tests/models/blip/test_image_processing_blip.py +++ b/tests/models/blip/test_image_processing_blip.py @@ -65,7 +65,7 @@ def __init__( self.do_pad = do_pad self.do_convert_rgb = do_convert_rgb - def prepare_image_proc_dict(self): + def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, @@ -117,11 +117,11 @@ def setUp(self): 
self.image_processor_tester = BlipImageProcessingTester(self) @property - def image_proc_dict(self): - return self.image_processor_tester.prepare_image_proc_dict() + def image_processor_dict(self): + return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): - image_processor = self.image_processing_class(**self.image_proc_dict) + image_processor = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processor, "do_resize")) self.assertTrue(hasattr(image_processor, "size")) self.assertTrue(hasattr(image_processor, "do_normalize")) @@ -134,7 +134,7 @@ def test_batch_feature(self): def test_call_pil(self): # Initialize image_processor - image_processor = self.image_processing_class(**self.image_proc_dict) + image_processor = self.image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False) for image in image_inputs: @@ -166,7 +166,7 @@ def test_call_pil(self): def test_call_numpy(self): # Initialize image_processor - image_processor = self.image_processing_class(**self.image_proc_dict) + image_processor = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True) for image in image_inputs: @@ -198,7 +198,7 @@ def test_call_numpy(self): def test_call_pytorch(self): # Initialize image_processor - image_processor = self.image_processing_class(**self.image_proc_dict) + image_processor = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True) for image in image_inputs: @@ -240,11 +240,11 @@ def setUp(self): self.expected_encoded_image_num_channels = 3 @property - def image_proc_dict(self): - return 
self.image_processor_tester.prepare_image_proc_dict() + def image_processor_dict(self): + return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): - image_processor = self.image_processing_class(**self.image_proc_dict) + image_processor = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processor, "do_resize")) self.assertTrue(hasattr(image_processor, "size")) self.assertTrue(hasattr(image_processor, "do_normalize")) @@ -257,7 +257,7 @@ def test_batch_feature(self): def test_call_pil_four_channels(self): # Initialize image_processor - image_processor = self.image_processing_class(**self.image_proc_dict) + image_processor = self.image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False) for image in image_inputs: diff --git a/tests/models/chinese_clip/test_image_processing_chinese_clip.py b/tests/models/chinese_clip/test_image_processing_chinese_clip.py index e7a0410bdb94..b7b31350713a 100644 --- a/tests/models/chinese_clip/test_image_processing_chinese_clip.py +++ b/tests/models/chinese_clip/test_image_processing_chinese_clip.py @@ -136,11 +136,11 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "do_convert_rgb")) def test_image_processor_from_dict_with_kwargs(self): - image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 224, "width": 224}) self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) - image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42, crop_size=84) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84) self.assertEqual(image_processor.size, {"shortest_edge": 42}) 
self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) diff --git a/tests/models/clip/test_image_processing_clip.py b/tests/models/clip/test_image_processing_clip.py index c3fb83c488b0..7ffaceb54c68 100644 --- a/tests/models/clip/test_image_processing_clip.py +++ b/tests/models/clip/test_image_processing_clip.py @@ -136,11 +136,11 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "do_convert_rgb")) def test_image_processor_from_dict_with_kwargs(self): - image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 20}) self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) - image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42, crop_size=84) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84) self.assertEqual(image_processor.size, {"shortest_edge": 42}) self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) diff --git a/tests/models/conditional_detr/test_image_processing_conditional_detr.py b/tests/models/conditional_detr/test_image_processing_conditional_detr.py index 153a7c4b0af6..f93d3cabbf2f 100644 --- a/tests/models/conditional_detr/test_image_processing_conditional_detr.py +++ b/tests/models/conditional_detr/test_image_processing_conditional_detr.py @@ -134,12 +134,12 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "size")) def test_image_processor_from_dict_with_kwargs(self): - image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333}) self.assertEqual(image_processor.do_pad, True) image_processor = 
self.image_processing_class.from_dict( - self.image_proc_dict, size=42, max_size=84, pad_and_return_pixel_mask=False + self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False ) self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84}) self.assertEqual(image_processor.do_pad, False) diff --git a/tests/models/convnext/test_image_processing_convnext.py b/tests/models/convnext/test_image_processing_convnext.py index 55039924833f..da7d28e64dbc 100644 --- a/tests/models/convnext/test_image_processing_convnext.py +++ b/tests/models/convnext/test_image_processing_convnext.py @@ -97,10 +97,10 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "image_std")) def test_image_processor_from_dict_with_kwargs(self): - image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 20}) - image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42) self.assertEqual(image_processor.size, {"shortest_edge": 42}) def test_batch_feature(self): diff --git a/tests/models/deformable_detr/test_image_processing_deformable_detr.py b/tests/models/deformable_detr/test_image_processing_deformable_detr.py index 9435fe207d38..98ebac4bbbb4 100644 --- a/tests/models/deformable_detr/test_image_processing_deformable_detr.py +++ b/tests/models/deformable_detr/test_image_processing_deformable_detr.py @@ -136,12 +136,12 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "size")) def test_image_processor_from_dict_with_kwargs(self): - image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict) 
self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333}) self.assertEqual(image_processor.do_pad, True) image_processor = self.image_processing_class.from_dict( - self.image_proc_dict, size=42, max_size=84, pad_and_return_pixel_mask=False + self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False ) self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84}) self.assertEqual(image_processor.do_pad, False) diff --git a/tests/models/deit/test_image_processing_deit.py b/tests/models/deit/test_image_processing_deit.py index 382d57707df0..d2919ccc2ab9 100644 --- a/tests/models/deit/test_image_processing_deit.py +++ b/tests/models/deit/test_image_processing_deit.py @@ -104,11 +104,11 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "image_std")) def test_image_processor_from_dict_with_kwargs(self): - image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 20, "width": 20}) self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) - image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42, crop_size=84) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) diff --git a/tests/models/detr/test_image_processing_detr.py b/tests/models/detr/test_image_processing_detr.py index 00f97a3b5d08..1638cb6794ff 100644 --- a/tests/models/detr/test_image_processing_detr.py +++ b/tests/models/detr/test_image_processing_detr.py @@ -137,12 +137,12 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "do_pad")) def 
test_image_processor_from_dict_with_kwargs(self): - image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333}) self.assertEqual(image_processor.do_pad, True) image_processor = self.image_processing_class.from_dict( - self.image_proc_dict, size=42, max_size=84, pad_and_return_pixel_mask=False + self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False ) self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84}) self.assertEqual(image_processor.do_pad, False) diff --git a/tests/models/donut/test_image_processing_donut.py b/tests/models/donut/test_image_processing_donut.py index b551b491023f..5ac4d1fc3d1b 100644 --- a/tests/models/donut/test_image_processing_donut.py +++ b/tests/models/donut/test_image_processing_donut.py @@ -104,14 +104,14 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "image_std")) def test_image_processor_from_dict_with_kwargs(self): - image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 18, "width": 20}) - image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) # Previous config had dimensions in (width, height) order - image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=(42, 84)) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84)) self.assertEqual(image_processor.size, {"height": 84, "width": 42}) def test_batch_feature(self): diff --git 
a/tests/models/dpt/test_image_processing_dpt.py b/tests/models/dpt/test_image_processing_dpt.py index c457a940b6d7..4ed6faadb6a8 100644 --- a/tests/models/dpt/test_image_processing_dpt.py +++ b/tests/models/dpt/test_image_processing_dpt.py @@ -93,10 +93,10 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "size")) def test_image_processor_from_dict_with_kwargs(self): - image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 18, "width": 18}) - image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) def test_call_pil(self): diff --git a/tests/models/flava/test_image_processing_flava.py b/tests/models/flava/test_image_processing_flava.py index 03a09dc98f61..129343b998c8 100644 --- a/tests/models/flava/test_image_processing_flava.py +++ b/tests/models/flava/test_image_processing_flava.py @@ -194,14 +194,14 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "codebook_image_std")) def test_image_processor_from_dict_with_kwargs(self): - image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 224, "width": 224}) self.assertEqual(image_processor.crop_size, {"height": 224, "width": 224}) self.assertEqual(image_processor.codebook_size, {"height": 112, "width": 112}) self.assertEqual(image_processor.codebook_crop_size, {"height": 112, "width": 112}) image_processor = self.image_processing_class.from_dict( - self.image_proc_dict, size=42, crop_size=84, codebook_size=33, codebook_crop_size=66 + 
self.image_processor_dict, size=42, crop_size=84, codebook_size=33, codebook_crop_size=66 ) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) diff --git a/tests/models/imagegpt/test_image_processing_imagegpt.py b/tests/models/imagegpt/test_image_processing_imagegpt.py index 80236ee3af49..efc456e99877 100644 --- a/tests/models/imagegpt/test_image_processing_imagegpt.py +++ b/tests/models/imagegpt/test_image_processing_imagegpt.py @@ -97,16 +97,16 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "do_normalize")) def test_image_processor_from_dict_with_kwargs(self): - image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 18, "width": 18}) - image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) def test_image_processor_to_json_string(self): - image_processor = self.image_processing_class(**self.image_proc_dict) + image_processor = self.image_processing_class(**self.image_processor_dict) obj = json.loads(image_processor.to_json_string()) - for key, value in self.image_proc_dict.items(): + for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(value, obj[key])) else: diff --git a/tests/models/levit/test_image_processing_levit.py b/tests/models/levit/test_image_processing_levit.py index 76899ff43c44..8fba9a5d03f8 100644 --- a/tests/models/levit/test_image_processing_levit.py +++ b/tests/models/levit/test_image_processing_levit.py @@ -101,11 +101,11 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "size")) def 
test_image_processor_from_dict_with_kwargs(self): - image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 18}) self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) - image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42, crop_size=84) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84) self.assertEqual(image_processor.size, {"shortest_edge": 42}) self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) diff --git a/tests/models/maskformer/test_image_processing_maskformer.py b/tests/models/maskformer/test_image_processing_maskformer.py index 614be1809278..682aa2756ea8 100644 --- a/tests/models/maskformer/test_image_processing_maskformer.py +++ b/tests/models/maskformer/test_image_processing_maskformer.py @@ -153,12 +153,12 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "num_labels")) def test_image_processor_from_dict_with_kwargs(self): - image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 32, "longest_edge": 1333}) self.assertEqual(image_processor.size_divisor, 0) image_processor = self.image_processing_class.from_dict( - self.image_proc_dict, size=42, max_size=84, size_divisibility=8 + self.image_processor_dict, size=42, max_size=84, size_divisibility=8 ) self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84}) self.assertEqual(image_processor.size_divisor, 8) diff --git a/tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py b/tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py index 3373709077e9..34096ff1f960 100644 --- 
a/tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py +++ b/tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py @@ -90,11 +90,11 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "center_crop")) def test_image_processor_from_dict_with_kwargs(self): - image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 20}) self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) - image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42, crop_size=84) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84) self.assertEqual(image_processor.size, {"shortest_edge": 42}) self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) diff --git a/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py b/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py index 8b978f6527cc..472280753e9c 100644 --- a/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py +++ b/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py @@ -83,18 +83,18 @@ def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): - image_processor = self.image_processing_class(**self.image_proc_dict) + image_processor = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processor, "do_resize")) self.assertTrue(hasattr(image_processor, "size")) self.assertTrue(hasattr(image_processor, "do_center_crop")) self.assertTrue(hasattr(image_processor, "crop_size")) def test_image_processor_from_dict_with_kwargs(self): - image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + image_processor = 
self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 20}) self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) - image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42, crop_size=84) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84) self.assertEqual(image_processor.size, {"shortest_edge": 42}) self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) diff --git a/tests/models/mobilevit/test_image_processing_mobilevit.py b/tests/models/mobilevit/test_image_processing_mobilevit.py index a282f14a5c3f..d14a40571501 100644 --- a/tests/models/mobilevit/test_image_processing_mobilevit.py +++ b/tests/models/mobilevit/test_image_processing_mobilevit.py @@ -94,11 +94,11 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "do_flip_channel_order")) def test_image_processor_from_dict_with_kwargs(self): - image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 20}) self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) - image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42, crop_size=84) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84) self.assertEqual(image_processor.size, {"shortest_edge": 42}) self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) diff --git a/tests/models/owlvit/test_image_processing_owlvit.py b/tests/models/owlvit/test_image_processing_owlvit.py index edb2b2d765da..b94120f563da 100644 --- a/tests/models/owlvit/test_image_processing_owlvit.py +++ b/tests/models/owlvit/test_image_processing_owlvit.py @@ -104,11 +104,11 @@ def 
test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "do_convert_rgb")) def test_image_processor_from_dict_with_kwargs(self): - image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 18, "width": 18}) self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) - image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42, crop_size=84) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) diff --git a/tests/models/poolformer/test_image_processing_poolformer.py b/tests/models/poolformer/test_image_processing_poolformer.py index bc18c76b59f4..d5596a55a9f2 100644 --- a/tests/models/poolformer/test_image_processing_poolformer.py +++ b/tests/models/poolformer/test_image_processing_poolformer.py @@ -98,11 +98,11 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "image_std")) def test_image_processor_from_dict_with_kwargs(self): - image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 30}) self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30}) - image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42, crop_size=84) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84) self.assertEqual(image_processor.size, {"shortest_edge": 42}) self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) diff --git 
a/tests/models/segformer/test_image_processing_segformer.py b/tests/models/segformer/test_image_processing_segformer.py index 7b27bf503d5e..a05b3349e80f 100644 --- a/tests/models/segformer/test_image_processing_segformer.py +++ b/tests/models/segformer/test_image_processing_segformer.py @@ -116,11 +116,11 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "do_reduce_labels")) def test_image_processor_from_dict_with_kwargs(self): - image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 30, "width": 30}) self.assertEqual(image_processor.do_reduce_labels, False) - image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42, reduce_labels=True) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, reduce_labels=True) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) self.assertEqual(image_processor.do_reduce_labels, True) diff --git a/tests/models/swin2sr/test_image_processing_swin2sr.py b/tests/models/swin2sr/test_image_processing_swin2sr.py index 5db0e960df96..06e9539693e1 100644 --- a/tests/models/swin2sr/test_image_processing_swin2sr.py +++ b/tests/models/swin2sr/test_image_processing_swin2sr.py @@ -59,7 +59,7 @@ def __init__( self.do_pad = do_pad self.pad_size = pad_size - def prepare_image_proc_dict(self): + def prepare_image_processor_dict(self): return { "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, @@ -108,11 +108,11 @@ def setUp(self): self.image_processor_tester = Swin2SRImageProcessingTester(self) @property - def image_proc_dict(self): - return self.image_processor_tester.prepare_image_proc_dict() + def image_processor_dict(self): + return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): - image_processor = 
self.image_processing_class(**self.image_proc_dict) + image_processor = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processor, "do_rescale")) self.assertTrue(hasattr(image_processor, "rescale_factor")) self.assertTrue(hasattr(image_processor, "do_pad")) @@ -131,7 +131,7 @@ def calculate_expected_size(self, image): def test_call_pil(self): # Initialize image_processor - image_processor = self.image_processing_class(**self.image_proc_dict) + image_processor = self.image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False) for image in image_inputs: @@ -152,7 +152,7 @@ def test_call_pil(self): def test_call_numpy(self): # Initialize image_processor - image_processor = self.image_processing_class(**self.image_proc_dict) + image_processor = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True) for image in image_inputs: @@ -173,7 +173,7 @@ def test_call_numpy(self): def test_call_pytorch(self): # Initialize image_processor - image_processor = self.image_processing_class(**self.image_proc_dict) + image_processor = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True) for image in image_inputs: diff --git a/tests/models/videomae/test_image_processing_videomae.py b/tests/models/videomae/test_image_processing_videomae.py index 56b41d8b7fde..53676328450b 100644 --- a/tests/models/videomae/test_image_processing_videomae.py +++ b/tests/models/videomae/test_image_processing_videomae.py @@ -101,11 +101,11 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "size")) def test_image_processor_from_dict_with_kwargs(self): - image_processor = 
self.image_processing_class.from_dict(self.image_proc_dict) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 18}) self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) - image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42, crop_size=84) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84) self.assertEqual(image_processor.size, {"shortest_edge": 42}) self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) diff --git a/tests/models/vilt/test_image_processing_vilt.py b/tests/models/vilt/test_image_processing_vilt.py index 551cc95d1c68..a89fd9b854d1 100644 --- a/tests/models/vilt/test_image_processing_vilt.py +++ b/tests/models/vilt/test_image_processing_vilt.py @@ -137,10 +137,10 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "size_divisor")) def test_image_processor_from_dict_with_kwargs(self): - image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 30}) - image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42) self.assertEqual(image_processor.size, {"shortest_edge": 42}) def test_batch_feature(self): diff --git a/tests/models/vit/test_image_processing_vit.py b/tests/models/vit/test_image_processing_vit.py index 8b296623cb68..ce0cc5610a83 100644 --- a/tests/models/vit/test_image_processing_vit.py +++ b/tests/models/vit/test_image_processing_vit.py @@ -93,10 +93,10 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "size")) def test_image_processor_from_dict_with_kwargs(self): - 
image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 18, "width": 18}) - image_processor = self.image_processing_class.from_dict(self.image_proc_dict, size=42) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) def test_batch_feature(self): diff --git a/tests/models/yolos/test_image_processing_yolos.py b/tests/models/yolos/test_image_processing_yolos.py index 06ed90e1c169..b262a654a5a4 100644 --- a/tests/models/yolos/test_image_processing_yolos.py +++ b/tests/models/yolos/test_image_processing_yolos.py @@ -134,12 +134,12 @@ def test_image_processor_properties(self): self.assertTrue(hasattr(image_processing, "size")) def test_image_processor_from_dict_with_kwargs(self): - image_processor = self.image_processing_class.from_dict(self.image_proc_dict) + image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333}) self.assertEqual(image_processor.do_pad, True) image_processor = self.image_processing_class.from_dict( - self.image_proc_dict, size=42, max_size=84, pad_and_return_pixel_mask=False + self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False ) self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84}) self.assertEqual(image_processor.do_pad, False) From 00260d0ce3dd9cc62314c6b3819979cd3350d09f Mon Sep 17 00:00:00 2001 From: Amy Roberts <22614925+amyeroberts@users.noreply.github.com> Date: Mon, 23 Jan 2023 16:17:55 +0000 Subject: [PATCH 14/15] Update recently merged in models --- .../test_image_processing_efficientformer.py | 98 +++++++++---------- .../test_image_processing_oneformer.py | 24 ++--- 2 files changed, 61 insertions(+), 61 
deletions(-) diff --git a/tests/models/efficientformer/test_image_processing_efficientformer.py b/tests/models/efficientformer/test_image_processing_efficientformer.py index cae206717007..680b1bf442fe 100644 --- a/tests/models/efficientformer/test_image_processing_efficientformer.py +++ b/tests/models/efficientformer/test_image_processing_efficientformer.py @@ -30,7 +30,7 @@ if is_vision_available(): from PIL import Image - from transformers import ViTFeatureExtractor + from transformers import ViTImageProcessor class EfficientFormerImageProcessorTester(unittest.TestCase): @@ -61,7 +61,7 @@ def __init__( self.image_mean = image_mean self.image_std = image_std - def prepare_feat_extract_dict(self): + def prepare_image_proc_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, @@ -75,118 +75,118 @@ def prepare_feat_extract_dict(self): @require_vision class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase): - feature_extraction_class = ViTFeatureExtractor if is_vision_available() else None + image_processing_class = ViTImageProcessor if is_vision_available() else None def setUp(self): - self.feature_extract_tester = EfficientFormerImageProcessorTester(self) + self.image_proc_tester = EfficientFormerImageProcessorTester(self) @property - def feat_extract_dict(self): - return self.feature_extract_tester.prepare_feat_extract_dict() + def image_proc_dict(self): + return self.image_proc_tester.prepare_image_proc_dict() - def test_feat_extract_properties(self): - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) - self.assertTrue(hasattr(feature_extractor, "image_mean")) - self.assertTrue(hasattr(feature_extractor, "image_std")) - self.assertTrue(hasattr(feature_extractor, "do_normalize")) - self.assertTrue(hasattr(feature_extractor, "do_resize")) - self.assertTrue(hasattr(feature_extractor, "size")) + def test_image_proc_properties(self): + image_processor = 
self.image_processing_class(**self.image_proc_dict) + self.assertTrue(hasattr(image_processor, "image_mean")) + self.assertTrue(hasattr(image_processor, "image_std")) + self.assertTrue(hasattr(image_processor, "do_normalize")) + self.assertTrue(hasattr(image_processor, "do_resize")) + self.assertTrue(hasattr(image_processor, "size")) def test_batch_feature(self): pass def test_call_pil(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processor + image_processor = self.image_processing_class(**self.image_proc_dict) # create random PIL images - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False) + image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_proc_tester.num_channels, + self.image_proc_tester.size["height"], + self.image_proc_tester.size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_proc_tester.batch_size, + self.image_proc_tester.num_channels, + self.image_proc_tester.size["height"], + self.image_proc_tester.size["width"], ), ) def test_call_numpy(self): - # Initialize 
feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processor + image_processor = self.image_processing_class(**self.image_proc_dict) # create random numpy tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True) + image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_proc_tester.num_channels, + self.image_proc_tester.size["height"], + self.image_proc_tester.size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_proc_tester.batch_size, + self.image_proc_tester.num_channels, + self.image_proc_tester.size["height"], + self.image_proc_tester.size["width"], ), ) def test_call_pytorch(self): - # Initialize feature_extractor - feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # Initialize image_processor + image_processor = self.image_processing_class(**self.image_proc_dict) # create random PyTorch tensors - image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + image_inputs = 
prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input - encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_proc_tester.num_channels, + self.image_proc_tester.size["height"], + self.image_proc_tester.size["width"], ), ) # Test batched - encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( - self.feature_extract_tester.batch_size, - self.feature_extract_tester.num_channels, - self.feature_extract_tester.size["height"], - self.feature_extract_tester.size["width"], + self.image_proc_tester.batch_size, + self.image_proc_tester.num_channels, + self.image_proc_tester.size["height"], + self.image_proc_tester.size["width"], ), ) diff --git a/tests/models/oneformer/test_image_processing_oneformer.py b/tests/models/oneformer/test_image_processing_oneformer.py index 8faa441b5184..13f8c137340c 100644 --- a/tests/models/oneformer/test_image_processing_oneformer.py +++ b/tests/models/oneformer/test_image_processing_oneformer.py @@ -99,7 +99,7 @@ def __init__( self.reduce_labels = reduce_labels self.ignore_index = ignore_index - def prepare_feat_extract_dict(self): + def prepare_image_proc_dict(self): return { "do_resize": self.do_resize, "size": self.size, @@ -157,18 +157,18 @@ def get_fake_oneformer_outputs(self): @require_vision class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = OneFormerImageProcessor if (is_vision_available() and 
is_torch_available()) else None - # only for test_feat_extracttion_common.test_feat_extract_to_json_string - feature_extraction_class = image_processing_class + # only for test_image_processing_common.test_image_proc_to_json_string + image_processing_class = image_processing_class def setUp(self): self.image_processing_tester = OneFormerImageProcessorTester(self) @property - def feat_extract_dict(self): - return self.image_processing_tester.prepare_feat_extract_dict() + def image_proc_dict(self): + return self.image_processing_tester.prepare_image_proc_dict() - def test_feat_extract_properties(self): - image_processor = self.image_processing_class(**self.feat_extract_dict) + def test_image_proc_properties(self): + image_processor = self.image_processing_class(**self.image_proc_dict) self.assertTrue(hasattr(image_processor, "image_mean")) self.assertTrue(hasattr(image_processor, "image_std")) self.assertTrue(hasattr(image_processor, "do_normalize")) @@ -186,7 +186,7 @@ def test_batch_feature(self): def test_call_pil(self): # Initialize image_processor - image_processor = self.image_processing_class(**self.feat_extract_dict) + image_processor = self.image_processing_class(**self.image_proc_dict) # create random PIL images image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False) for image in image_inputs: @@ -220,7 +220,7 @@ def test_call_pil(self): def test_call_numpy(self): # Initialize image_processor - image_processor = self.image_processing_class(**self.feat_extract_dict) + image_processor = self.image_processing_class(**self.image_proc_dict) # create random numpy tensors image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True) for image in image_inputs: @@ -254,7 +254,7 @@ def test_call_numpy(self): def test_call_pytorch(self): # Initialize image_processor - image_processor = self.image_processing_class(**self.feat_extract_dict) + image_processor = 
self.image_processing_class(**self.image_proc_dict) # create random PyTorch tensors image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True) for image in image_inputs: @@ -288,7 +288,7 @@ def test_call_pytorch(self): def test_equivalence_pad_and_create_pixel_mask(self): # Initialize image_processors - image_processor_1 = self.image_processing_class(**self.feat_extract_dict) + image_processor_1 = self.image_processing_class(**self.image_proc_dict) image_processor_2 = self.image_processing_class( do_resize=False, do_normalize=False, @@ -319,7 +319,7 @@ def test_equivalence_pad_and_create_pixel_mask(self): def comm_get_image_processor_inputs( self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np" ): - image_processor = self.image_processing_class(**self.feat_extract_dict) + image_processor = self.image_processing_class(**self.image_proc_dict) # prepare image and target num_labels = self.image_processing_tester.num_labels annotations = None From 2dbbb225b4626c9a1baa46f2e8a16ab2431271ec Mon Sep 17 00:00:00 2001 From: Amy Roberts <22614925+amyeroberts@users.noreply.github.com> Date: Mon, 23 Jan 2023 16:28:19 +0000 Subject: [PATCH 15/15] Small fix --- .../test_image_processing_efficientformer.py | 14 +++++++------- .../test_image_processing_oneformer.py | 18 +++++++++--------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/tests/models/efficientformer/test_image_processing_efficientformer.py b/tests/models/efficientformer/test_image_processing_efficientformer.py index 680b1bf442fe..6a17783f61d1 100644 --- a/tests/models/efficientformer/test_image_processing_efficientformer.py +++ b/tests/models/efficientformer/test_image_processing_efficientformer.py @@ -61,7 +61,7 @@ def __init__( self.image_mean = image_mean self.image_std = image_std - def prepare_image_proc_dict(self): + def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, 
@@ -81,11 +81,11 @@ def setUp(self): self.image_proc_tester = EfficientFormerImageProcessorTester(self) @property - def image_proc_dict(self): - return self.image_proc_tester.prepare_image_proc_dict() + def image_processor_dict(self): + return self.image_proc_tester.prepare_image_processor_dict() def test_image_proc_properties(self): - image_processor = self.image_processing_class(**self.image_proc_dict) + image_processor = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processor, "image_mean")) self.assertTrue(hasattr(image_processor, "image_std")) self.assertTrue(hasattr(image_processor, "do_normalize")) @@ -97,7 +97,7 @@ def test_batch_feature(self): def test_call_pil(self): # Initialize image_processor - image_processor = self.image_processing_class(**self.image_proc_dict) + image_processor = self.image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False) for image in image_inputs: @@ -129,7 +129,7 @@ def test_call_pil(self): def test_call_numpy(self): # Initialize image_processor - image_processor = self.image_processing_class(**self.image_proc_dict) + image_processor = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True) for image in image_inputs: @@ -161,7 +161,7 @@ def test_call_numpy(self): def test_call_pytorch(self): # Initialize image_processor - image_processor = self.image_processing_class(**self.image_proc_dict) + image_processor = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True) for image in image_inputs: diff --git a/tests/models/oneformer/test_image_processing_oneformer.py b/tests/models/oneformer/test_image_processing_oneformer.py index 
13f8c137340c..5d266bc92e4d 100644 --- a/tests/models/oneformer/test_image_processing_oneformer.py +++ b/tests/models/oneformer/test_image_processing_oneformer.py @@ -99,7 +99,7 @@ def __init__( self.reduce_labels = reduce_labels self.ignore_index = ignore_index - def prepare_image_proc_dict(self): + def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, @@ -164,11 +164,11 @@ def setUp(self): self.image_processing_tester = OneFormerImageProcessorTester(self) @property - def image_proc_dict(self): - return self.image_processing_tester.prepare_image_proc_dict() + def image_processor_dict(self): + return self.image_processing_tester.prepare_image_processor_dict() def test_image_proc_properties(self): - image_processor = self.image_processing_class(**self.image_proc_dict) + image_processor = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processor, "image_mean")) self.assertTrue(hasattr(image_processor, "image_std")) self.assertTrue(hasattr(image_processor, "do_normalize")) @@ -186,7 +186,7 @@ def test_batch_feature(self): def test_call_pil(self): # Initialize image_processor - image_processor = self.image_processing_class(**self.image_proc_dict) + image_processor = self.image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False) for image in image_inputs: @@ -220,7 +220,7 @@ def test_call_pil(self): def test_call_numpy(self): # Initialize image_processor - image_processor = self.image_processing_class(**self.image_proc_dict) + image_processor = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True) for image in image_inputs: @@ -254,7 +254,7 @@ def test_call_numpy(self): def test_call_pytorch(self): # Initialize image_processor - image_processor = 
self.image_processing_class(**self.image_proc_dict) + image_processor = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True) for image in image_inputs: @@ -288,7 +288,7 @@ def test_call_pytorch(self): def test_equivalence_pad_and_create_pixel_mask(self): # Initialize image_processors - image_processor_1 = self.image_processing_class(**self.image_proc_dict) + image_processor_1 = self.image_processing_class(**self.image_processor_dict) image_processor_2 = self.image_processing_class( do_resize=False, do_normalize=False, @@ -319,7 +319,7 @@ def test_equivalence_pad_and_create_pixel_mask(self): def comm_get_image_processor_inputs( self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np" ): - image_processor = self.image_processing_class(**self.image_proc_dict) + image_processor = self.image_processing_class(**self.image_processor_dict) # prepare image and target num_labels = self.image_processing_tester.num_labels annotations = None