diff --git a/setup.py b/setup.py index 012d88fd18a5..554d25dd1e8a 100644 --- a/setup.py +++ b/setup.py @@ -114,7 +114,7 @@ "GitPython<3.1.19", "hf-doc-builder>=0.3.0", "hf_xet", - "huggingface-hub==1.0.0.rc2", + "huggingface-hub==1.0.0.rc4", "importlib_metadata", "ipadic>=1.0.0,<2.0", "jinja2>=3.1.0", diff --git a/src/transformers/dependency_versions_table.py b/src/transformers/dependency_versions_table.py index a6b6a9c445e6..1caefce16c3e 100644 --- a/src/transformers/dependency_versions_table.py +++ b/src/transformers/dependency_versions_table.py @@ -23,7 +23,7 @@ "GitPython": "GitPython<3.1.19", "hf-doc-builder": "hf-doc-builder>=0.3.0", "hf_xet": "hf_xet", - "huggingface-hub": "huggingface-hub==1.0.0.rc2", + "huggingface-hub": "huggingface-hub==1.0.0.rc4", "importlib_metadata": "importlib_metadata", "ipadic": "ipadic>=1.0.0,<2.0", "jinja2": "jinja2>=3.1.0", diff --git a/src/transformers/image_processing_utils_fast.py b/src/transformers/image_processing_utils_fast.py index a9f6900a1046..8d4c79afbb1d 100644 --- a/src/transformers/image_processing_utils_fast.py +++ b/src/transformers/image_processing_utils_fast.py @@ -18,6 +18,7 @@ from typing import Any, Optional, Union import numpy as np +from huggingface_hub.dataclasses import validate_typed_dict from .image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from .image_transforms import ( @@ -710,6 +711,10 @@ def _validate_preprocess_kwargs( def preprocess(self, images: ImageInput, *args, **kwargs: Unpack[ImagesKwargs]) -> BatchFeature: # args are not validated, but their order in the `preprocess` and `_preprocess` signatures must be the same validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_kwargs_names) + + # Perform type validation on received kwargs + validate_typed_dict(self.valid_kwargs, kwargs) + # Set default kwargs from self. This ensures that if a kwarg is not provided # by the user, it gets its default value from the instance, or is set to None. 
for kwarg_name in self._valid_kwargs_names: diff --git a/src/transformers/models/aria/modular_aria.py b/src/transformers/models/aria/modular_aria.py index 1d820c00cf0a..46e35911c1f1 100644 --- a/src/transformers/models/aria/modular_aria.py +++ b/src/transformers/models/aria/modular_aria.py @@ -38,7 +38,7 @@ ) from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_utils import PreTrainedModel -from ...processing_utils import MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack +from ...processing_utils import ImagesKwargs, MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack from ...tokenization_utils import PreTokenizedInput, TextInput from ...utils import TensorType, TransformersKwargs, auto_docstring, can_return_tuple, logging from ..auto import CONFIG_MAPPING, AutoConfig, AutoTokenizer @@ -904,7 +904,15 @@ def get_number_of_image_patches(self, height: int, width: int, images_kwargs=Non return num_patches +class AriaImagesKwargs(ImagesKwargs, total=False): + split_image: bool + max_image_size: int + min_image_size: int + + class AriaProcessorKwargs(ProcessingKwargs, total=False): + images_kwargs: AriaImagesKwargs + _defaults = { "text_kwargs": { "padding": False, diff --git a/src/transformers/models/aria/processing_aria.py b/src/transformers/models/aria/processing_aria.py index 976d2b983ee9..d0841c96aee2 100644 --- a/src/transformers/models/aria/processing_aria.py +++ b/src/transformers/models/aria/processing_aria.py @@ -24,13 +24,21 @@ from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput -from ...processing_utils import MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack +from ...processing_utils import ImagesKwargs, MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack from ...tokenization_utils import PreTokenizedInput, TextInput from ...utils import TensorType from ..auto import AutoTokenizer +class AriaImagesKwargs(ImagesKwargs, total=False): + split_image: bool + 
max_image_size: int + min_image_size: int + + class AriaProcessorKwargs(ProcessingKwargs, total=False): + images_kwargs: AriaImagesKwargs + _defaults = { "text_kwargs": { "padding": False, diff --git a/src/transformers/models/beit/image_processing_beit.py b/src/transformers/models/beit/image_processing_beit.py index f65709168379..884619f12b13 100644 --- a/src/transformers/models/beit/image_processing_beit.py +++ b/src/transformers/models/beit/image_processing_beit.py @@ -55,7 +55,7 @@ logger = logging.get_logger(__name__) -class BeitImageProcessorKwargs(ImagesKwargs): +class BeitImageProcessorKwargs(ImagesKwargs, total=False): r""" do_reduce_labels (`bool`, *optional*, defaults to `self.do_reduce_labels`): Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 @@ -63,7 +63,7 @@ class BeitImageProcessorKwargs(ImagesKwargs): ADE20k). The background label will be replaced by 255. """ - do_reduce_labels: Optional[bool] + do_reduce_labels: bool @requires(backends=("vision",)) diff --git a/src/transformers/models/bridgetower/image_processing_bridgetower.py b/src/transformers/models/bridgetower/image_processing_bridgetower.py index cad23d02893f..73bfc7407666 100644 --- a/src/transformers/models/bridgetower/image_processing_bridgetower.py +++ b/src/transformers/models/bridgetower/image_processing_bridgetower.py @@ -123,8 +123,8 @@ def get_resize_output_image_size( return new_height, new_width -class BridgeTowerImageProcessorKwargs(ImagesKwargs): - size_divisor: Optional[int] +class BridgeTowerImageProcessorKwargs(ImagesKwargs, total=False): + size_divisor: int class BridgeTowerImageProcessor(BaseImageProcessor): diff --git a/src/transformers/models/cohere2_vision/image_processing_cohere2_vision_fast.py b/src/transformers/models/cohere2_vision/image_processing_cohere2_vision_fast.py index 358d84ac6d7c..afdd683e2312 100644 --- a/src/transformers/models/cohere2_vision/image_processing_cohere2_vision_fast.py +++ 
b/src/transformers/models/cohere2_vision/image_processing_cohere2_vision_fast.py @@ -33,7 +33,7 @@ from ...utils import TensorType, auto_docstring -class Cohere2VisionFastImageProcessorKwargs(ImagesKwargs): +class Cohere2VisionFastImageProcessorKwargs(ImagesKwargs, total=False): """ crop_to_patches (`bool`, *optional*, defaults to `False`): Whether to crop the image to patches. Can be overridden by the `crop_to_patches` parameter in the @@ -46,9 +46,9 @@ class Cohere2VisionFastImageProcessorKwargs(ImagesKwargs): set to `True`. Can be overridden by the `max_patches` parameter in the `preprocess` method. """ - crop_to_patches: Optional[bool] - min_patches: Optional[int] - max_patches: Optional[int] + crop_to_patches: bool + min_patches: int + max_patches: int @lru_cache(maxsize=10) diff --git a/src/transformers/models/cohere2_vision/modular_cohere2_vision.py b/src/transformers/models/cohere2_vision/modular_cohere2_vision.py index 997a6f2d638e..b801c24575ca 100644 --- a/src/transformers/models/cohere2_vision/modular_cohere2_vision.py +++ b/src/transformers/models/cohere2_vision/modular_cohere2_vision.py @@ -303,7 +303,7 @@ def get_optimal_tiled_canvas( return best_grid -class Cohere2VisionFastImageProcessorKwargs(ImagesKwargs): +class Cohere2VisionFastImageProcessorKwargs(ImagesKwargs, total=False): """ crop_to_patches (`bool`, *optional*, defaults to `False`): Whether to crop the image to patches. Can be overridden by the `crop_to_patches` parameter in the @@ -316,9 +316,9 @@ class Cohere2VisionFastImageProcessorKwargs(ImagesKwargs): set to `True`. Can be overridden by the `max_patches` parameter in the `preprocess` method. 
""" - crop_to_patches: Optional[bool] - min_patches: Optional[int] - max_patches: Optional[int] + crop_to_patches: bool + min_patches: int + max_patches: int @auto_docstring diff --git a/src/transformers/models/conditional_detr/image_processing_conditional_detr.py b/src/transformers/models/conditional_detr/image_processing_conditional_detr.py index 6df784585e9b..3f639e0c1ae3 100644 --- a/src/transformers/models/conditional_detr/image_processing_conditional_detr.py +++ b/src/transformers/models/conditional_detr/image_processing_conditional_detr.py @@ -729,7 +729,7 @@ def compute_segments( return segmentation, segments -class ConditionalDetrImageProcessorKwargs(ImagesKwargs): +class ConditionalDetrImageProcessorKwargs(ImagesKwargs, total=False): r""" format (`str`, *optional*, defaults to `AnnotationFormat.COCO_DETECTION`): Data format of the annotations. One of "coco_detection" or "coco_panoptic". @@ -745,9 +745,9 @@ class ConditionalDetrImageProcessorKwargs(ImagesKwargs): Path to the directory containing the segmentation masks. """ - format: Optional[Union[str, AnnotationFormat]] - do_convert_annotations: Optional[bool] - return_segmentation_masks: Optional[bool] + format: Union[str, AnnotationFormat] + do_convert_annotations: bool + return_segmentation_masks: bool annotations: Optional[Union[AnnotationType, list[AnnotationType]]] masks_path: Optional[Union[str, pathlib.Path]] diff --git a/src/transformers/models/convnext/image_processing_convnext.py b/src/transformers/models/convnext/image_processing_convnext.py index d2e180de2464..c4e279346f3c 100644 --- a/src/transformers/models/convnext/image_processing_convnext.py +++ b/src/transformers/models/convnext/image_processing_convnext.py @@ -50,14 +50,14 @@ logger = logging.get_logger(__name__) -class ConvNextImageProcessorKwargs(ImagesKwargs): +class ConvNextImageProcessorKwargs(ImagesKwargs, total=False): """ crop_pct (`float`, *optional*): Percentage of the image to crop. Only has an effect if size < 384. 
Can be overridden by `crop_pct` in the`preprocess` method. """ - crop_pct: Optional[float] + crop_pct: float @requires(backends=("vision",)) diff --git a/src/transformers/models/deepseek_vl/image_processing_deepseek_vl.py b/src/transformers/models/deepseek_vl/image_processing_deepseek_vl.py index c41ac586753e..763182de4039 100644 --- a/src/transformers/models/deepseek_vl/image_processing_deepseek_vl.py +++ b/src/transformers/models/deepseek_vl/image_processing_deepseek_vl.py @@ -49,7 +49,7 @@ logger = logging.get_logger(__name__) -class DeepseekVLImageProcessorKwargs(ImagesKwargs): +class DeepseekVLImageProcessorKwargs(ImagesKwargs, total=False): r""" min_size (`int`, *optional*, defaults to 14): The minimum allowed size for the resized image. Ensures that neither the height nor width diff --git a/src/transformers/models/deepseek_vl_hybrid/image_processing_deepseek_vl_hybrid.py b/src/transformers/models/deepseek_vl_hybrid/image_processing_deepseek_vl_hybrid.py index 8b93f7fa6c94..c91aab91fca5 100644 --- a/src/transformers/models/deepseek_vl_hybrid/image_processing_deepseek_vl_hybrid.py +++ b/src/transformers/models/deepseek_vl_hybrid/image_processing_deepseek_vl_hybrid.py @@ -50,7 +50,7 @@ logger = logging.get_logger(__name__) -class DeepseekVLHybridImageProcessorKwargs(ImagesKwargs): +class DeepseekVLHybridImageProcessorKwargs(ImagesKwargs, total=False): r""" min_size (`int`, *optional*, defaults to 14): The minimum allowed size for the resized image. 
Ensures that neither the height nor width @@ -71,9 +71,9 @@ class DeepseekVLHybridImageProcessorKwargs(ImagesKwargs): min_size: int high_res_size: dict - high_res_resample: "PILImageResampling" - high_res_image_mean: list[float] - high_res_image_std: list[float] + high_res_resample: Union["PILImageResampling", int] + high_res_image_mean: Union[float, list[float], tuple[float, ...]] + high_res_image_std: Union[float, list[float], tuple[float, ...]] class DeepseekVLHybridImageProcessor(BaseImageProcessor): diff --git a/src/transformers/models/deepseek_vl_hybrid/modular_deepseek_vl_hybrid.py b/src/transformers/models/deepseek_vl_hybrid/modular_deepseek_vl_hybrid.py index 4135623743ae..43af7d43dfb3 100644 --- a/src/transformers/models/deepseek_vl_hybrid/modular_deepseek_vl_hybrid.py +++ b/src/transformers/models/deepseek_vl_hybrid/modular_deepseek_vl_hybrid.py @@ -429,7 +429,7 @@ def prepare_inputs_for_generation( return model_inputs -class DeepseekVLHybridImageProcessorKwargs(ImagesKwargs): +class DeepseekVLHybridImageProcessorKwargs(ImagesKwargs, total=False): r""" min_size (`int`, *optional*, defaults to 14): The minimum allowed size for the resized image. 
Ensures that neither the height nor width @@ -450,9 +450,9 @@ class DeepseekVLHybridImageProcessorKwargs(ImagesKwargs): min_size: int high_res_size: dict - high_res_resample: "PILImageResampling" - high_res_image_mean: list[float] - high_res_image_std: list[float] + high_res_resample: Union["PILImageResampling", int] + high_res_image_mean: Union[float, list[float], tuple[float, ...]] + high_res_image_std: Union[float, list[float], tuple[float, ...]] class DeepseekVLHybridImageProcessor(DeepseekVLImageProcessor): diff --git a/src/transformers/models/deformable_detr/image_processing_deformable_detr.py b/src/transformers/models/deformable_detr/image_processing_deformable_detr.py index eabdb536ff70..83587f45c295 100644 --- a/src/transformers/models/deformable_detr/image_processing_deformable_detr.py +++ b/src/transformers/models/deformable_detr/image_processing_deformable_detr.py @@ -82,7 +82,7 @@ logger = logging.get_logger(__name__) # pylint: disable=invalid-name -class DeformableDetrImageProcessorKwargs(ImagesKwargs): +class DeformableDetrImageProcessorKwargs(ImagesKwargs, total=False): r""" format (`str`, *optional*, defaults to `AnnotationFormat.COCO_DETECTION`): Data format of the annotations. One of "coco_detection" or "coco_panoptic". @@ -98,9 +98,9 @@ class DeformableDetrImageProcessorKwargs(ImagesKwargs): Path to the directory containing the segmentation masks. 
""" - format: Optional[Union[str, AnnotationFormat]] - do_convert_annotations: Optional[bool] - return_segmentation_masks: Optional[bool] + format: Union[str, AnnotationFormat] + do_convert_annotations: bool + return_segmentation_masks: bool annotations: Optional[Union[AnnotationType, list[AnnotationType]]] masks_path: Optional[Union[str, pathlib.Path]] diff --git a/src/transformers/models/detr/image_processing_detr.py b/src/transformers/models/detr/image_processing_detr.py index 02261fc2a129..2f149b662ec2 100644 --- a/src/transformers/models/detr/image_processing_detr.py +++ b/src/transformers/models/detr/image_processing_detr.py @@ -84,7 +84,7 @@ SUPPORTED_ANNOTATION_FORMATS = (AnnotationFormat.COCO_DETECTION, AnnotationFormat.COCO_PANOPTIC) -class DetrImageProcessorKwargs(ImagesKwargs): +class DetrImageProcessorKwargs(ImagesKwargs, total=False): r""" format (`str`, *optional*, defaults to `AnnotationFormat.COCO_DETECTION`): Data format of the annotations. One of "coco_detection" or "coco_panoptic". @@ -100,9 +100,9 @@ class DetrImageProcessorKwargs(ImagesKwargs): Path to the directory containing the segmentation masks. 
""" - format: Optional[Union[str, AnnotationFormat]] - do_convert_annotations: Optional[bool] - return_segmentation_masks: Optional[bool] + format: Union[str, AnnotationFormat] + do_convert_annotations: bool + return_segmentation_masks: bool annotations: Optional[Union[AnnotationType, list[AnnotationType]]] masks_path: Optional[Union[str, pathlib.Path]] diff --git a/src/transformers/models/dia/processing_dia.py b/src/transformers/models/dia/processing_dia.py index 812a4149cb3f..6518b5444639 100644 --- a/src/transformers/models/dia/processing_dia.py +++ b/src/transformers/models/dia/processing_dia.py @@ -55,7 +55,9 @@ class DiaProcessorKwargs(ProcessingKwargs, total=False): "generation": True, "sampling_rate": 44100, }, - "common_kwargs": {"return_tensors": "pt"}, + "common_kwargs": { + "return_tensors": "pt", + }, } diff --git a/src/transformers/models/donut/image_processing_donut.py b/src/transformers/models/donut/image_processing_donut.py index 5af365099724..0f74ac62ec92 100644 --- a/src/transformers/models/donut/image_processing_donut.py +++ b/src/transformers/models/donut/image_processing_donut.py @@ -52,7 +52,7 @@ import PIL -class DonutImageProcessorKwargs(ImagesKwargs): +class DonutImageProcessorKwargs(ImagesKwargs, total=False): """ do_thumbnail (`bool`, *optional*, defaults to `self.do_thumbnail`): Whether to resize the image using thumbnail method. @@ -60,8 +60,8 @@ class DonutImageProcessorKwargs(ImagesKwargs): Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees. 
""" - do_thumbnail: Optional[bool] - do_align_long_axis: Optional[bool] + do_thumbnail: bool + do_align_long_axis: bool @requires(backends=("vision",)) diff --git a/src/transformers/models/dpt/image_processing_dpt.py b/src/transformers/models/dpt/image_processing_dpt.py index 3ba5a6e30c21..6246b1f3f7c0 100644 --- a/src/transformers/models/dpt/image_processing_dpt.py +++ b/src/transformers/models/dpt/image_processing_dpt.py @@ -64,7 +64,7 @@ logger = logging.get_logger(__name__) -class DPTImageProcessorKwargs(ImagesKwargs): +class DPTImageProcessorKwargs(ImagesKwargs, total=False): """ ensure_multiple_of (`int`, *optional*, defaults to 1): If `do_resize` is `True`, the image is resized to a size that is a multiple of this value. Can be overridden @@ -78,10 +78,10 @@ class DPTImageProcessorKwargs(ImagesKwargs): ADE20k). The background label will be replaced by 255. """ - ensure_multiple_of: Optional[int] - size_divisor: Optional[int] - keep_aspect_ratio: Optional[bool] - do_reduce_labels: Optional[bool] + ensure_multiple_of: int + size_divisor: int + keep_aspect_ratio: bool + do_reduce_labels: bool def get_resize_output_image_size( diff --git a/src/transformers/models/efficientloftr/image_processing_efficientloftr.py b/src/transformers/models/efficientloftr/image_processing_efficientloftr.py index d1beabb6c2b9..acf9105fe77a 100644 --- a/src/transformers/models/efficientloftr/image_processing_efficientloftr.py +++ b/src/transformers/models/efficientloftr/image_processing_efficientloftr.py @@ -50,13 +50,13 @@ logger = logging.get_logger(__name__) -class EfficientLoFTRImageProcessorKwargs(ImagesKwargs): +class EfficientLoFTRImageProcessorKwargs(ImagesKwargs, total=False): r""" do_grayscale (`bool`, *optional*, defaults to `True`): Whether to convert the image to grayscale. Can be overridden by `do_grayscale` in the `preprocess` method. 
""" - do_grayscale: Optional[bool] = True + do_grayscale: bool # Copied from transformers.models.superpoint.image_processing_superpoint.is_grayscale diff --git a/src/transformers/models/efficientnet/image_processing_efficientnet.py b/src/transformers/models/efficientnet/image_processing_efficientnet.py index f5a69eff70e4..2a5b5c93749b 100644 --- a/src/transformers/models/efficientnet/image_processing_efficientnet.py +++ b/src/transformers/models/efficientnet/image_processing_efficientnet.py @@ -44,7 +44,7 @@ logger = logging.get_logger(__name__) -class EfficientNetImageProcessorKwargs(ImagesKwargs): +class EfficientNetImageProcessorKwargs(ImagesKwargs, total=False): """ rescale_offset (`bool`, *optional*, defaults to `self.rescale_offset`): Whether to rescale the image between [-max_range/2, scale_range/2] instead of [0, scale_range]. diff --git a/src/transformers/models/emu3/image_processing_emu3.py b/src/transformers/models/emu3/image_processing_emu3.py index fca5316a3fca..0c550937581f 100644 --- a/src/transformers/models/emu3/image_processing_emu3.py +++ b/src/transformers/models/emu3/image_processing_emu3.py @@ -47,9 +47,9 @@ logger = logging.get_logger(__name__) -class Emu3ImageProcessorKwargs(ImagesKwargs): - ratio: Optional[str] - image_area: Optional[int] +class Emu3ImageProcessorKwargs(ImagesKwargs, total=False): + ratio: str + image_area: int def smart_resize( diff --git a/src/transformers/models/eomt/image_processing_eomt.py b/src/transformers/models/eomt/image_processing_eomt.py index 3381e5bcac50..3459911cde1f 100644 --- a/src/transformers/models/eomt/image_processing_eomt.py +++ b/src/transformers/models/eomt/image_processing_eomt.py @@ -55,7 +55,7 @@ import torch.nn.functional as F -class EomtImageProcessorKwargs(ImagesKwargs): +class EomtImageProcessorKwargs(ImagesKwargs, total=False): """ do_split_image (`bool`, *optional*, defaults to `False`): Whether to split the input images into overlapping patches for semantic segmentation. 
If set to `True`, the @@ -67,7 +67,7 @@ class EomtImageProcessorKwargs(ImagesKwargs): """ do_split_image: bool - ignore_index: Optional[int] = None + ignore_index: Optional[int] # Adapted from transformers.models.maskformer.image_processing_maskformer.convert_segmentation_map_to_binary_masks diff --git a/src/transformers/models/flava/image_processing_flava.py b/src/transformers/models/flava/image_processing_flava.py index 3c19a2405169..b62717ae2cd6 100644 --- a/src/transformers/models/flava/image_processing_flava.py +++ b/src/transformers/models/flava/image_processing_flava.py @@ -57,7 +57,7 @@ LOGIT_LAPLACE_EPS: float = 0.1 -class FlavaImageProcessorKwargs(ImagesKwargs): +class FlavaImageProcessorKwargs(ImagesKwargs, total=False): """ return_image_mask (`bool`, *optional*, defaults to `False`): Whether to return the image mask. Can be overridden by the `return_image_mask` parameter in `preprocess`. @@ -118,26 +118,26 @@ class FlavaImageProcessorKwargs(ImagesKwargs): """ # Mask related params - return_image_mask: Optional[bool] - input_size_patches: Optional[int] - total_mask_patches: Optional[int] - mask_group_min_patches: Optional[int] - mask_group_max_patches: Optional[int] - mask_group_min_aspect_ratio: Optional[float] - mask_group_max_aspect_ratio: Optional[float] + return_image_mask: bool + input_size_patches: int + total_mask_patches: int + mask_group_min_patches: int + mask_group_max_patches: int + mask_group_min_aspect_ratio: float + mask_group_max_aspect_ratio: float # Codebook related params - return_codebook_pixels: Optional[bool] - codebook_do_resize: Optional[bool] - codebook_size: Optional[bool] - codebook_resample: Optional[int] - codebook_do_center_crop: Optional[bool] - codebook_crop_size: Optional[int] - codebook_do_rescale: Optional[bool] - codebook_rescale_factor: Optional[Union[int, float]] - codebook_do_map_pixels: Optional[bool] - codebook_do_normalize: Optional[bool] - codebook_image_mean: Optional[Union[float, Iterable[float]]] - 
codebook_image_std: Optional[Union[float, Iterable[float]]] + return_codebook_pixels: bool + codebook_do_resize: bool + codebook_size: bool + codebook_resample: int + codebook_do_center_crop: bool + codebook_crop_size: int + codebook_do_rescale: bool + codebook_rescale_factor: Union[int, float] + codebook_do_map_pixels: bool + codebook_do_normalize: bool + codebook_image_mean: Union[float, Iterable[float]] + codebook_image_std: Union[float, Iterable[float]] # Inspired from https://github.com/microsoft/unilm/blob/master/beit/masking_generator.py diff --git a/src/transformers/models/gemma3/image_processing_gemma3.py b/src/transformers/models/gemma3/image_processing_gemma3.py index 5206a13a04a3..d4bd7a00000e 100644 --- a/src/transformers/models/gemma3/image_processing_gemma3.py +++ b/src/transformers/models/gemma3/image_processing_gemma3.py @@ -51,7 +51,7 @@ import PIL -class Gemma3ImageProcessorKwargs(ImagesKwargs): +class Gemma3ImageProcessorKwargs(ImagesKwargs, total=False): """ do_pan_and_scan (`bool`, *optional*): Whether to apply `pan_and_scan` to images. @@ -63,10 +63,10 @@ class Gemma3ImageProcessorKwargs(ImagesKwargs): Minimum aspect ratio to activate pan and scan. 
""" - do_pan_and_scan: Optional[bool] - pan_and_scan_min_crop_size: Optional[int] - pan_and_scan_max_num_crops: Optional[int] - pan_and_scan_min_ratio_to_activate: Optional[float] + do_pan_and_scan: bool + pan_and_scan_min_crop_size: int + pan_and_scan_max_num_crops: int + pan_and_scan_min_ratio_to_activate: float class Gemma3ImageProcessor(BaseImageProcessor): diff --git a/src/transformers/models/glm4v/image_processing_glm4v.py b/src/transformers/models/glm4v/image_processing_glm4v.py index 13f4472e61f3..9a4348010750 100644 --- a/src/transformers/models/glm4v/image_processing_glm4v.py +++ b/src/transformers/models/glm4v/image_processing_glm4v.py @@ -47,7 +47,7 @@ logger = logging.get_logger(__name__) -class Glm4vImageProcessorKwargs(ImagesKwargs): +class Glm4vImageProcessorKwargs(ImagesKwargs, total=False): """ patch_size (`int`, *optional*, defaults to 14): The spatial patch size of the vision encoder. @@ -57,9 +57,9 @@ class Glm4vImageProcessorKwargs(ImagesKwargs): The merge size of the vision encoder to llm encoder. 
""" - patch_size: Optional[int] - temporal_patch_size: Optional[int] - merge_size: Optional[int] + patch_size: int + temporal_patch_size: int + merge_size: int def smart_resize( diff --git a/src/transformers/models/glm4v/video_processing_glm4v.py b/src/transformers/models/glm4v/video_processing_glm4v.py index 8324ad482baa..f27adfc7e25e 100644 --- a/src/transformers/models/glm4v/video_processing_glm4v.py +++ b/src/transformers/models/glm4v/video_processing_glm4v.py @@ -36,12 +36,12 @@ from .image_processing_glm4v import smart_resize -class Glm4vVideoProcessorInitKwargs(VideosKwargs): - max_image_size: Optional[dict[str, int]] - patch_size: Optional[int] - temporal_patch_size: Optional[int] - merge_size: Optional[int] - max_duration: Optional[int] +class Glm4vVideoProcessorInitKwargs(VideosKwargs, total=False): + max_image_size: dict[str, int] + patch_size: int + temporal_patch_size: int + merge_size: int + max_duration: int @add_start_docstrings( diff --git a/src/transformers/models/got_ocr2/image_processing_got_ocr2.py b/src/transformers/models/got_ocr2/image_processing_got_ocr2.py index 3424020c65b3..3fd5f7d512c1 100644 --- a/src/transformers/models/got_ocr2/image_processing_got_ocr2.py +++ b/src/transformers/models/got_ocr2/image_processing_got_ocr2.py @@ -49,7 +49,7 @@ logger = logging.get_logger(__name__) -class GotOcr2ImageProcessorKwargs(ImagesKwargs): +class GotOcr2ImageProcessorKwargs(ImagesKwargs, total=False): """ crop_to_patches (`bool`, *optional*, defaults to `False`): Whether to crop the image to patches. Can be overridden by the `crop_to_patches` parameter in the @@ -62,9 +62,9 @@ class GotOcr2ImageProcessorKwargs(ImagesKwargs): set to `True`. Can be overridden by the `max_patches` parameter in the `preprocess` method. 
""" - crop_to_patches: Optional[bool] - min_patches: Optional[int] - max_patches: Optional[int] + crop_to_patches: bool + min_patches: int + max_patches: int # Similar to image_processing_mllama.get_all_supported_aspect_ratios diff --git a/src/transformers/models/got_ocr2/processing_got_ocr2.py b/src/transformers/models/got_ocr2/processing_got_ocr2.py index 447122e18c22..1843b7f28830 100644 --- a/src/transformers/models/got_ocr2/processing_got_ocr2.py +++ b/src/transformers/models/got_ocr2/processing_got_ocr2.py @@ -36,13 +36,13 @@ class GotOcr2TextKwargs(TextKwargs, total=False): class GotOcr2ImagesKwargs(ImagesKwargs, total=False): - crop_to_patches: Optional[bool] - min_patches: Optional[int] - max_patches: Optional[int] + crop_to_patches: bool + min_patches: int + max_patches: int box: Optional[Union[list, tuple[float, float], tuple[float, float, float, float]]] color: Optional[str] - num_image_tokens: Optional[int] - multi_page: Optional[bool] + num_image_tokens: int + multi_page: bool class GotOcr2ProcessorKwargs(ProcessingKwargs, total=False): diff --git a/src/transformers/models/grounding_dino/image_processing_grounding_dino.py b/src/transformers/models/grounding_dino/image_processing_grounding_dino.py index c099d44e3d58..eb21ea3b376e 100644 --- a/src/transformers/models/grounding_dino/image_processing_grounding_dino.py +++ b/src/transformers/models/grounding_dino/image_processing_grounding_dino.py @@ -93,7 +93,7 @@ class AnnotationFormat(ExplicitEnum): SUPPORTED_ANNOTATION_FORMATS = (AnnotationFormat.COCO_DETECTION, AnnotationFormat.COCO_PANOPTIC) -class GroundingDinoImageProcessorKwargs(ImagesKwargs): +class GroundingDinoImageProcessorKwargs(ImagesKwargs, total=False): r""" format (`str`, *optional*, defaults to `AnnotationFormat.COCO_DETECTION`): Data format of the annotations. One of "coco_detection" or "coco_panoptic". @@ -109,9 +109,9 @@ class GroundingDinoImageProcessorKwargs(ImagesKwargs): Path to the directory containing the segmentation masks. 
""" - format: Optional[Union[str, AnnotationFormat]] - do_convert_annotations: Optional[bool] - return_segmentation_masks: Optional[bool] + format: Union[str, AnnotationFormat] + do_convert_annotations: bool + return_segmentation_masks: bool annotations: Optional[Union[AnnotationType, list[AnnotationType]]] masks_path: Optional[Union[str, pathlib.Path]] diff --git a/src/transformers/models/idefics/image_processing_idefics.py b/src/transformers/models/idefics/image_processing_idefics.py index 7fda46e3a990..870c741b826d 100644 --- a/src/transformers/models/idefics/image_processing_idefics.py +++ b/src/transformers/models/idefics/image_processing_idefics.py @@ -36,7 +36,7 @@ IDEFICS_STANDARD_STD = [0.26862954, 0.26130258, 0.27577711] -class IdeficsImageProcessorKwargs(ImagesKwargs): +class IdeficsImageProcessorKwargs(ImagesKwargs, total=False): """ transform (`Callable`, *optional*): A custom transform function that accepts a single image can be passed for training. For example, @@ -47,7 +47,7 @@ class IdeficsImageProcessorKwargs(ImagesKwargs): """ transform: Optional[Callable] - image_size: Optional[dict[str, int]] + image_size: dict[str, int] def convert_to_rgb(image): diff --git a/src/transformers/models/idefics2/image_processing_idefics2.py b/src/transformers/models/idefics2/image_processing_idefics2.py index b9b741a9704b..e068ac42f403 100644 --- a/src/transformers/models/idefics2/image_processing_idefics2.py +++ b/src/transformers/models/idefics2/image_processing_idefics2.py @@ -47,13 +47,13 @@ from PIL import Image -class Idefics2ImageProcessorKwargs(ImagesKwargs): +class Idefics2ImageProcessorKwargs(ImagesKwargs, total=False): """ do_image_splitting (`bool`, *optional*, defaults to `False`): Whether to split the image into a sequence 4 equal sub-images concatenated with the original image. 
""" - do_image_splitting: Optional[bool] + do_image_splitting: bool def get_resize_output_image_size(image, size, input_data_format) -> tuple[int, int]: diff --git a/src/transformers/models/idefics3/image_processing_idefics3.py b/src/transformers/models/idefics3/image_processing_idefics3.py index f098a9f54dc1..65e17ef4b776 100644 --- a/src/transformers/models/idefics3/image_processing_idefics3.py +++ b/src/transformers/models/idefics3/image_processing_idefics3.py @@ -48,7 +48,7 @@ from PIL import Image -class Idefics3ImageProcessorKwargs(ImagesKwargs): +class Idefics3ImageProcessorKwargs(ImagesKwargs, total=False): """ do_image_splitting (`bool`, *optional*, defaults to `True`): Whether to split the image into sub-images concatenated with the original image. They are split into patches @@ -59,9 +59,9 @@ class Idefics3ImageProcessorKwargs(ImagesKwargs): Whether to return the row and column information of the images. """ - do_image_splitting: Optional[bool] - max_image_size: Optional[dict[str, int]] - return_row_col_info: Optional[bool] + do_image_splitting: bool + max_image_size: dict[str, int] + return_row_col_info: bool def _resize_output_size_rescale_to_max_len( diff --git a/src/transformers/models/imagegpt/image_processing_imagegpt.py b/src/transformers/models/imagegpt/image_processing_imagegpt.py index 8f79cd58ec5f..ab7057f7d407 100644 --- a/src/transformers/models/imagegpt/image_processing_imagegpt.py +++ b/src/transformers/models/imagegpt/image_processing_imagegpt.py @@ -45,7 +45,7 @@ logger = logging.get_logger(__name__) -class ImageGPTImageProcessorKwargs(ImagesKwargs): +class ImageGPTImageProcessorKwargs(ImagesKwargs, total=False): """ clusters (`np.ndarray` or `list[list[int]]` or `torch.Tensor`, *optional*): The color clusters to use, of shape `(n_clusters, 3)` when color quantizing. 
Can be overridden by `clusters` @@ -56,7 +56,7 @@ class ImageGPTImageProcessorKwargs(ImagesKwargs): """ clusters: Optional[Union[np.ndarray, list[list[int]], "torch.Tensor"]] - do_color_quantize: Optional[bool] + do_color_quantize: bool def squared_euclidean_distance(a, b): diff --git a/src/transformers/models/instructblipvideo/video_processing_instructblipvideo.py b/src/transformers/models/instructblipvideo/video_processing_instructblipvideo.py index d2fe3cc7f343..f2c49925ef19 100644 --- a/src/transformers/models/instructblipvideo/video_processing_instructblipvideo.py +++ b/src/transformers/models/instructblipvideo/video_processing_instructblipvideo.py @@ -24,15 +24,11 @@ from ...image_processing_utils import BatchFeature from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, PILImageResampling, SizeDict -from ...processing_utils import Unpack, VideosKwargs from ...utils import TensorType from ...video_processing_utils import BaseVideoProcessor from ...video_utils import group_videos_by_shape, reorder_videos -class InstructBlipVideoVideoProcessorInitKwargs(VideosKwargs): ... 
- - class InstructBlipVideoVideoProcessor(BaseVideoProcessor): resample = PILImageResampling.BICUBIC image_mean = OPENAI_CLIP_MEAN @@ -44,12 +40,8 @@ class InstructBlipVideoVideoProcessor(BaseVideoProcessor): do_normalize = True do_convert_rgb = True do_sample_frames = False # Set to False for BC, recommended to set `True` in new models - valid_kwargs = InstructBlipVideoVideoProcessorInitKwargs model_input_names = ["pixel_values"] - def __init__(self, **kwargs: Unpack[InstructBlipVideoVideoProcessorInitKwargs]): - super().__init__(**kwargs) - def _preprocess( self, videos: list["torch.Tensor"], diff --git a/src/transformers/models/internvl/video_processing_internvl.py b/src/transformers/models/internvl/video_processing_internvl.py index d16c57522d1c..a544bb08815a 100644 --- a/src/transformers/models/internvl/video_processing_internvl.py +++ b/src/transformers/models/internvl/video_processing_internvl.py @@ -27,7 +27,7 @@ from ...video_utils import VideoMetadata, group_videos_by_shape, reorder_videos -class InternVLVideoProcessorInitKwargs(VideosKwargs): +class InternVLVideoProcessorInitKwargs(VideosKwargs, total=False): initial_shift: Union[bool, float, int] diff --git a/src/transformers/models/janus/image_processing_janus.py b/src/transformers/models/janus/image_processing_janus.py index 06ea0fe0e4d1..c47461174516 100644 --- a/src/transformers/models/janus/image_processing_janus.py +++ b/src/transformers/models/janus/image_processing_janus.py @@ -51,7 +51,7 @@ logger = logging.get_logger(__name__) -class JanusImageProcessorKwargs(ImagesKwargs): +class JanusImageProcessorKwargs(ImagesKwargs, total=False): r""" min_size (`int`, *optional*, defaults to 14): The minimum allowed size for the resized image. 
Ensures that neither the height nor width diff --git a/src/transformers/models/janus/modular_janus.py b/src/transformers/models/janus/modular_janus.py index a2f2541d84fa..6a1742b44362 100644 --- a/src/transformers/models/janus/modular_janus.py +++ b/src/transformers/models/janus/modular_janus.py @@ -1289,7 +1289,7 @@ def generate( return generated_tokens -class JanusImageProcessorKwargs(ImagesKwargs): +class JanusImageProcessorKwargs(ImagesKwargs, total=False): r""" min_size (`int`, *optional*, defaults to 14): The minimum allowed size for the resized image. Ensures that neither the height nor width diff --git a/src/transformers/models/kosmos2/processing_kosmos2.py b/src/transformers/models/kosmos2/processing_kosmos2.py index 98f8925e8a69..f9fb98df6ac2 100644 --- a/src/transformers/models/kosmos2/processing_kosmos2.py +++ b/src/transformers/models/kosmos2/processing_kosmos2.py @@ -33,15 +33,17 @@ list[list[tuple[float, float, float]]], ] +NestedList = list[Union[Optional[int], "NestedList"]] + class Kosmos2ImagesKwargs(ImagesKwargs, total=False): - bboxes: Optional[list[float]] - num_image_tokens: Optional[int] + bboxes: Optional[NestedList] # NOTE: hub validators can't accept `Sequence` + num_image_tokens: int first_image_token_id: Optional[int] class Kosmos2TextKwargs(TextKwargs, total=False): - add_eos_token: Optional[bool] + add_eos_token: bool class Kosmos2ProcessorKwargs(ProcessingKwargs, total=False): diff --git a/src/transformers/models/kosmos2_5/image_processing_kosmos2_5.py b/src/transformers/models/kosmos2_5/image_processing_kosmos2_5.py index 5f337e4b04c9..fed17e08e1a7 100644 --- a/src/transformers/models/kosmos2_5/image_processing_kosmos2_5.py +++ b/src/transformers/models/kosmos2_5/image_processing_kosmos2_5.py @@ -46,7 +46,7 @@ DEFAULT_FONT_PATH = "ybelkada/fonts" -class Kosmos2_5ImageProcessorKwargs(ImagesKwargs): +class Kosmos2_5ImageProcessorKwargs(ImagesKwargs, total=False): r""" patch_size (`Dict[str, int]`, *optional*, defaults to `{"height": 
16, "width": 16}`): The patch size to use for the image. According to Kosmos2_5 paper and code, the patch size is 16x16. @@ -55,8 +55,8 @@ class Kosmos2_5ImageProcessorKwargs(ImagesKwargs): [KOSMOS 2.5 paper](https://huggingface.co/papers/2309.11419). """ - patch_size: Optional[dict[str, int]] - max_patches: Optional[int] + patch_size: dict[str, int] + max_patches: int # Copied from transformers.models.pix2struct.image_processing_pix2struct.torch_extract_patches diff --git a/src/transformers/models/layoutlmv2/image_processing_layoutlmv2.py b/src/transformers/models/layoutlmv2/image_processing_layoutlmv2.py index d5a7e95537c5..6f53698f30b2 100644 --- a/src/transformers/models/layoutlmv2/image_processing_layoutlmv2.py +++ b/src/transformers/models/layoutlmv2/image_processing_layoutlmv2.py @@ -52,7 +52,7 @@ logger = logging.get_logger(__name__) -class LayoutLMv2ImageProcessorKwargs(ImagesKwargs): +class LayoutLMv2ImageProcessorKwargs(ImagesKwargs, total=False): r""" apply_ocr (`bool`, *optional*, defaults to `True`): Whether to apply the Tesseract OCR engine to get words + normalized bounding boxes. Can be overridden by @@ -66,7 +66,7 @@ class LayoutLMv2ImageProcessorKwargs(ImagesKwargs): `preprocess` method. """ - apply_ocr: Optional[bool] + apply_ocr: bool ocr_lang: Optional[str] tesseract_config: Optional[str] diff --git a/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py b/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py index b9273dc75cad..44d4b33e11d9 100644 --- a/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py +++ b/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py @@ -56,7 +56,7 @@ logger = logging.get_logger(__name__) -class LayoutLMv3ImageProcessorKwargs(ImagesKwargs): +class LayoutLMv3ImageProcessorKwargs(ImagesKwargs, total=False): r""" apply_ocr (`bool`, *optional*, defaults to `True`): Whether to apply the Tesseract OCR engine to get words + normalized bounding boxes. 
Can be overridden by @@ -70,7 +70,7 @@ class LayoutLMv3ImageProcessorKwargs(ImagesKwargs): `preprocess` method. """ - apply_ocr: Optional[bool] + apply_ocr: bool ocr_lang: Optional[str] tesseract_config: Optional[str] diff --git a/src/transformers/models/lfm2_vl/image_processing_lfm2_vl_fast.py b/src/transformers/models/lfm2_vl/image_processing_lfm2_vl_fast.py index ad99504fcad6..85d8fcd11b92 100755 --- a/src/transformers/models/lfm2_vl/image_processing_lfm2_vl_fast.py +++ b/src/transformers/models/lfm2_vl/image_processing_lfm2_vl_fast.py @@ -14,7 +14,7 @@ # limitations under the License. import math from functools import lru_cache -from typing import Optional, Union +from typing import Union import torch from torchvision.transforms.v2 import functional as F @@ -169,24 +169,24 @@ def pad_along_first_dim( return images, pixel_mask -class Lfm2VlImageProcessorKwargs(ImagesKwargs): +class Lfm2VlImageProcessorKwargs(ImagesKwargs, total=False): """ downsample_factor (`int`, *optional*, defaults to `2`): The downsampling factor for images used when resizing the image. 
""" - downsample_factor: Optional[int] - do_image_splitting: Optional[bool] - min_tiles: Optional[int] - max_tiles: Optional[int] - use_thumbnail: Optional[bool] - min_image_tokens: Optional[int] - max_image_tokens: Optional[int] - encoder_patch_size: Optional[int] - tile_size: Optional[int] - max_pixels_tolerance: Optional[float] - do_pad: Optional[bool] - return_row_col_info: Optional[bool] + downsample_factor: int + do_image_splitting: bool + min_tiles: int + max_tiles: int + use_thumbnail: bool + min_image_tokens: int + max_image_tokens: int + encoder_patch_size: int + tile_size: int + max_pixels_tolerance: float + do_pad: bool + return_row_col_info: bool @auto_docstring diff --git a/src/transformers/models/lfm2_vl/processing_lfm2_vl.py b/src/transformers/models/lfm2_vl/processing_lfm2_vl.py index 037e3cbc25b2..311dfdc3b123 100755 --- a/src/transformers/models/lfm2_vl/processing_lfm2_vl.py +++ b/src/transformers/models/lfm2_vl/processing_lfm2_vl.py @@ -18,9 +18,9 @@ from ...feature_extraction_utils import BatchFeature from ...image_utils import ImageInput, make_nested_list_of_images from ...processing_utils import ( - ImagesKwargs, ProcessingKwargs, ProcessorMixin, + TextKwargs, Unpack, ) from ...tokenization_utils_base import BatchEncoding, TextInput @@ -30,25 +30,12 @@ logger = logging.get_logger(__name__) -class Lfm2VlImagesKwargs(ImagesKwargs, total=False): - downsample_factor: Optional[int] - do_image_splitting: Optional[bool] - min_tiles: Optional[int] - max_tiles: Optional[int] - use_thumbnail: Optional[bool] - min_image_tokens: Optional[int] - max_image_tokens: Optional[int] - encoder_patch_size: Optional[int] - tile_size: Optional[int] - max_pixels_tolerance: Optional[float] - patch_size: Optional[int] - do_pad: Optional[bool] - return_row_col_info: Optional[bool] +class Lfm2VlTextKwargs(TextKwargs, total=False): + use_image_special_tokens: Optional[bool] class Lfm2VlProcessorKwargs(ProcessingKwargs, total=False): - images_kwargs: Lfm2VlImagesKwargs - 
+ text_kwargs: Lfm2VlTextKwargs _defaults = { "images_kwargs": { "return_row_col_info": True, @@ -75,8 +62,6 @@ class Lfm2VlProcessor(ProcessorMixin): An instance of [`PreTrainedTokenizerBase`]. This should correspond with the model's text model. The tokenizer is a required input. chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string. - use_image_special_tokens (`bool`, *optional*, defaults to `True`): - Whether to use image special tokens or not when processing. """ attributes = ["image_processor", "tokenizer"] @@ -88,12 +73,10 @@ def __init__( image_processor, tokenizer, chat_template: Optional[str] = None, - use_image_special_tokens: Optional[bool] = True, **kwargs, ): self.image_token = tokenizer.image_token self.image_token_id = tokenizer.image_token_id - self.use_image_special_tokens = use_image_special_tokens self.image_start_token = tokenizer.image_start_token self.image_end_token = tokenizer.image_end_token self.image_thumbnail_token = tokenizer.image_thumbnail diff --git a/src/transformers/models/llama4/image_processing_llama4_fast.py b/src/transformers/models/llama4/image_processing_llama4_fast.py index e2678f556d02..ccbb60585b0b 100644 --- a/src/transformers/models/llama4/image_processing_llama4_fast.py +++ b/src/transformers/models/llama4/image_processing_llama4_fast.py @@ -308,7 +308,7 @@ def get_best_fit( return optimal_canvas -class Llama4ImageProcessorKwargs(ImagesKwargs): +class Llama4ImageProcessorKwargs(ImagesKwargs, total=False): r""" max_patches (`int`, *optional*, defaults to 16): The maximum number of patches to be extracted from the image. @@ -320,8 +320,8 @@ class Llama4ImageProcessorKwargs(ImagesKwargs): but never upsample, unless the image is smaller than the patch size. 
""" - max_patches: Optional[int] - resize_to_max_canvas: Optional[bool] + max_patches: int + resize_to_max_canvas: bool @auto_docstring diff --git a/src/transformers/models/llava_next/image_processing_llava_next.py b/src/transformers/models/llava_next/image_processing_llava_next.py index 56ebc10f391d..c4bc1ed07287 100644 --- a/src/transformers/models/llava_next/image_processing_llava_next.py +++ b/src/transformers/models/llava_next/image_processing_llava_next.py @@ -59,7 +59,7 @@ from PIL import Image -class LlavaNextImageProcessorKwargs(ImagesKwargs): +class LlavaNextImageProcessorKwargs(ImagesKwargs, total=False): r""" image_grid_pinpoints (`list[list[int]]`, *optional*): A list of possible resolutions to use for processing high resolution images. The best resolution is selected @@ -67,7 +67,7 @@ class LlavaNextImageProcessorKwargs(ImagesKwargs): method. """ - image_grid_pinpoints: Optional[list[list[int]]] + image_grid_pinpoints: list[list[int]] def divide_to_patches(image: np.ndarray, patch_size: int, input_data_format) -> list[np.ndarray]: diff --git a/src/transformers/models/llava_onevision/image_processing_llava_onevision.py b/src/transformers/models/llava_onevision/image_processing_llava_onevision.py index 119df9550a2a..4b0f399e4959 100644 --- a/src/transformers/models/llava_onevision/image_processing_llava_onevision.py +++ b/src/transformers/models/llava_onevision/image_processing_llava_onevision.py @@ -58,7 +58,7 @@ from PIL import Image -class LlavaOnevisionImageProcessorKwargs(ImagesKwargs): +class LlavaOnevisionImageProcessorKwargs(ImagesKwargs, total=False): r""" image_grid_pinpoints (`list[list[int]]`, *optional*): A list of possible resolutions to use for processing high resolution images. The best resolution is selected @@ -66,7 +66,7 @@ class LlavaOnevisionImageProcessorKwargs(ImagesKwargs): method. 
""" - image_grid_pinpoints: Optional[list[list[int]]] + image_grid_pinpoints: list[list[int]] # Copied from transformers.models.llava_next.image_processing_llava_next.divide_to_patches diff --git a/src/transformers/models/llava_onevision/image_processing_llava_onevision_fast.py b/src/transformers/models/llava_onevision/image_processing_llava_onevision_fast.py index b309583461ce..b80b2b76b1a7 100644 --- a/src/transformers/models/llava_onevision/image_processing_llava_onevision_fast.py +++ b/src/transformers/models/llava_onevision/image_processing_llava_onevision_fast.py @@ -76,8 +76,7 @@ def preprocess(self, images: ImageInput, **kwargs: Unpack[LlavaOnevisionImagePro batch_num_images = [1] * len(images) else: batch_num_images = [1] - kwargs["batch_num_images"] = batch_num_images - return super().preprocess(images, **kwargs) + return super().preprocess(images, batch_num_images, **kwargs) def _resize_for_patching( self, @@ -202,6 +201,7 @@ def _pad_for_batching( def _preprocess( self, images: list["torch.Tensor"], + batch_num_images: list[int], do_resize: bool, size: SizeDict, image_grid_pinpoints: list[list[int]], @@ -214,7 +214,6 @@ def _preprocess( image_mean: Optional[Union[float, list[float]]], image_std: Optional[Union[float, list[float]]], do_pad: bool, - batch_num_images: list[int], disable_grouping: Optional[bool], return_tensors: Optional[Union[str, TensorType]], **kwargs, diff --git a/src/transformers/models/llava_onevision/modular_llava_onevision.py b/src/transformers/models/llava_onevision/modular_llava_onevision.py index 890fcdd7ecaa..88d1c10ab122 100644 --- a/src/transformers/models/llava_onevision/modular_llava_onevision.py +++ b/src/transformers/models/llava_onevision/modular_llava_onevision.py @@ -35,7 +35,7 @@ from ...cache_utils import Cache from ...image_processing_utils import BatchFeature -from ...image_processing_utils_fast import group_images_by_shape, reorder_images +from ...image_processing_utils_fast import BaseImageProcessorFast, 
group_images_by_shape, reorder_images from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, @@ -128,12 +128,12 @@ def preprocess(self, images: ImageInput, **kwargs: Unpack[LlavaOnevisionImagePro batch_num_images = [1] * len(images) else: batch_num_images = [1] - kwargs["batch_num_images"] = batch_num_images - return super().preprocess(images, **kwargs) + return BaseImageProcessorFast.preprocess(self, images, batch_num_images, **kwargs) def _preprocess( self, images: list["torch.Tensor"], + batch_num_images: list[int], do_resize: bool, size: SizeDict, image_grid_pinpoints: list[list[int]], @@ -146,7 +146,6 @@ image_mean: Optional[Union[float, list[float]]], image_std: Optional[Union[float, list[float]]], do_pad: bool, - batch_num_images: list[int], disable_grouping: Optional[bool], return_tensors: Optional[Union[str, TensorType]], **kwargs, diff --git a/src/transformers/models/mask2former/image_processing_mask2former.py b/src/transformers/models/mask2former/image_processing_mask2former.py index ce79107f05b3..79b449eae416 100644 --- a/src/transformers/models/mask2former/image_processing_mask2former.py +++ b/src/transformers/models/mask2former/image_processing_mask2former.py @@ -61,7 +61,7 @@ from torch import nn -class Mask2FormerImageProcessorKwargs(ImagesKwargs): +class Mask2FormerImageProcessorKwargs(ImagesKwargs, total=False): r""" ignore_index (`int`, *optional*): Label to be assigned to background pixels in segmentation maps. If provided, segmentation map pixels @@ -74,9 +74,9 @@ class Mask2FormerImageProcessorKwargs(ImagesKwargs): The number of labels in the segmentation map.
""" - size_divisor: Optional[int] + size_divisor: int ignore_index: Optional[int] - do_reduce_labels: Optional[bool] + do_reduce_labels: bool num_labels: Optional[int] diff --git a/src/transformers/models/maskformer/image_processing_maskformer.py b/src/transformers/models/maskformer/image_processing_maskformer.py index 60e703405605..7d83809ced66 100644 --- a/src/transformers/models/maskformer/image_processing_maskformer.py +++ b/src/transformers/models/maskformer/image_processing_maskformer.py @@ -67,7 +67,7 @@ from torch import nn -class MaskFormerImageProcessorKwargs(ImagesKwargs): +class MaskFormerImageProcessorKwargs(ImagesKwargs, total=False): r""" ignore_index (`int`, *optional*): Label to be assigned to background pixels in segmentation maps. If provided, segmentation map pixels @@ -80,9 +80,9 @@ class MaskFormerImageProcessorKwargs(ImagesKwargs): The number of labels in the segmentation map. """ - size_divisor: Optional[int] + size_divisor: int ignore_index: Optional[int] - do_reduce_labels: Optional[bool] + do_reduce_labels: bool num_labels: Optional[int] diff --git a/src/transformers/models/mllama/image_processing_mllama.py b/src/transformers/models/mllama/image_processing_mllama.py index cd79f7de3121..1a1d76774868 100644 --- a/src/transformers/models/mllama/image_processing_mllama.py +++ b/src/transformers/models/mllama/image_processing_mllama.py @@ -50,13 +50,13 @@ logger = logging.get_logger(__name__) -class MllamaImageProcessorKwargs(ImagesKwargs): +class MllamaImageProcessorKwargs(ImagesKwargs, total=False): """ max_image_tiles (`int`, *optional*): The maximum number of tiles allowed. 
""" - max_image_tiles: Optional[int] + max_image_tiles: int @lru_cache(maxsize=10) diff --git a/src/transformers/models/mllama/processing_mllama.py b/src/transformers/models/mllama/processing_mllama.py index 3955006a4f9e..53bf4cc210a0 100644 --- a/src/transformers/models/mllama/processing_mllama.py +++ b/src/transformers/models/mllama/processing_mllama.py @@ -258,9 +258,7 @@ def __call__( tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, ) - return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None) - images_kwargs = output_kwargs["images_kwargs"] data = {} if text is not None: @@ -306,7 +304,7 @@ def __call__( ) if images is not None: - image_features = self.image_processor(images, **images_kwargs) + image_features = self.image_processor(images, **output_kwargs["images_kwargs"]) num_tiles = image_features.pop("num_tiles") data.update(image_features) diff --git a/src/transformers/models/mobilenet_v2/image_processing_mobilenet_v2.py b/src/transformers/models/mobilenet_v2/image_processing_mobilenet_v2.py index e8dfe992544a..876d9c6be444 100644 --- a/src/transformers/models/mobilenet_v2/image_processing_mobilenet_v2.py +++ b/src/transformers/models/mobilenet_v2/image_processing_mobilenet_v2.py @@ -51,7 +51,7 @@ logger = logging.get_logger(__name__) -class MobileNetV2ImageProcessorKwargs(ImagesKwargs): +class MobileNetV2ImageProcessorKwargs(ImagesKwargs, total=False): """ do_reduce_labels (`bool`, *optional*, defaults to `self.do_reduce_labels`): Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 @@ -59,7 +59,7 @@ class MobileNetV2ImageProcessorKwargs(ImagesKwargs): ADE20k). The background label will be replaced by 255. 
""" - do_reduce_labels: Optional[bool] + do_reduce_labels: bool @requires(backends=("vision",)) diff --git a/src/transformers/models/mobilevit/image_processing_mobilevit.py b/src/transformers/models/mobilevit/image_processing_mobilevit.py index 576ef9f449dc..0a9b6bc64423 100644 --- a/src/transformers/models/mobilevit/image_processing_mobilevit.py +++ b/src/transformers/models/mobilevit/image_processing_mobilevit.py @@ -53,7 +53,7 @@ logger = logging.get_logger(__name__) -class MobileVitImageProcessorKwargs(ImagesKwargs): +class MobileVitImageProcessorKwargs(ImagesKwargs, total=False): """ do_flip_channel_order (`bool`, *optional*, defaults to `self.do_flip_channel_order`): Whether to flip the color channels from RGB to BGR or vice versa. @@ -63,8 +63,8 @@ class MobileVitImageProcessorKwargs(ImagesKwargs): ADE20k). The background label will be replaced by 255. """ - do_flip_channel_order: Optional[bool] - do_reduce_labels: Optional[bool] + do_flip_channel_order: bool + do_reduce_labels: bool @requires(backends=("vision",)) diff --git a/src/transformers/models/nougat/image_processing_nougat.py b/src/transformers/models/nougat/image_processing_nougat.py index 0a5c445645e0..a9178ab43e07 100644 --- a/src/transformers/models/nougat/image_processing_nougat.py +++ b/src/transformers/models/nougat/image_processing_nougat.py @@ -52,7 +52,7 @@ import PIL -class NougatImageProcessorKwargs(ImagesKwargs): +class NougatImageProcessorKwargs(ImagesKwargs, total=False): r""" do_crop_margin (`bool`, *optional*, defaults to `True`): Whether to crop the image margins. @@ -62,9 +62,9 @@ class NougatImageProcessorKwargs(ImagesKwargs): Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees. 
""" - do_crop_margin: Optional[bool] - do_thumbnail: Optional[bool] - do_align_long_axis: Optional[bool] + do_crop_margin: bool + do_thumbnail: bool + do_align_long_axis: bool class NougatImageProcessor(BaseImageProcessor): diff --git a/src/transformers/models/oneformer/image_processing_oneformer.py b/src/transformers/models/oneformer/image_processing_oneformer.py index 86ce8abf084e..00d4989fdf28 100644 --- a/src/transformers/models/oneformer/image_processing_oneformer.py +++ b/src/transformers/models/oneformer/image_processing_oneformer.py @@ -64,7 +64,7 @@ from torch import nn -class OneFormerImageProcessorKwargs(ImagesKwargs): +class OneFormerImageProcessorKwargs(ImagesKwargs, total=False): r""" repo_path (`str`, *optional*, defaults to `shi-labs/oneformer_demo`): Path to a local directory or Hugging Face Hub repository containing model metadata. @@ -85,7 +85,7 @@ class OneFormerImageProcessorKwargs(ImagesKwargs): num_text: Optional[int] num_labels: Optional[int] ignore_index: Optional[int] - do_reduce_labels: Optional[bool] + do_reduce_labels: bool # Copied from transformers.models.detr.image_processing_detr.max_across_indices diff --git a/src/transformers/models/ovis2/image_processing_ovis2.py b/src/transformers/models/ovis2/image_processing_ovis2.py index 2bc883f95e73..4598e9f3f521 100644 --- a/src/transformers/models/ovis2/image_processing_ovis2.py +++ b/src/transformers/models/ovis2/image_processing_ovis2.py @@ -44,7 +44,7 @@ logger = logging.get_logger(__name__) -class Ovis2ImageProcessorKwargs(ImagesKwargs): +class Ovis2ImageProcessorKwargs(ImagesKwargs, total=False): """ crop_to_patches (`bool`, *optional*, defaults to `False`): Whether to crop the image to patches. Can be overridden by the `crop_to_patches` parameter in the @@ -61,10 +61,10 @@ class Ovis2ImageProcessorKwargs(ImagesKwargs): `preprocess` method. 
""" - crop_to_patches: Optional[bool] - min_patches: Optional[int] - max_patches: Optional[int] - use_covering_area_grid: Optional[bool] + crop_to_patches: bool + min_patches: int + max_patches: int + use_covering_area_grid: bool # Similar to image_processing_mllama.get_all_supported_aspect_ratios diff --git a/src/transformers/models/perception_lm/image_processing_perception_lm_fast.py b/src/transformers/models/perception_lm/image_processing_perception_lm_fast.py index c6491b4bc703..03ff515e63af 100644 --- a/src/transformers/models/perception_lm/image_processing_perception_lm_fast.py +++ b/src/transformers/models/perception_lm/image_processing_perception_lm_fast.py @@ -42,7 +42,7 @@ ) -class PerceptionLMImageProcessorKwargs(ImagesKwargs): +class PerceptionLMImageProcessorKwargs(ImagesKwargs, total=False): r""" vision_input_type (`str`, *optional*, defaults to `"thumb+tile"`): Vision processing strategy. `"thumb+tile"` uses both thumbnails and multiple tiles for @@ -54,8 +54,8 @@ class PerceptionLMImageProcessorKwargs(ImagesKwargs): """ vision_input_type: Optional[str] - tile_size: Optional[int] - max_num_tiles: Optional[int] + tile_size: int + max_num_tiles: int @auto_docstring @@ -68,7 +68,7 @@ class PerceptionLMImageProcessorFast(BaseImageProcessorFast): do_rescale = True do_normalize = True do_convert_rgb = True - vision_input_type = "thumb+tail" + vision_input_type = "thumb+tile" tile_size = 448 max_num_tiles = 36 size = {"width": 448, "height": 448} # for backward compatibility in tests diff --git a/src/transformers/models/phi4_multimodal/image_processing_phi4_multimodal_fast.py b/src/transformers/models/phi4_multimodal/image_processing_phi4_multimodal_fast.py index 33b10915354f..98f160a1fd5e 100644 --- a/src/transformers/models/phi4_multimodal/image_processing_phi4_multimodal_fast.py +++ b/src/transformers/models/phi4_multimodal/image_processing_phi4_multimodal_fast.py @@ -35,7 +35,7 @@ logger = logging.get_logger(__name__) -class 
Phi4MultimodalImageProcessorKwargs(ImagesKwargs): +class Phi4MultimodalImageProcessorKwargs(ImagesKwargs, total=False): r""" patch_size (`int`, *optional*): The size of the patch. @@ -43,8 +43,8 @@ class Phi4MultimodalImageProcessorKwargs(ImagesKwargs): The maximum number of crops per image. """ - patch_size: Optional[int] - dynamic_hd: Optional[int] + patch_size: int + dynamic_hd: int @auto_docstring diff --git a/src/transformers/models/pix2struct/image_processing_pix2struct.py b/src/transformers/models/pix2struct/image_processing_pix2struct.py index e0c630369029..3ec36ebda440 100644 --- a/src/transformers/models/pix2struct/image_processing_pix2struct.py +++ b/src/transformers/models/pix2struct/image_processing_pix2struct.py @@ -49,7 +49,7 @@ DEFAULT_FONT_PATH = "ybelkada/fonts" -class Pix2StructImageProcessorKwargs(ImagesKwargs): +class Pix2StructImageProcessorKwargs(ImagesKwargs, total=False): """ max_patches (`int`, *optional*): Maximum number of patches to extract. @@ -57,7 +57,7 @@ class Pix2StructImageProcessorKwargs(ImagesKwargs): Text to render as a header. Only has an effect if `image_processor.is_vqa` is `True`. """ - max_patches: Optional[int] + max_patches: int header_text: Optional[Union[list[str], str]] diff --git a/src/transformers/models/pixtral/image_processing_pixtral.py b/src/transformers/models/pixtral/image_processing_pixtral.py index f5df895e66a4..3cbfaeb41922 100644 --- a/src/transformers/models/pixtral/image_processing_pixtral.py +++ b/src/transformers/models/pixtral/image_processing_pixtral.py @@ -50,13 +50,13 @@ import PIL -class PixtralImageProcessorKwargs(ImagesKwargs): +class PixtralImageProcessorKwargs(ImagesKwargs, total=False): """ - patch_size (`dict[str, int]` *optional*, defaults to `{"height": 16, "width": 16}`): + patch_size (`Union[dict[str, int], int]` *optional*, defaults to `{"height": 16, "width": 16}`): Size of the patches in the model, used to calculate the output image size. 
Can be overridden by `patch_size` in the `preprocess` method. """ - patch_size: Optional[dict[str, int]] + patch_size: Union[dict[str, int], int] # Adapted from function in image_transforms.py to ensure any transparent pixels are converted to white. diff --git a/src/transformers/models/poolformer/image_processing_poolformer.py b/src/transformers/models/poolformer/image_processing_poolformer.py index 7d03f8281285..8d466739638d 100644 --- a/src/transformers/models/poolformer/image_processing_poolformer.py +++ b/src/transformers/models/poolformer/image_processing_poolformer.py @@ -48,13 +48,13 @@ logger = logging.get_logger(__name__) -class PoolFormerImageProcessorKwargs(ImagesKwargs): +class PoolFormerImageProcessorKwargs(ImagesKwargs, total=False): r""" crop_pct (`float`, *optional*, defaults to `self.crop_pct`): Percentage of the image to crop. Only has an effect if `do_resize` is set to `True`. """ - crop_pct: Optional[float] + crop_pct: float class PoolFormerImageProcessor(BaseImageProcessor): diff --git a/src/transformers/models/prompt_depth_anything/image_processing_prompt_depth_anything.py b/src/transformers/models/prompt_depth_anything/image_processing_prompt_depth_anything.py index b8220a30fa42..b62ba7994f0a 100644 --- a/src/transformers/models/prompt_depth_anything/image_processing_prompt_depth_anything.py +++ b/src/transformers/models/prompt_depth_anything/image_processing_prompt_depth_anything.py @@ -54,7 +54,7 @@ logger = logging.get_logger(__name__) -class PromptDepthAnythingImageProcessorKwargs(ImagesKwargs): +class PromptDepthAnythingImageProcessorKwargs(ImagesKwargs, total=False): r""" keep_aspect_ratio (`bool`, *optional*): If `True`, the image is resized to the largest possible size such that the aspect ratio is preserved. @@ -64,10 +64,10 @@ class PromptDepthAnythingImageProcessorKwargs(ImagesKwargs): Scale factor to convert the prompt depth to meters. 
""" - keep_aspect_ratio: Optional[bool] - ensure_multiple_of: Optional[int] - size_divisor: Optional[int] - prompt_scale_to_meter: Optional[float] + keep_aspect_ratio: bool + ensure_multiple_of: int + size_divisor: int + prompt_scale_to_meter: float def _constrain_to_multiple_of(val, multiple, min_val=0, max_val=None): diff --git a/src/transformers/models/qwen2_5_omni/processing_qwen2_5_omni.py b/src/transformers/models/qwen2_5_omni/processing_qwen2_5_omni.py index 95f687e1414a..ea60155999e6 100644 --- a/src/transformers/models/qwen2_5_omni/processing_qwen2_5_omni.py +++ b/src/transformers/models/qwen2_5_omni/processing_qwen2_5_omni.py @@ -32,17 +32,17 @@ # Redefine kwargs for videos because Qwen-Omni uses some kwargs for processing omni # and does not use them in video processor class -class Qwen2_5_OmniVideosKwargs(VideosKwargs): - min_pixels: Optional[int] - max_pixels: Optional[int] - patch_size: Optional[int] - temporal_patch_size: Optional[int] - merge_size: Optional[int] - min_frames: Optional[int] - max_frames: Optional[int] - use_audio_in_video: Optional[bool] - seconds_per_chunk: Optional[float] - position_id_per_seconds: Optional[int] +class Qwen2_5_OmniVideosKwargs(VideosKwargs, total=False): + min_pixels: int + max_pixels: int + patch_size: int + temporal_patch_size: int + merge_size: int + min_frames: int + max_frames: int + use_audio_in_video: bool + seconds_per_chunk: float + position_id_per_seconds: Union[int, float] class Qwen2_5OmniProcessorKwargs(ProcessingKwargs, total=False): diff --git a/src/transformers/models/qwen2_vl/image_processing_qwen2_vl.py b/src/transformers/models/qwen2_vl/image_processing_qwen2_vl.py index fe218bd05b9d..e5a1e0a7551e 100644 --- a/src/transformers/models/qwen2_vl/image_processing_qwen2_vl.py +++ b/src/transformers/models/qwen2_vl/image_processing_qwen2_vl.py @@ -52,7 +52,7 @@ logger = logging.get_logger(__name__) -class Qwen2VLImageProcessorKwargs(ImagesKwargs): +class Qwen2VLImageProcessorKwargs(ImagesKwargs, 
total=False): r""" min_pixels (`int`, *optional*, defaults to `56 * 56`): The min pixels of the image to resize the image. @@ -66,11 +66,11 @@ class Qwen2VLImageProcessorKwargs(ImagesKwargs): The merge size of the vision encoder to llm encoder. """ - min_pixels: Optional[int] - max_pixels: Optional[int] - patch_size: Optional[int] - temporal_patch_size: Optional[int] - merge_size: Optional[int] + min_pixels: int + max_pixels: int + patch_size: int + temporal_patch_size: int + merge_size: int def smart_resize( diff --git a/src/transformers/models/qwen2_vl/video_processing_qwen2_vl.py b/src/transformers/models/qwen2_vl/video_processing_qwen2_vl.py index 84bcd827f02e..11b5ff80dade 100644 --- a/src/transformers/models/qwen2_vl/video_processing_qwen2_vl.py +++ b/src/transformers/models/qwen2_vl/video_processing_qwen2_vl.py @@ -41,14 +41,14 @@ from .image_processing_qwen2_vl import smart_resize -class Qwen2VLVideoProcessorInitKwargs(VideosKwargs): - min_pixels: Optional[int] - max_pixels: Optional[int] - patch_size: Optional[int] - temporal_patch_size: Optional[int] - merge_size: Optional[int] - min_frames: Optional[int] - max_frames: Optional[int] +class Qwen2VLVideoProcessorInitKwargs(VideosKwargs, total=False): + min_pixels: int + max_pixels: int + patch_size: int + temporal_patch_size: int + merge_size: int + min_frames: int + max_frames: int @add_start_docstrings( diff --git a/src/transformers/models/qwen3_omni_moe/processing_qwen3_omni_moe.py b/src/transformers/models/qwen3_omni_moe/processing_qwen3_omni_moe.py index a118f7d2260b..df5629931fa3 100644 --- a/src/transformers/models/qwen3_omni_moe/processing_qwen3_omni_moe.py +++ b/src/transformers/models/qwen3_omni_moe/processing_qwen3_omni_moe.py @@ -20,7 +20,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import re -from typing import Optional +from typing import Union import numpy as np @@ -34,17 +34,17 @@ # Redefine kwargs for videos because Qwen-Omni uses some kwargs for processing omni # and does not use them in video processor class -class Qwen3OmniMoeVideosKwargs(VideosKwargs): - min_pixels: Optional[int] - max_pixels: Optional[int] - patch_size: Optional[int] - temporal_patch_size: Optional[int] - merge_size: Optional[int] - min_frames: Optional[int] - max_frames: Optional[int] - use_audio_in_video: Optional[bool] - seconds_per_chunk: Optional[float] - position_id_per_seconds: Optional[int] +class Qwen3OmniMoeVideosKwargs(VideosKwargs, total=False): + min_pixels: int + max_pixels: int + patch_size: int + temporal_patch_size: int + merge_size: int + min_frames: int + max_frames: int + use_audio_in_video: bool + seconds_per_chunk: float + position_id_per_seconds: Union[int, float] class Qwen3OmniMoeProcessorKwargs(ProcessingKwargs, total=False): diff --git a/src/transformers/models/qwen3_vl/video_processing_qwen3_vl.py b/src/transformers/models/qwen3_vl/video_processing_qwen3_vl.py index c4648788c9dc..e74f55b642dd 100644 --- a/src/transformers/models/qwen3_vl/video_processing_qwen3_vl.py +++ b/src/transformers/models/qwen3_vl/video_processing_qwen3_vl.py @@ -64,12 +64,12 @@ def smart_resize( return h_bar, w_bar -class Qwen3VLVideoProcessorInitKwargs(VideosKwargs): - patch_size: Optional[int] - temporal_patch_size: Optional[int] - merge_size: Optional[int] - min_frames: Optional[int] - max_frames: Optional[int] +class Qwen3VLVideoProcessorInitKwargs(VideosKwargs, total=False): + patch_size: int + temporal_patch_size: int + merge_size: int + min_frames: int + max_frames: int @add_start_docstrings( diff --git a/src/transformers/models/rt_detr/image_processing_rt_detr.py b/src/transformers/models/rt_detr/image_processing_rt_detr.py index b3c77a8920cd..b366ca62fabf 100644 --- a/src/transformers/models/rt_detr/image_processing_rt_detr.py +++ 
b/src/transformers/models/rt_detr/image_processing_rt_detr.py @@ -68,7 +68,7 @@ SUPPORTED_ANNOTATION_FORMATS = (AnnotationFormat.COCO_DETECTION,) -class RTDetrImageProcessorKwargs(ImagesKwargs): +class RTDetrImageProcessorKwargs(ImagesKwargs, total=False): r""" format (`str`, *optional*, defaults to `AnnotationFormat.COCO_DETECTION`): Data format of the annotations. One of "coco_detection" or "coco_panoptic". @@ -84,9 +84,9 @@ class RTDetrImageProcessorKwargs(ImagesKwargs): Path to the directory containing the segmentation masks. """ - format: Optional[Union[str, AnnotationFormat]] - do_convert_annotations: Optional[bool] - return_segmentation_masks: Optional[bool] + format: Union[str, AnnotationFormat] + do_convert_annotations: bool + return_segmentation_masks: bool annotations: Optional[Union[AnnotationType, list[AnnotationType]]] masks_path: Optional[Union[str, pathlib.Path]] diff --git a/src/transformers/models/sam/image_processing_sam.py b/src/transformers/models/sam/image_processing_sam.py index e9da260a6e9c..eb2615b3e963 100644 --- a/src/transformers/models/sam/image_processing_sam.py +++ b/src/transformers/models/sam/image_processing_sam.py @@ -58,7 +58,7 @@ logger = logging.get_logger(__name__) -class SamImageProcessorKwargs(ImagesKwargs): +class SamImageProcessorKwargs(ImagesKwargs, total=False): r""" mask_size (`dict[str, int]`, *optional*): The size `{"longest_edge": int}` to resize the segmentation maps to. @@ -67,8 +67,8 @@ class SamImageProcessorKwargs(ImagesKwargs): map size provided for preprocessing. 
""" - mask_size: Optional[dict[str, int]] - mask_pad_size: Optional[dict[str, int]] + mask_size: dict[str, int] + mask_pad_size: dict[str, int] class SamImageProcessor(BaseImageProcessor): diff --git a/src/transformers/models/sam/processing_sam.py b/src/transformers/models/sam/processing_sam.py index bc82daf2034d..d6cdd2ab2653 100644 --- a/src/transformers/models/sam/processing_sam.py +++ b/src/transformers/models/sam/processing_sam.py @@ -31,14 +31,14 @@ import torch -class SamImagesKwargs(ImagesKwargs): +class SamImagesKwargs(ImagesKwargs, total=False): segmentation_maps: Optional[ImageInput] input_points: Optional[list[list[float]]] input_labels: Optional[list[list[int]]] input_boxes: Optional[list[list[list[float]]]] - point_pad_value: Optional[int] - mask_size: Optional[dict[str, int]] - mask_pad_size: Optional[dict[str, int]] + point_pad_value: int + mask_size: dict[str, int] + mask_pad_size: dict[str, int] class SamProcessorKwargs(ProcessingKwargs, total=False): diff --git a/src/transformers/models/sam2/image_processing_sam2_fast.py b/src/transformers/models/sam2/image_processing_sam2_fast.py index c468f6400d54..014354d8c642 100644 --- a/src/transformers/models/sam2/image_processing_sam2_fast.py +++ b/src/transformers/models/sam2/image_processing_sam2_fast.py @@ -43,13 +43,13 @@ from ...utils import TensorType, auto_docstring -class Sam2FastImageProcessorKwargs(ImagesKwargs): +class Sam2FastImageProcessorKwargs(ImagesKwargs, total=False): r""" mask_size (`dict[str, int]`, *optional*): The size `{"height": int, "width": int}` to resize the segmentation maps to. 
""" - mask_size: Optional[dict[str, int]] + mask_size: dict[str, int] def _compute_stability_score(masks: "torch.Tensor", mask_threshold: float, stability_score_offset: int): diff --git a/src/transformers/models/sam2/modular_sam2.py b/src/transformers/models/sam2/modular_sam2.py index 8fcfe36a759e..d451fc946e6d 100644 --- a/src/transformers/models/sam2/modular_sam2.py +++ b/src/transformers/models/sam2/modular_sam2.py @@ -70,13 +70,13 @@ logger = logging.get_logger(__name__) -class Sam2FastImageProcessorKwargs(ImagesKwargs): +class Sam2FastImageProcessorKwargs(ImagesKwargs, total=False): r""" mask_size (`dict[str, int]`, *optional*): The size `{"height": int, "width": int}` to resize the segmentation maps to. """ - mask_size: Optional[dict[str, int]] + mask_size: dict[str, int] @auto_docstring diff --git a/src/transformers/models/sam_hq/processing_samhq.py b/src/transformers/models/sam_hq/processing_samhq.py index f2852b8623c4..d0b11ab06146 100644 --- a/src/transformers/models/sam_hq/processing_samhq.py +++ b/src/transformers/models/sam_hq/processing_samhq.py @@ -31,14 +31,14 @@ import torch -class SamHQImagesKwargs(ImagesKwargs): +class SamHQImagesKwargs(ImagesKwargs, total=False): segmentation_maps: Optional[ImageInput] input_points: Optional[list[list[float]]] input_labels: Optional[list[list[int]]] input_boxes: Optional[list[list[list[float]]]] point_pad_value: Optional[int] - mask_size: Optional[dict[str, int]] - mask_pad_size: Optional[dict[str, int]] + mask_size: dict[str, int] + mask_pad_size: dict[str, int] class SamHQProcessorKwargs(ProcessingKwargs, total=False): diff --git a/src/transformers/models/segformer/image_processing_segformer.py b/src/transformers/models/segformer/image_processing_segformer.py index ce9ace8115a4..ede9d589294b 100644 --- a/src/transformers/models/segformer/image_processing_segformer.py +++ b/src/transformers/models/segformer/image_processing_segformer.py @@ -55,7 +55,7 @@ logger = logging.get_logger(__name__) -class 
SegformerImageProcessorKwargs(ImagesKwargs): +class SegformerImageProcessorKwargs(ImagesKwargs, total=False): r""" do_reduce_labels (`bool`, *optional*, defaults to `self.do_reduce_labels`): Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 @@ -63,7 +63,7 @@ class SegformerImageProcessorKwargs(ImagesKwargs): ADE20k). The background label will be replaced by 255. """ - do_reduce_labels: Optional[bool] + do_reduce_labels: bool @requires(backends=("vision",)) diff --git a/src/transformers/models/siglip2/image_processing_siglip2.py b/src/transformers/models/siglip2/image_processing_siglip2.py index caff1bce0bc9..85063fc9078a 100644 --- a/src/transformers/models/siglip2/image_processing_siglip2.py +++ b/src/transformers/models/siglip2/image_processing_siglip2.py @@ -48,7 +48,7 @@ from PIL import Image -class Siglip2ImageProcessorKwargs(ImagesKwargs): +class Siglip2ImageProcessorKwargs(ImagesKwargs, total=False): """ patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch the image will be split to. @@ -57,8 +57,8 @@ class Siglip2ImageProcessorKwargs(ImagesKwargs): and then padded in "patch" dimension to match this number exactly. """ - patch_size: Optional[int] - max_num_patches: Optional[int] + patch_size: int + max_num_patches: int @lru_cache(maxsize=256) diff --git a/src/transformers/models/smolvlm/image_processing_smolvlm.py b/src/transformers/models/smolvlm/image_processing_smolvlm.py index e231c1ec6b07..a946cc0c191b 100644 --- a/src/transformers/models/smolvlm/image_processing_smolvlm.py +++ b/src/transformers/models/smolvlm/image_processing_smolvlm.py @@ -53,7 +53,7 @@ logger = logging.get_logger(__name__) -class SmolVLMImageProcessorKwargs(ImagesKwargs): +class SmolVLMImageProcessorKwargs(ImagesKwargs, total=False): """ do_image_splitting (`bool`, *optional*, defaults to `True`): Whether to split the image into sub-images concatenated with the original image. 
They are split into patches @@ -64,9 +64,9 @@ class SmolVLMImageProcessorKwargs(ImagesKwargs): Whether to return the row and column information of the images. """ - do_image_splitting: Optional[bool] - max_image_size: Optional[dict[str, int]] - return_row_col_info: Optional[bool] + do_image_splitting: bool + max_image_size: dict[str, int] + return_row_col_info: bool MAX_IMAGE_SIZE = 4096 # 4k resolution as absolute maximum diff --git a/src/transformers/models/smolvlm/video_processing_smolvlm.py b/src/transformers/models/smolvlm/video_processing_smolvlm.py index ce73dfb4a82e..09751486f0ae 100644 --- a/src/transformers/models/smolvlm/video_processing_smolvlm.py +++ b/src/transformers/models/smolvlm/video_processing_smolvlm.py @@ -90,8 +90,8 @@ def get_resize_output_image_size( return height, width -class SmolVLMVideoProcessorInitKwargs(VideosKwargs): - max_image_size: Optional[dict[str, int]] +class SmolVLMVideoProcessorInitKwargs(VideosKwargs, total=False): + max_image_size: dict[str, int] class SmolVLMVideoProcessor(BaseVideoProcessor): diff --git a/src/transformers/models/superpoint/image_processing_superpoint.py b/src/transformers/models/superpoint/image_processing_superpoint.py index 633d9b0b16b9..57b1a9dc6cb1 100644 --- a/src/transformers/models/superpoint/image_processing_superpoint.py +++ b/src/transformers/models/superpoint/image_processing_superpoint.py @@ -46,13 +46,13 @@ logger = logging.get_logger(__name__) -class SuperPointImageProcessorKwargs(ImagesKwargs): +class SuperPointImageProcessorKwargs(ImagesKwargs, total=False): r""" do_grayscale (`bool`, *optional*, defaults to `True`): Whether to convert the image to grayscale. Can be overridden by `do_grayscale` in the `preprocess` method. 
""" - do_grayscale: Optional[bool] = True + do_grayscale: bool def is_grayscale( diff --git a/src/transformers/models/swin2sr/image_processing_swin2sr.py b/src/transformers/models/swin2sr/image_processing_swin2sr.py index 018a1bf0f4df..0ba052e92e05 100644 --- a/src/transformers/models/swin2sr/image_processing_swin2sr.py +++ b/src/transformers/models/swin2sr/image_processing_swin2sr.py @@ -38,8 +38,8 @@ logger = logging.get_logger(__name__) -class Swin2SRImageProcessorKwargs(ImagesKwargs): - size_divisor: Optional[int] +class Swin2SRImageProcessorKwargs(ImagesKwargs, total=False): + size_divisor: int class Swin2SRImageProcessor(BaseImageProcessor): diff --git a/src/transformers/models/textnet/image_processing_textnet.py b/src/transformers/models/textnet/image_processing_textnet.py index 1a4d68522205..bd7aa6f5086e 100644 --- a/src/transformers/models/textnet/image_processing_textnet.py +++ b/src/transformers/models/textnet/image_processing_textnet.py @@ -49,8 +49,8 @@ import PIL -class TextNetImageProcessorKwargs(ImagesKwargs): - size_divisor: Optional[int] +class TextNetImageProcessorKwargs(ImagesKwargs, total=False): + size_divisor: int class TextNetImageProcessor(BaseImageProcessor): diff --git a/src/transformers/models/tvp/image_processing_tvp.py b/src/transformers/models/tvp/image_processing_tvp.py index 67c1ffe4fae8..d1ae5c374b4b 100644 --- a/src/transformers/models/tvp/image_processing_tvp.py +++ b/src/transformers/models/tvp/image_processing_tvp.py @@ -50,7 +50,7 @@ logger = logging.get_logger(__name__) -class TvpImageProcessorKwargs(ImagesKwargs): +class TvpImageProcessorKwargs(ImagesKwargs, total=False): r""" do_flip_channel_order (`bool`, *optional*): Whether to flip the channel order of the image from RGB to BGR. @@ -60,7 +60,7 @@ class TvpImageProcessorKwargs(ImagesKwargs): Padding mode to use — `'constant'`, `'edge'`, `'reflect'`, or `'symmetric'`. 
""" - do_flip_channel_order: Optional[bool] + do_flip_channel_order: bool constant_values: Optional[Union[float, list[float]]] pad_mode: Optional[str] diff --git a/src/transformers/models/udop/processing_udop.py b/src/transformers/models/udop/processing_udop.py index 1be71aea63e2..c44fa3d504ea 100644 --- a/src/transformers/models/udop/processing_udop.py +++ b/src/transformers/models/udop/processing_udop.py @@ -31,7 +31,7 @@ class UdopTextKwargs(TextKwargs, total=False): word_labels: Optional[Union[list[int], list[list[int]]]] - boxes: Union[list[list[int]], list[list[list[int]]]] + boxes: Optional[Union[list[list[int]], list[list[list[int]]]]] class UdopProcessorKwargs(ProcessingKwargs, total=False): diff --git a/src/transformers/models/vilt/image_processing_vilt.py b/src/transformers/models/vilt/image_processing_vilt.py index bb29e1d1ee30..5c1b2acf6e4b 100644 --- a/src/transformers/models/vilt/image_processing_vilt.py +++ b/src/transformers/models/vilt/image_processing_vilt.py @@ -47,8 +47,8 @@ logger = logging.get_logger(__name__) -class ViltImageProcessorKwargs(ImagesKwargs): - size_divisor: Optional[int] +class ViltImageProcessorKwargs(ImagesKwargs, total=False): + size_divisor: int def max_across_indices(values: Iterable[Any]) -> list[Any]: diff --git a/src/transformers/models/vitmatte/image_processing_vitmatte.py b/src/transformers/models/vitmatte/image_processing_vitmatte.py index 95933c053ce5..ea54ba603435 100644 --- a/src/transformers/models/vitmatte/image_processing_vitmatte.py +++ b/src/transformers/models/vitmatte/image_processing_vitmatte.py @@ -41,8 +41,8 @@ logger = logging.get_logger(__name__) -class VitMatteImageProcessorKwargs(ImagesKwargs): - size_divisor: Optional[int] +class VitMatteImageProcessorKwargs(ImagesKwargs, total=False): + size_divisor: int class VitMatteImageProcessor(BaseImageProcessor): diff --git a/src/transformers/models/yolos/image_processing_yolos.py b/src/transformers/models/yolos/image_processing_yolos.py index 
21aac76adac8..b594c296707b 100644 --- a/src/transformers/models/yolos/image_processing_yolos.py +++ b/src/transformers/models/yolos/image_processing_yolos.py @@ -81,7 +81,7 @@ SUPPORTED_ANNOTATION_FORMATS = (AnnotationFormat.COCO_DETECTION, AnnotationFormat.COCO_PANOPTIC) -class YolosImageProcessorKwargs(ImagesKwargs): +class YolosImageProcessorKwargs(ImagesKwargs, total=False): r""" format (`str`, *optional*, defaults to `AnnotationFormat.COCO_DETECTION`): Data format of the annotations. One of "coco_detection" or "coco_panoptic". @@ -97,9 +97,9 @@ class YolosImageProcessorKwargs(ImagesKwargs): Path to the directory containing the segmentation masks. """ - format: Optional[Union[str, AnnotationFormat]] - do_convert_annotations: Optional[bool] - return_segmentation_masks: Optional[bool] + format: Union[str, AnnotationFormat] + do_convert_annotations: bool + return_segmentation_masks: bool annotations: Optional[Union[AnnotationType, list[AnnotationType]]] masks_path: Optional[Union[str, pathlib.Path]] diff --git a/src/transformers/models/zoedepth/image_processing_zoedepth.py b/src/transformers/models/zoedepth/image_processing_zoedepth.py index e8ad44dd76c3..d94a2ee088eb 100644 --- a/src/transformers/models/zoedepth/image_processing_zoedepth.py +++ b/src/transformers/models/zoedepth/image_processing_zoedepth.py @@ -62,7 +62,7 @@ logger = logging.get_logger(__name__) -class ZoeDepthImageProcessorKwargs(ImagesKwargs): +class ZoeDepthImageProcessorKwargs(ImagesKwargs, total=False): """ keep_aspect_ratio (`bool`, *optional*, defaults to `True`): If `True`, the image is resized by choosing the smaller of the height and width scaling factors and using it @@ -77,8 +77,8 @@ class ZoeDepthImageProcessorKwargs(ImagesKwargs): Can be overridden by `ensure_multiple_of` in `preprocess`. 
""" - keep_aspect_ratio: Optional[bool] - ensure_multiple_of: Optional[int] + keep_aspect_ratio: bool + ensure_multiple_of: int def get_resize_output_image_size( diff --git a/src/transformers/processing_utils.py b/src/transformers/processing_utils.py index 8040fbf9d97b..55844c8d9cce 100644 --- a/src/transformers/processing_utils.py +++ b/src/transformers/processing_utils.py @@ -25,10 +25,11 @@ import warnings from dataclasses import dataclass from pathlib import Path -from typing import Any, Optional, TypedDict, TypeVar, Union +from typing import Annotated, Any, Literal, Optional, TypedDict, TypeVar, Union import numpy as np import typing_extensions +from huggingface_hub.dataclasses import validate_typed_dict from huggingface_hub.errors import EntryNotFoundError from .audio_utils import AudioInput, load_audio @@ -36,13 +37,23 @@ from .feature_extraction_utils import BatchFeature from .image_utils import ChannelDimension, ImageInput, is_vision_available from .utils.chat_template_utils import render_jinja_template -from .video_utils import VideoInput, VideoMetadata +from .utils.type_validators import ( + device_validator, + image_size_validator, + padding_validator, + positive_any_number, + positive_int, + resampling_validator, + tensor_type_validator, + truncation_validator, + video_metadata_validator, +) +from .video_utils import VideoInput, VideoMetadataType if is_vision_available(): from .image_utils import PILImageResampling - from .tokenization_utils_base import ( PaddingStrategy, PreTokenizedInput, @@ -72,8 +83,6 @@ if is_torch_available(): - import torch - from .modeling_utils import PreTrainedAudioTokenizerBase @@ -137,18 +146,22 @@ class TextKwargs(TypedDict, total=False): The side on which padding will be applied. return_mm_token_type_ids (`bool`, *optional*): Whether to return multimodal token type ids indicating mm placeholder token positions. 
+ return_tensors (`str` or [`~utils.TensorType`], *optional*): + If set, will return tensors of a particular framework. Acceptable values are: + - `'pt'`: Return PyTorch `torch.Tensor` objects. + - `'np'`: Return NumPy `np.ndarray` objects. """ text_pair: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]] - text_target: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] + text_target: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]] text_pair_target: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]] add_special_tokens: Optional[bool] - padding: Union[bool, str, PaddingStrategy] - truncation: Union[bool, str, TruncationStrategy] - max_length: Optional[int] - stride: Optional[int] + padding: Annotated[Optional[Union[bool, str, PaddingStrategy]], padding_validator()] + truncation: Annotated[Optional[Union[bool, str, TruncationStrategy]], truncation_validator()] + max_length: Annotated[Optional[int], positive_int()] + stride: Annotated[Optional[int], positive_int()] is_split_into_words: Optional[bool] - pad_to_multiple_of: Optional[int] + pad_to_multiple_of: Annotated[Optional[int], positive_int()] return_token_type_ids: Optional[bool] return_attention_mask: Optional[bool] return_overflowing_tokens: Optional[bool] @@ -156,9 +169,9 @@ class TextKwargs(TypedDict, total=False): return_offsets_mapping: Optional[bool] return_length: Optional[bool] verbose: Optional[bool] - padding_side: Optional[str] + padding_side: Optional[Literal["left", "right"]] return_mm_token_type_ids: Optional[bool] - return_tensors: Optional[Union[str, TensorType]] + return_tensors: Annotated[Optional[Union[str, TensorType]], tensor_type_validator()] class ImagesKwargs(TypedDict, total=False): @@ -175,6 +188,8 @@ class methods and docstrings. Resize the shorter side of the input to `size["shortest_edge"]`. 
crop_size (`dict[str, int]`, *optional*): Desired output size when applying center-cropping. + do_convert_rgb (`bool`): + Whether to convert the video to RGB format. resample (`PILImageResampling`, *optional*): Resampling filter to use if resizing the image. do_rescale (`bool`, *optional*): @@ -183,9 +198,9 @@ class methods and docstrings. Scale factor to use if rescaling the image. do_normalize (`bool`, *optional*): Whether to normalize the image. - image_mean (`float` or `list[float]`, *optional*): + image_mean (`float` or `list[float] or tuple[float, float, float]`, *optional*): Mean to use if normalizing the image. - image_std (`float` or `list[float]`, *optional*): + image_std (`float` or `list[float] or tuple[float, float, float]`, *optional*): Standard deviation to use if normalizing the image. do_pad (`bool`, *optional*): Whether to pad the images in the batch. @@ -199,28 +214,32 @@ class methods and docstrings. The channel dimension format for the input image. device (`Union[str, torch.Tensor]`, *optional*): The device to use for processing (e.g. "cpu", "cuda"), only relevant for fast image processing. + return_tensors (`str` or [`~utils.TensorType`], *optional*): + If set, will return tensors of a particular framework. Acceptable values are: + - `'pt'`: Return PyTorch `torch.Tensor` objects. + - `'np'`: Return NumPy `np.ndarray` objects. disable_grouping (`bool`, *optional*): Whether to group images by shapes when processing or not, only relevant for fast image processing. 
""" do_convert_rgb: Optional[bool] do_resize: Optional[bool] - size: Optional[dict[str, int]] - crop_size: Optional[dict[str, int]] - resample: Optional[Union["PILImageResampling", int]] + size: Annotated[Optional[Union[int, list[int], tuple[int, ...], dict[str, int]]], image_size_validator()] + crop_size: Annotated[Optional[Union[int, list[int], tuple[int, ...], dict[str, int]]], image_size_validator()] + resample: Annotated[Optional[Union["PILImageResampling", int]], resampling_validator()] do_rescale: Optional[bool] rescale_factor: Optional[float] do_normalize: Optional[bool] - image_mean: Optional[Union[float, list[float]]] - image_std: Optional[Union[float, list[float]]] + image_mean: Optional[Union[float, list[float], tuple[float, ...]]] + image_std: Optional[Union[float, list[float], tuple[float, ...]]] do_pad: Optional[bool] - pad_size: Optional[dict[str, int]] + pad_size: Annotated[Optional[Union[int, list[int], tuple[int, ...], dict[str, int]]], image_size_validator()] do_center_crop: Optional[bool] - data_format: Optional[ChannelDimension] + data_format: Optional[Union[str, ChannelDimension]] input_data_format: Optional[Union[str, ChannelDimension]] - device: Optional[Union[str, "torch.device"]] + device: Annotated[Optional[str], device_validator()] + return_tensors: Annotated[Optional[Union[str, TensorType]], tensor_type_validator()] disable_grouping: Optional[bool] - return_tensors: Optional[Union[str, TensorType]] class VideosKwargs(TypedDict, total=False): @@ -244,9 +263,9 @@ class VideosKwargs(TypedDict, total=False): Scale factor to use if rescaling the video. do_normalize (`bool`, *optional*): Whether to normalize the video. - image_mean (`float` or `list[float]`, *optional*): + image_mean (`float` or `list[float] or tuple[float, float, float]`, *optional*): Mean to use if normalizing the video. 
- image_std (`float` or `list[float]`, *optional*): + image_std (`float` or `list[float] or tuple[float, float, float]`, *optional*): Standard deviation to use if normalizing the video. do_center_crop (`bool`, *optional*): Whether to center crop the video. @@ -268,32 +287,36 @@ class VideosKwargs(TypedDict, total=False): The channel dimension format for the input video. device (`Union[str, torch.Tensor]`, *optional*): The device to use for processing (e.g. "cpu", "cuda"), only relevant for fast image processing. - return_metadata (`ChannelDimension` or `str`, *optional*): + return_metadata (`bool`, *optional*): Whether to return video metadata or not. + return_tensors (`str` or [`~utils.TensorType`], *optional*): + If set, will return tensors of a particular framework. Acceptable values are: + - `'pt'`: Return PyTorch `torch.Tensor` objects. + - `'np'`: Return NumPy `np.ndarray` objects. """ do_convert_rgb: Optional[bool] do_resize: Optional[bool] - size: Optional[dict[str, int]] + size: Annotated[Optional[Union[int, list[int], tuple[int, ...], dict[str, int]]], image_size_validator()] default_to_square: Optional[bool] - resample: Optional["PILImageResampling"] + resample: Annotated[Optional[Union["PILImageResampling", int]], resampling_validator()] do_rescale: Optional[bool] rescale_factor: Optional[float] do_normalize: Optional[bool] - image_mean: Optional[Union[float, list[float]]] - image_std: Optional[Union[float, list[float]]] + image_mean: Optional[Union[float, list[float], tuple[float, ...]]] + image_std: Optional[Union[float, list[float], tuple[float, ...]]] do_center_crop: Optional[bool] do_pad: Optional[bool] - crop_size: Optional[dict[str, int]] - data_format: Optional[ChannelDimension] + crop_size: Annotated[Optional[Union[int, list[int], tuple[int, ...], dict[str, int]]], image_size_validator()] + data_format: Optional[Union[str, ChannelDimension]] input_data_format: Optional[Union[str, ChannelDimension]] - device: Optional[Union[str, "torch.device"]] 
+ device: Annotated[Optional[str], device_validator()] do_sample_frames: Optional[bool] - video_metadata: Optional[Union[VideoMetadata, dict]] - fps: Optional[Union[int, float]] - num_frames: Optional[int] + video_metadata: Annotated[Optional[VideoMetadataType], video_metadata_validator()] + fps: Annotated[Optional[Union[int, float]], positive_any_number()] + num_frames: Annotated[Optional[int], positive_int()] return_metadata: Optional[bool] - return_tensors: Optional[Union[str, TensorType]] + return_tensors: Annotated[Optional[Union[str, TensorType]], tensor_type_validator()] class AudioKwargs(TypedDict, total=False): @@ -324,16 +347,20 @@ class AudioKwargs(TypedDict, total=False): If set, will pad the sequence to a multiple of the provided value. return_attention_mask (`bool`, *optional*): Whether or not [`~ASTFeatureExtractor.__call__`] should return `attention_mask`. + return_tensors (`str` or [`~utils.TensorType`], *optional*): + If set, will return tensors of a particular framework. Acceptable values are: + - `'pt'`: Return PyTorch `torch.Tensor` objects. + - `'np'`: Return NumPy `np.ndarray` objects. 
""" - sampling_rate: Optional[int] - raw_speech: Optional[Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]]] - padding: Optional[Union[bool, str, PaddingStrategy]] - max_length: Optional[int] - truncation: Optional[bool] - pad_to_multiple_of: Optional[int] + sampling_rate: Annotated[Optional[int], positive_int()] + raw_speech: Optional[Union["np.ndarray", list[float], list["np.ndarray"], list[list[float]]]] + padding: Annotated[Optional[Union[bool, str, PaddingStrategy]], padding_validator()] + max_length: Annotated[Optional[int], positive_int()] + truncation: Annotated[Optional[Union[bool, str, TruncationStrategy]], truncation_validator()] + pad_to_multiple_of: Annotated[Optional[int], positive_int()] return_attention_mask: Optional[bool] - return_tensors: Optional[Union[str, TensorType]] + return_tensors: Annotated[Optional[Union[str, TensorType]], tensor_type_validator()] class ProcessingKwargs(TypedDict, total=False): @@ -1361,6 +1388,18 @@ class MyProcessingKwargs(ProcessingKwargs, CommonKwargs, TextKwargs, ImagesKwarg f"Keyword argument `{key}` is not a valid argument for this processor and will be ignored." 
) + for key, typed_dict_obj in ModelProcessorKwargs.__annotations__.items(): + if key in map_preprocessor_kwargs: + preprocessor = getattr(self, map_preprocessor_kwargs[key], None) + if preprocessor is None or getattr(preprocessor, "valid_kwargs", None) is None: + continue + preprocessor_typed_dict_obj = getattr(preprocessor, "valid_kwargs") + typed_dict_obj = TypedDict( + "merged_typed_dict", + {**preprocessor_typed_dict_obj.__annotations__, **typed_dict_obj.__annotations__}, + total=False, + ) + validate_typed_dict(typed_dict_obj, output_kwargs[key]) return output_kwargs @classmethod diff --git a/src/transformers/utils/type_validators.py b/src/transformers/utils/type_validators.py new file mode 100644 index 000000000000..6e6ccdc4c8e9 --- /dev/null +++ b/src/transformers/utils/type_validators.py @@ -0,0 +1,115 @@ +from collections.abc import Sequence +from typing import Optional, Union + +from ..tokenization_utils_base import PaddingStrategy, TruncationStrategy +from ..video_utils import VideoMetadataType +from .generic import TensorType +from .import_utils import is_vision_available + + +if is_vision_available(): + from ..image_utils import PILImageResampling + + +def positive_any_number(value: Optional[Union[int, float]] = None): + if value is not None and (not isinstance(value, (int, float)) or not value >= 0): + raise ValueError(f"Value must be a positive integer or floating number, got {value}") + + +def positive_int(value: Optional[int] = None): + if value is not None and (not isinstance(value, int) or not value >= 0): + raise ValueError(f"Value must be a positive integer, got {value}") + + +def padding_validator(value: Optional[Union[bool, str, PaddingStrategy]] = None): + possible_names = ["longest", "max_length", "do_not_pad"] + if value is None: + pass + elif not isinstance(value, (bool, str, PaddingStrategy)): + raise ValueError("Value for padding must be either a boolean, a string or a `PaddingStrategy`") + elif isinstance(value, str) and value not in 
possible_names: + raise ValueError(f"If padding is a string, the value must be one of {possible_names}") + + +def truncation_validator(value: Optional[Union[bool, str, TruncationStrategy]] = None): + possible_names = ["only_first", "only_second", "longest_first", "do_not_truncate"] + if value is None: + pass + elif not isinstance(value, (bool, str, TruncationStrategy)): + raise ValueError("Value for truncation must be either a boolean, a string or a `TruncationStrategy`") + elif isinstance(value, str) and value not in possible_names: + raise ValueError(f"If truncation is a string, value must be one of {possible_names}") + + +def image_size_validator(value: Optional[Union[int, Sequence[int], dict[str, int]]] = None): + possible_keys = ["height", "width", "longest_edge", "shortest_edge", "max_height", "max_width"] + if value is None: + pass + elif isinstance(value, dict) and any(k not in possible_keys for k in value.keys()): + raise ValueError(f"Value for size must be a dict with keys {possible_keys} but got size={value}") + + +def device_validator(value: Optional[Union[str, int]] = None): + possible_names = ["cpu", "cuda", "xla", "xpu", "mps", "meta"] + if value is None: + pass + elif isinstance(value, int) and value < 0: + raise ValueError( + f"If device is an integer, the value must be a non-negative integer but got device={value}" + ) + elif isinstance(value, str) and value.split(":")[0] not in possible_names: + raise ValueError(f"If device is a string, the value must be one of {possible_names} but got device={value}") + elif not isinstance(value, (int, str)): + raise ValueError( + f"Device must be either an integer device ID or a string (e.g., 'cpu', 'cuda:0'), but got device={value}" + ) + + +def resampling_validator(value: Optional[Union[int, "PILImageResampling"]] = None): + if value is None: + pass + elif isinstance(value, int) and value not in list(range(6)): + raise ValueError( + f"The resampling should be one of {list(range(6))} when provided as 
integer, but got resampling={value}" + ) + elif is_vision_available() and not isinstance(value, (PILImageResampling, int)): + raise ValueError(f"The resampling should be an integer or `PIL.Image.Resampling`, but got resampling={value}") + + +def video_metadata_validator(value: Optional[VideoMetadataType] = None): + if value is None: + return + + valid_keys = ["total_num_frames", "fps", "width", "height", "duration", "video_backend", "frames_indices"] + + def check_dict_keys(d: dict) -> bool: + return all(key in valid_keys for key in d.keys()) + + if isinstance(value, Sequence) and isinstance(value[0], Sequence) and isinstance(value[0][0], dict): + for sublist in value: + for item in sublist: + if not check_dict_keys(item): + raise ValueError( + f"Invalid keys found in video metadata. Valid keys: {valid_keys} got: {list(item.keys())}" + ) + + elif isinstance(value, Sequence) and isinstance(value[0], dict): + for item in value: + if not check_dict_keys(item): + raise ValueError( + f"Invalid keys found in video metadata. Valid keys: {valid_keys} got: {list(item.keys())}" + ) + + elif isinstance(value, dict): + if not check_dict_keys(value): + raise ValueError( + f"Invalid keys found in video metadata. 
Valid keys: {valid_keys}, got: {list(value.keys())}" + ) + + +def tensor_type_validator(value: Optional[Union[str, TensorType]] = None): + possible_names = ["pt", "np", "mlx"] + if value is None: + pass + elif not isinstance(value, str) or value not in possible_names: + raise ValueError(f"The tensor type should be one of {possible_names} but got tensor_type={value}") diff --git a/src/transformers/video_processing_utils.py b/src/transformers/video_processing_utils.py index 117c30972288..4283c163c574 100644 --- a/src/transformers/video_processing_utils.py +++ b/src/transformers/video_processing_utils.py @@ -21,6 +21,7 @@ from typing import Any, Callable, Optional, Union import numpy as np +from huggingface_hub.dataclasses import validate_typed_dict from .dynamic_module_utils import custom_object_save from .image_processing_utils import ( @@ -358,6 +359,10 @@ def preprocess( captured_kwargs=kwargs.keys(), valid_processor_keys=list(self.valid_kwargs.__annotations__.keys()) + ["return_tensors"], ) + + # Perform type validation on received kwargs + validate_typed_dict(self.valid_kwargs, kwargs) + # Set default kwargs from self. This ensures that if a kwarg is not provided # by the user, it gets its default value from the instance, or is set to None. 
for kwarg_name in self.valid_kwargs.__annotations__: diff --git a/src/transformers/video_utils.py b/src/transformers/video_utils.py index 73aebbfcbf26..1faecf9791c4 100644 --- a/src/transformers/video_utils.py +++ b/src/transformers/video_utils.py @@ -112,6 +112,11 @@ def update(self, dictionary): setattr(self, key, value) +VideoMetadataType = Union[ + VideoMetadata, dict, list[Union[dict, VideoMetadata]], list[list[Union[dict, VideoMetadata]]] +] + + def is_valid_video_frame(frame): return isinstance(frame, PIL.Image.Image) or ( (is_numpy_array(frame) or is_torch_tensor(frame)) and frame.ndim == 3 @@ -217,7 +222,7 @@ def make_batched_videos(videos) -> list[Union[np.ndarray, "torch.Tensor", "URL", return flat_videos_list -def make_batched_metadata(videos: VideoInput, video_metadata: Union[VideoMetadata, dict]): +def make_batched_metadata(videos: VideoInput, video_metadata: VideoMetadataType) -> list[VideoMetadata]: if video_metadata is None: # Create default metadata and fill attributes we can infer from given video video_metadata = [ diff --git a/tests/models/cohere2_vision/test_image_processing_cohere2_vision.py b/tests/models/cohere2_vision/test_image_processing_cohere2_vision.py index 7ab3bf70d57b..81a16ba39c14 100644 --- a/tests/models/cohere2_vision/test_image_processing_cohere2_vision.py +++ b/tests/models/cohere2_vision/test_image_processing_cohere2_vision.py @@ -176,8 +176,8 @@ def test_call_numpy_4_channels(self): image_inputs[0], return_tensors="pt", input_data_format="channels_last", - image_mean=0, - image_std=1, + image_mean=(0.0, 0.0, 0.0, 0.0), + image_std=(1.0, 1.0, 1.0, 1.0), ).pixel_values self.assertEqual(tuple(encoded_images.shape), (10, 4, 30, 30)) @@ -186,7 +186,7 @@ def test_call_numpy_4_channels(self): image_inputs, return_tensors="pt", input_data_format="channels_last", - image_mean=0, - image_std=1, + image_mean=(0.0, 0.0, 0.0, 0.0), + image_std=(1.0, 1.0, 1.0, 1.0), ).pixel_values self.assertEqual(tuple(encoded_images.shape), (70, 4, 30, 
30)) diff --git a/tests/models/colpali/test_processing_colpali.py b/tests/models/colpali/test_processing_colpali.py index 221836db8423..119af1432ce1 100644 --- a/tests/models/colpali/test_processing_colpali.py +++ b/tests/models/colpali/test_processing_colpali.py @@ -133,7 +133,7 @@ def test_tokenizer_defaults_preserved_by_kwargs(self): def test_image_processor_defaults_preserved_by_image_kwargs(self): """ - We use do_rescale=True, rescale_factor=-1 to ensure that image_processor kwargs are preserved in the processor. + We use do_rescale=True, rescale_factor=-1.0 to ensure that image_processor kwargs are preserved in the processor. We then check that the mean of the pixel_values is less than or equal to 0 after processing. Since the original pixel_values are in [0, 255], this is a good indicator that the rescale_factor is indeed applied. """ @@ -141,7 +141,7 @@ def test_image_processor_defaults_preserved_by_image_kwargs(self): self.skipTest(f"image_processor attribute not present in {self.processor_class}") processor_components = self.prepare_components() processor_components["image_processor"] = self.get_component( - "image_processor", do_rescale=True, rescale_factor=-1 + "image_processor", do_rescale=True, rescale_factor=-1.0 ) processor_components["tokenizer"] = self.get_component("tokenizer", max_length=117, padding="max_length") @@ -179,7 +179,7 @@ def test_kwargs_overrides_default_image_processor_kwargs(self): image_input = self.prepare_image_inputs() - inputs = processor(images=image_input, do_rescale=True, rescale_factor=-1, return_tensors="pt") + inputs = processor(images=image_input, do_rescale=True, rescale_factor=-1.0, return_tensors="pt") self.assertLessEqual(inputs[self.images_input_name][0][0].mean(), 0) def test_unstructured_kwargs(self): @@ -194,7 +194,7 @@ def test_unstructured_kwargs(self): text=input_str, return_tensors="pt", do_rescale=True, - rescale_factor=-1, + rescale_factor=-1.0, padding="max_length", max_length=76, ) @@ -213,7 +213,7 @@ 
def test_unstructured_kwargs_batched(self): images=image_input, return_tensors="pt", do_rescale=True, - rescale_factor=-1, + rescale_factor=-1.0, padding="longest", max_length=76, ) @@ -231,7 +231,7 @@ def test_doubly_passed_kwargs(self): with self.assertRaises(ValueError): _ = processor( images=image_input, - images_kwargs={"do_rescale": True, "rescale_factor": -1}, + images_kwargs={"do_rescale": True, "rescale_factor": -1.0}, do_rescale=True, return_tensors="pt", ) @@ -248,7 +248,7 @@ def test_structured_kwargs_nested(self): # Define the kwargs for each modality all_kwargs = { "common_kwargs": {"return_tensors": "pt"}, - "images_kwargs": {"do_rescale": True, "rescale_factor": -1}, + "images_kwargs": {"do_rescale": True, "rescale_factor": -1.0}, "text_kwargs": {"padding": "max_length", "max_length": 76}, } @@ -268,7 +268,7 @@ def test_structured_kwargs_nested_from_dict(self): # Define the kwargs for each modality all_kwargs = { "common_kwargs": {"return_tensors": "pt"}, - "images_kwargs": {"do_rescale": True, "rescale_factor": -1}, + "images_kwargs": {"do_rescale": True, "rescale_factor": -1.0}, "text_kwargs": {"padding": "max_length", "max_length": 76}, } diff --git a/tests/models/colqwen2/test_processing_colqwen2.py b/tests/models/colqwen2/test_processing_colqwen2.py index 7346c0d5079c..236456dd7f88 100644 --- a/tests/models/colqwen2/test_processing_colqwen2.py +++ b/tests/models/colqwen2/test_processing_colqwen2.py @@ -132,7 +132,7 @@ def test_tokenizer_defaults_preserved_by_kwargs(self): def test_image_processor_defaults_preserved_by_image_kwargs(self): """ - We use do_rescale=True, rescale_factor=-1 to ensure that image_processor kwargs are preserved in the processor. + We use do_rescale=True, rescale_factor=-1.0 to ensure that image_processor kwargs are preserved in the processor. We then check that the mean of the pixel_values is less than or equal to 0 after processing. 
Since the original pixel_values are in [0, 255], this is a good indicator that the rescale_factor is indeed applied. """ @@ -140,7 +140,7 @@ def test_image_processor_defaults_preserved_by_image_kwargs(self): self.skipTest(f"image_processor attribute not present in {self.processor_class}") processor_components = self.prepare_components() processor_components["image_processor"] = self.get_component( - "image_processor", do_rescale=True, rescale_factor=-1 + "image_processor", do_rescale=True, rescale_factor=-1.0 ) processor_components["tokenizer"] = self.get_component("tokenizer", max_length=117, padding="max_length") @@ -178,7 +178,7 @@ def test_kwargs_overrides_default_image_processor_kwargs(self): image_input = self.prepare_image_inputs() - inputs = processor(images=image_input, do_rescale=True, rescale_factor=-1, return_tensors="pt") + inputs = processor(images=image_input, do_rescale=True, rescale_factor=-1.0, return_tensors="pt") self.assertLessEqual(inputs[self.images_input_name][0][0].mean(), 0) def test_unstructured_kwargs(self): @@ -193,7 +193,7 @@ def test_unstructured_kwargs(self): text=input_str, return_tensors="pt", do_rescale=True, - rescale_factor=-1, + rescale_factor=-1.0, padding="max_length", max_length=76, ) @@ -212,7 +212,7 @@ def test_unstructured_kwargs_batched(self): images=image_input, return_tensors="pt", do_rescale=True, - rescale_factor=-1, + rescale_factor=-1.0, padding="longest", max_length=76, ) @@ -230,7 +230,7 @@ def test_doubly_passed_kwargs(self): with self.assertRaises(ValueError): _ = processor( images=image_input, - images_kwargs={"do_rescale": True, "rescale_factor": -1}, + images_kwargs={"do_rescale": True, "rescale_factor": -1.0}, do_rescale=True, return_tensors="pt", ) @@ -247,7 +247,7 @@ def test_structured_kwargs_nested(self): # Define the kwargs for each modality all_kwargs = { "common_kwargs": {"return_tensors": "pt"}, - "images_kwargs": {"do_rescale": True, "rescale_factor": -1}, + "images_kwargs": {"do_rescale": True, 
"rescale_factor": -1.0}, "text_kwargs": {"padding": "max_length", "max_length": 76}, } @@ -267,7 +267,7 @@ def test_structured_kwargs_nested_from_dict(self): # Define the kwargs for each modality all_kwargs = { "common_kwargs": {"return_tensors": "pt"}, - "images_kwargs": {"do_rescale": True, "rescale_factor": -1}, + "images_kwargs": {"do_rescale": True, "rescale_factor": -1.0}, "text_kwargs": {"padding": "max_length", "max_length": 76}, } diff --git a/tests/models/glm4v/test_image_processing_glm4v.py b/tests/models/glm4v/test_image_processing_glm4v.py index cb5af4b275d2..1226fe473db9 100644 --- a/tests/models/glm4v/test_image_processing_glm4v.py +++ b/tests/models/glm4v/test_image_processing_glm4v.py @@ -236,8 +236,8 @@ def test_call_numpy_4_channels(self): image_inputs[0], return_tensors="pt", input_data_format="channels_last", - image_mean=0, - image_std=1, + image_mean=(0.0, 0.0, 0.0, 0.0), + image_std=(1.0, 1.0, 1.0, 1.0), ).pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]]) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) @@ -247,8 +247,8 @@ def test_call_numpy_4_channels(self): image_inputs, return_tensors="pt", input_data_format="channels_last", - image_mean=0, - image_std=1, + image_mean=(0.0, 0.0, 0.0, 0.0), + image_std=(1.0, 1.0, 1.0, 1.0), ).pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) diff --git a/tests/models/glm4v/test_video_processing_glm4v.py b/tests/models/glm4v/test_video_processing_glm4v.py index 1dcd4bdecca6..8443c728f2f2 100644 --- a/tests/models/glm4v/test_video_processing_glm4v.py +++ b/tests/models/glm4v/test_video_processing_glm4v.py @@ -250,8 +250,8 @@ def test_call_numpy_4_channels(self): video_inputs[0], return_tensors="pt", input_data_format="channels_last", - image_mean=0, - image_std=1, + 
image_mean=(0.0, 0.0, 0.0, 0.0), + image_std=(1.0, 1.0, 1.0, 1.0), )[self.input_name] expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]]) self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) @@ -261,8 +261,8 @@ def test_call_numpy_4_channels(self): video_inputs, return_tensors="pt", input_data_format="channels_last", - image_mean=0, - image_std=1, + image_mean=(0.0, 0.0, 0.0, 0.0), + image_std=(1.0, 1.0, 1.0, 1.0), )[self.input_name] expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs) self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) diff --git a/tests/models/janus/test_processing_janus.py b/tests/models/janus/test_processing_janus.py index 73212e3ec4b3..47efd5c2be6d 100644 --- a/tests/models/janus/test_processing_janus.py +++ b/tests/models/janus/test_processing_janus.py @@ -444,7 +444,7 @@ def test_chat_template_accepts_processing_kwargs(self): tokenize=True, return_dict=True, do_rescale=True, - rescale_factor=-1, + rescale_factor=-1.0, return_tensors="np", ) self.assertLessEqual(out_dict[self.images_input_name][0][0].mean(), 0) diff --git a/tests/models/lfm2_vl/test_processing_lfm2_vl.py b/tests/models/lfm2_vl/test_processing_lfm2_vl.py index f2c33e40e3f6..d1f7669bdddd 100755 --- a/tests/models/lfm2_vl/test_processing_lfm2_vl.py +++ b/tests/models/lfm2_vl/test_processing_lfm2_vl.py @@ -100,7 +100,7 @@ def prepare_processor_dict(): "{{'<|im_start|>assistant\n' }}" "{% endif %}" ) - return {"chat_template": chat_template, "use_image_special_tokens": True} + return {"chat_template": chat_template} # Override as Lfm2VL needs images/video to be an explicitly nested batch def prepare_image_inputs(self, batch_size=None): diff --git a/tests/models/mllama/test_processing_mllama.py b/tests/models/mllama/test_processing_mllama.py index e9acdddcd0c3..50a6b7db0f4e 100644 --- a/tests/models/mllama/test_processing_mllama.py +++ 
b/tests/models/mllama/test_processing_mllama.py @@ -386,7 +386,7 @@ def test_unstructured_kwargs_batched(self): images=image_input, return_tensors="pt", do_rescale=True, - rescale_factor=-1, + rescale_factor=-1.0, padding="longest", max_length=76, ) diff --git a/tests/models/nougat/test_image_processing_nougat.py b/tests/models/nougat/test_image_processing_nougat.py index c014c21828f4..68a71a6dfb8c 100644 --- a/tests/models/nougat/test_image_processing_nougat.py +++ b/tests/models/nougat/test_image_processing_nougat.py @@ -282,8 +282,8 @@ def test_call_numpy_4_channels(self): image_inputs[0], return_tensors="pt", input_data_format="channels_last", - image_mean=0, - image_std=1, + image_mean=(0.0, 0.0, 0.0, 0.0), + image_std=(1.0, 1.0, 1.0, 1.0), ).pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape( [image_inputs[0]] @@ -295,8 +295,8 @@ def test_call_numpy_4_channels(self): image_inputs, return_tensors="pt", input_data_format="channels_last", - image_mean=0, - image_std=1, + image_mean=(0.0, 0.0, 0.0, 0.0), + image_std=(1.0, 1.0, 1.0, 1.0), ).pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertEqual( diff --git a/tests/models/oneformer/test_image_processing_oneformer.py b/tests/models/oneformer/test_image_processing_oneformer.py index 4fe89959bf0b..1343d069d819 100644 --- a/tests/models/oneformer/test_image_processing_oneformer.py +++ b/tests/models/oneformer/test_image_processing_oneformer.py @@ -224,7 +224,6 @@ def comm_get_image_processor_inputs( annotations, return_tensors="pt", instance_id_to_semantic_id=instance_id_to_semantic_id, - pad_and_return_pixel_mask=True, ) return inputs diff --git a/tests/models/qwen2_vl/test_video_processing_qwen2_vl.py b/tests/models/qwen2_vl/test_video_processing_qwen2_vl.py index 4d6026a06289..b80adebbd9ab 100644 --- a/tests/models/qwen2_vl/test_video_processing_qwen2_vl.py +++ 
b/tests/models/qwen2_vl/test_video_processing_qwen2_vl.py @@ -265,8 +265,8 @@ def test_call_numpy_4_channels(self): video_inputs[0], return_tensors="pt", input_data_format="channels_last", - image_mean=0, - image_std=1, + image_mean=(0.0, 0.0, 0.0, 0.0), + image_std=(1.0, 1.0, 1.0, 1.0), )[self.input_name] expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]]) self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) @@ -276,8 +276,8 @@ def test_call_numpy_4_channels(self): video_inputs, return_tensors="pt", input_data_format="channels_last", - image_mean=0, - image_std=1, + image_mean=(0.0, 0.0, 0.0, 0.0), + image_std=(1.0, 1.0, 1.0, 1.0), )[self.input_name] expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs) self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) diff --git a/tests/models/qwen3_vl/test_video_processing_qwen3_vl.py b/tests/models/qwen3_vl/test_video_processing_qwen3_vl.py index 9230f0f9502e..60f4023938bb 100644 --- a/tests/models/qwen3_vl/test_video_processing_qwen3_vl.py +++ b/tests/models/qwen3_vl/test_video_processing_qwen3_vl.py @@ -249,8 +249,8 @@ def test_call_numpy_4_channels(self): video_inputs[0], return_tensors="pt", input_data_format="channels_last", - image_mean=0, - image_std=1, + image_mean=(0.0, 0.0, 0.0, 0.0), + image_std=(1.0, 1.0, 1.0, 1.0), )[self.input_name] expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]]) self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) @@ -260,8 +260,8 @@ def test_call_numpy_4_channels(self): video_inputs, return_tensors="pt", input_data_format="channels_last", - image_mean=0, - image_std=1, + image_mean=(0.0, 0.0, 0.0, 0.0), + image_std=(1.0, 1.0, 1.0, 1.0), )[self.input_name] expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs) 
self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) diff --git a/tests/models/smolvlm/test_processing_smolvlm.py b/tests/models/smolvlm/test_processing_smolvlm.py index 3a11103d6efb..40aaaf7a6ca2 100644 --- a/tests/models/smolvlm/test_processing_smolvlm.py +++ b/tests/models/smolvlm/test_processing_smolvlm.py @@ -482,7 +482,7 @@ def test_unstructured_kwargs_batched_video(self): videos=video_input, return_tensors="pt", do_rescale=True, - rescale_factor=-1, + rescale_factor=-1.0, padding="max_length", max_length=172, ) diff --git a/tests/models/tvp/test_image_processing_tvp.py b/tests/models/tvp/test_image_processing_tvp.py index c2c8b81dfc0a..6d454daf9e4b 100644 --- a/tests/models/tvp/test_image_processing_tvp.py +++ b/tests/models/tvp/test_image_processing_tvp.py @@ -274,7 +274,11 @@ def test_call_numpy_4_channels(self): # Test not batched input expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs) encoded_videos = image_processing( - test_inputs[0], return_tensors="pt", image_mean=0, image_std=1, input_data_format="channels_first" + test_inputs[0], + return_tensors="pt", + image_mean=(0.0, 0.0, 0.0), + image_std=(1.0, 1.0, 1.0), + input_data_format="channels_first", ).pixel_values self.assertListEqual( list(encoded_videos.shape), @@ -292,7 +296,11 @@ def test_call_numpy_4_channels(self): video_inputs, batched=True ) encoded_videos = image_processing( - test_inputs, return_tensors="pt", image_mean=0, image_std=1, input_data_format="channels_first" + test_inputs, + return_tensors="pt", + image_mean=(0.0, 0.0, 0.0), + image_std=(1.0, 1.0, 1.0), + input_data_format="channels_first", ).pixel_values self.assertListEqual( list(encoded_videos.shape), diff --git a/tests/models/videomae/test_image_processing_videomae.py b/tests/models/videomae/test_image_processing_videomae.py index 2fe9303f3705..f8576a7bc8af 100644 --- a/tests/models/videomae/test_image_processing_videomae.py +++ 
b/tests/models/videomae/test_image_processing_videomae.py @@ -177,14 +177,22 @@ def test_call_numpy_4_channels(self): # Test not batched input encoded_videos = image_processing( - video_inputs[0], return_tensors="pt", image_mean=0, image_std=1, input_data_format="channels_first" + video_inputs[0], + return_tensors="pt", + image_mean=(0.0, 0.0, 0.0, 0.0), + image_std=(1.0, 1.0, 1.0, 1.0), + input_data_format="channels_first", ).pixel_values expected_output_video_shape = self.image_processor_tester.expected_output_image_shape([encoded_videos[0]]) self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape)) # Test batched encoded_videos = image_processing( - video_inputs, return_tensors="pt", image_mean=0, image_std=1, input_data_format="channels_first" + video_inputs, + return_tensors="pt", + image_mean=(0.0, 0.0, 0.0, 0.0), + image_std=(1.0, 1.0, 1.0, 1.0), + input_data_format="channels_first", ).pixel_values expected_output_video_shape = self.image_processor_tester.expected_output_image_shape(encoded_videos) self.assertEqual( diff --git a/tests/models/vitmatte/test_image_processing_vitmatte.py b/tests/models/vitmatte/test_image_processing_vitmatte.py index a103c33a9cca..b100fb3c30b6 100644 --- a/tests/models/vitmatte/test_image_processing_vitmatte.py +++ b/tests/models/vitmatte/test_image_processing_vitmatte.py @@ -220,8 +220,8 @@ def test_call_numpy_4_channels(self): images=image, trimaps=trimap, input_data_format="channels_last", - image_mean=0, - image_std=1, + image_mean=(0.0, 0.0, 0.0, 0.0), + image_std=(1.0, 1.0, 1.0, 1.0), return_tensors="pt", ).pixel_values @@ -255,18 +255,24 @@ def test_image_processor_preprocess_arguments(self): # vitmatte require additional trimap input for image_processor # that is why we override original common test - for image_processing_class in self.image_processor_list: + for i, image_processing_class in enumerate(self.image_processor_list): image_processor = image_processing_class(**self.image_processor_dict) 
image = self.image_processor_tester.prepare_image_inputs()[0] trimap = np.random.randint(0, 3, size=image.size[::-1]) - with warnings.catch_warnings(record=True) as raised_warnings: - warnings.simplefilter("always") - image_processor(image, trimaps=trimap, extra_argument=True) - - messages = " ".join([str(w.message) for w in raised_warnings]) - self.assertGreaterEqual(len(raised_warnings), 1) - self.assertIn("extra_argument", messages) + # Type validation will fail for fast processors only (for now) + if image_processing_class.__name__.endswith("Fast"): + with self.assertRaises(TypeError): + image_processor(image, trimaps=trimap, extra_argument=True) + else: + # Else we just consume extra kwargs and raise a warning + with warnings.catch_warnings(record=True) as raised_warnings: + warnings.simplefilter("always") + image_processor(image, trimaps=trimap, extra_argument=True) + + messages = " ".join([str(w.message) for w in raised_warnings]) + self.assertGreaterEqual(len(raised_warnings), 1) + self.assertIn("extra_argument", messages) @unittest.skip(reason="Many failing cases. 
This test needs a more deep investigation.") def test_fast_is_faster_than_slow(self): diff --git a/tests/models/vitpose/test_image_processing_vitpose.py b/tests/models/vitpose/test_image_processing_vitpose.py index 44d9ddf8eb59..c0ede8e22de0 100644 --- a/tests/models/vitpose/test_image_processing_vitpose.py +++ b/tests/models/vitpose/test_image_processing_vitpose.py @@ -205,8 +205,8 @@ def test_call_numpy_4_channels(self): boxes=boxes, return_tensors="pt", input_data_format="channels_last", - image_mean=0, - image_std=1, + image_mean=(0.0, 0.0, 0.0, 0.0), + image_std=(1.0, 1.0, 1.0, 1.0), ).pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]]) self.assertEqual(tuple(encoded_images.shape), (len(boxes[0]), *expected_output_image_shape)) @@ -218,8 +218,8 @@ def test_call_numpy_4_channels(self): boxes=boxes, return_tensors="pt", input_data_format="channels_last", - image_mean=0, - image_std=1, + image_mean=(0.0, 0.0, 0.0, 0.0), + image_std=(1.0, 1.0, 1.0, 1.0), ).pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertEqual( diff --git a/tests/models/vivit/test_image_processing_vivit.py b/tests/models/vivit/test_image_processing_vivit.py index bf61fc1082b2..323dbd3cc55f 100644 --- a/tests/models/vivit/test_image_processing_vivit.py +++ b/tests/models/vivit/test_image_processing_vivit.py @@ -191,14 +191,22 @@ def test_call_numpy_4_channels(self): # Test not batched input encoded_videos = image_processing( - video_inputs[0], return_tensors="pt", image_mean=0, image_std=1, input_data_format="channels_first" + video_inputs[0], + return_tensors="pt", + image_mean=(0.0, 0.0, 0.0, 0.0), + image_std=(1.0, 1.0, 1.0, 1.0), + input_data_format="channels_first", ).pixel_values expected_output_video_shape = self.image_processor_tester.expected_output_image_shape([encoded_videos[0]]) self.assertEqual(tuple(encoded_videos.shape), (1, 
*expected_output_video_shape)) # Test batched encoded_videos = image_processing( - video_inputs, return_tensors="pt", image_mean=0, image_std=1, input_data_format="channels_first" + video_inputs, + return_tensors="pt", + image_mean=(0.0, 0.0, 0.0, 0.0), + image_std=(1.0, 1.0, 1.0, 1.0), + input_data_format="channels_first", ).pixel_values expected_output_video_shape = self.image_processor_tester.expected_output_image_shape(encoded_videos) self.assertEqual( diff --git a/tests/test_image_processing_common.py b/tests/test_image_processing_common.py index 6ac1b8e18d06..15e334c73a14 100644 --- a/tests/test_image_processing_common.py +++ b/tests/test_image_processing_common.py @@ -519,8 +519,8 @@ def test_call_numpy_4_channels(self): image_inputs[0], return_tensors="pt", input_data_format="channels_last", - image_mean=0, - image_std=1, + image_mean=[0.0, 0.0, 0.0, 0.0], + image_std=[1.0, 1.0, 1.0, 1.0], ).pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]]) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) @@ -530,8 +530,8 @@ def test_call_numpy_4_channels(self): image_inputs, return_tensors="pt", input_data_format="channels_last", - image_mean=0, - image_std=1, + image_mean=[0.0, 0.0, 0.0, 0.0], + image_std=[1.0, 1.0, 1.0, 1.0], ).pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertEqual( diff --git a/tests/test_processing_common.py b/tests/test_processing_common.py index e0094bafa695..295ee03a769e 100644 --- a/tests/test_processing_common.py +++ b/tests/test_processing_common.py @@ -383,7 +383,7 @@ def test_tokenizer_defaults_preserved_by_kwargs(self): def test_image_processor_defaults_preserved_by_image_kwargs(self): """ - We use do_rescale=True, rescale_factor=-1 to ensure that image_processor kwargs are preserved in the processor. 
+ We use do_rescale=True, rescale_factor=-1.0 to ensure that image_processor kwargs are preserved in the processor. We then check that the mean of the pixel_values is less than or equal to 0 after processing. Since the original pixel_values are in [0, 255], this is a good indicator that the rescale_factor is indeed applied. """ @@ -391,7 +391,7 @@ def test_image_processor_defaults_preserved_by_image_kwargs(self): self.skipTest(f"image_processor attribute not present in {self.processor_class}") processor_components = self.prepare_components() processor_components["image_processor"] = self.get_component( - "image_processor", do_rescale=True, rescale_factor=-1 + "image_processor", do_rescale=True, rescale_factor=-1.0 ) processor_components["tokenizer"] = self.get_component("tokenizer", max_length=117, padding="max_length") processor_kwargs = self.prepare_processor_dict() @@ -437,7 +437,9 @@ def test_kwargs_overrides_default_image_processor_kwargs(self): input_str = self.prepare_text_inputs(modalities="image") image_input = self.prepare_image_inputs() - inputs = processor(text=input_str, images=image_input, do_rescale=True, rescale_factor=-1, return_tensors="pt") + inputs = processor( + text=input_str, images=image_input, do_rescale=True, rescale_factor=-1.0, return_tensors="pt" + ) self.assertLessEqual(inputs[self.images_input_name][0][0].mean(), 0) def test_unstructured_kwargs(self): @@ -455,7 +457,7 @@ def test_unstructured_kwargs(self): images=image_input, return_tensors="pt", do_rescale=True, - rescale_factor=-1, + rescale_factor=-1.0, padding="max_length", max_length=76, ) @@ -478,7 +480,7 @@ def test_unstructured_kwargs_batched(self): images=image_input, return_tensors="pt", do_rescale=True, - rescale_factor=-1, + rescale_factor=-1.0, padding="longest", max_length=76, ) @@ -503,7 +505,7 @@ def test_doubly_passed_kwargs(self): _ = processor( text=input_str, images=image_input, - images_kwargs={"do_rescale": True, "rescale_factor": -1}, + 
images_kwargs={"do_rescale": True, "rescale_factor": -1.0}, do_rescale=True, return_tensors="pt", ) @@ -534,7 +536,7 @@ def test_structured_kwargs_nested(self): # Define the kwargs for each modality all_kwargs = { "common_kwargs": {"return_tensors": "pt"}, - "images_kwargs": {"do_rescale": True, "rescale_factor": -1}, + "images_kwargs": {"do_rescale": True, "rescale_factor": -1.0}, "text_kwargs": {"padding": "max_length", "max_length": 76}, } @@ -557,7 +559,7 @@ def test_structured_kwargs_nested_from_dict(self): # Define the kwargs for each modality all_kwargs = { "common_kwargs": {"return_tensors": "pt"}, - "images_kwargs": {"do_rescale": True, "rescale_factor": -1}, + "images_kwargs": {"do_rescale": True, "rescale_factor": -1.0}, "text_kwargs": {"padding": "max_length", "max_length": 76}, } @@ -683,7 +685,7 @@ def test_tokenizer_defaults_preserved_by_kwargs_video(self): def test_video_processor_defaults_preserved_by_video_kwargs(self): """ - We use do_rescale=True, rescale_factor=-1 to ensure that image_processor kwargs are preserved in the processor. + We use do_rescale=True, rescale_factor=-1.0 to ensure that image_processor kwargs are preserved in the processor. We then check that the mean of the pixel_values is less than or equal to 0 after processing. Since the original pixel_values are in [0, 255], this is a good indicator that the rescale_factor is indeed applied. 
""" @@ -691,7 +693,7 @@ def test_video_processor_defaults_preserved_by_video_kwargs(self): self.skipTest(f"video_processor attribute not present in {self.processor_class}") processor_components = self.prepare_components() processor_components["video_processor"] = self.get_component( - "video_processor", do_rescale=True, rescale_factor=-1 + "video_processor", do_rescale=True, rescale_factor=-1.0 ) processor_components["tokenizer"] = self.get_component("tokenizer", max_length=167, padding="max_length") processor_kwargs = self.prepare_processor_dict() @@ -747,7 +749,7 @@ def test_kwargs_overrides_default_video_processor_kwargs(self): videos=video_input, do_sample_frames=False, do_rescale=True, - rescale_factor=-1, + rescale_factor=-1.0, return_tensors="pt", ) self.assertLessEqual(inputs[self.videos_input_name][0].mean(), 0) @@ -768,7 +770,7 @@ def test_unstructured_kwargs_video(self): do_sample_frames=False, return_tensors="pt", do_rescale=True, - rescale_factor=-1, + rescale_factor=-1.0, padding="max_length", max_length=176, ) @@ -792,7 +794,7 @@ def test_unstructured_kwargs_batched_video(self): do_sample_frames=False, return_tensors="pt", do_rescale=True, - rescale_factor=-1, + rescale_factor=-1.0, padding="longest", max_length=176, ) @@ -818,7 +820,7 @@ def test_doubly_passed_kwargs_video(self): text=input_str, videos=video_input, do_sample_frames=False, - videos_kwargs={"do_rescale": True, "rescale_factor": -1}, + videos_kwargs={"do_rescale": True, "rescale_factor": -1.0}, do_rescale=True, return_tensors="pt", ) @@ -837,7 +839,7 @@ def test_structured_kwargs_nested_video(self): # Define the kwargs for each modality all_kwargs = { "common_kwargs": {"return_tensors": "pt"}, - "videos_kwargs": {"do_rescale": True, "rescale_factor": -1, "do_sample_frames": False}, + "videos_kwargs": {"do_rescale": True, "rescale_factor": -1.0, "do_sample_frames": False}, "text_kwargs": {"padding": "max_length", "max_length": 176}, } @@ -860,7 +862,7 @@ def 
test_structured_kwargs_nested_from_dict_video(self): # Define the kwargs for each modality all_kwargs = { "common_kwargs": {"return_tensors": "pt"}, - "videos_kwargs": {"do_rescale": True, "rescale_factor": -1, "do_sample_frames": False}, + "videos_kwargs": {"do_rescale": True, "rescale_factor": -1.0, "do_sample_frames": False}, "text_kwargs": {"padding": "max_length", "max_length": 176}, } diff --git a/tests/test_video_processing_common.py b/tests/test_video_processing_common.py index 3d0477ee05d5..67a31cf8d20e 100644 --- a/tests/test_video_processing_common.py +++ b/tests/test_video_processing_common.py @@ -398,8 +398,8 @@ def test_call_numpy_4_channels(self): video_inputs[0], return_tensors="pt", input_data_format="channels_last", - image_mean=0, - image_std=1, + image_mean=0.0, + image_std=1.0, )[self.input_name] expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]]) if video_processor.do_convert_rgb: @@ -412,8 +412,8 @@ def test_call_numpy_4_channels(self): video_inputs, return_tensors="pt", input_data_format="channels_last", - image_mean=0, - image_std=1, + image_mean=0.0, + image_std=1.0, )[self.input_name] expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs) if video_processor.do_convert_rgb: