diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index c3036b8a3973..8a3bd5617ef0 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -302,6 +302,8 @@ title: Image tasks with IDEFICS - local: tasks/image_text_to_text title: Image-text-to-text + - local: tasks/any_to_any + title: Any-to-any - local: tasks/video_text_to_text title: Video-text-to-text - local: tasks/visual_document_retrieval diff --git a/docs/source/en/main_classes/pipelines.md b/docs/source/en/main_classes/pipelines.md index 9e699f7d2027..442ccd91998d 100644 --- a/docs/source/en/main_classes/pipelines.md +++ b/docs/source/en/main_classes/pipelines.md @@ -485,6 +485,12 @@ Pipelines available for multimodal tasks include the following. - __call__ - all +### AnyToAnyPipeline + +[[autodoc]] AnyToAnyPipeline + - __call__ + - all + ### MaskGenerationPipeline [[autodoc]] MaskGenerationPipeline diff --git a/docs/source/en/model_doc/auto.md b/docs/source/en/model_doc/auto.md index 09884bcb71d4..575c30818995 100644 --- a/docs/source/en/model_doc/auto.md +++ b/docs/source/en/model_doc/auto.md @@ -241,6 +241,10 @@ The following auto classes are available for the following audio tasks. The following auto classes are available for the following multimodal tasks. +### AutoModelForMultimodalLM + +[[autodoc]] AutoModelForMultimodalLM + ### AutoModelForTableQuestionAnswering [[autodoc]] AutoModelForTableQuestionAnswering diff --git a/docs/source/en/tasks/any_to_any.md b/docs/source/en/tasks/any_to_any.md new file mode 100644 index 000000000000..5a21c4422359 --- /dev/null +++ b/docs/source/en/tasks/any_to_any.md @@ -0,0 +1,134 @@ + + +# Multimodal Generation + +[[open-in-colab]] + +Multimodal (any-to-any) models are language models capable of processing diverse types of input data (e.g., text, images, audio, or video) and generating outputs in any of these modalities. Unlike traditional unimodal or fixed-modality models, they allow flexible combinations of input and output, enabling a single system to handle a wide range of tasks: from text-to-image generation to audio-to-text transcription, image captioning, video understanding, and so on. This task shares many similarities with image-text-to-text, but supports a wider range of input and output modalities. + +In this guide, we provide a brief overview of any-to-any models and show how to use them with Transformers for inference. Unlike Vision LLMs, which are typically limited to vision-and-language tasks, omni-modal models can accept any combination of modalities (e.g., text, images, audio, video) as input, and generate outputs in different modalities, such as text or images. + +Let’s begin by installing dependencies: + +```bash +pip install -q transformers accelerate flash_attn +``` + +Let's initialize the model and the processor. + +```python +from transformers import AutoProcessor, AutoModelForMultimodalLM, infer_device +import torch + +device = torch.device(infer_device()) +model = AutoModelForMultimodalLM.from_pretrained( + "Qwen/Qwen2.5-Omni-3B", + dtype=torch.bfloat16, + attn_implementation="flash_attention_2", +).to(device) + +processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-Omni-3B") +``` + +These models typically include a [chat template](./chat_templating) to structure conversations across modalities. Inputs can mix images, text, audio, or other supported formats in a single turn. Outputs may also vary (e.g., text generation or audio generation), depending on the configuration. 
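Any-to-any checkpoints in Transformers declare the modalities they accept and produce through the `input_modalities` and `output_modalities` attributes. The minimal sketch below assumes these attributes are populated for the loaded model; the exact values depend on the checkpoint.

```python
# Minimal sketch: inspect the declared modalities of the loaded model.
# The values in the comments are illustrative, not guaranteed outputs.
print(model.input_modalities)   # e.g. ("image", "text", "audio", "video")
print(model.output_modalities)  # e.g. ("text", "audio")
```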
Below is an example providing a "text + audio" input and requesting a text response.

```python
messages = [
    {
        "role": "user",
        "content": [
            {"type": "audio", "url": "https://huggingface.co/datasets/raushan-testing-hf/audio-test/resolve/main/f2641_0_throatclearing.wav"},
            {"type": "text", "text": "What do you hear in this audio?"},
        ]
    },
]
```

Next, call the processor's [`~ProcessorMixin.apply_chat_template`] method to format the conversation and preprocess the audio input along with the text.

```python
inputs = processor.apply_chat_template(
    messages,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
    add_generation_prompt=True,
)
```

We can now pass the preprocessed inputs to the model.

```python
with torch.no_grad():
    generated_ids = model.generate(**inputs, max_new_tokens=100)
generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)
print(generated_texts)
```

## Pipeline

The fastest way to get started is to use the [`Pipeline`] API. Specify the `"any-to-any"` task and the model you want to use.

```python
from transformers import pipeline
pipe = pipeline("any-to-any", model="mistralai/Voxtral-Mini-3B-2507")
```

The example below uses a chat template to format the text input and passes audio as the multimodal data.

```python
messages = [
    {
        "role": "user",
        "content": [
            {
                "type": "audio",
                "url": "https://huggingface.co/datasets/raushan-testing-hf/audio-test/resolve/main/glass-breaking-151256.mp3",
            },
            {"type": "text", "text": "What do you hear in this audio?"},
        ],
    },
]
```

Pass the chat-formatted messages to the [`Pipeline`] and set `return_full_text=False` to remove the input prompt from the generated output.

```python
outputs = pipe(text=messages, max_new_tokens=20, return_full_text=False)
outputs[0]["generated_text"]
```

The any-to-any pipeline also supports generating audio or images with models that can produce those modalities. To request a non-text output, set the `generation_mode` parameter.
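For instance, image generation might look like the sketch below. Treat it as an illustration only: the checkpoint name and the `generated_image` output key are assumptions, not documented behavior of the pipeline.

```python
# Illustrative sketch: the checkpoint and the "generated_image" key are
# assumptions, not verified output of the any-to-any pipeline.
img_pipe = pipeline("any-to-any", model="BAAI/Emu3-Gen-hf")
prompt = [{"role": "user", "content": [{"type": "text", "text": "A photo of a red panda eating bamboo."}]}]
out = img_pipe(text=prompt, generation_mode="image")
out[0]["generated_image"].save("generated_image.png")  # assuming a `PIL.Image` is returned
```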
When the input contains a video, do not forget to set video sampling to the desired FPS, otherwise the whole video will be loaded without sampling. Here is an example that generates audio from a video input:

```python
import soundfile as sf

pipe = pipeline("any-to-any", model="Qwen/Qwen2.5-Omni-3B")
messages = [
    {
        "role": "user",
        "content": [
            {"type": "video", "path": "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/Cooking_cake.mp4"},
            {"type": "text", "text": "Describe this video."},
        ],
    },
]
output = pipe(text=messages, fps=1, load_audio_from_video=True, max_new_tokens=20, generation_mode="audio")
# `sf.write` requires a sample rate; Qwen2.5-Omni generates 24 kHz audio
sf.write("generated_audio.wav", output[0]["generated_audio"], samplerate=24000)
```

diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py
index ae0e0b67c874..de7f684e8add 100755
--- a/src/transformers/__init__.py
+++ b/src/transformers/__init__.py
@@ -130,6 +130,7 @@
     "loss": [],
     "modelcard": ["ModelCard"],
     "pipelines": [
+        "AnyToAnyPipeline",
         "AudioClassificationPipeline",
         "AutomaticSpeechRecognitionPipeline",
         "CsvPipelineDataFormat",
@@ -636,6 +637,7 @@
     from .optimization import get_wsd_schedule as get_wsd_schedule

     # Pipelines
+    from .pipelines import AnyToAnyPipeline as AnyToAnyPipeline
     from .pipelines import AudioClassificationPipeline as AudioClassificationPipeline
     from .pipelines import AutomaticSpeechRecognitionPipeline as AutomaticSpeechRecognitionPipeline
     from .pipelines import CsvPipelineDataFormat as CsvPipelineDataFormat
diff --git a/src/transformers/feature_extraction_sequence_utils.py b/src/transformers/feature_extraction_sequence_utils.py
index 1a48062cb5c1..c90b428acd97 100644
--- a/src/transformers/feature_extraction_sequence_utils.py
+++ b/src/transformers/feature_extraction_sequence_utils.py
@@ -19,6 +19,7 @@

 import numpy as np

+from .audio_utils import is_valid_audio, load_audio
 from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
 from .utils import PaddingStrategy, TensorType, is_torch_tensor, logging, to_numpy

@@ -366,3 +367,19 @@ def _get_padding_strategies(self, padding=False, max_length=None):
             )

         return padding_strategy
+
+    def fetch_audio(self, audio_url_or_urls: Union[str, list[str], list[list[str]]]):
+        """
+        Convert a single or a list of urls into the corresponding `np.ndarray` objects.
+
+        If a single url is passed, the return value will be a single object. If a list is passed, a list of objects is
+        returned.
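+
+        Example (illustrative; any feature extractor inheriting from `SequenceFeatureExtractor` should behave the same way):
+
+        ```python
+        >>> from transformers import AutoFeatureExtractor
+
+        >>> # "openai/whisper-tiny" is only an example checkpoint
+        >>> feature_extractor = AutoFeatureExtractor.from_pretrained("openai/whisper-tiny")
+        >>> url = "https://huggingface.co/datasets/raushan-testing-hf/audio-test/resolve/main/f2641_0_throatclearing.wav"
+        >>> audio = feature_extractor.fetch_audio(url)  # single url -> single `np.ndarray`
+        >>> audios = feature_extractor.fetch_audio([url, url])  # list of urls -> list of arrays
+        ```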
+ """ + if isinstance(audio_url_or_urls, list): + return [self.fetch_audio(x) for x in audio_url_or_urls] + elif isinstance(audio_url_or_urls, str): + return load_audio(audio_url_or_urls) + elif is_valid_audio(audio_url_or_urls): + return audio_url_or_urls + else: + raise TypeError(f"only a single or a list of entries is supported but got type={type(audio_url_or_urls)}") diff --git a/src/transformers/generation/utils.py b/src/transformers/generation/utils.py index 09d3555774ae..b64f455178b7 100644 --- a/src/transformers/generation/utils.py +++ b/src/transformers/generation/utils.py @@ -367,7 +367,7 @@ class GenerationMixin(ContinuousMixin): """ # Should be overwritten by models that can generate non-text output - output_modalities = "text" + output_modalities = ("text",) def adjust_generation_fn( self, diff --git a/src/transformers/models/aimv2/modeling_aimv2.py b/src/transformers/models/aimv2/modeling_aimv2.py index 4c7e81ed59cc..2ce372c1040a 100644 --- a/src/transformers/models/aimv2/modeling_aimv2.py +++ b/src/transformers/models/aimv2/modeling_aimv2.py @@ -394,7 +394,7 @@ class Aimv2PreTrainedModel(PreTrainedModel): config: Aimv2Config base_model_prefix = "aimv2" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True _no_split_modules = [ "Aimv2EncoderLayer", diff --git a/src/transformers/models/aimv2/modular_aimv2.py b/src/transformers/models/aimv2/modular_aimv2.py index 4262efd5b45b..87d46fae9b34 100644 --- a/src/transformers/models/aimv2/modular_aimv2.py +++ b/src/transformers/models/aimv2/modular_aimv2.py @@ -437,7 +437,7 @@ class Aimv2PreTrainedModel(PreTrainedModel): config: Aimv2Config base_model_prefix = "aimv2" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True _no_split_modules = [ "Aimv2EncoderLayer", diff --git a/src/transformers/models/align/modeling_align.py b/src/transformers/models/align/modeling_align.py index 84ac48e7675c..d136a1001075 100644 --- a/src/transformers/models/align/modeling_align.py +++ b/src/transformers/models/align/modeling_align.py @@ -821,7 +821,7 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: class AlignPreTrainedModel(PreTrainedModel): config: AlignConfig base_model_prefix = "align" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True @torch.no_grad() @@ -853,7 +853,7 @@ def _init_weights(self, module: nn.Module): ) class AlignTextModel(AlignPreTrainedModel): config: AlignTextConfig - input_modalities = "text" + input_modalities = ("text",) _no_split_modules = ["AlignTextEmbeddings"] def __init__(self, config: AlignTextConfig, add_pooling_layer: bool = True): @@ -974,7 +974,7 @@ def forward( class AlignVisionModel(AlignPreTrainedModel): config: AlignVisionConfig main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = False def __init__(self, config: AlignVisionConfig): diff --git a/src/transformers/models/altclip/modeling_altclip.py b/src/transformers/models/altclip/modeling_altclip.py index 8cb6bbb3ba2f..254a4fc294d5 100755 --- a/src/transformers/models/altclip/modeling_altclip.py +++ b/src/transformers/models/altclip/modeling_altclip.py @@ -767,7 +767,7 @@ def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding=Fals class AltCLIPPreTrainedModel(PreTrainedModel): config: AltCLIPConfig base_model_prefix = "altclip" - input_modalities = ["image", "text"] + input_modalities = ("image", 
"text") supports_gradient_checkpointing = True _no_split_module = [] @@ -872,7 +872,7 @@ def forward( class AltCLIPVisionModel(AltCLIPPreTrainedModel): config: AltCLIPVisionConfig main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) def __init__(self, config: AltCLIPVisionConfig): super().__init__(config) @@ -1031,7 +1031,7 @@ def forward( class AltCLIPTextModel(AltCLIPPreTrainedModel): config: AltCLIPTextConfig - input_modalities = "text" + input_modalities = ("text",) def __init__(self, config): super().__init__(config) diff --git a/src/transformers/models/aria/modeling_aria.py b/src/transformers/models/aria/modeling_aria.py index 96a6a82da91d..801b1ae989fa 100644 --- a/src/transformers/models/aria/modeling_aria.py +++ b/src/transformers/models/aria/modeling_aria.py @@ -573,7 +573,7 @@ def forward( class AriaTextPreTrainedModel(PreTrainedModel): config: AriaTextConfig base_model_prefix = "model" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") _no_split_modules = ["AriaTextDecoderLayer", "AriaGroupedExpertsGemm"] supports_gradient_checkpointing = True _skip_keys_device_placement = "past_key_values" diff --git a/src/transformers/models/aria/modular_aria.py b/src/transformers/models/aria/modular_aria.py index 3e500f4bfc24..d2e15336832f 100644 --- a/src/transformers/models/aria/modular_aria.py +++ b/src/transformers/models/aria/modular_aria.py @@ -1184,7 +1184,7 @@ def __init__(self, config: AriaTextConfig, layer_idx: int): class AriaTextPreTrainedModel(PreTrainedModel): config: AriaTextConfig base_model_prefix = "model" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") _no_split_modules = ["AriaTextDecoderLayer", "AriaGroupedExpertsGemm"] supports_gradient_checkpointing = True _skip_keys_device_placement = "past_key_values" diff --git a/src/transformers/models/audioflamingo3/modeling_audioflamingo3.py b/src/transformers/models/audioflamingo3/modeling_audioflamingo3.py index 50e10b702532..72f3356c1544 100644 --- a/src/transformers/models/audioflamingo3/modeling_audioflamingo3.py +++ b/src/transformers/models/audioflamingo3/modeling_audioflamingo3.py @@ -257,7 +257,7 @@ def forward( class AudioFlamingo3PreTrainedModel(PreTrainedModel): config: AudioFlamingo3Config base_model_prefix = "model" - input_modalities = ["audio", "text"] + input_modalities = ("audio", "text") supports_gradient_checkpointing = True _no_split_modules = ["AudioFlamingo3Attention"] _skip_keys_device_placement = "past_key_values" diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index 22985f413341..55b6a1147eca 100644 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -1029,6 +1029,21 @@ class _BaseModelWithGenerate(PreTrainedModel, GenerationMixin): ] ) +# Models that accept text and optionally multimodal data in inputs +# and can generate text and optionally multimodal data. 
+MODEL_FOR_MULTIMODAL_LM_MAPPING_NAMES = OrderedDict( + [ + *list(MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES.items()), + ("granite_speech", "GraniteSpeechForConditionalGeneration"), + ("kyutai_speech_to_text", "KyutaiSpeechToTextForConditionalGeneration"), + ("phi4_multimodal", "Phi4MultimodalForCausalLM"), + ("qwen2_5_omni", "Qwen2_5OmniForConditionalGeneration"), + ("qwen2_audio", "Qwen2AudioForConditionalGeneration"), + ("qwen3_omni_moe", "Qwen3OmniMoeForConditionalGeneration"), + ("voxtral", "VoxtralForConditionalGeneration"), + ] +) + MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict( [ # Model for Masked LM mapping @@ -1782,6 +1797,7 @@ class _BaseModelWithGenerate(PreTrainedModel, GenerationMixin): MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING = _LazyAutoMapping( CONFIG_MAPPING_NAMES, MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES ) +MODEL_FOR_MULTIMODAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_MULTIMODAL_LM_MAPPING_NAMES) MODEL_FOR_RETRIEVAL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_RETRIEVAL_MAPPING_NAMES) MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( CONFIG_MAPPING_NAMES, MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES @@ -2126,6 +2142,13 @@ def from_pretrained( AutoModelForImageTextToText = auto_class_update(AutoModelForImageTextToText, head_doc="image-text-to-text modeling") +class AutoModelForMultimodalLM(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_MULTIMODAL_LM_MAPPING + + +AutoModelForMultimodalLM = auto_class_update(AutoModelForMultimodalLM, head_doc="multimodal generation") + + class AutoModelForAudioClassification(_BaseAutoModelClass): _model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING @@ -2276,6 +2299,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): "MODEL_FOR_VISION_2_SEQ_MAPPING", "MODEL_FOR_RETRIEVAL_MAPPING", "MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING", + "MODEL_FOR_MULTIMODAL_LM_MAPPING", "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING", "MODEL_MAPPING", "MODEL_WITH_LM_HEAD_MAPPING", @@ -2303,6 +2327,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): "AutoModelForMaskedImageModeling", "AutoModelForMaskedLM", "AutoModelForMultipleChoice", + "AutoModelForMultimodalLM", "AutoModelForNextSentencePrediction", "AutoModelForObjectDetection", "AutoModelForPreTraining", diff --git a/src/transformers/models/autoformer/modeling_autoformer.py b/src/transformers/models/autoformer/modeling_autoformer.py index c026c8a6d206..843ea4a358a1 100644 --- a/src/transformers/models/autoformer/modeling_autoformer.py +++ b/src/transformers/models/autoformer/modeling_autoformer.py @@ -823,7 +823,7 @@ def forward( class AutoformerPreTrainedModel(PreTrainedModel): config: AutoformerConfig base_model_prefix = "model" - input_modalities = "time" + input_modalities = ("time",) main_input_name = "past_values" supports_gradient_checkpointing = True diff --git a/src/transformers/models/aya_vision/modeling_aya_vision.py b/src/transformers/models/aya_vision/modeling_aya_vision.py index 742d7374aef2..0e21032c38c3 100644 --- a/src/transformers/models/aya_vision/modeling_aya_vision.py +++ b/src/transformers/models/aya_vision/modeling_aya_vision.py @@ -91,7 +91,7 @@ def pixel_shuffle(self, image_features): # B, S, D class AyaVisionPreTrainedModel(PreTrainedModel): config: AyaVisionConfig base_model_prefix = "model" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _skip_keys_device_placement = "past_key_values" diff --git 
a/src/transformers/models/bark/modeling_bark.py b/src/transformers/models/bark/modeling_bark.py index fe3afdc7bbd2..ee0c5d6f81ad 100644 --- a/src/transformers/models/bark/modeling_bark.py +++ b/src/transformers/models/bark/modeling_bark.py @@ -353,7 +353,7 @@ def device(self) -> torch.device: # GPT2-like autoregressive model class BarkCausalModel(BarkPreTrainedModel, GenerationMixin): config: BarkSubModelConfig - output_modalities = "audio" + output_modalities = ("audio",) def __init__(self, config): super().__init__(config) diff --git a/src/transformers/models/beit/modeling_beit.py b/src/transformers/models/beit/modeling_beit.py index 76e20239648b..8698ad95913c 100755 --- a/src/transformers/models/beit/modeling_beit.py +++ b/src/transformers/models/beit/modeling_beit.py @@ -668,7 +668,7 @@ def forward( class BeitPreTrainedModel(PreTrainedModel): config: BeitConfig base_model_prefix = "beit" - input_modalities = "image" + input_modalities = ("image",) main_input_name = "pixel_values" supports_gradient_checkpointing = True _no_split_modules = ["BeitLayer"] diff --git a/src/transformers/models/bit/modeling_bit.py b/src/transformers/models/bit/modeling_bit.py index 61879e54b5a5..9074afe35a89 100644 --- a/src/transformers/models/bit/modeling_bit.py +++ b/src/transformers/models/bit/modeling_bit.py @@ -625,7 +625,7 @@ def forward( class BitPreTrainedModel(PreTrainedModel): config: BitConfig base_model_prefix = "bit" - input_modalities = "image" + input_modalities = ("image",) main_input_name = "pixel_values" _no_split_modules = ["BitEmbeddings"] diff --git a/src/transformers/models/blip/modeling_blip.py b/src/transformers/models/blip/modeling_blip.py index a1f06ae3a37e..e0268b73f692 100644 --- a/src/transformers/models/blip/modeling_blip.py +++ b/src/transformers/models/blip/modeling_blip.py @@ -415,7 +415,7 @@ def forward( class BlipPreTrainedModel(PreTrainedModel): config: BlipConfig base_model_prefix = "blip" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _no_split_modules = ["BlipEncoderLayer", "BlipTextEmbeddings"] _skip_keys_device_placement = ["past_key_values"] @@ -466,7 +466,7 @@ def forward( class BlipVisionModel(BlipPreTrainedModel): main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) config: BlipVisionConfig _can_record_outputs = { "hidden_states": BlipEncoderLayer, diff --git a/src/transformers/models/blip_2/modeling_blip_2.py b/src/transformers/models/blip_2/modeling_blip_2.py index d6ee68a6680b..d38112f1fbf8 100644 --- a/src/transformers/models/blip_2/modeling_blip_2.py +++ b/src/transformers/models/blip_2/modeling_blip_2.py @@ -392,7 +392,7 @@ def forward( class Blip2PreTrainedModel(PreTrainedModel): config: Blip2Config base_model_prefix = "blip" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _supports_attention_backend = True _supports_flash_attn = True @@ -467,7 +467,7 @@ def forward( # Copied from transformers.models.blip.modeling_blip.BlipVisionModel with Blip->Blip2, BLIP->BLIP_2 class Blip2VisionModel(Blip2PreTrainedModel): main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) config: Blip2VisionConfig _can_record_outputs = { "hidden_states": Blip2EncoderLayer, @@ -1441,7 +1441,7 @@ def forward( @auto_docstring class Blip2VisionModelWithProjection(Blip2PreTrainedModel): main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = 
("image",) _keep_in_fp32_modules = ["query_tokens", "qformer"] _supports_flash_attn = False # because self.qformer does not support FA2 @@ -1904,7 +1904,7 @@ def generate( ) class Blip2ForImageTextRetrieval(Blip2PreTrainedModel): main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) _keep_in_fp32_modules = ["query_tokens", "qformer"] _supports_flash_attn = False # because self.qformer does not support FA2 diff --git a/src/transformers/models/blt/modeling_blt.py b/src/transformers/models/blt/modeling_blt.py index d4b19101c861..03fb85630050 100644 --- a/src/transformers/models/blt/modeling_blt.py +++ b/src/transformers/models/blt/modeling_blt.py @@ -431,7 +431,7 @@ def forward( class BltPreTrainedModel(PreTrainedModel): config: BltConfig base_model_prefix = "model" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _no_split_modules = ["BltTransformerLayer"] _can_compile_fullgraph = False # static cache cannot have different shapes for each layer diff --git a/src/transformers/models/bridgetower/modeling_bridgetower.py b/src/transformers/models/bridgetower/modeling_bridgetower.py index 66de121e78c9..b83fbf482607 100644 --- a/src/transformers/models/bridgetower/modeling_bridgetower.py +++ b/src/transformers/models/bridgetower/modeling_bridgetower.py @@ -915,7 +915,7 @@ def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_l class BridgeTowerPreTrainedModel(PreTrainedModel): config: BridgeTowerConfig base_model_prefix = "bridgetower" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = False _no_split_modules = ["BridgeTowerSelfAttention", "BridgeTowerResidualAttention"] _skip_keys_device_placement = "past_key_values" @@ -950,7 +950,7 @@ def _init_weights(self, module: nn.Module): class BridgeTowerVisionModel(BridgeTowerPreTrainedModel): config: BridgeTowerVisionConfig - input_modalities = "image" + input_modalities = ("image",) def __init__(self, config): super().__init__(config) @@ -980,7 +980,7 @@ def forward(self, image, image_mask=None, interpolate_pos_encoding=False): ) class BridgeTowerTextModel(BridgeTowerPreTrainedModel): config: BridgeTowerTextConfig - input_modalities = "text" + input_modalities = ("text",) def __init__(self, config, add_pooling_layer=True): r""" diff --git a/src/transformers/models/chameleon/modeling_chameleon.py b/src/transformers/models/chameleon/modeling_chameleon.py index 1bf2179deec6..2e816ad63b4d 100644 --- a/src/transformers/models/chameleon/modeling_chameleon.py +++ b/src/transformers/models/chameleon/modeling_chameleon.py @@ -772,7 +772,7 @@ def convert_img2bpe(self, img_batch: torch.Tensor) -> torch.Tensor: class ChameleonPreTrainedModel(PreTrainedModel): config: ChameleonConfig base_model_prefix = "model" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _no_split_modules = ["ChameleonDecoderLayer", "ChameleonSwinDecoderLayer"] _skip_keys_device_placement = ["past_key_values", "causal_mask"] diff --git a/src/transformers/models/chinese_clip/modeling_chinese_clip.py b/src/transformers/models/chinese_clip/modeling_chinese_clip.py index d1d00eda88ad..957615c923ec 100644 --- a/src/transformers/models/chinese_clip/modeling_chinese_clip.py +++ b/src/transformers/models/chinese_clip/modeling_chinese_clip.py @@ -560,7 +560,7 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: class 
ChineseCLIPPreTrainedModel(PreTrainedModel): config: ChineseCLIPConfig base_model_prefix = "chinese_clip" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True @torch.no_grad() @@ -798,7 +798,7 @@ class ChineseCLIPTextModel(ChineseCLIPPreTrainedModel): """ config: ChineseCLIPTextConfig - input_modalities = "text" + input_modalities = ("text",) _no_split_modules = ["ChineseCLIPTextEmbeddings"] def __init__(self, config, add_pooling_layer=True): @@ -906,7 +906,7 @@ def forward( class ChineseCLIPVisionModel(ChineseCLIPPreTrainedModel): config: ChineseCLIPVisionConfig main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) _no_split_modules = ["ChineseCLIPVisionEmbeddings", "ChineseCLIPVisionAttention"] def __init__(self, config: ChineseCLIPVisionConfig): diff --git a/src/transformers/models/clap/modeling_clap.py b/src/transformers/models/clap/modeling_clap.py index 583ac01290b8..1421bdb83c9d 100644 --- a/src/transformers/models/clap/modeling_clap.py +++ b/src/transformers/models/clap/modeling_clap.py @@ -1306,7 +1306,7 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: class ClapPreTrainedModel(PreTrainedModel): config: ClapConfig base_model_prefix = "clap" - input_modalities = ["audio", "text"] + input_modalities = ("audio", "text") supports_gradient_checkpointing = False @torch.no_grad() @@ -1410,7 +1410,7 @@ def forward( ) class ClapTextModel(ClapPreTrainedModel): config: ClapTextConfig - input_modalities = "text" + input_modalities = ("text",) def __init__(self, config, add_pooling_layer=True): r""" @@ -1715,7 +1715,7 @@ def forward( @auto_docstring class ClapTextModelWithProjection(ClapPreTrainedModel): config: ClapTextConfig - input_modalities = "text" + input_modalities = ("text",) def __init__(self, config: ClapTextConfig): super().__init__(config) diff --git a/src/transformers/models/clip/modeling_clip.py b/src/transformers/models/clip/modeling_clip.py index 49bab19b971f..05109a841cfa 100644 --- a/src/transformers/models/clip/modeling_clip.py +++ b/src/transformers/models/clip/modeling_clip.py @@ -398,7 +398,7 @@ def forward( class CLIPPreTrainedModel(PreTrainedModel): config: CLIPConfig base_model_prefix = "clip" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _supports_sdpa = True _supports_flash_attn = True @@ -597,7 +597,7 @@ def forward( ) class CLIPTextModel(CLIPPreTrainedModel): config: CLIPTextConfig - input_modalities = "text" + input_modalities = ("text",) _no_split_modules = ["CLIPTextEmbeddings", "CLIPEncoderLayer"] @@ -693,7 +693,7 @@ def forward( class CLIPVisionModel(CLIPPreTrainedModel): config: CLIPVisionConfig main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) _no_split_modules = ["CLIPEncoderLayer"] def __init__(self, config: CLIPVisionConfig): @@ -942,7 +942,7 @@ def forward( @auto_docstring class CLIPTextModelWithProjection(CLIPPreTrainedModel): config: CLIPTextConfig - input_modalities = "text" + input_modalities = ("text",) _no_split_modules = ["CLIPTextEmbeddings", "CLIPEncoderLayer"] @@ -1008,7 +1008,7 @@ def forward( class CLIPVisionModelWithProjection(CLIPPreTrainedModel): config: CLIPVisionConfig main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) def __init__(self, config: CLIPVisionConfig): super().__init__(config) @@ -1075,7 +1075,7 @@ def forward( ) class 
CLIPForImageClassification(CLIPPreTrainedModel): main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) def __init__(self, config: CLIPConfig) -> None: super().__init__(config) diff --git a/src/transformers/models/clipseg/modeling_clipseg.py b/src/transformers/models/clipseg/modeling_clipseg.py index bba971644a23..af7f0324142a 100644 --- a/src/transformers/models/clipseg/modeling_clipseg.py +++ b/src/transformers/models/clipseg/modeling_clipseg.py @@ -425,7 +425,7 @@ def forward( class CLIPSegPreTrainedModel(PreTrainedModel): config: CLIPSegConfig base_model_prefix = "clip" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True @torch.no_grad() @@ -651,7 +651,7 @@ def forward( class CLIPSegTextModel(CLIPSegPreTrainedModel): config: CLIPSegTextConfig - input_modalities = "text" + input_modalities = ("text",) _no_split_modules = ["CLIPSegTextEmbeddings", "CLIPSegEncoderLayer"] @@ -757,7 +757,7 @@ def forward( class CLIPSegVisionModel(CLIPSegPreTrainedModel): config: CLIPSegVisionConfig main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) def __init__(self, config: CLIPSegVisionConfig): super().__init__(config) diff --git a/src/transformers/models/cohere2_vision/modeling_cohere2_vision.py b/src/transformers/models/cohere2_vision/modeling_cohere2_vision.py index 2ebfa7f044cd..45ebf5af4516 100644 --- a/src/transformers/models/cohere2_vision/modeling_cohere2_vision.py +++ b/src/transformers/models/cohere2_vision/modeling_cohere2_vision.py @@ -130,7 +130,7 @@ class Cohere2VisionCausalLMOutputWithPast(ModelOutput): class Cohere2VisionPreTrainedModel(PreTrainedModel): config: Cohere2VisionConfig base_model_prefix = "model" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _skip_keys_device_placement = "past_key_values" diff --git a/src/transformers/models/colpali/modeling_colpali.py b/src/transformers/models/colpali/modeling_colpali.py index 75e35c4126c0..c31195f249a1 100644 --- a/src/transformers/models/colpali/modeling_colpali.py +++ b/src/transformers/models/colpali/modeling_colpali.py @@ -33,7 +33,7 @@ class ColPaliPreTrainedModel(PreTrainedModel): config: ColPaliConfig base_model_prefix = "model" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") _no_split_modules = [] _supports_sdpa = True _supports_flash_attn = True diff --git a/src/transformers/models/colqwen2/modeling_colqwen2.py b/src/transformers/models/colqwen2/modeling_colqwen2.py index 3d0ca8d3a84b..383cde0b8cb9 100644 --- a/src/transformers/models/colqwen2/modeling_colqwen2.py +++ b/src/transformers/models/colqwen2/modeling_colqwen2.py @@ -41,7 +41,7 @@ class ColQwen2PreTrainedModel(PreTrainedModel): config: ColQwen2Config base_model_prefix = "model" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") _no_split_modules = [] _supports_sdpa = True _supports_flash_attn = True diff --git a/src/transformers/models/conditional_detr/modeling_conditional_detr.py b/src/transformers/models/conditional_detr/modeling_conditional_detr.py index 3721678dd688..0b0336505db7 100644 --- a/src/transformers/models/conditional_detr/modeling_conditional_detr.py +++ b/src/transformers/models/conditional_detr/modeling_conditional_detr.py @@ -968,7 +968,7 @@ class ConditionalDetrPreTrainedModel(PreTrainedModel): config: ConditionalDetrConfig base_model_prefix = "model" main_input_name = "pixel_values" 
- input_modalities = "image" + input_modalities = ("image",) _no_split_modules = [r"ConditionalDetrConvEncoder", r"ConditionalDetrEncoderLayer", r"ConditionalDetrDecoderLayer"] @torch.no_grad() diff --git a/src/transformers/models/convnext/modeling_convnext.py b/src/transformers/models/convnext/modeling_convnext.py index 851bee0060e1..8d1188dea3b8 100755 --- a/src/transformers/models/convnext/modeling_convnext.py +++ b/src/transformers/models/convnext/modeling_convnext.py @@ -237,7 +237,7 @@ class ConvNextPreTrainedModel(PreTrainedModel): config: ConvNextConfig base_model_prefix = "convnext" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) _no_split_modules = ["ConvNextLayer"] _can_record_outputs = {} # hidden states are collected explicitly diff --git a/src/transformers/models/convnextv2/modeling_convnextv2.py b/src/transformers/models/convnextv2/modeling_convnextv2.py index 02e780aa70aa..b7774fcd4d11 100644 --- a/src/transformers/models/convnextv2/modeling_convnextv2.py +++ b/src/transformers/models/convnextv2/modeling_convnextv2.py @@ -258,7 +258,7 @@ class ConvNextV2PreTrainedModel(PreTrainedModel): config: ConvNextV2Config base_model_prefix = "convnextv2" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) _no_split_modules = ["ConvNextV2Layer"] @torch.no_grad() diff --git a/src/transformers/models/csm/modeling_csm.py b/src/transformers/models/csm/modeling_csm.py index 87da76281717..2ba3394b4416 100644 --- a/src/transformers/models/csm/modeling_csm.py +++ b/src/transformers/models/csm/modeling_csm.py @@ -395,7 +395,7 @@ def forward( class CsmPreTrainedModel(PreTrainedModel): config: CsmConfig base_model_prefix = "model" - input_modalities = ["audio", "text"] + input_modalities = ("audio", "text") supports_gradient_checkpointing = True _no_split_modules = ["CsmDecoderLayer"] _skip_keys_device_placement = ["past_key_values"] diff --git a/src/transformers/models/csm/modular_csm.py b/src/transformers/models/csm/modular_csm.py index 0ecb41071cd2..cbff1111587b 100644 --- a/src/transformers/models/csm/modular_csm.py +++ b/src/transformers/models/csm/modular_csm.py @@ -126,7 +126,7 @@ class CsmDecoderLayer(LlamaDecoderLayer): class CsmPreTrainedModel(PreTrainedModel): config: CsmConfig base_model_prefix = "model" - input_modalities = ["audio", "text"] + input_modalities = ("audio", "text") supports_gradient_checkpointing = True _no_split_modules = ["CsmDecoderLayer"] _skip_keys_device_placement = ["past_key_values"] diff --git a/src/transformers/models/d_fine/modeling_d_fine.py b/src/transformers/models/d_fine/modeling_d_fine.py index 86953cd47ecf..9c1f0d25de3e 100644 --- a/src/transformers/models/d_fine/modeling_d_fine.py +++ b/src/transformers/models/d_fine/modeling_d_fine.py @@ -441,7 +441,7 @@ class DFinePreTrainedModel(PreTrainedModel): config: DFineConfig base_model_prefix = "d_fine" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) _no_split_modules = [r"DFineHybridEncoder", r"DFineDecoderLayer"] @torch.no_grad() diff --git a/src/transformers/models/dab_detr/modeling_dab_detr.py b/src/transformers/models/dab_detr/modeling_dab_detr.py index a1337acefd7a..f2ac2916d757 100644 --- a/src/transformers/models/dab_detr/modeling_dab_detr.py +++ b/src/transformers/models/dab_detr/modeling_dab_detr.py @@ -813,7 +813,7 @@ class DabDetrPreTrainedModel(PreTrainedModel): config: DabDetrConfig base_model_prefix = "model" main_input_name = "pixel_values" - input_modalities 
= "image" + input_modalities = ("image",) _no_split_modules = [r"DabDetrConvEncoder", r"DabDetrEncoderLayer", r"DabDetrDecoderLayer"] @torch.no_grad() diff --git a/src/transformers/models/data2vec/modeling_data2vec_vision.py b/src/transformers/models/data2vec/modeling_data2vec_vision.py index d21dfd325e4b..16d6bb4ff685 100644 --- a/src/transformers/models/data2vec/modeling_data2vec_vision.py +++ b/src/transformers/models/data2vec/modeling_data2vec_vision.py @@ -682,7 +682,7 @@ def forward( class Data2VecVisionPreTrainedModel(PreTrainedModel): config: Data2VecVisionConfig base_model_prefix = "data2vec_vision" - input_modalities = "image" + input_modalities = ("image",) main_input_name = "pixel_values" supports_gradient_checkpointing = True _no_split_modules = ["Data2VecVisionLayer"] diff --git a/src/transformers/models/deepseek_vl/modeling_deepseek_vl.py b/src/transformers/models/deepseek_vl/modeling_deepseek_vl.py index 98f1462231af..f568477303c0 100644 --- a/src/transformers/models/deepseek_vl/modeling_deepseek_vl.py +++ b/src/transformers/models/deepseek_vl/modeling_deepseek_vl.py @@ -122,7 +122,7 @@ def forward(self, vision_encodings: torch.Tensor) -> torch.Tensor: class DeepseekVLPreTrainedModel(PreTrainedModel): config: DeepseekVLConfig base_model_prefix = "model" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _no_split_modules = ["LlamaDecoderLayer"] _skip_keys_device_placement = ["past_key_values", "causal_mask"] @@ -235,7 +235,7 @@ def forward( class DeepseekVLForConditionalGeneration(DeepseekVLPreTrainedModel, GenerationMixin): _tied_weights_keys = {"lm_head.weight": "model.language_model.embed_tokens.weight"} - output_modalities = "text" + output_modalities = ("text",) _can_compile_fullgraph = True def __init__(self, config: DeepseekVLConfig): diff --git a/src/transformers/models/deepseek_vl/modular_deepseek_vl.py b/src/transformers/models/deepseek_vl/modular_deepseek_vl.py index 18f2b65a669e..f251b393f089 100644 --- a/src/transformers/models/deepseek_vl/modular_deepseek_vl.py +++ b/src/transformers/models/deepseek_vl/modular_deepseek_vl.py @@ -157,7 +157,7 @@ def __init__(self, config): class DeepseekVLForConditionalGeneration(JanusForConditionalGeneration): - output_modalities = "text" + output_modalities = ("text",) def prepare_embeddings_for_image_generation(self): raise AttributeError("Not needed for DeepseekVL") diff --git a/src/transformers/models/deepseek_vl_hybrid/modeling_deepseek_vl_hybrid.py b/src/transformers/models/deepseek_vl_hybrid/modeling_deepseek_vl_hybrid.py index 7eab9ea059f4..c0204fe94382 100644 --- a/src/transformers/models/deepseek_vl_hybrid/modeling_deepseek_vl_hybrid.py +++ b/src/transformers/models/deepseek_vl_hybrid/modeling_deepseek_vl_hybrid.py @@ -205,7 +205,7 @@ def forward( class DeepseekVLHybridPreTrainedModel(PreTrainedModel): config: DeepseekVLHybridConfig base_model_prefix = "model" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _no_split_modules = ["LlamaDecoderLayer"] _skip_keys_device_placement = ["past_key_values", "causal_mask"] @@ -390,7 +390,7 @@ def get_high_res_image_features(self, pixel_values): class DeepseekVLHybridForConditionalGeneration(DeepseekVLHybridPreTrainedModel, GenerationMixin): _tied_weights_keys = {"lm_head.weight": "model.language_model.embed_tokens.weight"} - output_modalities = "text" + output_modalities = ("text",) _can_compile_fullgraph = True def __init__(self, config: 
DeepseekVLHybridConfig): diff --git a/src/transformers/models/deformable_detr/modeling_deformable_detr.py b/src/transformers/models/deformable_detr/modeling_deformable_detr.py index a8133c85b573..68cb0456ddac 100755 --- a/src/transformers/models/deformable_detr/modeling_deformable_detr.py +++ b/src/transformers/models/deformable_detr/modeling_deformable_detr.py @@ -919,7 +919,7 @@ class DeformableDetrPreTrainedModel(PreTrainedModel): config: DeformableDetrConfig base_model_prefix = "model" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True _no_split_modules = [ r"DeformableDetrConvEncoder", diff --git a/src/transformers/models/deit/modeling_deit.py b/src/transformers/models/deit/modeling_deit.py index 62a787e11507..7f7cffa4f786 100644 --- a/src/transformers/models/deit/modeling_deit.py +++ b/src/transformers/models/deit/modeling_deit.py @@ -355,7 +355,7 @@ class DeiTPreTrainedModel(PreTrainedModel): config: DeiTConfig base_model_prefix = "deit" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True _no_split_modules = ["DeiTLayer"] _supports_sdpa = True diff --git a/src/transformers/models/depth_anything/modeling_depth_anything.py b/src/transformers/models/depth_anything/modeling_depth_anything.py index 158179e04184..e13499b24fbd 100644 --- a/src/transformers/models/depth_anything/modeling_depth_anything.py +++ b/src/transformers/models/depth_anything/modeling_depth_anything.py @@ -213,7 +213,7 @@ class DepthAnythingPreTrainedModel(PreTrainedModel): config: DepthAnythingConfig base_model_prefix = "depth_anything" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True diff --git a/src/transformers/models/depth_pro/modeling_depth_pro.py b/src/transformers/models/depth_pro/modeling_depth_pro.py index 23e96f838c4d..75b8c213e1f0 100644 --- a/src/transformers/models/depth_pro/modeling_depth_pro.py +++ b/src/transformers/models/depth_pro/modeling_depth_pro.py @@ -603,7 +603,7 @@ class DepthProPreTrainedModel(PreTrainedModel): config: DepthProConfig base_model_prefix = "depth_pro" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True _supports_sdpa = True _no_split_modules = ["DepthProPreActResidualLayer"] diff --git a/src/transformers/models/detr/modeling_detr.py b/src/transformers/models/detr/modeling_detr.py index 1727c9b33700..7f5aa1a458e6 100644 --- a/src/transformers/models/detr/modeling_detr.py +++ b/src/transformers/models/detr/modeling_detr.py @@ -725,7 +725,7 @@ class DetrPreTrainedModel(PreTrainedModel): config: DetrConfig base_model_prefix = "model" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) _no_split_modules = [r"DetrConvEncoder", r"DetrEncoderLayer", r"DetrDecoderLayer"] @torch.no_grad() diff --git a/src/transformers/models/dia/modeling_dia.py b/src/transformers/models/dia/modeling_dia.py index 3a0ddf6e3f90..c2e96fba9fb3 100644 --- a/src/transformers/models/dia/modeling_dia.py +++ b/src/transformers/models/dia/modeling_dia.py @@ -805,7 +805,7 @@ def forward( ) class DiaForConditionalGeneration(DiaPreTrainedModel, DiaGenerationMixin): base_model_prefix = "model" - output_modalities = "audio" + output_modalities = ("audio",) def __init__(self, config: DiaConfig): super().__init__(config) diff --git 
a/src/transformers/models/dia/modular_dia.py b/src/transformers/models/dia/modular_dia.py index 3c6a6b3d17cb..b15ba8b0dbb3 100644 --- a/src/transformers/models/dia/modular_dia.py +++ b/src/transformers/models/dia/modular_dia.py @@ -597,7 +597,7 @@ def forward( ) class DiaForConditionalGeneration(DiaPreTrainedModel, DiaGenerationMixin): base_model_prefix = "model" - output_modalities = "audio" + output_modalities = ("audio",) def __init__(self, config: DiaConfig): super().__init__(config) diff --git a/src/transformers/models/dinat/modeling_dinat.py b/src/transformers/models/dinat/modeling_dinat.py index d2b111226608..c9133497f5bc 100644 --- a/src/transformers/models/dinat/modeling_dinat.py +++ b/src/transformers/models/dinat/modeling_dinat.py @@ -559,7 +559,7 @@ class DinatPreTrainedModel(PreTrainedModel): config: DinatConfig base_model_prefix = "dinat" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) @auto_docstring diff --git a/src/transformers/models/dinov2/modeling_dinov2.py b/src/transformers/models/dinov2/modeling_dinov2.py index 1ce9ff9a63cf..be5c46d77615 100644 --- a/src/transformers/models/dinov2/modeling_dinov2.py +++ b/src/transformers/models/dinov2/modeling_dinov2.py @@ -404,7 +404,7 @@ class Dinov2PreTrainedModel(PreTrainedModel): config: Dinov2Config base_model_prefix = "dinov2" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True _no_split_modules = ["Dinov2Layer"] _supports_sdpa = True diff --git a/src/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py b/src/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py index 5feb0e7b5b44..5049969f437b 100644 --- a/src/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py +++ b/src/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py @@ -421,7 +421,7 @@ class Dinov2WithRegistersPreTrainedModel(PreTrainedModel): config: Dinov2WithRegistersConfig base_model_prefix = "dinov2_with_registers" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True _no_split_modules = ["Dinov2WithRegistersLayer"] _supports_sdpa = True diff --git a/src/transformers/models/dinov3_convnext/modeling_dinov3_convnext.py b/src/transformers/models/dinov3_convnext/modeling_dinov3_convnext.py index 3ed4f38217dd..49bc6fc17e4d 100644 --- a/src/transformers/models/dinov3_convnext/modeling_dinov3_convnext.py +++ b/src/transformers/models/dinov3_convnext/modeling_dinov3_convnext.py @@ -189,7 +189,7 @@ class DINOv3ConvNextPreTrainedModel(PreTrainedModel): config: DINOv3ConvNextConfig base_model_prefix = "dinov3_convnext" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) _no_split_modules = ["DINOv3ConvNextLayer"] @torch.no_grad() diff --git a/src/transformers/models/dinov3_vit/modeling_dinov3_vit.py b/src/transformers/models/dinov3_vit/modeling_dinov3_vit.py index 09edeed17543..473590315431 100644 --- a/src/transformers/models/dinov3_vit/modeling_dinov3_vit.py +++ b/src/transformers/models/dinov3_vit/modeling_dinov3_vit.py @@ -437,7 +437,7 @@ class DINOv3ViTPreTrainedModel(PreTrainedModel): config: DINOv3ViTConfig base_model_prefix = "dinov3_vit" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True _no_split_modules = ["DINOv3ViTLayer"] _supports_sdpa = True 
diff --git a/src/transformers/models/donut/modeling_donut_swin.py b/src/transformers/models/donut/modeling_donut_swin.py index 7e8a6ae5d90f..9886488c0542 100644 --- a/src/transformers/models/donut/modeling_donut_swin.py +++ b/src/transformers/models/donut/modeling_donut_swin.py @@ -786,7 +786,7 @@ class DonutSwinPreTrainedModel(PreTrainedModel): config: DonutSwinConfig base_model_prefix = "donut" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True _no_split_modules = ["DonutSwinStage"] diff --git a/src/transformers/models/dpt/modeling_dpt.py b/src/transformers/models/dpt/modeling_dpt.py index e5a3d7367c58..fb871c90d1e3 100755 --- a/src/transformers/models/dpt/modeling_dpt.py +++ b/src/transformers/models/dpt/modeling_dpt.py @@ -723,7 +723,7 @@ class DPTPreTrainedModel(PreTrainedModel): config: DPTConfig base_model_prefix = "dpt" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True _supports_sdpa = True _supports_flash_attn = True diff --git a/src/transformers/models/edgetam/modeling_edgetam.py b/src/transformers/models/edgetam/modeling_edgetam.py index 59979abbcf69..11c2e130ce6e 100644 --- a/src/transformers/models/edgetam/modeling_edgetam.py +++ b/src/transformers/models/edgetam/modeling_edgetam.py @@ -304,7 +304,7 @@ class EdgeTamPreTrainedModel(PreTrainedModel): config_class = EdgeTamConfig base_model_prefix = "edgetam" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) _supports_sdpa = True _supports_flash_attn_2 = True _supports_attention_backend = True @@ -911,7 +911,7 @@ def _dynamic_multimask_via_stability(self, all_mask_logits, all_iou_scores): """ ) class EdgeTamModel(EdgeTamPreTrainedModel): - input_modalities = ["image", "text"] + input_modalities = ("image", "text") _can_record_outputs = {"mask_decoder_attentions": OutputRecorder(EdgeTamTwoWayAttentionBlock, index=2)} _keys_to_ignore_on_load_unexpected = [ r"^memory_.*", diff --git a/src/transformers/models/edgetam_video/modeling_edgetam_video.py b/src/transformers/models/edgetam_video/modeling_edgetam_video.py index 2d8f3e133598..756a14397d0d 100644 --- a/src/transformers/models/edgetam_video/modeling_edgetam_video.py +++ b/src/transformers/models/edgetam_video/modeling_edgetam_video.py @@ -1973,7 +1973,7 @@ def get_1d_sine_pe(pos_inds, dim, temperature=10000): @auto_docstring class EdgeTamVideoModel(EdgeTamVideoPreTrainedModel): - input_modalities = ["video", "text"] + input_modalities = ("video", "text") _can_record_outputs = {"mask_decoder_attentions": OutputRecorder(EdgeTamVideoTwoWayAttentionBlock, index=2)} _keys_to_ignore_on_load_unexpected = [] _tied_weights_keys = { diff --git a/src/transformers/models/efficientloftr/modeling_efficientloftr.py b/src/transformers/models/efficientloftr/modeling_efficientloftr.py index ab467db4fd92..bcf0da0da2af 100644 --- a/src/transformers/models/efficientloftr/modeling_efficientloftr.py +++ b/src/transformers/models/efficientloftr/modeling_efficientloftr.py @@ -668,7 +668,7 @@ class EfficientLoFTRPreTrainedModel(PreTrainedModel): config_class = EfficientLoFTRConfig base_model_prefix = "efficientloftr" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True _supports_flash_attn = True _supports_sdpa = True diff --git a/src/transformers/models/efficientnet/modeling_efficientnet.py 
b/src/transformers/models/efficientnet/modeling_efficientnet.py index b4635b3fcadb..dc535f052208 100644 --- a/src/transformers/models/efficientnet/modeling_efficientnet.py +++ b/src/transformers/models/efficientnet/modeling_efficientnet.py @@ -434,7 +434,7 @@ class EfficientNetPreTrainedModel(PreTrainedModel): config: EfficientNetConfig base_model_prefix = "efficientnet" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) _no_split_modules = [] @torch.no_grad() diff --git a/src/transformers/models/emu3/modeling_emu3.py b/src/transformers/models/emu3/modeling_emu3.py index 65671913b27f..2a3e06b0fd6c 100644 --- a/src/transformers/models/emu3/modeling_emu3.py +++ b/src/transformers/models/emu3/modeling_emu3.py @@ -927,7 +927,7 @@ class Emu3VQVAE(PreTrainedModel): config: Emu3VQVAEConfig base_model_prefix = "emuvideovq" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) _supports_sdpa = True _supports_flash_attn = True _supports_flex_attn = True @@ -1095,7 +1095,7 @@ def convert_bpe2img(self, img_batch: torch.Tensor) -> torch.Tensor: class Emu3PreTrainedModel(PreTrainedModel): config: Emu3Config base_model_prefix = "model" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _no_split_modules = [ "Emu3DecoderLayer", @@ -1484,7 +1484,7 @@ def forward( class Emu3ForConditionalGeneration(Emu3PreTrainedModel, GenerationMixin): - output_modalities = ["image", "text"] + output_modalities = ("image", "text") _tied_weights_keys = {"lm_head.weight": "model.text_model.embed_tokens.weight"} _checkpoint_conversion_mapping = { "^text_model.model": "model.text_model", diff --git a/src/transformers/models/emu3/modular_emu3.py b/src/transformers/models/emu3/modular_emu3.py index 9e1e2ba4f5f0..bd6b3013c644 100644 --- a/src/transformers/models/emu3/modular_emu3.py +++ b/src/transformers/models/emu3/modular_emu3.py @@ -677,7 +677,7 @@ class Emu3VQVAE(PreTrainedModel): config: Emu3VQVAEConfig base_model_prefix = "emuvideovq" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) _supports_sdpa = True _supports_flash_attn = True _supports_flex_attn = True @@ -1038,7 +1038,7 @@ def forward( class Emu3ForConditionalGeneration(Emu3PreTrainedModel, GenerationMixin): - output_modalities = ["image", "text"] + output_modalities = ("image", "text") _tied_weights_keys = {"lm_head.weight": "model.text_model.embed_tokens.weight"} _checkpoint_conversion_mapping = { "^text_model.model": "model.text_model", diff --git a/src/transformers/models/emu3/processing_emu3.py b/src/transformers/models/emu3/processing_emu3.py index 52f39a913c54..568190295d53 100644 --- a/src/transformers/models/emu3/processing_emu3.py +++ b/src/transformers/models/emu3/processing_emu3.py @@ -230,5 +230,40 @@ def calculate_generate_size(self, ratio, image_area, spatial_factor): def postprocess(self, images: ImageInput, **kwargs): return self.image_processor.postprocess(images, **kwargs) + def post_process_multimodal_output( + self, generated_outputs, skip_special_tokens=True, generation_mode=None, **kwargs + ): + """ + Post-process the output of a multimodal model to return the requested modality output. + If the model cannot generated the requested modality, an error will be raised. + + Args: + generated_outputs (`torch.Tensor` or `np.ndarray`): + The output of the model `generate` function. 
The output is expected to be a tensor of shape `(batch_size, sequence_length)` + or `(sequence_length,)`. + skip_special_tokens (`bool`, *optional*, defaults to `True`): + Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method. + generation_mode (`str`, *optional*): + Generation mode indicated which modality to output and can be one of `["text", "image", "audio"]`. + **kwargs: + Additional arguments to be passed to the tokenizer's `batch_decode method`. + + Returns: + `list[Union[str, PIL.Image.Image]]`: The decoded text or generated image. + """ + if generation_mode is None or generation_mode == "text": + return self.post_process_image_text_to_text( + generated_outputs, skip_special_tokens=skip_special_tokens, **kwargs + ) + + elif generation_mode == "image": + images = self.postprocess(generated_outputs, return_tensors="PIL.Image.Image") + return images["pixel_values"] + + else: + raise ValueError( + f"{self.__class__.__name__} got an unexpected generation_mode={generation_mode}. Supported options are only `text` and `image" + ) + __all__ = ["Emu3Processor"] diff --git a/src/transformers/models/eomt/modeling_eomt.py b/src/transformers/models/eomt/modeling_eomt.py index 344342b470ba..bf0bf12ed7c4 100644 --- a/src/transformers/models/eomt/modeling_eomt.py +++ b/src/transformers/models/eomt/modeling_eomt.py @@ -988,7 +988,7 @@ class EomtPreTrainedModel(PreTrainedModel): config: EomtConfig base_model_prefix = "eomt" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = False _no_split_modules = ["EomtLayer"] _supports_sdpa = True diff --git a/src/transformers/models/eomt/modular_eomt.py b/src/transformers/models/eomt/modular_eomt.py index c22571acbb97..2d45abe42d93 100644 --- a/src/transformers/models/eomt/modular_eomt.py +++ b/src/transformers/models/eomt/modular_eomt.py @@ -393,7 +393,7 @@ class EomtPreTrainedModel(PreTrainedModel): config: EomtConfig base_model_prefix = "eomt" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = False _no_split_modules = ["EomtLayer"] _supports_sdpa = True diff --git a/src/transformers/models/flava/modeling_flava.py b/src/transformers/models/flava/modeling_flava.py index d5f263137b3d..c6ffb49febd0 100644 --- a/src/transformers/models/flava/modeling_flava.py +++ b/src/transformers/models/flava/modeling_flava.py @@ -663,7 +663,7 @@ def forward(self, hidden_states: torch.Tensor): class FlavaPreTrainedModel(PreTrainedModel): config: FlavaConfig base_model_prefix = "flava" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True @torch.no_grad() @@ -690,7 +690,7 @@ class FlavaImageModel(FlavaPreTrainedModel): # This override allows us to load FlavaImageModel from FlavaModel/FlavaForPreTraining checkpoints. base_model_prefix = "flava.image_model" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) def __init__(self, config: FlavaImageConfig, add_pooling_layer: bool = True): r""" @@ -770,7 +770,7 @@ class FlavaTextModel(FlavaPreTrainedModel): config: FlavaTextConfig # This override allows us to load FlavaTextModel from FlavaModel/FlavaForPreTraining checkpoints. 
base_model_prefix = "flava.text_model" - input_modalities = "text" + input_modalities = ("text",) def __init__(self, config: FlavaTextConfig, add_pooling_layer: bool = True): r""" @@ -1301,7 +1301,7 @@ class FlavaImageCodebook(FlavaPreTrainedModel): base_model_prefix = "model" config: FlavaImageCodebookConfig main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = False def __init__( diff --git a/src/transformers/models/florence2/modeling_florence2.py b/src/transformers/models/florence2/modeling_florence2.py index 9e761c0a8c20..9212cd5d8f5f 100644 --- a/src/transformers/models/florence2/modeling_florence2.py +++ b/src/transformers/models/florence2/modeling_florence2.py @@ -485,7 +485,7 @@ def forward(self, hidden_states: torch.Tensor): class Florence2VisionPreTrainedModel(PreTrainedModel): config_class = Florence2VisionConfig main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) _supports_sdpa = True _supports_flash_attn = True _supports_flex_attn = True @@ -616,7 +616,7 @@ class Florence2Seq2SeqLMOutput(Seq2SeqLMOutput): class Florence2PreTrainedModel(PreTrainedModel): config: Florence2Config base_model_prefix = "model" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _skip_keys_device_placement = "past_key_values" diff --git a/src/transformers/models/florence2/modular_florence2.py b/src/transformers/models/florence2/modular_florence2.py index 86bd02bda077..74ace79943e7 100644 --- a/src/transformers/models/florence2/modular_florence2.py +++ b/src/transformers/models/florence2/modular_florence2.py @@ -1366,7 +1366,7 @@ def forward(self, hidden_states: torch.Tensor): class Florence2VisionPreTrainedModel(PreTrainedModel): config_class = Florence2VisionConfig main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) _supports_sdpa = True _supports_flash_attn = True _supports_flex_attn = True diff --git a/src/transformers/models/fuyu/modeling_fuyu.py b/src/transformers/models/fuyu/modeling_fuyu.py index c4983e007ba7..2bd58b0e6f38 100644 --- a/src/transformers/models/fuyu/modeling_fuyu.py +++ b/src/transformers/models/fuyu/modeling_fuyu.py @@ -35,7 +35,7 @@ class FuyuPreTrainedModel(PreTrainedModel): config: FuyuConfig base_model_prefix = "fuyu" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _supports_attention_backend = True _supports_flash_attn = True diff --git a/src/transformers/models/gemma3/modeling_gemma3.py b/src/transformers/models/gemma3/modeling_gemma3.py index 8e93ef9231b5..97a881c5edc8 100644 --- a/src/transformers/models/gemma3/modeling_gemma3.py +++ b/src/transformers/models/gemma3/modeling_gemma3.py @@ -465,7 +465,7 @@ class Gemma3PreTrainedModel(PreTrainedModel): "hidden_states": Gemma3DecoderLayer, "attentions": Gemma3Attention, } - input_modalities = ["image", "text"] + input_modalities = ("image", "text") @torch.no_grad() def _init_weights(self, module): @@ -493,7 +493,7 @@ def inner_mask(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool: @auto_docstring class Gemma3TextModel(Gemma3PreTrainedModel): config: Gemma3TextConfig - input_modalities = "text" + input_modalities = ("text",) def __init__(self, config: Gemma3TextConfig): super().__init__(config) @@ -1348,7 +1348,7 @@ class Gemma3TextForSequenceClassification(GenericForSequenceClassification, Gemm """ config: Gemma3TextConfig 
- input_modalities = "text" + input_modalities = ("text",) __all__ = [ diff --git a/src/transformers/models/gemma3/modular_gemma3.py b/src/transformers/models/gemma3/modular_gemma3.py index 7f9e987035ad..31f25550df03 100644 --- a/src/transformers/models/gemma3/modular_gemma3.py +++ b/src/transformers/models/gemma3/modular_gemma3.py @@ -562,7 +562,7 @@ def forward( class Gemma3PreTrainedModel(Gemma2PreTrainedModel): base_model_prefix = "model" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") _no_split_modules = [ "Gemma3DecoderLayer", "SiglipVisionEmbeddings", @@ -595,7 +595,7 @@ def inner_mask(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool: class Gemma3TextModel(Gemma2Model): config: Gemma3TextConfig - input_modalities = "text" + input_modalities = ("text",) def __init__(self, config: Gemma3TextConfig): super().__init__(config) @@ -1192,7 +1192,7 @@ class Gemma3TextForSequenceClassification(GenericForSequenceClassification, Gemm """ config: Gemma3TextConfig - input_modalities = "text" + input_modalities = ("text",) __all__ = [ diff --git a/src/transformers/models/gemma3n/modeling_gemma3n.py b/src/transformers/models/gemma3n/modeling_gemma3n.py index 3be6ad39ddca..7d1fbb9623b6 100644 --- a/src/transformers/models/gemma3n/modeling_gemma3n.py +++ b/src/transformers/models/gemma3n/modeling_gemma3n.py @@ -1600,7 +1600,7 @@ class Gemma3nPreTrainedModel(PreTrainedModel): "hidden_states": Gemma3nDecoderLayer, "attentions": Gemma3nAttention, } - input_modalities = ["image", "text", "audio"] + input_modalities = ("image", "text", "audio") @torch.no_grad() def _init_weights(self, module): @@ -1697,7 +1697,7 @@ def forward(self, x, position_ids, layer_type=None): @auto_docstring(custom_intro="The base Gemma 3n language model without a language modeling head.") class Gemma3nTextModel(Gemma3nPreTrainedModel): config: Gemma3nTextConfig - input_modalities = "text" + input_modalities = ("text",) def __init__(self, config: Gemma3nTextConfig): super().__init__(config) diff --git a/src/transformers/models/gemma3n/modular_gemma3n.py b/src/transformers/models/gemma3n/modular_gemma3n.py index a226824781cf..cc79bf00cdc5 100644 --- a/src/transformers/models/gemma3n/modular_gemma3n.py +++ b/src/transformers/models/gemma3n/modular_gemma3n.py @@ -1873,7 +1873,7 @@ def forward( class Gemma3nPreTrainedModel(Gemma2PreTrainedModel): config: Gemma3nConfig - input_modalities = ["image", "text", "audio"] + input_modalities = ("image", "text", "audio") _no_split_modules = ["Gemma3nTextDecoderLayer"] @torch.no_grad() diff --git a/src/transformers/models/git/modeling_git.py b/src/transformers/models/git/modeling_git.py index 66f11594e27c..682134b09c65 100644 --- a/src/transformers/models/git/modeling_git.py +++ b/src/transformers/models/git/modeling_git.py @@ -386,7 +386,7 @@ def forward( class GitPreTrainedModel(PreTrainedModel): config: GitConfig base_model_prefix = "git" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True @torch.no_grad() @@ -807,7 +807,7 @@ def forward( class GitVisionModel(GitPreTrainedModel): config: GitVisionConfig main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) # Copied from transformers.models.clip.modeling_clip.CLIPVisionModel.__init__ with CLIP->Git def __init__(self, config: GitVisionConfig): diff --git a/src/transformers/models/glm46v/modeling_glm46v.py b/src/transformers/models/glm46v/modeling_glm46v.py index e3da448e79fd..5950d5d7a2ab 100644 
--- a/src/transformers/models/glm46v/modeling_glm46v.py +++ b/src/transformers/models/glm46v/modeling_glm46v.py @@ -40,7 +40,7 @@ class Glm46VPreTrainedModel(PreTrainedModel): config: Glm46VConfig base_model_prefix = "model" - input_modalities = ["image", "video", "text"] + input_modalities = ("image", "video", "text") supports_gradient_checkpointing = True _no_split_modules = None _skip_keys_device_placement = "past_key_values" diff --git a/src/transformers/models/glm4v/modeling_glm4v.py b/src/transformers/models/glm4v/modeling_glm4v.py index 2589e016c756..c843774c5242 100644 --- a/src/transformers/models/glm4v/modeling_glm4v.py +++ b/src/transformers/models/glm4v/modeling_glm4v.py @@ -691,7 +691,7 @@ class Glm4vModelOutputWithPast(ModelOutput): class Glm4vPreTrainedModel(PreTrainedModel): config: Glm4vConfig base_model_prefix = "model" - input_modalities = ["image", "video", "text"] + input_modalities = ("image", "video", "text") supports_gradient_checkpointing = True _no_split_modules = ["Glm4vTextDecoderLayer", "Glm4vVisionBlock"] _skip_keys_device_placement = "past_key_values" @@ -708,7 +708,7 @@ class Glm4vPreTrainedModel(PreTrainedModel): class Glm4vVisionModel(Glm4vPreTrainedModel): config: Glm4vVisionConfig - input_modalities = ["image", "video"] + input_modalities = ("image", "video") _no_split_modules = ["Glm4vVisionBlock"] def __init__(self, config) -> None: @@ -820,7 +820,7 @@ def forward(self, hidden_states: torch.Tensor, grid_thw: torch.Tensor) -> torch. @auto_docstring class Glm4vTextModel(Glm4vPreTrainedModel): config: Glm4vTextConfig - input_modalities = "text" + input_modalities = ("text",) def __init__(self, config: Glm4vTextConfig): super().__init__(config) diff --git a/src/transformers/models/glm4v/modular_glm4v.py b/src/transformers/models/glm4v/modular_glm4v.py index 41419951df90..5db661e318ff 100644 --- a/src/transformers/models/glm4v/modular_glm4v.py +++ b/src/transformers/models/glm4v/modular_glm4v.py @@ -733,7 +733,7 @@ class Glm4vPreTrainedModel(Qwen2_5_VLPreTrainedModel): class Glm4vVisionModel(Glm4vPreTrainedModel): config: Glm4vVisionConfig - input_modalities = ["image", "video"] + input_modalities = ("image", "video") _no_split_modules = ["Glm4vVisionBlock"] def __init__(self, config) -> None: diff --git a/src/transformers/models/glm4v_moe/modeling_glm4v_moe.py b/src/transformers/models/glm4v_moe/modeling_glm4v_moe.py index 9537d9018838..fbb167c762be 100644 --- a/src/transformers/models/glm4v_moe/modeling_glm4v_moe.py +++ b/src/transformers/models/glm4v_moe/modeling_glm4v_moe.py @@ -552,7 +552,7 @@ class Glm4vMoePreTrainedModel(PreTrainedModel): "attentions": Glm4vMoeTextAttention, "router_logits": OutputRecorder(nn.Linear, layer_name="mlp.gate", index=0), } - input_modalities = ["text", "image", "video"] + input_modalities = ("text", "image", "video") @torch.no_grad() def _init_weights(self, module): @@ -873,7 +873,7 @@ def forward( @auto_docstring class Glm4vMoeVisionModel(Glm4vMoePreTrainedModel): config: Glm4vMoeVisionConfig - input_modalities = ["image", "video"] + input_modalities = ("image", "video") _no_split_modules = ["Glm4vMoeVisionBlock"] def __init__(self, config) -> None: @@ -985,7 +985,7 @@ def forward(self, hidden_states: torch.Tensor, grid_thw: torch.Tensor) -> torch. 
@auto_docstring class Glm4vMoeTextModel(Glm4vMoePreTrainedModel): config: Glm4vMoeTextConfig - input_modalities = "text" + input_modalities = ("text",) def __init__(self, config: Glm4vMoeTextConfig): super().__init__(config) diff --git a/src/transformers/models/glm4v_moe/modular_glm4v_moe.py b/src/transformers/models/glm4v_moe/modular_glm4v_moe.py index 2e8e8ac39f63..c94ad0a9a8f6 100644 --- a/src/transformers/models/glm4v_moe/modular_glm4v_moe.py +++ b/src/transformers/models/glm4v_moe/modular_glm4v_moe.py @@ -476,7 +476,7 @@ def __init__(self, config: Glm4vMoeTextConfig, layer_idx: int): class Glm4vMoePreTrainedModel(Glm4MoePreTrainedModel): config: Glm4vMoeConfig base_model_prefix = "model" - input_modalities = ["text", "image", "video"] + input_modalities = ("text", "image", "video") _no_split_modules = ["Glm4vMoeTextDecoderLayer", "Glm4vMoeVisionBlock"] _skip_keys_device_placement = "past_key_values" diff --git a/src/transformers/models/glpn/modeling_glpn.py b/src/transformers/models/glpn/modeling_glpn.py index 42059a73bc4e..cea58e797ef1 100755 --- a/src/transformers/models/glpn/modeling_glpn.py +++ b/src/transformers/models/glpn/modeling_glpn.py @@ -386,7 +386,7 @@ class GLPNPreTrainedModel(PreTrainedModel): config: GLPNConfig base_model_prefix = "glpn" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) _no_split_modules = [] diff --git a/src/transformers/models/got_ocr2/modeling_got_ocr2.py b/src/transformers/models/got_ocr2/modeling_got_ocr2.py index 3fd5a2c2dbfb..a05bb0e25985 100644 --- a/src/transformers/models/got_ocr2/modeling_got_ocr2.py +++ b/src/transformers/models/got_ocr2/modeling_got_ocr2.py @@ -278,7 +278,7 @@ def forward(self, hidden_states: torch.Tensor) -> tuple[torch.FloatTensor]: class GotOcr2PreTrainedModel(PreTrainedModel): config: GotOcr2Config base_model_prefix = "model" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _skip_keys_device_placement = "past_key_values" _supports_flash_attn = False @@ -402,7 +402,7 @@ def forward(self, hidden_states): class GotOcr2VisionEncoder(GotOcr2PreTrainedModel): _can_record_outputs = {"hidden_states": GotOcr2VisionLayer, "attentions": GotOcr2VisionAttention} - input_modalities = "image" + input_modalities = ("image",) def __init__(self, config: GotOcr2VisionConfig): super().__init__(config) diff --git a/src/transformers/models/got_ocr2/modular_got_ocr2.py b/src/transformers/models/got_ocr2/modular_got_ocr2.py index 36f55a80583d..e4fa64918bf6 100644 --- a/src/transformers/models/got_ocr2/modular_got_ocr2.py +++ b/src/transformers/models/got_ocr2/modular_got_ocr2.py @@ -249,11 +249,11 @@ def __init__(self, config, window_size): class GotOcr2PreTrainedModel(SamPreTrainedModel): - input_modalities = ["image", "text"] + input_modalities = ("image", "text") class GotOcr2VisionEncoder(SamVisionEncoder, GotOcr2PreTrainedModel): - input_modalities = "image" + input_modalities = ("image",) class GotOcr2MultiModalProjector(nn.Module): diff --git a/src/transformers/models/granite_speech/modeling_granite_speech.py b/src/transformers/models/granite_speech/modeling_granite_speech.py index 7aad1523def9..b1c124931d46 100644 --- a/src/transformers/models/granite_speech/modeling_granite_speech.py +++ b/src/transformers/models/granite_speech/modeling_granite_speech.py @@ -282,7 +282,7 @@ def forward(self, hidden_states: torch.Tensor): @auto_docstring class GraniteSpeechPreTrainedModel(PreTrainedModel): config: GraniteSpeechConfig - 
input_modalities = ["audio", "text"] + input_modalities = ("audio", "text") _supports_flash_attn = False # `blip_2_qformer` dependency does not allow for this _supports_sdpa = True diff --git a/src/transformers/models/grounding_dino/modeling_grounding_dino.py b/src/transformers/models/grounding_dino/modeling_grounding_dino.py index d1e8ea3e9fb8..8a0acf97ec5c 100644 --- a/src/transformers/models/grounding_dino/modeling_grounding_dino.py +++ b/src/transformers/models/grounding_dino/modeling_grounding_dino.py @@ -1368,7 +1368,7 @@ class GroundingDinoPreTrainedModel(PreTrainedModel): config: GroundingDinoConfig base_model_prefix = "model" main_input_name = "pixel_values" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") @torch.no_grad() def _init_weights(self, module): diff --git a/src/transformers/models/groupvit/modeling_groupvit.py b/src/transformers/models/groupvit/modeling_groupvit.py index 7e0877456e60..dfb43d94bd49 100644 --- a/src/transformers/models/groupvit/modeling_groupvit.py +++ b/src/transformers/models/groupvit/modeling_groupvit.py @@ -746,7 +746,7 @@ def forward( class GroupViTPreTrainedModel(PreTrainedModel): config: GroupViTConfig base_model_prefix = "groupvit" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True @torch.no_grad() @@ -1022,7 +1022,7 @@ def forward( class GroupViTTextModel(GroupViTPreTrainedModel): config: GroupViTTextConfig - input_modalities = "text" + input_modalities = ("text",) def __init__(self, config: GroupViTTextConfig): super().__init__(config) @@ -1127,7 +1127,7 @@ def forward( class GroupViTVisionModel(GroupViTPreTrainedModel): config: GroupViTVisionConfig main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) def __init__(self, config: GroupViTVisionConfig): super().__init__(config) diff --git a/src/transformers/models/hgnet_v2/modeling_hgnet_v2.py b/src/transformers/models/hgnet_v2/modeling_hgnet_v2.py index 2b412c8fa1dd..9b93243a5077 100644 --- a/src/transformers/models/hgnet_v2/modeling_hgnet_v2.py +++ b/src/transformers/models/hgnet_v2/modeling_hgnet_v2.py @@ -42,7 +42,7 @@ class HGNetV2PreTrainedModel(PreTrainedModel): config: HGNetV2Config base_model_prefix = "hgnetv2" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) _no_split_modules = ["HGNetV2BasicLayer"] diff --git a/src/transformers/models/hgnet_v2/modular_hgnet_v2.py b/src/transformers/models/hgnet_v2/modular_hgnet_v2.py index d07e3008da03..f152072635c8 100644 --- a/src/transformers/models/hgnet_v2/modular_hgnet_v2.py +++ b/src/transformers/models/hgnet_v2/modular_hgnet_v2.py @@ -167,7 +167,7 @@ class HGNetV2PreTrainedModel(PreTrainedModel): config: HGNetV2Config base_model_prefix = "hgnetv2" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) _no_split_modules = ["HGNetV2BasicLayer"] diff --git a/src/transformers/models/hiera/modeling_hiera.py b/src/transformers/models/hiera/modeling_hiera.py index 54a6931a6431..c0318128f17c 100644 --- a/src/transformers/models/hiera/modeling_hiera.py +++ b/src/transformers/models/hiera/modeling_hiera.py @@ -774,7 +774,7 @@ class HieraPreTrainedModel(PreTrainedModel): config: HieraConfig base_model_prefix = "hiera" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True @torch.no_grad() diff --git a/src/transformers/models/idefics/modeling_idefics.py 
b/src/transformers/models/idefics/modeling_idefics.py index fee52071b4c0..38e87cf17f52 100644 --- a/src/transformers/models/idefics/modeling_idefics.py +++ b/src/transformers/models/idefics/modeling_idefics.py @@ -818,7 +818,7 @@ def forward( class IdeficsPreTrainedModel(PreTrainedModel): config: IdeficsConfig base_model_prefix = "model" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _no_split_modules = ["IdeficsDecoderLayer", "IdeficsGatedCrossAttentionLayer"] _supports_sdpa = True diff --git a/src/transformers/models/idefics2/modeling_idefics2.py b/src/transformers/models/idefics2/modeling_idefics2.py index dcfa6d9cd23b..214dcef45081 100644 --- a/src/transformers/models/idefics2/modeling_idefics2.py +++ b/src/transformers/models/idefics2/modeling_idefics2.py @@ -408,7 +408,7 @@ def forward( class Idefics2PreTrainedModel(PreTrainedModel): config: Idefics2Config base_model_prefix = "model" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _no_split_modules = ["Idefics2VisionAttention", "Idefics2MLP", "Idefics2PerceiverLayer", "Idefics2DecoderLayer"] _skip_keys_device_placement = "past_key_values" @@ -434,7 +434,7 @@ def _init_weights(self, module): ) class Idefics2VisionTransformer(Idefics2PreTrainedModel): config: Idefics2VisionConfig - input_modalities = "image" + input_modalities = ("image",) _supports_sdpa = True _supports_flash_attn = True _supports_flex_attn = True @@ -691,7 +691,7 @@ def forward( ) class Idefics2PerceiverResampler(Idefics2PreTrainedModel): config: Idefics2PerceiverConfig - input_modalities = "image" + input_modalities = ("image",) _supports_sdpa = True _supports_flash_attention_2 = True _supports_flex_attn = True diff --git a/src/transformers/models/idefics3/modeling_idefics3.py b/src/transformers/models/idefics3/modeling_idefics3.py index 38d6f29c3f04..996f5573115c 100644 --- a/src/transformers/models/idefics3/modeling_idefics3.py +++ b/src/transformers/models/idefics3/modeling_idefics3.py @@ -423,7 +423,7 @@ def forward(self, image_hidden_states): class Idefics3PreTrainedModel(PreTrainedModel): config: Idefics3Config base_model_prefix = "model" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _no_split_modules = ["Idefics3VisionAttention", "Idefics3DecoderLayer"] _skip_keys_device_placement = "past_key_values" @@ -440,7 +440,7 @@ class Idefics3PreTrainedModel(PreTrainedModel): ) class Idefics3VisionTransformer(Idefics3PreTrainedModel): config: Idefics3VisionConfig - input_modalities = "image" + input_modalities = ("image",) _supports_sdpa = True _supports_flash_attn = True _supports_flex_attn = True diff --git a/src/transformers/models/ijepa/modeling_ijepa.py b/src/transformers/models/ijepa/modeling_ijepa.py index c7f2fd39f824..709268e940a2 100644 --- a/src/transformers/models/ijepa/modeling_ijepa.py +++ b/src/transformers/models/ijepa/modeling_ijepa.py @@ -313,7 +313,7 @@ class IJepaPreTrainedModel(PreTrainedModel): config: IJepaConfig base_model_prefix = "ijepa" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True _no_split_modules = ["IJepaEmbeddings", "IJepaLayer"] _supports_sdpa = True diff --git a/src/transformers/models/imagegpt/modeling_imagegpt.py b/src/transformers/models/imagegpt/modeling_imagegpt.py index c5effca80166..5bb8e9d6fc30 100755 --- 
a/src/transformers/models/imagegpt/modeling_imagegpt.py +++ b/src/transformers/models/imagegpt/modeling_imagegpt.py @@ -363,7 +363,7 @@ class ImageGPTPreTrainedModel(PreTrainedModel): config: ImageGPTConfig base_model_prefix = "transformer" main_input_name = "input_ids" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True _no_split_modules = ["ImageGPTBlock"] diff --git a/src/transformers/models/informer/modeling_informer.py b/src/transformers/models/informer/modeling_informer.py index 7e3bc1d577bf..9849f333051f 100644 --- a/src/transformers/models/informer/modeling_informer.py +++ b/src/transformers/models/informer/modeling_informer.py @@ -248,7 +248,7 @@ class InformerPreTrainedModel(PreTrainedModel): config: InformerConfig base_model_prefix = "model" main_input_name = "past_values" - input_modalities = "time" + input_modalities = ("time",) supports_gradient_checkpointing = True @torch.no_grad() diff --git a/src/transformers/models/informer/modular_informer.py b/src/transformers/models/informer/modular_informer.py index 7c5a4e85f392..8df8afbaaa00 100644 --- a/src/transformers/models/informer/modular_informer.py +++ b/src/transformers/models/informer/modular_informer.py @@ -84,7 +84,7 @@ class InformerPreTrainedModel(PreTrainedModel): config: InformerConfig base_model_prefix = "model" main_input_name = "past_values" - input_modalities = "time" + input_modalities = ("time",) supports_gradient_checkpointing = True @torch.no_grad() diff --git a/src/transformers/models/instructblip/modeling_instructblip.py b/src/transformers/models/instructblip/modeling_instructblip.py index 5e4a2a7e864b..32f5d3cf968f 100644 --- a/src/transformers/models/instructblip/modeling_instructblip.py +++ b/src/transformers/models/instructblip/modeling_instructblip.py @@ -309,7 +309,7 @@ def forward( class InstructBlipPreTrainedModel(PreTrainedModel): config: InstructBlipConfig base_model_prefix = "blip" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _supports_attention_backend = True _supports_flash_attn = True @@ -373,7 +373,7 @@ def forward( # Copied from transformers.models.blip.modeling_blip.BlipVisionModel with Blip->InstructBlip, BLIP->INSTRUCTBLIP class InstructBlipVisionModel(InstructBlipPreTrainedModel): main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) config: InstructBlipVisionConfig _can_record_outputs = { "hidden_states": InstructBlipEncoderLayer, diff --git a/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py b/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py index 2268ba28893b..2c49be5599d2 100644 --- a/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py +++ b/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py @@ -132,7 +132,7 @@ def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: boo class InstructBlipVideoPreTrainedModel(PreTrainedModel): config: InstructBlipVideoConfig base_model_prefix = "blip" - input_modalities = ["video", "text"] + input_modalities = ("video", "text") supports_gradient_checkpointing = True _supports_attention_backend = True _supports_flash_attn = True diff --git a/src/transformers/models/instructblipvideo/modular_instructblipvideo.py b/src/transformers/models/instructblipvideo/modular_instructblipvideo.py index fe265f4e8aab..591cff7e692b 100644 --- 
a/src/transformers/models/instructblipvideo/modular_instructblipvideo.py +++ b/src/transformers/models/instructblipvideo/modular_instructblipvideo.py @@ -160,7 +160,7 @@ def __init__( class InstructBlipVideoPreTrainedModel(InstructBlipPreTrainedModel): - input_modalities = ["video", "text"] + input_modalities = ("video", "text") class InstructBlipVideoVisionModel(InstructBlipVisionModel): diff --git a/src/transformers/models/internvl/modeling_internvl.py b/src/transformers/models/internvl/modeling_internvl.py index 51691e0ba4ab..e2ed5fa9b375 100644 --- a/src/transformers/models/internvl/modeling_internvl.py +++ b/src/transformers/models/internvl/modeling_internvl.py @@ -399,7 +399,7 @@ class InternVLVisionPreTrainedModel(PreTrainedModel): config: InternVLVisionConfig base_model_prefix = "internvl_vision" main_input_name = "pixel_values" - input_modalities = ["image", "video"] + input_modalities = ("image", "video") supports_gradient_checkpointing = True _no_split_modules = ["InternVLVisionLayer"] _supports_sdpa = True @@ -474,7 +474,7 @@ def forward( class InternVLPreTrainedModel(PreTrainedModel): config: InternVLConfig base_model_prefix = "model" - input_modalities = ["image", "text", "video"] + input_modalities = ("image", "text", "video") supports_gradient_checkpointing = True _skip_keys_device_placement = "past_key_values" diff --git a/src/transformers/models/internvl/modular_internvl.py b/src/transformers/models/internvl/modular_internvl.py index f276a49ec5ce..d7a75e5d1677 100644 --- a/src/transformers/models/internvl/modular_internvl.py +++ b/src/transformers/models/internvl/modular_internvl.py @@ -356,7 +356,7 @@ class InternVLVisionPreTrainedModel(PreTrainedModel): config: InternVLVisionConfig base_model_prefix = "internvl_vision" main_input_name = "pixel_values" - input_modalities = ["image", "video"] + input_modalities = ("image", "video") supports_gradient_checkpointing = True _no_split_modules = ["InternVLVisionLayer"] _supports_sdpa = True @@ -428,7 +428,7 @@ def forward( class InternVLPreTrainedModel(LlavaPreTrainedModel): - input_modalities = ["image", "text", "video"] + input_modalities = ("image", "text", "video") INTERNVL_INPUTS_DOCSTRING = None diff --git a/src/transformers/models/janus/modeling_janus.py b/src/transformers/models/janus/modeling_janus.py index a5a930453567..8bf106074dda 100644 --- a/src/transformers/models/janus/modeling_janus.py +++ b/src/transformers/models/janus/modeling_janus.py @@ -49,7 +49,7 @@ class JanusPreTrainedModel(PreTrainedModel): config: JanusConfig base_model_prefix = "model" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _no_split_modules = ["LlamaDecoderLayer", "JanusVisionEncoderLayer"] _skip_keys_device_placement = ["past_key_values", "causal_mask"] @@ -545,7 +545,7 @@ def forward( @auto_docstring class JanusVisionModel(JanusPreTrainedModel): main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) config: JanusVisionConfig _can_record_outputs = { "hidden_states": JanusEncoderLayer, @@ -1164,7 +1164,7 @@ def forward( class JanusForConditionalGeneration(JanusPreTrainedModel, GenerationMixin): _tied_weights_keys = {"lm_head.weight": "model.language_model.embed_tokens.weight"} - output_modalities = ["image", "text"] + output_modalities = ("image", "text") _can_compile_fullgraph = True def __init__(self, config: JanusConfig): diff --git a/src/transformers/models/janus/modular_janus.py b/src/transformers/models/janus/modular_janus.py 
index 4791a8b14951..4aa52f274687 100644 --- a/src/transformers/models/janus/modular_janus.py +++ b/src/transformers/models/janus/modular_janus.py @@ -382,7 +382,7 @@ def __init__( class JanusPreTrainedModel(PreTrainedModel): config: JanusConfig base_model_prefix = "model" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _no_split_modules = ["LlamaDecoderLayer", "JanusVisionEncoderLayer"] _skip_keys_device_placement = ["past_key_values", "causal_mask"] @@ -980,7 +980,7 @@ def forward( class JanusForConditionalGeneration(JanusPreTrainedModel, GenerationMixin): _tied_weights_keys = {"lm_head.weight": "model.language_model.embed_tokens.weight"} - output_modalities = ["image", "text"] + output_modalities = ("image", "text") _can_compile_fullgraph = True def __init__(self, config: JanusConfig): diff --git a/src/transformers/models/janus/processing_janus.py b/src/transformers/models/janus/processing_janus.py index 354570314a78..26752f699e93 100644 --- a/src/transformers/models/janus/processing_janus.py +++ b/src/transformers/models/janus/processing_janus.py @@ -152,5 +152,41 @@ def postprocess(self, images: ImageInput, **kwargs): """ return self.image_processor.postprocess(images, **kwargs) + def post_process_multimodal_output( + self, generated_outputs, skip_special_tokens=True, generation_mode=None, **kwargs + ): + """ + Post-process the output of a multimodal model and return the output in the requested modality. + If the model cannot generate the requested modality, an error is raised. + + Args: + generated_outputs (`torch.Tensor` or `np.ndarray`): + The output of the model's `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)` + or `(sequence_length,)`. + skip_special_tokens (`bool`, *optional*, defaults to `True`): + Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method. + generation_mode (`str`, *optional*): + Generation mode indicating which modality to output; can be one of `["text", "image", "audio"]`. + **kwargs: + Additional arguments to be passed to the tokenizer's `batch_decode` method. + + Returns: + `list[Union[str, PIL.Image.Image]]`: The decoded text or generated image. + """ + if generation_mode is None or generation_mode == "text": + return self.post_process_image_text_to_text( + generated_outputs, skip_special_tokens=skip_special_tokens, **kwargs + ) + + elif generation_mode == "image": + generated_outputs = list(generated_outputs.float()) + images = self.postprocess(generated_outputs, return_tensors="PIL.Image.Image") + return images["pixel_values"] + + else: + raise ValueError( + f"{self.__class__.__name__} got an unexpected generation_mode={generation_mode}.
Supported options are only `text` and `image`." + ) + __all__ = ["JanusProcessor"] diff --git a/src/transformers/models/kosmos2/modeling_kosmos2.py b/src/transformers/models/kosmos2/modeling_kosmos2.py index 3d2c64865201..e5c62aa7e1c1 100644 --- a/src/transformers/models/kosmos2/modeling_kosmos2.py +++ b/src/transformers/models/kosmos2/modeling_kosmos2.py @@ -1114,7 +1114,7 @@ def forward( @auto_docstring class Kosmos2PreTrainedModel(PreTrainedModel): config: Kosmos2Config - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _no_split_modules = ["Kosmos2VisionEncoderLayer", "Kosmos2TextBlock"] _supports_attention_backend = True @@ -1178,7 +1178,7 @@ def _init_weights(self, module: nn.Module): class Kosmos2VisionModel(Kosmos2PreTrainedModel): config: Kosmos2VisionConfig main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) # Copied from transformers.models.clip.modeling_clip.CLIPVisionModel.__init__ with CLIP_VISION->KOSMOS2_VISION,CLIP->Kosmos2,self.vision_model->self.model def __init__(self, config: Kosmos2VisionConfig): @@ -1211,7 +1211,7 @@ def forward( class Kosmos2TextModel(Kosmos2PreTrainedModel): config: Kosmos2TextConfig - input_modalities = "text" + input_modalities = ("text",) def __init__(self, config: Kosmos2TextConfig): super().__init__(config) diff --git a/src/transformers/models/kosmos2_5/modeling_kosmos2_5.py b/src/transformers/models/kosmos2_5/modeling_kosmos2_5.py index a9a456d68d15..bad3cfa30530 100644 --- a/src/transformers/models/kosmos2_5/modeling_kosmos2_5.py +++ b/src/transformers/models/kosmos2_5/modeling_kosmos2_5.py @@ -1220,7 +1220,7 @@ class Kosmos2_5PreTrainedModel(PreTrainedModel): """ config_class = Kosmos2_5Config - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _no_split_modules = ["Kosmos2_5VisionLayer", "Kosmos2_5TextBlock"] _supports_flash_attn_2 = True @@ -1257,7 +1257,7 @@ def _init_weights(self, module): class Kosmos2_5VisionModel(Kosmos2_5PreTrainedModel): config_class = Kosmos2_5VisionConfig - input_modalities = "text" + input_modalities = ("image",) # Copied from transformers.models.pix2struct.modeling_pix2struct.Pix2StructVisionModel.__init__ with Pix2Struct->Kosmos2_5 def __init__(self, config: Kosmos2_5VisionConfig): @@ -1319,7 +1319,7 @@ def forward( # Adapted from transformers.models.kosmos2.modeling_kosmos2.Kosmos2TextModel with KOSMOS2->KOSMOS2_5 class Kosmos2_5TextModel(Kosmos2_5PreTrainedModel): config_class = Kosmos2_5TextConfig - input_modalities = "text" + input_modalities = ("text",) def __init__(self, config: Kosmos2_5TextConfig): super().__init__(config) @@ -1505,7 +1505,7 @@ def forward( ) class Kosmos2_5TextForCausalLM(Kosmos2_5PreTrainedModel): config_class = Kosmos2_5TextConfig - input_modalities = "text" + input_modalities = ("text",) _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"} def __init__(self, config: Kosmos2_5TextConfig): diff --git a/src/transformers/models/kyutai_speech_to_text/modeling_kyutai_speech_to_text.py b/src/transformers/models/kyutai_speech_to_text/modeling_kyutai_speech_to_text.py index 1e95b92d528d..c0170f45ac43 100644 --- a/src/transformers/models/kyutai_speech_to_text/modeling_kyutai_speech_to_text.py +++ b/src/transformers/models/kyutai_speech_to_text/modeling_kyutai_speech_to_text.py @@ -98,7 +98,7 @@ def forward(self, x, layer_idx=None): class KyutaiSpeechToTextPreTrainedModel(PreTrainedModel): config:
KyutaiSpeechToTextConfig base_model_prefix = "model" - input_modalities = ["audio", "text"] + input_modalities = ("audio", "text") supports_gradient_checkpointing = True _no_split_modules = ["KyutaiSpeechToTextDecoderLayer", "MimiTransformerLayer"] _supports_flash_attn = True @@ -1073,7 +1073,7 @@ class KyutaiSpeechToTextForConditionalGeneration(KyutaiSpeechToTextPreTrainedMod _tp_plan = {"lm_head": "colwise_rep"} _pp_plan = {"lm_head": (["hidden_states"], ["logits"])} _keep_in_fp32_modules_strict = ["codec_model"] - output_modalities = ["audio", "text"] + output_modalities = ("audio", "text") def __init__(self, config): super().__init__(config) diff --git a/src/transformers/models/kyutai_speech_to_text/modular_kyutai_speech_to_text.py b/src/transformers/models/kyutai_speech_to_text/modular_kyutai_speech_to_text.py index 5b2b40900102..790482c33727 100644 --- a/src/transformers/models/kyutai_speech_to_text/modular_kyutai_speech_to_text.py +++ b/src/transformers/models/kyutai_speech_to_text/modular_kyutai_speech_to_text.py @@ -252,7 +252,7 @@ def __init__(self, config): class KyutaiSpeechToTextForConditionalGeneration(LlamaForCausalLM, GenerationMixin): _keep_in_fp32_modules_strict = ["codec_model"] - output_modalities = ["audio", "text"] + output_modalities = ("audio", "text") def __init__(self, config): super().__init__(config) diff --git a/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py b/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py index c98b84ddd39c..d88c109f6161 100755 --- a/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py +++ b/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py @@ -457,7 +457,7 @@ def forward( class LayoutLMv2PreTrainedModel(PreTrainedModel): config: LayoutLMv2Config base_model_prefix = "layoutlmv2" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") @torch.no_grad() def _init_weights(self, module): diff --git a/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py b/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py index bb020f1614ab..681406ecd615 100644 --- a/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py +++ b/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py @@ -202,7 +202,7 @@ def forward( class LayoutLMv3PreTrainedModel(PreTrainedModel): config: LayoutLMv3Config base_model_prefix = "layoutlmv3" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") @torch.no_grad() def _init_weights(self, module): diff --git a/src/transformers/models/levit/modeling_levit.py b/src/transformers/models/levit/modeling_levit.py index 250070b44b8f..2919728b7355 100644 --- a/src/transformers/models/levit/modeling_levit.py +++ b/src/transformers/models/levit/modeling_levit.py @@ -469,7 +469,7 @@ class LevitPreTrainedModel(PreTrainedModel): config: LevitConfig base_model_prefix = "levit" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) _no_split_modules = ["LevitResidualLayer"] diff --git a/src/transformers/models/lfm2_vl/modeling_lfm2_vl.py b/src/transformers/models/lfm2_vl/modeling_lfm2_vl.py index ce46c62baeab..4ef98a251bfa 100755 --- a/src/transformers/models/lfm2_vl/modeling_lfm2_vl.py +++ b/src/transformers/models/lfm2_vl/modeling_lfm2_vl.py @@ -77,7 +77,7 @@ def pixel_unshuffle(self, hidden_states: torch.Tensor): class Lfm2VlPreTrainedModel(PreTrainedModel): config: Lfm2VlConfig base_model_prefix = "model" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = 
True _skip_keys_device_placement = "past_key_values" diff --git a/src/transformers/models/lightglue/modeling_lightglue.py b/src/transformers/models/lightglue/modeling_lightglue.py index 0d5044c5a40a..75504bc4dd57 100644 --- a/src/transformers/models/lightglue/modeling_lightglue.py +++ b/src/transformers/models/lightglue/modeling_lightglue.py @@ -423,7 +423,7 @@ class LightGluePreTrainedModel(PreTrainedModel): config: LightGlueConfig base_model_prefix = "lightglue" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = False _supports_flash_attn = True _supports_sdpa = True diff --git a/src/transformers/models/lightglue/modular_lightglue.py b/src/transformers/models/lightglue/modular_lightglue.py index f61e86a67e0d..7db4adc110cd 100644 --- a/src/transformers/models/lightglue/modular_lightglue.py +++ b/src/transformers/models/lightglue/modular_lightglue.py @@ -481,7 +481,7 @@ class LightGluePreTrainedModel(PreTrainedModel): config: LightGlueConfig base_model_prefix = "lightglue" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = False _supports_flash_attn = True _supports_sdpa = True diff --git a/src/transformers/models/llama4/modeling_llama4.py b/src/transformers/models/llama4/modeling_llama4.py index 231e04c8eba2..98082e7a45c6 100644 --- a/src/transformers/models/llama4/modeling_llama4.py +++ b/src/transformers/models/llama4/modeling_llama4.py @@ -463,7 +463,7 @@ def forward( @auto_docstring class Llama4PreTrainedModel(PreTrainedModel): config: Llama4Config - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _skip_keys_device_placement = ["past_key_values"] _supports_flash_attn = False @@ -493,7 +493,7 @@ def _init_weights(self, module): class Llama4TextModel(Llama4PreTrainedModel): _no_split_modules = ["Llama4TextDecoderLayer"] base_model_prefix = "model" - input_modalities = "text" + input_modalities = ("text",) config: Llama4TextConfig _can_record_outputs = { "attentions": Llama4TextAttention, @@ -1030,7 +1030,7 @@ def forward(self, hidden_states): class Llama4VisionModel(Llama4PreTrainedModel): base_model_prefix = "vision_model" - input_modalities = "image" + input_modalities = ("image",) _no_split_modules = ["Llama4VisionEncoderLayer"] config: Llama4VisionConfig diff --git a/src/transformers/models/llava/modeling_llava.py b/src/transformers/models/llava/modeling_llava.py index 2947295a4775..df80dd6716d2 100644 --- a/src/transformers/models/llava/modeling_llava.py +++ b/src/transformers/models/llava/modeling_llava.py @@ -111,7 +111,7 @@ def forward(self, image_features): class LlavaPreTrainedModel(PreTrainedModel): config: LlavaConfig base_model_prefix = "model" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _skip_keys_device_placement = "past_key_values" diff --git a/src/transformers/models/llava_next/modeling_llava_next.py b/src/transformers/models/llava_next/modeling_llava_next.py index fffd56a941c5..cf0ebf1ce869 100644 --- a/src/transformers/models/llava_next/modeling_llava_next.py +++ b/src/transformers/models/llava_next/modeling_llava_next.py @@ -224,7 +224,7 @@ def forward(self, image_features): class LlavaNextPreTrainedModel(PreTrainedModel): config: LlavaNextConfig base_model_prefix = "model" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") 
supports_gradient_checkpointing = True _no_split_modules = ["LlamaDecoderLayer"] _skip_keys_device_placement = "past_key_values" diff --git a/src/transformers/models/llava_next_video/modeling_llava_next_video.py b/src/transformers/models/llava_next_video/modeling_llava_next_video.py index 6e79d602eb94..eed31b38096d 100644 --- a/src/transformers/models/llava_next_video/modeling_llava_next_video.py +++ b/src/transformers/models/llava_next_video/modeling_llava_next_video.py @@ -165,7 +165,7 @@ def forward(self, image_features): class LlavaNextVideoPreTrainedModel(PreTrainedModel): config: LlavaNextVideoConfig base_model_prefix = "model" - input_modalities = ["image", "video", "text"] + input_modalities = ("image", "video", "text") supports_gradient_checkpointing = True _no_split_modules = ["LlamaDecoderLayer"] _skip_keys_device_placement = "past_key_values" diff --git a/src/transformers/models/llava_next_video/modular_llava_next_video.py b/src/transformers/models/llava_next_video/modular_llava_next_video.py index 92a3f51f8a71..4fec99df3b30 100644 --- a/src/transformers/models/llava_next_video/modular_llava_next_video.py +++ b/src/transformers/models/llava_next_video/modular_llava_next_video.py @@ -260,7 +260,7 @@ class LlavaNextVideoMultiModalProjector(LlavaNextMultiModalProjector): class LlavaNextVideoPreTrainedModel(LlavaNextPreTrainedModel): - input_modalities = ["image", "video", "text"] + input_modalities = ("image", "video", "text") class LlavaNextVideoModel(LlavaNextModel): diff --git a/src/transformers/models/llava_onevision/modeling_llava_onevision.py b/src/transformers/models/llava_onevision/modeling_llava_onevision.py index 70d17ff3e6d4..260177a63796 100644 --- a/src/transformers/models/llava_onevision/modeling_llava_onevision.py +++ b/src/transformers/models/llava_onevision/modeling_llava_onevision.py @@ -106,7 +106,7 @@ class LlavaOnevisionCausalLMOutputWithPast(ModelOutput): class LlavaOnevisionPreTrainedModel(PreTrainedModel): config: LlavaOnevisionConfig base_model_prefix = "model" - input_modalities = ["image", "video", "text"] + input_modalities = ("image", "video", "text") supports_gradient_checkpointing = True _no_split_modules = ["LlamaDecoderLayer"] _skip_keys_device_placement = "past_key_values" diff --git a/src/transformers/models/lxmert/modeling_lxmert.py b/src/transformers/models/lxmert/modeling_lxmert.py index a88dc32f8ff0..8e022806bf3b 100644 --- a/src/transformers/models/lxmert/modeling_lxmert.py +++ b/src/transformers/models/lxmert/modeling_lxmert.py @@ -672,7 +672,7 @@ def forward(self, sequence_output, pooled_output): class LxmertPreTrainedModel(PreTrainedModel): config: LxmertConfig base_model_prefix = "lxmert" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") @torch.no_grad() def _init_weights(self, module): diff --git a/src/transformers/models/mask2former/modeling_mask2former.py b/src/transformers/models/mask2former/modeling_mask2former.py index 5ac2b4a5f2ea..24c2652dd5c8 100644 --- a/src/transformers/models/mask2former/modeling_mask2former.py +++ b/src/transformers/models/mask2former/modeling_mask2former.py @@ -2101,7 +2101,7 @@ class Mask2FormerPreTrainedModel(PreTrainedModel): config: Mask2FormerConfig base_model_prefix = "model" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) @torch.no_grad() def _init_weights(self, module: nn.Module): diff --git a/src/transformers/models/maskformer/modeling_maskformer.py b/src/transformers/models/maskformer/modeling_maskformer.py index 
22a29fe96b70..799ffe4ca51b 100644 --- a/src/transformers/models/maskformer/modeling_maskformer.py +++ b/src/transformers/models/maskformer/modeling_maskformer.py @@ -1435,7 +1435,7 @@ class MaskFormerPreTrainedModel(PreTrainedModel): config: MaskFormerConfig base_model_prefix = "model" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) @torch.no_grad() def _init_weights(self, module: nn.Module): diff --git a/src/transformers/models/maskformer/modeling_maskformer_swin.py b/src/transformers/models/maskformer/modeling_maskformer_swin.py index 05a58d4527df..19d91b8d7e7a 100644 --- a/src/transformers/models/maskformer/modeling_maskformer_swin.py +++ b/src/transformers/models/maskformer/modeling_maskformer_swin.py @@ -698,7 +698,7 @@ class MaskFormerSwinPreTrainedModel(PreTrainedModel): config: MaskFormerSwinConfig base_model_prefix = "model" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True _no_split_modules = ["MaskFormerSwinStage"] diff --git a/src/transformers/models/metaclip_2/modeling_metaclip_2.py b/src/transformers/models/metaclip_2/modeling_metaclip_2.py index 2407a4147fb1..b8dc9663364a 100644 --- a/src/transformers/models/metaclip_2/modeling_metaclip_2.py +++ b/src/transformers/models/metaclip_2/modeling_metaclip_2.py @@ -288,7 +288,7 @@ def forward( class MetaClip2PreTrainedModel(PreTrainedModel): config: MetaClip2Config base_model_prefix = "metaclip_2" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _supports_sdpa = True _supports_flash_attn = True @@ -500,7 +500,7 @@ class MetaClip2TextModel(MetaClip2PreTrainedModel): ```""" config: MetaClip2TextConfig - input_modalities = "text" + input_modalities = ("text",) _no_split_modules = ["MetaClip2TextEmbeddings", "MetaClip2EncoderLayer"] @@ -600,7 +600,7 @@ class MetaClip2TextModelWithProjection(MetaClip2PreTrainedModel): ```""" config: MetaClip2TextConfig - input_modalities = "text" + input_modalities = ("text",) _no_split_modules = ["MetaClip2TextEmbeddings", "MetaClip2EncoderLayer"] @@ -1032,7 +1032,7 @@ class MetaClip2VisionModel(MetaClip2PreTrainedModel): config: MetaClip2VisionConfig main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) _no_split_modules = ["MetaClip2EncoderLayer"] def __init__(self, config: MetaClip2VisionConfig): @@ -1137,7 +1137,7 @@ class MetaClip2VisionModelWithProjection(MetaClip2PreTrainedModel): config: MetaClip2VisionConfig main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) def __init__(self, config: MetaClip2VisionConfig): super().__init__(config) @@ -1203,7 +1203,7 @@ def forward( ) class MetaClip2ForImageClassification(MetaClip2PreTrainedModel): main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) def __init__(self, config: MetaClip2Config) -> None: super().__init__(config) diff --git a/src/transformers/models/mistral3/modeling_mistral3.py b/src/transformers/models/mistral3/modeling_mistral3.py index 0f6e2a1d3efc..a944ff4f9055 100644 --- a/src/transformers/models/mistral3/modeling_mistral3.py +++ b/src/transformers/models/mistral3/modeling_mistral3.py @@ -177,7 +177,7 @@ class Mistral3ModelOutputWithPast(BaseModelOutputWithPast): class Mistral3PreTrainedModel(PreTrainedModel): config: Mistral3Config base_model_prefix = "model" - input_modalities = ["image", "text"] + input_modalities = 
("image", "text") supports_gradient_checkpointing = True _skip_keys_device_placement = "past_key_values" diff --git a/src/transformers/models/mlcd/modeling_mlcd.py b/src/transformers/models/mlcd/modeling_mlcd.py index 72e26db9bd1c..3c95edfd467c 100644 --- a/src/transformers/models/mlcd/modeling_mlcd.py +++ b/src/transformers/models/mlcd/modeling_mlcd.py @@ -506,7 +506,7 @@ def forward( class MLCDVisionModel(MLCDPreTrainedModel): config: MLCDVisionConfig main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) _no_split_modules = ["MLCDEncoderLayer"] def __init__(self, config: MLCDVisionConfig): diff --git a/src/transformers/models/mllama/modeling_mllama.py b/src/transformers/models/mllama/modeling_mllama.py index a2d303782bdd..83efa15f2ab4 100644 --- a/src/transformers/models/mllama/modeling_mllama.py +++ b/src/transformers/models/mllama/modeling_mllama.py @@ -794,7 +794,7 @@ def forward(self, x, position_ids): class MllamaPreTrainedModel(PreTrainedModel): config: MllamaConfig base_model_prefix = "model" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _no_split_modules = [ "MllamaVisionEncoderLayer", @@ -982,7 +982,7 @@ def _prepare_4d_causal_attention_mask_with_cache_position( class MllamaVisionModel(MllamaPreTrainedModel): config: MllamaVisionConfig base_model_prefix = "vision_model" - input_modalities = "image" + input_modalities = ("image",) def __init__(self, config: MllamaVisionConfig): super().__init__(config) @@ -1180,7 +1180,7 @@ def forward( class MllamaTextModel(MllamaPreTrainedModel): config: MllamaTextConfig base_model_prefix = "language_model.model" - input_modalities = "text" + input_modalities = ("text",) def __init__(self, config: MllamaTextConfig): super().__init__(config) diff --git a/src/transformers/models/mm_grounding_dino/modeling_mm_grounding_dino.py b/src/transformers/models/mm_grounding_dino/modeling_mm_grounding_dino.py index ec65fd185373..8f0f326426e0 100644 --- a/src/transformers/models/mm_grounding_dino/modeling_mm_grounding_dino.py +++ b/src/transformers/models/mm_grounding_dino/modeling_mm_grounding_dino.py @@ -505,7 +505,7 @@ class MMGroundingDinoPreTrainedModel(PreTrainedModel): config: MMGroundingDinoConfig base_model_prefix = "model" main_input_name = "pixel_values" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") @torch.no_grad() def _init_weights(self, module): diff --git a/src/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py b/src/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py index de8fefa142c3..e8c9dbdee15d 100755 --- a/src/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py +++ b/src/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py @@ -128,7 +128,7 @@ class MobileNetV1PreTrainedModel(PreTrainedModel): config: MobileNetV1Config base_model_prefix = "mobilenet_v1" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = False _no_split_modules = [] diff --git a/src/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py b/src/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py index 5a107d3fcddc..4f22a17d1b90 100755 --- a/src/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py +++ b/src/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py @@ -254,7 +254,7 @@ class MobileNetV2PreTrainedModel(PreTrainedModel): config: MobileNetV2Config base_model_prefix = "mobilenet_v2" main_input_name = 
"pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = False _no_split_modules = [] diff --git a/src/transformers/models/mobilevit/modeling_mobilevit.py b/src/transformers/models/mobilevit/modeling_mobilevit.py index a41155c1bd71..3dbe653c7670 100755 --- a/src/transformers/models/mobilevit/modeling_mobilevit.py +++ b/src/transformers/models/mobilevit/modeling_mobilevit.py @@ -604,7 +604,7 @@ class MobileViTPreTrainedModel(PreTrainedModel): config: MobileViTConfig base_model_prefix = "mobilevit" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True _no_split_modules = ["MobileViTLayer"] diff --git a/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py b/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py index 9eefb9eb77e9..80800a3bf6a7 100644 --- a/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py +++ b/src/transformers/models/mobilevitv2/modeling_mobilevitv2.py @@ -571,7 +571,7 @@ class MobileViTV2PreTrainedModel(PreTrainedModel): config: MobileViTV2Config base_model_prefix = "mobilevitv2" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True _no_split_modules = ["MobileViTV2Layer"] diff --git a/src/transformers/models/moshi/modeling_moshi.py b/src/transformers/models/moshi/modeling_moshi.py index 09a2bcbaf430..973e0b57225c 100644 --- a/src/transformers/models/moshi/modeling_moshi.py +++ b/src/transformers/models/moshi/modeling_moshi.py @@ -819,7 +819,7 @@ def forward( class MoshiPreTrainedModel(PreTrainedModel): config: MoshiConfig base_model_prefix = "model" - input_modalities = ["audio", "text"] + input_modalities = ("audio", "text") supports_gradient_checkpointing = True _no_split_modules = ["MoshiDecoderLayer", "MimiTransformerLayer"] _supports_flash_attn = True @@ -1464,7 +1464,7 @@ def _prepare_4d_causal_attention_mask_with_cache_position( """ ) class MoshiForCausalLM(MoshiPreTrainedModel, GenerationMixin): - input_modalities = "text" + input_modalities = ("text",) # Copied from transformers.models.gemma.modeling_gemma.GemmaForCausalLM.__init__ with Gemma->Moshi def __init__(self, config): @@ -1582,7 +1582,7 @@ def forward( ) class MoshiForConditionalGeneration(MoshiPreTrainedModel, GenerationMixin): config: MoshiConfig - output_modalities = ["audio", "text"] + output_modalities = ("audio", "text") main_input_name = "input_ids" supports_gradient_checkpointing = True _supports_flash_attn = True diff --git a/src/transformers/models/musicgen/modeling_musicgen.py b/src/transformers/models/musicgen/modeling_musicgen.py index 1bb423c63d8b..c053adab8eb5 100644 --- a/src/transformers/models/musicgen/modeling_musicgen.py +++ b/src/transformers/models/musicgen/modeling_musicgen.py @@ -788,7 +788,7 @@ def forward( """ ) class MusicgenForCausalLM(MusicgenPreTrainedModel, GenerationMixin): - output_modalities = "audio" + output_modalities = ("audio",) def __init__(self, config: MusicgenDecoderConfig): super().__init__(config) @@ -1284,7 +1284,7 @@ def generate( ) class MusicgenForConditionalGeneration(MusicgenPreTrainedModel, GenerationMixin): config: MusicgenConfig - output_modalities = "audio" + output_modalities = ("audio",) base_model_prefix = "encoder_decoder" main_input_name = "input_ids" supports_gradient_checkpointing = True diff --git a/src/transformers/models/musicgen_melody/modeling_musicgen_melody.py 
b/src/transformers/models/musicgen_melody/modeling_musicgen_melody.py index 279c984fe6b6..f4ed374428ad 100644 --- a/src/transformers/models/musicgen_melody/modeling_musicgen_melody.py +++ b/src/transformers/models/musicgen_melody/modeling_musicgen_melody.py @@ -742,7 +742,7 @@ def forward( ) # Copied from transformers.models.musicgen.modeling_musicgen.MusicgenForCausalLM with MUSICGEN->MUSICGEN_MELODY,Musicgen->MusicgenMelody,MusicGen->Musicgen Melody class MusicgenMelodyForCausalLM(MusicgenMelodyPreTrainedModel, GenerationMixin): - output_modalities = "audio" + output_modalities = ("audio",) def __init__(self, config: MusicgenMelodyDecoderConfig): super().__init__(config) @@ -1228,7 +1228,7 @@ def generate( class MusicgenMelodyForConditionalGeneration(PreTrainedModel, GenerationMixin): config: MusicgenMelodyConfig main_input_name = "input_ids" - output_modalities = "audio" + output_modalities = ("audio",) supports_gradient_checkpointing = True _supports_flash_attn = True _supports_sdpa = True diff --git a/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py b/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py index 35804ec9c9be..1c4cb425b9f2 100644 --- a/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py +++ b/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py @@ -986,7 +986,7 @@ class OmDetTurboPreTrainedModel(PreTrainedModel): config: OmDetTurboConfig base_model_prefix = "model" main_input_name = "pixel_values" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") @torch.no_grad() def _init_weights(self, module): diff --git a/src/transformers/models/oneformer/modeling_oneformer.py b/src/transformers/models/oneformer/modeling_oneformer.py index 86d5e8d670ce..cf04d775e4c8 100644 --- a/src/transformers/models/oneformer/modeling_oneformer.py +++ b/src/transformers/models/oneformer/modeling_oneformer.py @@ -2765,7 +2765,7 @@ class OneFormerPreTrainedModel(PreTrainedModel): config: OneFormerConfig base_model_prefix = "model" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) @torch.no_grad() def _init_weights(self, module: nn.Module): diff --git a/src/transformers/models/ovis2/modeling_ovis2.py b/src/transformers/models/ovis2/modeling_ovis2.py index d990e08190b6..c7480492d381 100644 --- a/src/transformers/models/ovis2/modeling_ovis2.py +++ b/src/transformers/models/ovis2/modeling_ovis2.py @@ -418,7 +418,7 @@ def forward(self, visual_tokens: torch.Tensor) -> torch.Tensor: class Ovis2PreTrainedModel(PreTrainedModel): config: Ovis2Config base_model_prefix = "model" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _no_split_modules = ["Ovis2VisionAttention"] _skip_keys_device_placement = "past_key_values" diff --git a/src/transformers/models/ovis2/modular_ovis2.py b/src/transformers/models/ovis2/modular_ovis2.py index 0acf99daafe6..6faf887eb333 100644 --- a/src/transformers/models/ovis2/modular_ovis2.py +++ b/src/transformers/models/ovis2/modular_ovis2.py @@ -147,7 +147,7 @@ def forward(self, visual_tokens: torch.Tensor) -> torch.Tensor: class Ovis2PreTrainedModel(PreTrainedModel): config: Ovis2Config base_model_prefix = "model" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _no_split_modules = ["Ovis2VisionAttention"] _skip_keys_device_placement = "past_key_values" diff --git a/src/transformers/models/owlv2/modeling_owlv2.py 
b/src/transformers/models/owlv2/modeling_owlv2.py index 16cfd68e49bb..5426d37b73c3 100644 --- a/src/transformers/models/owlv2/modeling_owlv2.py +++ b/src/transformers/models/owlv2/modeling_owlv2.py @@ -564,7 +564,7 @@ def forward( class Owlv2PreTrainedModel(PreTrainedModel): config: Owlv2Config base_model_prefix = "owlv2" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _no_split_modules = ["Owlv2EncoderLayer"] @@ -771,7 +771,7 @@ def forward( # Copied from transformers.models.owlvit.modeling_owlvit.OwlViTTextModel with google/owlvit-base-patch32->google/owlv2-base-patch16, OWLVIT->OWLV2,OwlViT->Owlv2 class Owlv2TextModel(Owlv2PreTrainedModel): config: Owlv2TextConfig - input_modalities = "text" + input_modalities = ("text",) def __init__(self, config: Owlv2TextConfig): super().__init__(config) @@ -884,7 +884,7 @@ def forward( class Owlv2VisionModel(Owlv2PreTrainedModel): config: Owlv2VisionConfig main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) def __init__(self, config: Owlv2VisionConfig): super().__init__(config) diff --git a/src/transformers/models/owlvit/modeling_owlvit.py b/src/transformers/models/owlvit/modeling_owlvit.py index bd8b23ca38eb..a4619e6b11f1 100644 --- a/src/transformers/models/owlvit/modeling_owlvit.py +++ b/src/transformers/models/owlvit/modeling_owlvit.py @@ -551,7 +551,7 @@ def forward( class OwlViTPreTrainedModel(PreTrainedModel): config: OwlViTConfig base_model_prefix = "owlvit" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _no_split_modules = ["OwlViTEncoderLayer"] @@ -755,7 +755,7 @@ def forward( class OwlViTTextModel(OwlViTPreTrainedModel): config: OwlViTTextConfig - input_modalities = "text" + input_modalities = ("text",) def __init__(self, config: OwlViTTextConfig): super().__init__(config) @@ -866,7 +866,7 @@ def forward( class OwlViTVisionModel(OwlViTPreTrainedModel): config: OwlViTVisionConfig main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) def __init__(self, config: OwlViTVisionConfig): super().__init__(config) diff --git a/src/transformers/models/paligemma/modeling_paligemma.py b/src/transformers/models/paligemma/modeling_paligemma.py index 63538043506b..d147f0b953ac 100644 --- a/src/transformers/models/paligemma/modeling_paligemma.py +++ b/src/transformers/models/paligemma/modeling_paligemma.py @@ -215,7 +215,7 @@ def create_causal_mask_mapping( class PaliGemmaPreTrainedModel(PreTrainedModel): config: PaliGemmaConfig base_model_prefix = "model" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _no_split_modules = ["PaliGemmaMultiModalProjector"] _skip_keys_device_placement = "past_key_values" diff --git a/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py b/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py index 410ef5abb7f4..61e29c17b6bb 100644 --- a/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py +++ b/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py @@ -683,7 +683,7 @@ class PatchTSMixerPreTrainedModel(PreTrainedModel): config: PatchTSMixerConfig base_model_prefix = "model" main_input_name = "past_values" - input_modalities = "time" + input_modalities = ("time",) supports_gradient_checkpointing = False @torch.no_grad() diff --git a/src/transformers/models/patchtst/modeling_patchtst.py 
b/src/transformers/models/patchtst/modeling_patchtst.py index 5317476b1a42..d482efa5b832 100755 --- a/src/transformers/models/patchtst/modeling_patchtst.py +++ b/src/transformers/models/patchtst/modeling_patchtst.py @@ -553,7 +553,7 @@ class PatchTSTPreTrainedModel(PreTrainedModel): config: PatchTSTConfig base_model_prefix = "model" main_input_name = "past_values" - input_modalities = "time" + input_modalities = ("time",) supports_gradient_checkpointing = False @torch.no_grad() diff --git a/src/transformers/models/perceiver/modeling_perceiver.py b/src/transformers/models/perceiver/modeling_perceiver.py index 1b22c7ca1802..9c4edf03c979 100755 --- a/src/transformers/models/perceiver/modeling_perceiver.py +++ b/src/transformers/models/perceiver/modeling_perceiver.py @@ -530,7 +530,7 @@ class PerceiverPreTrainedModel(PreTrainedModel): config: PerceiverConfig base_model_prefix = "perceiver" main_input_name = "inputs" - input_modalities = "image" # techinically can be anything but HF impl has only image processor + input_modalities = ("image",) # techinically can be anything but HF impl has only image processor @torch.no_grad() def _init_weights(self, module): diff --git a/src/transformers/models/perception_lm/modeling_perception_lm.py b/src/transformers/models/perception_lm/modeling_perception_lm.py index 6e6724288b06..cb41dfbb86aa 100644 --- a/src/transformers/models/perception_lm/modeling_perception_lm.py +++ b/src/transformers/models/perception_lm/modeling_perception_lm.py @@ -90,7 +90,7 @@ def forward(self, features): class PerceptionLMPreTrainedModel(PreTrainedModel): config: PerceptionLMConfig base_model_prefix = "model" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _skip_keys_device_placement = "past_key_values" diff --git a/src/transformers/models/phi4_multimodal/modeling_phi4_multimodal.py b/src/transformers/models/phi4_multimodal/modeling_phi4_multimodal.py index eab15068d252..4d4bdab8ce44 100644 --- a/src/transformers/models/phi4_multimodal/modeling_phi4_multimodal.py +++ b/src/transformers/models/phi4_multimodal/modeling_phi4_multimodal.py @@ -243,7 +243,7 @@ def default_flax_embed_init(tensor): class Phi4MultimodalVisionPreTrainedModel(PreTrainedModel): config: Phi4MultimodalVisionConfig base_model_prefix = "phi4_vision" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True _no_split_modules = ["Phi4MultimodalVisionEncoderLayer"] @@ -1432,7 +1432,7 @@ class Phi4MultimodalPreTrainedModel(PreTrainedModel): "attentions": Phi4MultimodalAttention, } _version = "0.0.5" - input_modalities = ["image", "audio", "text"] + input_modalities = ("image", "audio", "text") @torch.no_grad() def _init_weights(self, module): diff --git a/src/transformers/models/phi4_multimodal/modular_phi4_multimodal.py b/src/transformers/models/phi4_multimodal/modular_phi4_multimodal.py index c5c673cf68c2..728a5244468a 100644 --- a/src/transformers/models/phi4_multimodal/modular_phi4_multimodal.py +++ b/src/transformers/models/phi4_multimodal/modular_phi4_multimodal.py @@ -534,7 +534,7 @@ def __init__(self, config: Phi4MultimodalVisionConfig): class Phi4MultimodalVisionPreTrainedModel(SiglipPreTrainedModel): config: Phi4MultimodalVisionConfig base_model_prefix = "phi4_vision" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True _no_split_modules = ["Phi4MultimodalVisionEncoderLayer"] @@ -1442,7 +1442,7 @@ def forward( class 
Phi4MultimodalPreTrainedModel(Phi3PreTrainedModel): - input_modalities = ["image", "audio", "text"] + input_modalities = ("image", "audio", "text") @torch.no_grad() def _init_weights(self, module): diff --git a/src/transformers/models/pix2struct/modeling_pix2struct.py b/src/transformers/models/pix2struct/modeling_pix2struct.py index 79565625b6ff..02215827e33c 100644 --- a/src/transformers/models/pix2struct/modeling_pix2struct.py +++ b/src/transformers/models/pix2struct/modeling_pix2struct.py @@ -336,7 +336,7 @@ def forward( @auto_docstring class Pix2StructPreTrainedModel(PreTrainedModel): config: Pix2StructConfig - input_modalities = ["image", "text"] + input_modalities = ("image", "text") _can_compile_fullgraph = False @@ -454,7 +454,7 @@ def _shift_right(self, input_ids): class Pix2StructVisionModel(Pix2StructPreTrainedModel): config: Pix2StructVisionConfig main_input_name = "flattened_patches" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True _no_split_modules = ["Pix2StructVisionLayer"] @@ -956,7 +956,7 @@ def forward( ) class Pix2StructTextModel(Pix2StructPreTrainedModel): config: Pix2StructTextConfig - input_modalities = "text" + input_modalities = ("text",) _no_split_modules = ["Pix2StructTextBlock"] _tied_weights_keys = {"lm_head.weight": "embed_tokens.weight"} supports_gradient_checkpointing = True diff --git a/src/transformers/models/pixtral/modeling_pixtral.py b/src/transformers/models/pixtral/modeling_pixtral.py index 3e6d468a8184..ef861b9ebdc1 100644 --- a/src/transformers/models/pixtral/modeling_pixtral.py +++ b/src/transformers/models/pixtral/modeling_pixtral.py @@ -433,7 +433,7 @@ class PixtralPreTrainedModel(PreTrainedModel): config: PixtralVisionConfig base_model_prefix = "model" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True _supports_attention_backend = True _supports_flash_attn = True diff --git a/src/transformers/models/poolformer/modeling_poolformer.py b/src/transformers/models/poolformer/modeling_poolformer.py index 21f01878ef87..0ab490c97344 100755 --- a/src/transformers/models/poolformer/modeling_poolformer.py +++ b/src/transformers/models/poolformer/modeling_poolformer.py @@ -243,7 +243,7 @@ class PoolFormerPreTrainedModel(PreTrainedModel): config: PoolFormerConfig base_model_prefix = "poolformer" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) _no_split_modules = ["PoolFormerLayer"] @torch.no_grad() diff --git a/src/transformers/models/pop2piano/modeling_pop2piano.py b/src/transformers/models/pop2piano/modeling_pop2piano.py index 546e64af7550..9a68fea87c7b 100644 --- a/src/transformers/models/pop2piano/modeling_pop2piano.py +++ b/src/transformers/models/pop2piano/modeling_pop2piano.py @@ -538,7 +538,7 @@ def forward( class Pop2PianoPreTrainedModel(PreTrainedModel): config: Pop2PianoConfig base_model_prefix = "transformer" - output_modalities = "audio" + output_modalities = ("audio",) supports_gradient_checkpointing = True _can_compile_fullgraph = False diff --git a/src/transformers/models/prompt_depth_anything/modeling_prompt_depth_anything.py b/src/transformers/models/prompt_depth_anything/modeling_prompt_depth_anything.py index cf9b260a0fa3..1fa57eef92ad 100644 --- a/src/transformers/models/prompt_depth_anything/modeling_prompt_depth_anything.py +++ b/src/transformers/models/prompt_depth_anything/modeling_prompt_depth_anything.py @@ -243,7 +243,7 @@ class 
PromptDepthAnythingPreTrainedModel(PreTrainedModel): config: PromptDepthAnythingConfig base_model_prefix = "prompt_depth_anything" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True diff --git a/src/transformers/models/prompt_depth_anything/modular_prompt_depth_anything.py b/src/transformers/models/prompt_depth_anything/modular_prompt_depth_anything.py index 33dfab832e71..c61aeeb5343c 100644 --- a/src/transformers/models/prompt_depth_anything/modular_prompt_depth_anything.py +++ b/src/transformers/models/prompt_depth_anything/modular_prompt_depth_anything.py @@ -162,7 +162,7 @@ class PromptDepthAnythingPreTrainedModel(PreTrainedModel): config: PromptDepthAnythingConfig base_model_prefix = "prompt_depth_anything" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True diff --git a/src/transformers/models/pvt/modeling_pvt.py b/src/transformers/models/pvt/modeling_pvt.py index caee0f0dfb78..846d6755fb5a 100755 --- a/src/transformers/models/pvt/modeling_pvt.py +++ b/src/transformers/models/pvt/modeling_pvt.py @@ -419,7 +419,7 @@ class PvtPreTrainedModel(PreTrainedModel): config: PvtConfig base_model_prefix = "pvt" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) _no_split_modules = [] @torch.no_grad() diff --git a/src/transformers/models/pvt_v2/modeling_pvt_v2.py b/src/transformers/models/pvt_v2/modeling_pvt_v2.py index 9973c6dfcad6..8f9b7216be69 100644 --- a/src/transformers/models/pvt_v2/modeling_pvt_v2.py +++ b/src/transformers/models/pvt_v2/modeling_pvt_v2.py @@ -366,7 +366,7 @@ class PvtV2PreTrainedModel(PreTrainedModel): config: PvtV2Config base_model_prefix = "pvt_v2" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True @torch.no_grad() diff --git a/src/transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py b/src/transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py index 0826873a8f98..9b7070ce8bfc 100644 --- a/src/transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py +++ b/src/transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py @@ -42,6 +42,7 @@ from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, check_torch_load_is_safe, logging +from ...utils.deprecation import deprecate_kwarg from ...utils.hub import cached_file from ..qwen2.modeling_qwen2 import Qwen2RMSNorm from .configuration_qwen2_5_omni import ( @@ -64,7 +65,7 @@ class Qwen2_5OmniPreTrainedModel(PreTrainedModel): config: Qwen2_5OmniConfig base_model_prefix = "model" - input_modalities = ["image", "video", "audio", "text"] + input_modalities = ("image", "video", "audio", "text") supports_gradient_checkpointing = True _no_split_modules = ["Qwen2_5OmniDecoderLayer", "Qwen2_5OmniVisionBlock"] _skip_keys_device_placement = "past_key_values" @@ -75,7 +76,7 @@ class Qwen2_5OmniPreTrainedModel(PreTrainedModel): class Qwen2_5OmniPreTrainedModelForConditionalGeneration(Qwen2_5OmniPreTrainedModel): - input_modalities = ["image", "video", "audio", "text"] + input_modalities = ("image", "video", "audio", "text") def _prepare_4d_causal_attention_mask_with_cache_position( self, @@ -1075,7 +1076,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: class Qwen2_5OmniVisionEncoder(Qwen2_5OmniPreTrainedModel): config: 
Qwen2_5OmniVisionEncoderConfig _no_split_modules = ["Qwen2_5OmniVisionBlock"] - input_modalities = ["image", "video"] + input_modalities = ("image", "video") def __init__(self, config: Qwen2_5OmniVisionEncoderConfig, *inputs, **kwargs) -> None: super().__init__(config, *inputs, **kwargs) @@ -1530,7 +1531,7 @@ def forward( @auto_docstring class Qwen2_5OmniThinkerTextModel(Qwen2_5OmniPreTrainedModel): config: Qwen2_5OmniTextConfig - input_modalities = "text" + input_modalities = ("text",) _no_split_modules = ["Qwen2_5OmniDecoderLayer"] def __init__(self, config: Qwen2_5OmniTextConfig): @@ -1825,7 +1826,7 @@ def get_placeholder_mask( special_video_mask = special_video_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device) if video_features is not None and inputs_embeds[special_video_mask].numel() != video_features.numel(): raise ValueError( - f"Videos features and image tokens do not match: tokens: {n_video_tokens}, features {video_features.shape[0]}" + f"Videos features and video tokens do not match: tokens: {n_video_tokens}, features {video_features.shape[0]}" ) special_audio_mask = special_audio_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device) @@ -2105,7 +2106,7 @@ class Qwen2_5OmniTalkerCausalLMOutputWithPast(ModelOutput): @auto_docstring class Qwen2_5OmniTalkerModel(Qwen2_5OmniPreTrainedModel): config: Qwen2_5OmniTalkerConfig - input_modalities = ["image", "video", "audio", "text"] + input_modalities = ("image", "video", "audio", "text") _no_split_modules = ["Qwen2_5OmniTalkerDecoderLayer"] @@ -2263,7 +2264,7 @@ def forward( class Qwen2_5OmniTalkerForConditionalGeneration(Qwen2_5OmniPreTrainedModelForConditionalGeneration, GenerationMixin): config: Qwen2_5OmniTalkerConfig base_model_prefix = "talker" - output_modalities = "audio" + output_modalities = ("audio",) def __init__(self, config: Qwen2_5OmniTalkerConfig): super().__init__(config) @@ -3765,7 +3766,7 @@ def forward( ) class Qwen2_5OmniForConditionalGeneration(Qwen2_5OmniPreTrainedModel, GenerationMixin): config: Qwen2_5OmniConfig - output_modalities = ["audio", "text"] + output_modalities = ("audio", "text") _no_split_modules = [ "Qwen2_5OmniTalkerForConditionalGeneration", "Qwen2_5OmniToken2WavModel", @@ -3849,13 +3850,13 @@ def from_pretrained( return model @torch.no_grad() + @deprecate_kwarg("return_audio", version="v5", new_name="generation_mode") # TODO: raushan, defaults should be saved in generation config def generate( self, input_ids: Optional[torch.Tensor] = None, speaker: str = "Chelsie", use_audio_in_video: bool = False, - return_audio: Optional[bool] = None, thinker_max_new_tokens: int = 1024, talker_max_new_tokens: int = 4096, talker_do_sample: bool = True, @@ -3876,8 +3877,8 @@ def generate( Which speaker should be used in audio response. use_audio_in_video (`bool`, defaults to False): Whether or not use audio track in video, should same as the parameter in `process_audio_info`. - return_audio (`Optional[bool]`, *optional*): - Whether or not return response in audio format. When `return_audio=None`, this parameter is same as `config.enable_audio_output`. + generation_mode (`Optional[str]`, *optional*): + Whether or not return response in audio format. When `generation_mode="audio"`, this parameter is same as `config.enable_audio_output`. kwargs (*optional*): - Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model. 
- With a *thinker_*, *talker_*, *token2wav_* prefix, they will be input for the `generate` method of the @@ -3889,6 +3890,10 @@ def generate( - **Text** (`torch.Tensor`): Generated text token sequence. - **Audio waveform** (`torch.Tensor`): Generated audio waveform. """ + # check `False` on purpose because the paramter can be `str/bool`. This is needed for BC + generation_mode = kwargs.pop("generation_mode", None) + return_audio = generation_mode != "text" and generation_mode is not False + if speaker not in self.speaker_map: raise ValueError(f"{speaker} is not available, available speakers: {self.speaker_map.keys()}") if return_audio and not self.has_talker: diff --git a/src/transformers/models/qwen2_5_omni/modular_qwen2_5_omni.py b/src/transformers/models/qwen2_5_omni/modular_qwen2_5_omni.py index 685c6b5c86f0..553213af7e77 100644 --- a/src/transformers/models/qwen2_5_omni/modular_qwen2_5_omni.py +++ b/src/transformers/models/qwen2_5_omni/modular_qwen2_5_omni.py @@ -54,6 +54,7 @@ check_torch_load_is_safe, logging, ) +from ...utils.deprecation import deprecate_kwarg from ...utils.hub import cached_file @@ -1064,12 +1065,12 @@ def get_text_config(self, *args, **kwargs): class Qwen2_5OmniPreTrainedModel(Qwen2_5_VLPreTrainedModel): config: Qwen2_5OmniConfig - input_modalities = ["image", "video", "audio", "text"] + input_modalities = ("image", "video", "audio", "text") _can_compile_fullgraph = False class Qwen2_5OmniPreTrainedModelForConditionalGeneration(Qwen2_5OmniPreTrainedModel): - input_modalities = ["image", "video", "audio", "text"] + input_modalities = ("image", "video", "audio", "text") def _prepare_4d_causal_attention_mask_with_cache_position( self, @@ -1933,7 +1934,7 @@ def forward( class Qwen2_5OmniVisionEncoder(Qwen2_5_VisionTransformerPretrainedModel): config: Qwen2_5OmniVisionEncoderConfig - input_modalities = ["image", "video"] + input_modalities = ("image", "video") _no_split_modules = ["Qwen2_5OmniVisionBlock"] def __init__(self, config: Qwen2_5OmniVisionEncoderConfig, *inputs, **kwargs) -> None: @@ -2189,7 +2190,7 @@ def get_placeholder_mask( special_video_mask = special_video_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device) if video_features is not None and inputs_embeds[special_video_mask].numel() != video_features.numel(): raise ValueError( - f"Videos features and image tokens do not match: tokens: {n_video_tokens}, features {video_features.shape[0]}" + f"Videos features and video tokens do not match: tokens: {n_video_tokens}, features {video_features.shape[0]}" ) special_audio_mask = special_audio_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device) @@ -2468,7 +2469,7 @@ class Qwen2_5OmniTalkerCausalLMOutputWithPast(ModelOutput): class Qwen2_5OmniTalkerModel(Qwen2_5_VLTextModel): config: Qwen2_5OmniTalkerConfig - input_modalities = ["image", "video", "audio", "text"] + input_modalities = ("image", "video", "audio", "text") _no_split_modules = ["Qwen2_5OmniTalkerDecoderLayer"] @@ -2480,7 +2481,7 @@ def __init__(self, config: Qwen2_5OmniTalkerConfig): class Qwen2_5OmniTalkerForConditionalGeneration(Qwen2_5OmniPreTrainedModelForConditionalGeneration, GenerationMixin): config: Qwen2_5OmniTalkerConfig base_model_prefix = "talker" - output_modalities = "audio" + output_modalities = ("audio",) def __init__(self, config: Qwen2_5OmniTalkerConfig): super().__init__(config) @@ -3939,7 +3940,7 @@ def forward( ) class Qwen2_5OmniForConditionalGeneration(Qwen2_5OmniPreTrainedModel, GenerationMixin): config: Qwen2_5OmniConfig - output_modalities = 
["audio", "text"] + output_modalities = ("audio", "text") _no_split_modules = [ "Qwen2_5OmniTalkerForConditionalGeneration", "Qwen2_5OmniToken2WavModel", @@ -4023,13 +4024,13 @@ def from_pretrained( return model @torch.no_grad() + @deprecate_kwarg("return_audio", version="v5", new_name="generation_mode") # TODO: raushan, defaults should be saved in generation config def generate( self, input_ids: Optional[torch.Tensor] = None, speaker: str = "Chelsie", use_audio_in_video: bool = False, - return_audio: Optional[bool] = None, thinker_max_new_tokens: int = 1024, talker_max_new_tokens: int = 4096, talker_do_sample: bool = True, @@ -4050,8 +4051,8 @@ def generate( Which speaker should be used in audio response. use_audio_in_video (`bool`, defaults to False): Whether or not use audio track in video, should same as the parameter in `process_audio_info`. - return_audio (`Optional[bool]`, *optional*): - Whether or not return response in audio format. When `return_audio=None`, this parameter is same as `config.enable_audio_output`. + generation_mode (`Optional[str]`, *optional*): + Whether or not return response in audio format. When `generation_mode="audio"`, this parameter is same as `config.enable_audio_output`. kwargs (*optional*): - Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model. - With a *thinker_*, *talker_*, *token2wav_* prefix, they will be input for the `generate` method of the @@ -4063,6 +4064,10 @@ def generate( - **Text** (`torch.Tensor`): Generated text token sequence. - **Audio waveform** (`torch.Tensor`): Generated audio waveform. """ + # check `False` on purpose because the paramter can be `str/bool`. This is needed for BC + generation_mode = kwargs.pop("generation_mode", None) + return_audio = generation_mode != "text" and generation_mode is not False + if speaker not in self.speaker_map: raise ValueError(f"{speaker} is not available, available speakers: {self.speaker_map.keys()}") if return_audio and not self.has_talker: diff --git a/src/transformers/models/qwen2_5_omni/processing_qwen2_5_omni.py b/src/transformers/models/qwen2_5_omni/processing_qwen2_5_omni.py index ead9dbe10da4..55906c8f8364 100644 --- a/src/transformers/models/qwen2_5_omni/processing_qwen2_5_omni.py +++ b/src/transformers/models/qwen2_5_omni/processing_qwen2_5_omni.py @@ -330,6 +330,60 @@ def apply_chat_template(self, conversations, chat_template=None, **kwargs): return super().apply_chat_template(conversations, chat_template, **kwargs) + def post_process_image_text_to_text(self, generated_outputs, skip_special_tokens=True, **kwargs): + """ + Post-process the output of a vlm to decode the text. + + Args: + generated_outputs (`torch.Tensor` or `np.ndarray`): + The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)` + or `(sequence_length,)`. + skip_special_tokens (`bool`, *optional*, defaults to `True`): + Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method. + **kwargs: + Additional arguments to be passed to the tokenizer's `batch_decode method`. + + Returns: + `list[str]`: The decoded text. + """ + return self.tokenizer.batch_decode(generated_outputs[0], skip_special_tokens=skip_special_tokens, **kwargs) + + def post_process_multimodal_output( + self, generated_outputs, skip_special_tokens=True, generation_mode=None, **kwargs + ): + """ + Post-process the output of a multimodal model to return the requested modality output. 
+ If the model cannot generate the requested modality, an error will be raised. + + Args: + generated_outputs (`torch.Tensor` or `np.ndarray`): + The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)` + or `(sequence_length,)`. + skip_special_tokens (`bool`, *optional*, defaults to `True`): + Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method. + generation_mode (`str`, *optional*): + Generation mode indicating which modality to output; can be one of `["text", "image", "audio"]`. + **kwargs: + Additional arguments to be passed to the tokenizer's `batch_decode` method. + + Returns: + `list[Union[str, np.ndarray]]`: The decoded text or generated audio. + """ + if generation_mode is None or generation_mode == "text": + return self.post_process_image_text_to_text( + generated_outputs, skip_special_tokens=skip_special_tokens, **kwargs + ) + + elif generation_mode == "audio": + # model supports only bs=1, so we will never get several audio outputs + audio = generated_outputs[1].reshape(-1).detach().cpu().numpy() + return [audio] + + else: + raise ValueError( + f"{self.__class__.__name__} got an unexpected generation_mode={generation_mode}. Supported options are only `text` and `audio`" + ) + @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names diff --git a/src/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py b/src/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py index b18e1e9f24dd..c0dd5b983cd6 100644 --- a/src/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py +++ b/src/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py @@ -293,7 +293,7 @@ def forward( class Qwen2_5_VLPreTrainedModel(PreTrainedModel): config: Qwen2_5_VLConfig base_model_prefix = "model" - input_modalities = ["image", "video", "text"] + input_modalities = ("image", "video", "text") supports_gradient_checkpointing = True _no_split_modules = ["Qwen2_5_VLDecoderLayer", "Qwen2_5_VLVisionBlock"] _skip_keys_device_placement = "past_key_values" @@ -794,7 +794,7 @@ def forward( @auto_docstring class Qwen2_5_VLTextModel(Qwen2_5_VLPreTrainedModel): config: Qwen2_5_VLTextConfig - input_modalities = "text" + input_modalities = ("text",) def __init__(self, config: Qwen2_5_VLTextConfig): super().__init__(config) diff --git a/src/transformers/models/qwen2_audio/modeling_qwen2_audio.py b/src/transformers/models/qwen2_audio/modeling_qwen2_audio.py index 7cad197e7e72..142a8f76d816 100644 --- a/src/transformers/models/qwen2_audio/modeling_qwen2_audio.py +++ b/src/transformers/models/qwen2_audio/modeling_qwen2_audio.py @@ -250,7 +250,7 @@ def forward( class Qwen2AudioPreTrainedModel(PreTrainedModel): config: Qwen2AudioConfig base_model_prefix = "model" - input_modalities = ["audio", "text"] + input_modalities = ("audio", "text") supports_gradient_checkpointing = True _no_split_modules = ["Qwen2AudioAttention"] _skip_keys_device_placement = "past_key_values" diff --git a/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py b/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py index e058cceb1fa9..593a160ec799 100644 --- a/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py +++ b/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py @@ -655,7 +655,7 @@ def forward( class Qwen2VLPreTrainedModel(PreTrainedModel): config: Qwen2VLConfig base_model_prefix = "model" - input_modalities = ["image", "video", "text"] + input_modalities = ("image", "video", "text")
supports_gradient_checkpointing = True _no_split_modules = ["Qwen2VLDecoderLayer", "Qwen2VLVisionBlock"] _skip_keys_device_placement = "past_key_values" @@ -669,7 +669,7 @@ class Qwen2VLPreTrainedModel(PreTrainedModel): @auto_docstring class Qwen2VisionTransformerPretrainedModel(Qwen2VLPreTrainedModel): config: Qwen2VLVisionConfig - input_modalities = ["image", "video"] + input_modalities = ("image", "video") _no_split_modules = ["Qwen2VLVisionBlock"] def __init__(self, config) -> None: @@ -767,7 +767,7 @@ def forward( @auto_docstring class Qwen2VLTextModel(Qwen2VLPreTrainedModel): config: Qwen2VLTextConfig - input_modalities = "text" + input_modalities = ("text",) def __init__(self, config: Qwen2VLTextConfig): super().__init__(config) diff --git a/src/transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py b/src/transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py index 1be0487cea98..69b6be3f4fcc 100644 --- a/src/transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py +++ b/src/transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py @@ -68,7 +68,7 @@ class Qwen3OmniMoePreTrainedModel(PreTrainedModel): config: Qwen3OmniMoeConfig base_model_prefix = "model" - input_modalities = ["image", "video", "audio", "text"] + input_modalities = ("image", "video", "audio", "text") supports_gradient_checkpointing = True _no_split_modules = ["Qwen3OmniMoeDecoderLayer", "Qwen3OmniMoeVisionBlock"] _skip_keys_device_placement = "past_key_values" @@ -99,7 +99,7 @@ def _get_feat_extract_output_lengths(input_lengths): class Qwen3OmniMoePreTrainedModelForConditionalGeneration(Qwen3OmniMoePreTrainedModel): - input_modalities = ["image", "video", "audio", "text"] + input_modalities = ("image", "video", "audio", "text") def _prepare_4d_causal_attention_mask_with_cache_position( self, @@ -2011,7 +2011,7 @@ def get_placeholder_mask( special_video_mask = special_video_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device) if video_features is not None and inputs_embeds[special_video_mask].numel() != video_features.numel(): raise ValueError( - f"Videos features and image tokens do not match: tokens: {n_video_tokens}, features {video_features.shape[0]}" + f"Videos features and video tokens do not match: tokens: {n_video_tokens}, features {video_features.shape[0]}" ) special_audio_mask = special_audio_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device) @@ -3787,7 +3787,7 @@ def chunked_decode(self, codes, chunk_size=300, left_context_size=25): class Qwen3OmniMoeForConditionalGeneration(Qwen3OmniMoePreTrainedModel, GenerationMixin): config_class = Qwen3OmniMoeConfig - output_modalities = ["text", "audio"] + output_modalities = ("text", "audio") def __init__(self, config: Qwen3OmniMoeConfig): super().__init__(config) diff --git a/src/transformers/models/qwen3_omni_moe/modular_qwen3_omni_moe.py b/src/transformers/models/qwen3_omni_moe/modular_qwen3_omni_moe.py index 7beea60d6d16..cd9f94681b9d 100644 --- a/src/transformers/models/qwen3_omni_moe/modular_qwen3_omni_moe.py +++ b/src/transformers/models/qwen3_omni_moe/modular_qwen3_omni_moe.py @@ -2304,7 +2304,7 @@ def chunked_decode(self, codes, chunk_size=300, left_context_size=25): class Qwen3OmniMoeForConditionalGeneration(Qwen3OmniMoePreTrainedModel, GenerationMixin): config_class = Qwen3OmniMoeConfig - output_modalities = ["text", "audio"] + output_modalities = ("text", "audio") def __init__(self, config: Qwen3OmniMoeConfig): super().__init__(config) diff --git 
a/src/transformers/models/qwen3_omni_moe/processing_qwen3_omni_moe.py b/src/transformers/models/qwen3_omni_moe/processing_qwen3_omni_moe.py index ceacd2b854d2..5d8b35f744c6 100644 --- a/src/transformers/models/qwen3_omni_moe/processing_qwen3_omni_moe.py +++ b/src/transformers/models/qwen3_omni_moe/processing_qwen3_omni_moe.py @@ -329,6 +329,60 @@ def _iter(): def apply_chat_template(self, conversations, chat_template=None, **kwargs): return super().apply_chat_template(conversations, chat_template, **kwargs) + def post_process_image_text_to_text(self, generated_outputs, skip_special_tokens=True, **kwargs): + """ + Post-process the output of a vlm to decode the text. + + Args: + generated_outputs (`torch.Tensor` or `np.ndarray`): + The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)` + or `(sequence_length,)`. + skip_special_tokens (`bool`, *optional*, defaults to `True`): + Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method. + **kwargs: + Additional arguments to be passed to the tokenizer's `batch_decode` method. + + Returns: + `list[str]`: The decoded text. + """ + return self.tokenizer.batch_decode(generated_outputs[0], skip_special_tokens=skip_special_tokens, **kwargs) + + def post_process_multimodal_output( + self, generated_outputs, skip_special_tokens=True, generation_mode=None, **kwargs + ): + """ + Post-process the output of a multimodal model to return the requested modality output. + If the model cannot generate the requested modality, an error will be raised. + + Args: + generated_outputs (`torch.Tensor` or `np.ndarray`): + The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)` + or `(sequence_length,)`. + skip_special_tokens (`bool`, *optional*, defaults to `True`): + Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method. + generation_mode (`str`, *optional*): + Generation mode indicating which modality to output; can be one of `["text", "image", "audio"]`. + **kwargs: + Additional arguments to be passed to the tokenizer's `batch_decode` method. + + Returns: + `list[Union[str, np.ndarray]]`: The decoded text or generated audio. + """ + if generation_mode is None or generation_mode == "text": + return self.post_process_image_text_to_text( + generated_outputs, skip_special_tokens=skip_special_tokens, **kwargs + ) + + elif generation_mode == "audio": + # model supports only bs=1, so we will never get several audio outputs + audio = generated_outputs[1].reshape(-1).detach().cpu().numpy() + return [audio] + + else: + raise ValueError( + f"{self.__class__.__name__} got an unexpected generation_mode={generation_mode}. 
Supported options are only `text` and `audio" + ) + @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names diff --git a/src/transformers/models/qwen3_vl/modeling_qwen3_vl.py b/src/transformers/models/qwen3_vl/modeling_qwen3_vl.py index aab768b1cf1c..1b41f5eee01d 100644 --- a/src/transformers/models/qwen3_vl/modeling_qwen3_vl.py +++ b/src/transformers/models/qwen3_vl/modeling_qwen3_vl.py @@ -576,7 +576,7 @@ class Qwen3VLModelOutputWithPast(ModelOutput): class Qwen3VLPreTrainedModel(PreTrainedModel): config: Qwen3VLConfig base_model_prefix = "model" - input_modalities = ["image", "video", "text"] + input_modalities = ("image", "video", "text") supports_gradient_checkpointing = True _no_split_modules = ["Qwen3VLTextDecoderLayer", "Qwen3VLVisionBlock"] _skip_keys_device_placement = "past_key_values" diff --git a/src/transformers/models/resnet/modeling_resnet.py b/src/transformers/models/resnet/modeling_resnet.py index 3f4fec571cd7..aa3be5d54be3 100644 --- a/src/transformers/models/resnet/modeling_resnet.py +++ b/src/transformers/models/resnet/modeling_resnet.py @@ -248,7 +248,7 @@ class ResNetPreTrainedModel(PreTrainedModel): config: ResNetConfig base_model_prefix = "resnet" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) _no_split_modules = ["ResNetConvLayer", "ResNetShortCut"] @torch.no_grad() diff --git a/src/transformers/models/rt_detr/modeling_rt_detr.py b/src/transformers/models/rt_detr/modeling_rt_detr.py index bfdf4dd75fae..b5f02de709a1 100644 --- a/src/transformers/models/rt_detr/modeling_rt_detr.py +++ b/src/transformers/models/rt_detr/modeling_rt_detr.py @@ -1007,7 +1007,7 @@ class RTDetrPreTrainedModel(PreTrainedModel): config: RTDetrConfig base_model_prefix = "rt_detr" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) _no_split_modules = [r"RTDetrHybridEncoder", r"RTDetrDecoderLayer"] @torch.no_grad() diff --git a/src/transformers/models/rt_detr/modeling_rt_detr_resnet.py b/src/transformers/models/rt_detr/modeling_rt_detr_resnet.py index 69d99dce3d86..80908a1bcca9 100644 --- a/src/transformers/models/rt_detr/modeling_rt_detr_resnet.py +++ b/src/transformers/models/rt_detr/modeling_rt_detr_resnet.py @@ -302,7 +302,7 @@ class RTDetrResNetPreTrainedModel(PreTrainedModel): config: RTDetrResNetConfig base_model_prefix = "resnet" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) _no_split_modules = ["RTDetrResNetConvLayer", "RTDetrResNetShortCut"] @torch.no_grad() diff --git a/src/transformers/models/rt_detr_v2/modeling_rt_detr_v2.py b/src/transformers/models/rt_detr_v2/modeling_rt_detr_v2.py index 33cefeb1729c..4136b6a9f851 100644 --- a/src/transformers/models/rt_detr_v2/modeling_rt_detr_v2.py +++ b/src/transformers/models/rt_detr_v2/modeling_rt_detr_v2.py @@ -454,7 +454,7 @@ class RTDetrV2PreTrainedModel(PreTrainedModel): config: RTDetrV2Config base_model_prefix = "rt_detr_v2" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) _no_split_modules = [r"RTDetrV2HybridEncoder", r"RTDetrV2DecoderLayer"] @torch.no_grad() diff --git a/src/transformers/models/sam/modeling_sam.py b/src/transformers/models/sam/modeling_sam.py index 00e6bd1ab23c..aef70f382ff0 100644 --- a/src/transformers/models/sam/modeling_sam.py +++ b/src/transformers/models/sam/modeling_sam.py @@ -999,7 +999,7 @@ class SamPreTrainedModel(PreTrainedModel): config: SamConfig base_model_prefix = "sam" 
main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) _no_split_modules = ["SamVisionAttention"] supports_gradient_checkpointing = True _supports_sdpa = True @@ -1103,7 +1103,7 @@ def forward( """ ) class SamModel(SamPreTrainedModel): - input_modalities = ["image", "text"] + input_modalities = ("image", "text") _can_record_outputs = {"mask_decoder_attentions": OutputRecorder(SamTwoWayAttentionBlock, index=2)} def __init__(self, config: SamConfig): diff --git a/src/transformers/models/sam2/modeling_sam2.py b/src/transformers/models/sam2/modeling_sam2.py index 39a091d7b2a4..e739468e2270 100644 --- a/src/transformers/models/sam2/modeling_sam2.py +++ b/src/transformers/models/sam2/modeling_sam2.py @@ -552,7 +552,7 @@ class Sam2PreTrainedModel(PreTrainedModel): config_class = Sam2Config base_model_prefix = "sam2" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) _supports_sdpa = True _supports_flash_attn_2 = True _supports_attention_backend = True @@ -1268,7 +1268,7 @@ def _dynamic_multimask_via_stability(self, all_mask_logits, all_iou_scores): """ ) class Sam2Model(Sam2PreTrainedModel): - input_modalities = ["image", "text"] + input_modalities = ("image", "text") _can_record_outputs = {"mask_decoder_attentions": OutputRecorder(Sam2TwoWayAttentionBlock, index=2)} _keys_to_ignore_on_load_unexpected = [ r"^memory_.*", diff --git a/src/transformers/models/sam2/modular_sam2.py b/src/transformers/models/sam2/modular_sam2.py index a564a2b4dbea..178734a6a397 100644 --- a/src/transformers/models/sam2/modular_sam2.py +++ b/src/transformers/models/sam2/modular_sam2.py @@ -668,7 +668,7 @@ class Sam2PreTrainedModel(PreTrainedModel): config_class = Sam2Config base_model_prefix = "sam2" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) _supports_sdpa = True _supports_flash_attn_2 = True _supports_attention_backend = True diff --git a/src/transformers/models/sam2_video/modeling_sam2_video.py b/src/transformers/models/sam2_video/modeling_sam2_video.py index 8ab33daf5415..d491e10d5692 100644 --- a/src/transformers/models/sam2_video/modeling_sam2_video.py +++ b/src/transformers/models/sam2_video/modeling_sam2_video.py @@ -1556,7 +1556,7 @@ def get_1d_sine_pe(pos_inds, dim, temperature=10000): @auto_docstring class Sam2VideoModel(Sam2VideoPreTrainedModel): - input_modalities = ["video", "text"] + input_modalities = ("video", "text") _can_record_outputs = {"mask_decoder_attentions": OutputRecorder(Sam2VideoTwoWayAttentionBlock, index=2)} _keys_to_ignore_on_load_unexpected = [] _tied_weights_keys = { diff --git a/src/transformers/models/sam2_video/modular_sam2_video.py b/src/transformers/models/sam2_video/modular_sam2_video.py index 88103217c2ad..65ff51bdc3e3 100644 --- a/src/transformers/models/sam2_video/modular_sam2_video.py +++ b/src/transformers/models/sam2_video/modular_sam2_video.py @@ -1445,7 +1445,7 @@ def get_1d_sine_pe(pos_inds, dim, temperature=10000): @auto_docstring class Sam2VideoModel(Sam2Model): - input_modalities = ["video", "text"] + input_modalities = ("video", "text") _tied_weights_keys = { "prompt_encoder.shared_embedding.positional_embedding": "shared_image_embedding.positional_embedding" } diff --git a/src/transformers/models/sam3_tracker/modeling_sam3_tracker.py b/src/transformers/models/sam3_tracker/modeling_sam3_tracker.py index f3d36e33fe5d..829fe01b9436 100644 --- a/src/transformers/models/sam3_tracker/modeling_sam3_tracker.py +++ 
b/src/transformers/models/sam3_tracker/modeling_sam3_tracker.py @@ -112,7 +112,7 @@ class Sam3TrackerPreTrainedModel(PreTrainedModel): config_class = Sam3TrackerConfig base_model_prefix = "sam3_tracker" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) _supports_sdpa = True _supports_flash_attn_2 = True _supports_attention_backend = True @@ -755,7 +755,7 @@ class Sam3TrackerVisionEncoderOutput(ModelOutput): """ ) class Sam3TrackerModel(Sam3TrackerPreTrainedModel): - input_modalities = ["image", "text"] + input_modalities = ("image", "text") _can_record_outputs = {"mask_decoder_attentions": OutputRecorder(Sam3TrackerTwoWayAttentionBlock, index=2)} _keys_to_ignore_on_load_unexpected = [ r"^detector_model.", diff --git a/src/transformers/models/sam3_tracker_video/modeling_sam3_tracker_video.py b/src/transformers/models/sam3_tracker_video/modeling_sam3_tracker_video.py index 95056fee3fc1..d31ff21f80d3 100644 --- a/src/transformers/models/sam3_tracker_video/modeling_sam3_tracker_video.py +++ b/src/transformers/models/sam3_tracker_video/modeling_sam3_tracker_video.py @@ -1564,7 +1564,7 @@ def get_1d_sine_pe(pos_inds, dim, temperature=10000): @auto_docstring class Sam3TrackerVideoModel(Sam3TrackerVideoPreTrainedModel): - input_modalities = ["video", "text"] + input_modalities = ("video", "text") _can_record_outputs = {"mask_decoder_attentions": OutputRecorder(Sam3TrackerVideoTwoWayAttentionBlock, index=2)} _keys_to_ignore_on_load_unexpected = [r"^detector_model."] _tied_weights_keys = {} diff --git a/src/transformers/models/sam_hq/modeling_sam_hq.py b/src/transformers/models/sam_hq/modeling_sam_hq.py index db01c871bf11..5c74dfde9c94 100644 --- a/src/transformers/models/sam_hq/modeling_sam_hq.py +++ b/src/transformers/models/sam_hq/modeling_sam_hq.py @@ -418,7 +418,7 @@ class SamHQPreTrainedModel(PreTrainedModel): config: SamHQConfig base_model_prefix = "sam_hq" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) _no_split_modules = ["SamHQVisionAttention"] supports_gradient_checkpointing = True _supports_sdpa = True @@ -1230,7 +1230,7 @@ def forward( """ ) class SamHQModel(SamHQPreTrainedModel): - input_modalities = ["image", "text"] + input_modalities = ("image", "text") _can_record_outputs = {"mask_decoder_attentions": OutputRecorder(SamHQTwoWayAttentionBlock, index=2)} _keys_to_ignore_on_load_missing = ["prompt_encoder.shared_embedding.positional_embedding"] diff --git a/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py b/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py index 13ebc98ef56d..acd95ec9f0d4 100755 --- a/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py +++ b/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py @@ -2940,7 +2940,7 @@ def generate( """ ) class SeamlessM4TForTextToSpeech(SeamlessM4TPreTrainedModel, GenerationMixin): - output_modalities = "audio" + output_modalities = ("audio",) _keys_to_ignore_on_load_missing = ["speech_encoder"] main_input_name = "input_ids" @@ -3259,7 +3259,7 @@ def generate( ) class SeamlessM4TForSpeechToSpeech(SeamlessM4TPreTrainedModel, GenerationMixin): input_modalities = "audio" - output_modalities = "audio" + output_modalities = ("audio",) _keys_to_ignore_on_load_missing = ["text_encoder"] main_input_name = "input_features" @@ -3581,8 +3581,8 @@ def generate( """ ) class SeamlessM4TModel(SeamlessM4TPreTrainedModel, GenerationMixin): - input_modalities = ["audio", "text"] - output_modalities = ["audio", "text"] + 
input_modalities = ("audio", "text") + output_modalities = ("audio", "text") _tied_weights_keys = { "lm_head.weight": "shared.weight", "text_encoder.embed_tokens.weight": "shared.weight", diff --git a/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py b/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py index 9096c40e9f0c..bbb5b13bc9c7 100644 --- a/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py +++ b/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py @@ -3152,7 +3152,7 @@ def generate( """ ) class SeamlessM4Tv2ForTextToSpeech(SeamlessM4Tv2PreTrainedModel, GenerationMixin): - output_modalities = "audio" + output_modalities = ("audio",) _keys_to_ignore_on_load_missing = ["speech_encoder"] main_input_name = "input_ids" @@ -3508,7 +3508,7 @@ def generate( ) class SeamlessM4Tv2ForSpeechToSpeech(SeamlessM4Tv2PreTrainedModel, GenerationMixin): input_modalities = "audio" - output_modalities = "audio" + output_modalities = ("audio",) _keys_to_ignore_on_load_missing = ["text_encoder"] main_input_name = "input_features" @@ -3866,8 +3866,8 @@ def generate( """ ) class SeamlessM4Tv2Model(SeamlessM4Tv2PreTrainedModel, GenerationMixin): - input_modalities = ["audio", "text"] - output_modalities = ["audio", "text"] + input_modalities = ("audio", "text") + output_modalities = ("audio", "text") _tied_weights_keys = { "lm_head.weight": "shared.weight", "text_encoder.embed_tokens.weight": "shared.weight", diff --git a/src/transformers/models/segformer/modeling_segformer.py b/src/transformers/models/segformer/modeling_segformer.py index fe18dd33cd3c..7bf80b7643c6 100755 --- a/src/transformers/models/segformer/modeling_segformer.py +++ b/src/transformers/models/segformer/modeling_segformer.py @@ -412,7 +412,7 @@ class SegformerPreTrainedModel(PreTrainedModel): config: SegformerConfig base_model_prefix = "segformer" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) @auto_docstring diff --git a/src/transformers/models/seggpt/modeling_seggpt.py b/src/transformers/models/seggpt/modeling_seggpt.py index 7ff7922029d0..17ccbda1ca51 100644 --- a/src/transformers/models/seggpt/modeling_seggpt.py +++ b/src/transformers/models/seggpt/modeling_seggpt.py @@ -592,7 +592,7 @@ class SegGptPreTrainedModel(PreTrainedModel): config: SegGptConfig base_model_prefix = "model" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True _no_split_modules = ["SegGptEmbeddings", "SegGptLayer"] diff --git a/src/transformers/models/shieldgemma2/modeling_shieldgemma2.py b/src/transformers/models/shieldgemma2/modeling_shieldgemma2.py index 7f317bb3b5f0..11c0a77d22f6 100644 --- a/src/transformers/models/shieldgemma2/modeling_shieldgemma2.py +++ b/src/transformers/models/shieldgemma2/modeling_shieldgemma2.py @@ -44,7 +44,7 @@ class ShieldGemma2ImageClassifierOutputWithNoAttention(ImageClassifierOutputWith @auto_docstring class ShieldGemma2ForImageClassification(PreTrainedModel): config: ShieldGemma2Config - input_modalities = ["image", "text"] + input_modalities = ("image", "text") _checkpoint_conversion_mapping = { "model.language_model.model": "model.model.language_model", "model.vision_tower": "model.model.vision_tower", diff --git a/src/transformers/models/siglip/modeling_siglip.py b/src/transformers/models/siglip/modeling_siglip.py index 2b89f8748477..875814cef197 100644 --- a/src/transformers/models/siglip/modeling_siglip.py +++ 
b/src/transformers/models/siglip/modeling_siglip.py @@ -401,7 +401,7 @@ def forward( class SiglipPreTrainedModel(PreTrainedModel): config: SiglipConfig base_model_prefix = "siglip" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _no_split_modules = [ @@ -565,7 +565,7 @@ def forward( ) class SiglipTextModel(SiglipPreTrainedModel): config: SiglipTextConfig - input_modalities = "text" + input_modalities = ("text",) def __init__(self, config: SiglipTextConfig): super().__init__(config) @@ -689,7 +689,7 @@ def forward(self, hidden_state): class SiglipVisionModel(SiglipPreTrainedModel): config: SiglipVisionConfig main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) def __init__(self, config: SiglipVisionConfig): super().__init__(config) @@ -949,7 +949,7 @@ def forward( ) class SiglipForImageClassification(SiglipPreTrainedModel): main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) def __init__(self, config: SiglipConfig) -> None: super().__init__(config) diff --git a/src/transformers/models/siglip2/modeling_siglip2.py b/src/transformers/models/siglip2/modeling_siglip2.py index 0a2a990ef52a..9b24f5a33f93 100644 --- a/src/transformers/models/siglip2/modeling_siglip2.py +++ b/src/transformers/models/siglip2/modeling_siglip2.py @@ -383,7 +383,7 @@ def default_flax_embed_init(tensor): class Siglip2PreTrainedModel(PreTrainedModel): config: Siglip2Config base_model_prefix = "siglip2" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _no_split_modules = [ @@ -651,7 +651,7 @@ def forward( ) class Siglip2TextModel(Siglip2PreTrainedModel): config: Siglip2TextConfig - input_modalities = "text" + input_modalities = ("text",) def __init__(self, config: Siglip2TextConfig): super().__init__(config) @@ -738,7 +738,7 @@ def forward(self, hidden_state: torch.Tensor, attention_mask: Optional[torch.Ten class Siglip2VisionModel(Siglip2PreTrainedModel): config: Siglip2VisionConfig main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) def __init__(self, config: Siglip2VisionConfig): super().__init__(config) @@ -1028,7 +1028,7 @@ def forward( ) class Siglip2ForImageClassification(Siglip2PreTrainedModel): main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) def __init__(self, config: Siglip2Config) -> None: super().__init__(config) diff --git a/src/transformers/models/smolvlm/modeling_smolvlm.py b/src/transformers/models/smolvlm/modeling_smolvlm.py index dc72ed10a670..02a080385aa6 100644 --- a/src/transformers/models/smolvlm/modeling_smolvlm.py +++ b/src/transformers/models/smolvlm/modeling_smolvlm.py @@ -53,7 +53,7 @@ class SmolVLMPreTrainedModel(PreTrainedModel): config: SmolVLMConfig base_model_prefix = "model" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _no_split_modules = ["SmolVLMVisionAttention", "SmolVLMDecoderLayer"] _skip_keys_device_placement = "past_key_values" @@ -312,7 +312,7 @@ def forward( ) class SmolVLMVisionTransformer(SmolVLMPreTrainedModel): config: SmolVLMVisionConfig - input_modalities = "image" + input_modalities = ("image",) _supports_sdpa = True _supports_flash_attn = True _supports_flex_attn = True diff --git a/src/transformers/models/speech_to_text/modeling_speech_to_text.py 
b/src/transformers/models/speech_to_text/modeling_speech_to_text.py index e2078910afe5..c0f56a8ed32f 100755 --- a/src/transformers/models/speech_to_text/modeling_speech_to_text.py +++ b/src/transformers/models/speech_to_text/modeling_speech_to_text.py @@ -1007,7 +1007,7 @@ def forward( """ ) class Speech2TextForConditionalGeneration(Speech2TextPreTrainedModel, GenerationMixin): - input_modalities = ["audio", "text"] + input_modalities = ("audio", "text") base_model_prefix = "model" _tied_weights_keys = {"lm_head.weight": "model.decoder.embed_tokens.weight"} diff --git a/src/transformers/models/speecht5/modeling_speecht5.py b/src/transformers/models/speecht5/modeling_speecht5.py index 919b22ba84f5..1d4e4a1e3c13 100644 --- a/src/transformers/models/speecht5/modeling_speecht5.py +++ b/src/transformers/models/speecht5/modeling_speecht5.py @@ -2309,7 +2309,7 @@ def _generate_speech( """ ) class SpeechT5ForTextToSpeech(SpeechT5PreTrainedModel): - input_modalities = "text" + input_modalities = ("text",) main_input_name = "input_ids" def __init__(self, config: SpeechT5Config): diff --git a/src/transformers/models/superglue/modeling_superglue.py b/src/transformers/models/superglue/modeling_superglue.py index d0af50a5709c..831118db592b 100644 --- a/src/transformers/models/superglue/modeling_superglue.py +++ b/src/transformers/models/superglue/modeling_superglue.py @@ -468,7 +468,7 @@ class SuperGluePreTrainedModel(PreTrainedModel): config: SuperGlueConfig base_model_prefix = "superglue" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) @torch.no_grad() def _init_weights(self, module: nn.Module) -> None: diff --git a/src/transformers/models/superpoint/modeling_superpoint.py b/src/transformers/models/superpoint/modeling_superpoint.py index e9f808e6037c..570a5d63894e 100644 --- a/src/transformers/models/superpoint/modeling_superpoint.py +++ b/src/transformers/models/superpoint/modeling_superpoint.py @@ -325,7 +325,7 @@ class SuperPointPreTrainedModel(PreTrainedModel): config: SuperPointConfig base_model_prefix = "superpoint" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = False def extract_one_channel_pixel_values(self, pixel_values: torch.FloatTensor) -> torch.FloatTensor: diff --git a/src/transformers/models/swiftformer/modeling_swiftformer.py b/src/transformers/models/swiftformer/modeling_swiftformer.py index 2440c2cc0550..006c8a0ec001 100644 --- a/src/transformers/models/swiftformer/modeling_swiftformer.py +++ b/src/transformers/models/swiftformer/modeling_swiftformer.py @@ -386,7 +386,7 @@ class SwiftFormerPreTrainedModel(PreTrainedModel): config: SwiftFormerConfig base_model_prefix = "swiftformer" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True _no_split_modules = ["SwiftFormerEncoderBlock"] diff --git a/src/transformers/models/swin/modeling_swin.py b/src/transformers/models/swin/modeling_swin.py index d28146e90ba2..5fbfda424810 100644 --- a/src/transformers/models/swin/modeling_swin.py +++ b/src/transformers/models/swin/modeling_swin.py @@ -808,7 +808,7 @@ class SwinPreTrainedModel(PreTrainedModel): config: SwinConfig base_model_prefix = "swin" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True _no_split_modules = ["SwinStage"] diff --git a/src/transformers/models/swin2sr/modeling_swin2sr.py 
b/src/transformers/models/swin2sr/modeling_swin2sr.py index 2cc6724be0b1..3ba0823f904c 100644 --- a/src/transformers/models/swin2sr/modeling_swin2sr.py +++ b/src/transformers/models/swin2sr/modeling_swin2sr.py @@ -689,7 +689,7 @@ class Swin2SRPreTrainedModel(PreTrainedModel): config: Swin2SRConfig base_model_prefix = "swin2sr" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True @torch.no_grad() diff --git a/src/transformers/models/swinv2/modeling_swinv2.py b/src/transformers/models/swinv2/modeling_swinv2.py index c27f8fc745ed..91e68c318d65 100644 --- a/src/transformers/models/swinv2/modeling_swinv2.py +++ b/src/transformers/models/swinv2/modeling_swinv2.py @@ -883,7 +883,7 @@ class Swinv2PreTrainedModel(PreTrainedModel): config: Swinv2Config base_model_prefix = "swinv2" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True _no_split_modules = ["Swinv2Stage"] diff --git a/src/transformers/models/table_transformer/modeling_table_transformer.py b/src/transformers/models/table_transformer/modeling_table_transformer.py index b788b2c66359..09c5a1d9c029 100644 --- a/src/transformers/models/table_transformer/modeling_table_transformer.py +++ b/src/transformers/models/table_transformer/modeling_table_transformer.py @@ -688,7 +688,7 @@ class TableTransformerPreTrainedModel(PreTrainedModel): config: TableTransformerConfig base_model_prefix = "model" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) _no_split_modules = [ r"TableTransformerConvEncoder", r"TableTransformerEncoderLayer", diff --git a/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py b/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py index d2476457d7f9..114387714c67 100644 --- a/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py +++ b/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py @@ -608,7 +608,7 @@ class TimeSeriesTransformerPreTrainedModel(PreTrainedModel): config: TimeSeriesTransformerConfig base_model_prefix = "model" main_input_name = "past_values" - input_modalities = "time" + input_modalities = ("time",) supports_gradient_checkpointing = True # TODO: tests would need a rewrite to check for correct implementation # Current tests always assume certain inputs to be passed diff --git a/src/transformers/models/timesfm/modeling_timesfm.py b/src/transformers/models/timesfm/modeling_timesfm.py index 0ec1b90f89da..e9251102556e 100644 --- a/src/transformers/models/timesfm/modeling_timesfm.py +++ b/src/transformers/models/timesfm/modeling_timesfm.py @@ -304,7 +304,7 @@ class TimesFmPreTrainedModel(PreTrainedModel): base_model_prefix = "timesfm" _no_split_modules = ["TimesFmDecoderLayer"] main_input_name = "past_values" - input_modalities = "time" + input_modalities = ("time",) _supports_sdpa = True @torch.no_grad() diff --git a/src/transformers/models/timesfm/modular_timesfm.py b/src/transformers/models/timesfm/modular_timesfm.py index bc2ba05add70..a265fb54f421 100644 --- a/src/transformers/models/timesfm/modular_timesfm.py +++ b/src/transformers/models/timesfm/modular_timesfm.py @@ -260,7 +260,7 @@ class TimesFmPreTrainedModel(PreTrainedModel): base_model_prefix = "timesfm" _no_split_modules = ["TimesFmDecoderLayer"] main_input_name = "past_values" - input_modalities = "time" + input_modalities = 
("time",) _supports_sdpa = True @torch.no_grad() diff --git a/src/transformers/models/timesformer/modeling_timesformer.py b/src/transformers/models/timesformer/modeling_timesformer.py index ee1f6fcab8e4..147b2a663d77 100644 --- a/src/transformers/models/timesformer/modeling_timesformer.py +++ b/src/transformers/models/timesformer/modeling_timesformer.py @@ -452,7 +452,7 @@ class TimesformerPreTrainedModel(PreTrainedModel): config: TimesformerConfig base_model_prefix = "timesformer" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True _no_split_modules = ["TimesformerLayer"] diff --git a/src/transformers/models/timm_backbone/modeling_timm_backbone.py b/src/transformers/models/timm_backbone/modeling_timm_backbone.py index d0ad3dd401bf..4b90ea27f120 100644 --- a/src/transformers/models/timm_backbone/modeling_timm_backbone.py +++ b/src/transformers/models/timm_backbone/modeling_timm_backbone.py @@ -39,7 +39,7 @@ class TimmBackbone(PreTrainedModel, BackboneMixin): """ main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = False config: TimmBackboneConfig diff --git a/src/transformers/models/timm_wrapper/modeling_timm_wrapper.py b/src/transformers/models/timm_wrapper/modeling_timm_wrapper.py index 6e364cf1da05..aa541e0b15fb 100644 --- a/src/transformers/models/timm_wrapper/modeling_timm_wrapper.py +++ b/src/transformers/models/timm_wrapper/modeling_timm_wrapper.py @@ -82,7 +82,7 @@ def _create_timm_model_with_error_handling(config: "TimmWrapperConfig", **model_ class TimmWrapperPreTrainedModel(PreTrainedModel): base_model_prefix = "timm_model" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) config: TimmWrapperConfig _no_split_modules = [] model_tags = ["timm"] diff --git a/src/transformers/models/tvp/modeling_tvp.py b/src/transformers/models/tvp/modeling_tvp.py index 7648c3d00038..24775baf7ab0 100644 --- a/src/transformers/models/tvp/modeling_tvp.py +++ b/src/transformers/models/tvp/modeling_tvp.py @@ -520,7 +520,7 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: class TvpPreTrainedModel(PreTrainedModel): config: TvpConfig base_model_prefix = "model" - input_modalities = ["video", "text"] + input_modalities = ("video", "text") supports_gradient_checkpointing = True @torch.no_grad() diff --git a/src/transformers/models/udop/modeling_udop.py b/src/transformers/models/udop/modeling_udop.py index c20fae835ad5..bce23ba28726 100644 --- a/src/transformers/models/udop/modeling_udop.py +++ b/src/transformers/models/udop/modeling_udop.py @@ -252,7 +252,7 @@ def forward(self, pixel_values): class UdopPreTrainedModel(PreTrainedModel): config: UdopConfig base_model_prefix = "transformer" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _can_compile_fullgraph = False diff --git a/src/transformers/models/upernet/modeling_upernet.py b/src/transformers/models/upernet/modeling_upernet.py index c897f1c81f8c..af212b57c68d 100644 --- a/src/transformers/models/upernet/modeling_upernet.py +++ b/src/transformers/models/upernet/modeling_upernet.py @@ -269,7 +269,7 @@ def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor: class UperNetPreTrainedModel(PreTrainedModel): config: UperNetConfig main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) _no_split_modules = [] diff --git 
a/src/transformers/models/video_llama_3/modeling_video_llama_3.py b/src/transformers/models/video_llama_3/modeling_video_llama_3.py index 8442a4a7c175..d635977ca151 100644 --- a/src/transformers/models/video_llama_3/modeling_video_llama_3.py +++ b/src/transformers/models/video_llama_3/modeling_video_llama_3.py @@ -370,7 +370,7 @@ def forward( class VideoLlama3PreTrainedModel(PreTrainedModel): config: VideoLlama3Config base_model_prefix = "model" - input_modalities = ["image", "video", "text"] + input_modalities = ("image", "video", "text") supports_gradient_checkpointing = True _no_split_modules = ["VideoLlama3VisionEncoderLayer"] _skip_keys_device_placement = "past_key_values" @@ -384,7 +384,7 @@ class VideoLlama3PreTrainedModel(PreTrainedModel): class VideoLlama3VisionModel(VideoLlama3PreTrainedModel): config: VideoLlama3VisionConfig main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) _can_record_outputs = { "hidden_states": VideoLlama3VisionEncoderLayer, "attentions": VideoLlama3VisionAttention, diff --git a/src/transformers/models/video_llama_3/modular_video_llama_3.py b/src/transformers/models/video_llama_3/modular_video_llama_3.py index 0bd05ec9acf8..9389b9112ff9 100644 --- a/src/transformers/models/video_llama_3/modular_video_llama_3.py +++ b/src/transformers/models/video_llama_3/modular_video_llama_3.py @@ -437,7 +437,7 @@ class VideoLlama3PreTrainedModel(Qwen2VLPreTrainedModel): class VideoLlama3VisionModel(VideoLlama3PreTrainedModel): config: VideoLlama3VisionConfig main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) _can_record_outputs = { "hidden_states": VideoLlama3VisionEncoderLayer, "attentions": VideoLlama3VisionAttention, diff --git a/src/transformers/models/video_llava/modeling_video_llava.py b/src/transformers/models/video_llava/modeling_video_llava.py index 29184ca8a165..dfa2c6559f8c 100644 --- a/src/transformers/models/video_llava/modeling_video_llava.py +++ b/src/transformers/models/video_llava/modeling_video_llava.py @@ -126,7 +126,7 @@ def forward(self, image_features): class VideoLlavaPreTrainedModel(PreTrainedModel): config: VideoLlavaConfig base_model_prefix = "model" - input_modalities = ["image", "video", "text"] + input_modalities = ("image", "video", "text") supports_gradient_checkpointing = True _no_split_modules = ["VideoLlavaVisionAttention"] _skip_keys_device_placement = "past_key_values" diff --git a/src/transformers/models/vilt/modeling_vilt.py b/src/transformers/models/vilt/modeling_vilt.py index fc4d226eb098..5fc0fe2b9e15 100755 --- a/src/transformers/models/vilt/modeling_vilt.py +++ b/src/transformers/models/vilt/modeling_vilt.py @@ -512,7 +512,7 @@ def forward( class ViltPreTrainedModel(PreTrainedModel): config: ViltConfig base_model_prefix = "vilt" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _no_split_modules = ["ViltEmbeddings", "ViltSelfAttention"] diff --git a/src/transformers/models/vipllava/modeling_vipllava.py b/src/transformers/models/vipllava/modeling_vipllava.py index 1f1d5b04b5a5..dcd50dcb02b6 100644 --- a/src/transformers/models/vipllava/modeling_vipllava.py +++ b/src/transformers/models/vipllava/modeling_vipllava.py @@ -114,7 +114,7 @@ def forward(self, hidden_states): class VipLlavaPreTrainedModel(PreTrainedModel): config: VipLlavaConfig base_model_prefix = "model" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True 
_skip_keys_device_placement = "past_key_values" diff --git a/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py b/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py index 5bf7b956cad3..d91633ed8809 100644 --- a/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py +++ b/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py @@ -65,7 +65,7 @@ class VisionEncoderDecoderModel(PreTrainedModel, GenerationMixin): config: VisionEncoderDecoderConfig base_model_prefix = "vision_encoder_decoder" main_input_name = "pixel_values" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True _supports_flash_attn = True _supports_sdpa = True diff --git a/src/transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py b/src/transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py index 52f1144bf874..ee1ddfa0f6e4 100755 --- a/src/transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py +++ b/src/transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py @@ -47,7 +47,7 @@ def clip_loss(similarity: torch.Tensor) -> torch.Tensor: class VisionTextDualEncoderModel(PreTrainedModel): config: VisionTextDualEncoderConfig base_model_prefix = "vision_text_dual_encoder" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") _supports_flash_attn = True _supports_sdpa = True diff --git a/src/transformers/models/visual_bert/modeling_visual_bert.py b/src/transformers/models/visual_bert/modeling_visual_bert.py index 0ce9ee4b8a12..3364428a0cf2 100755 --- a/src/transformers/models/visual_bert/modeling_visual_bert.py +++ b/src/transformers/models/visual_bert/modeling_visual_bert.py @@ -458,7 +458,7 @@ def forward(self, sequence_output, pooled_output): class VisualBertPreTrainedModel(PreTrainedModel): config: VisualBertConfig base_model_prefix = "visual_bert" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True @torch.no_grad() diff --git a/src/transformers/models/vit/modeling_vit.py b/src/transformers/models/vit/modeling_vit.py index 79221145486e..85fc66fe0b76 100644 --- a/src/transformers/models/vit/modeling_vit.py +++ b/src/transformers/models/vit/modeling_vit.py @@ -354,7 +354,7 @@ class ViTPreTrainedModel(PreTrainedModel): config: ViTConfig base_model_prefix = "vit" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True _no_split_modules = ["ViTEmbeddings", "ViTLayer"] _supports_sdpa = True diff --git a/src/transformers/models/vit_mae/modeling_vit_mae.py b/src/transformers/models/vit_mae/modeling_vit_mae.py index 281a142e6f26..7f2a3fb5ff44 100755 --- a/src/transformers/models/vit_mae/modeling_vit_mae.py +++ b/src/transformers/models/vit_mae/modeling_vit_mae.py @@ -522,7 +522,7 @@ class ViTMAEPreTrainedModel(PreTrainedModel): config: ViTMAEConfig base_model_prefix = "vit" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True _supports_sdpa = True _supports_flash_attn = True diff --git a/src/transformers/models/vit_msn/modeling_vit_msn.py b/src/transformers/models/vit_msn/modeling_vit_msn.py index 21e702970dc8..b0e6b44491a8 100644 --- a/src/transformers/models/vit_msn/modeling_vit_msn.py +++ 
b/src/transformers/models/vit_msn/modeling_vit_msn.py @@ -357,7 +357,7 @@ class ViTMSNPreTrainedModel(PreTrainedModel): config: ViTMSNConfig base_model_prefix = "vit" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True _no_split_modules = ["ViTMSNAttention", "ViTMSNSdpaAttention"] _supports_sdpa = True diff --git a/src/transformers/models/vitdet/modeling_vitdet.py b/src/transformers/models/vitdet/modeling_vitdet.py index c8aac7a89910..ba255295dac5 100644 --- a/src/transformers/models/vitdet/modeling_vitdet.py +++ b/src/transformers/models/vitdet/modeling_vitdet.py @@ -576,7 +576,7 @@ class VitDetPreTrainedModel(PreTrainedModel): config: VitDetConfig base_model_prefix = "vitdet" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True _no_split_modules = [] diff --git a/src/transformers/models/vitmatte/modeling_vitmatte.py b/src/transformers/models/vitmatte/modeling_vitmatte.py index 31be5264a81d..dfa408e12633 100644 --- a/src/transformers/models/vitmatte/modeling_vitmatte.py +++ b/src/transformers/models/vitmatte/modeling_vitmatte.py @@ -55,7 +55,7 @@ class ImageMattingOutput(ModelOutput): class VitMattePreTrainedModel(PreTrainedModel): config: VitMatteConfig main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True _no_split_modules = [] diff --git a/src/transformers/models/vitpose/modeling_vitpose.py b/src/transformers/models/vitpose/modeling_vitpose.py index 2a391f119560..b2d24979625e 100644 --- a/src/transformers/models/vitpose/modeling_vitpose.py +++ b/src/transformers/models/vitpose/modeling_vitpose.py @@ -64,7 +64,7 @@ class VitPosePreTrainedModel(PreTrainedModel): config: VitPoseConfig base_model_prefix = "vit" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True @torch.no_grad() diff --git a/src/transformers/models/vitpose_backbone/modeling_vitpose_backbone.py b/src/transformers/models/vitpose_backbone/modeling_vitpose_backbone.py index a615e04f6b0c..52a8c78a267b 100644 --- a/src/transformers/models/vitpose_backbone/modeling_vitpose_backbone.py +++ b/src/transformers/models/vitpose_backbone/modeling_vitpose_backbone.py @@ -349,7 +349,7 @@ class VitPoseBackbonePreTrainedModel(PreTrainedModel): config: VitPoseBackboneConfig base_model_prefix = "vit" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True _no_split_modules = ["VitPoseBackboneEmbeddings", "VitPoseBackboneLayer"] _supports_sdpa = True diff --git a/src/transformers/models/voxtral/modeling_voxtral.py b/src/transformers/models/voxtral/modeling_voxtral.py index 9f3321c6fa48..c2edced1a119 100644 --- a/src/transformers/models/voxtral/modeling_voxtral.py +++ b/src/transformers/models/voxtral/modeling_voxtral.py @@ -220,7 +220,7 @@ def forward( class VoxtralPreTrainedModel(PreTrainedModel): config: VoxtralConfig base_model_prefix = "model" - input_modalities = ["audio", "text"] + input_modalities = ("audio", "text") supports_gradient_checkpointing = True _no_split_modules = None _skip_keys_device_placement = "past_key_values" diff --git a/src/transformers/models/whisper/modeling_whisper.py b/src/transformers/models/whisper/modeling_whisper.py index c143e7a2ec8f..f32d984e5899 100644 --- a/src/transformers/models/whisper/modeling_whisper.py +++ 
b/src/transformers/models/whisper/modeling_whisper.py @@ -530,7 +530,7 @@ class WhisperPreTrainedModel(PreTrainedModel): config: WhisperConfig base_model_prefix = "model" main_input_name = "input_features" - input_modalities = ["audio", "text"] + input_modalities = ("audio", "text") supports_gradient_checkpointing = True _no_split_modules = ["WhisperEncoderLayer", "WhisperDecoderLayer"] _supports_flash_attn = True diff --git a/src/transformers/models/x_clip/modeling_x_clip.py b/src/transformers/models/x_clip/modeling_x_clip.py index 8b0695d1d140..81f540460228 100644 --- a/src/transformers/models/x_clip/modeling_x_clip.py +++ b/src/transformers/models/x_clip/modeling_x_clip.py @@ -502,7 +502,7 @@ def forward( class XCLIPPreTrainedModel(PreTrainedModel): config: XCLIPConfig base_model_prefix = "x_clip" - input_modalities = ["image", "text"] + input_modalities = ("image", "text") supports_gradient_checkpointing = True @torch.no_grad() @@ -714,7 +714,7 @@ def forward( class XCLIPTextModel(XCLIPPreTrainedModel): config: XCLIPTextConfig - input_modalities = "text" + input_modalities = ("text",) def __init__(self, config: XCLIPTextConfig): super().__init__(config) @@ -909,7 +909,7 @@ def forward( class XCLIPVisionModel(XCLIPPreTrainedModel): config: XCLIPVisionConfig main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) def __init__(self, config: XCLIPVisionConfig): super().__init__(config) diff --git a/src/transformers/models/yolos/modeling_yolos.py b/src/transformers/models/yolos/modeling_yolos.py index 77df8447a256..a296174b8b74 100755 --- a/src/transformers/models/yolos/modeling_yolos.py +++ b/src/transformers/models/yolos/modeling_yolos.py @@ -433,7 +433,7 @@ class YolosPreTrainedModel(PreTrainedModel): config: YolosConfig base_model_prefix = "vit" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True _no_split_modules = [] _supports_sdpa = True diff --git a/src/transformers/models/zoedepth/modeling_zoedepth.py b/src/transformers/models/zoedepth/modeling_zoedepth.py index 8a23f8380375..6faabf639bd3 100644 --- a/src/transformers/models/zoedepth/modeling_zoedepth.py +++ b/src/transformers/models/zoedepth/modeling_zoedepth.py @@ -1208,7 +1208,7 @@ class ZoeDepthPreTrainedModel(PreTrainedModel): config: ZoeDepthConfig base_model_prefix = "zoedepth" main_input_name = "pixel_values" - input_modalities = "image" + input_modalities = ("image",) supports_gradient_checkpointing = True diff --git a/src/transformers/pipelines/__init__.py b/src/transformers/pipelines/__init__.py index 5b8e3f6b221c..573a102d9fc3 100755 --- a/src/transformers/pipelines/__init__.py +++ b/src/transformers/pipelines/__init__.py @@ -45,6 +45,7 @@ is_torch_available, logging, ) +from .any_to_any import AnyToAnyPipeline from .audio_classification import AudioClassificationPipeline from .automatic_speech_recognition import AutomaticSpeechRecognitionPipeline from .base import ( @@ -107,6 +108,7 @@ AutoModelForKeypointMatching, AutoModelForMaskedLM, AutoModelForMaskGeneration, + AutoModelForMultimodalLM, AutoModelForObjectDetection, AutoModelForQuestionAnswering, AutoModelForSemanticSegmentation, @@ -328,6 +330,17 @@ "default": {"model": ("magic-leap-community/superglue_outdoor", "f4041f8")}, "type": "image", }, + "any-to-any": { + "impl": AnyToAnyPipeline, + "tf": (), + "pt": (AutoModelForMultimodalLM,) if is_torch_available() else (), + "default": { + "model": { + "pt": ("google/gemma-3n-E4B-it", "c1221e9"), + } 
+ }, + "type": "multimodal", + }, } PIPELINE_REGISTRY = PipelineRegistry(supported_tasks=SUPPORTED_TASKS, task_aliases=TASK_ALIASES) @@ -433,6 +446,8 @@ def clean_custom_task(task_info): @overload def pipeline(task: Literal[None], model: Optional[Union[str, "PreTrainedModel"]] = None, config: Optional[Union[str, PreTrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, processor: Optional[Union[str, ProcessorMixin]] = None, revision: Optional[str] = None, use_fast: bool = True, token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map: Optional[Union[str, dict[str, Union[int, str]]]] = None, dtype: Optional[Union[str, "torch.dtype"]] = "auto", trust_remote_code: Optional[bool] = None, model_kwargs: Optional[dict[str, Any]] = None, pipeline_class: Optional[Any] = None, **kwargs: Any) -> Pipeline: ... @overload +def pipeline(task: Literal["any-to-any"], model: Optional[Union[str, "PreTrainedModel"]] = None, config: Optional[Union[str, PreTrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, processor: Optional[Union[str, ProcessorMixin]] = None, revision: Optional[str] = None, use_fast: bool = True, token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map: Optional[Union[str, dict[str, Union[int, str]]]] = None, dtype: Optional[Union[str, "torch.dtype"]] = "auto", trust_remote_code: Optional[bool] = None, model_kwargs: Optional[dict[str, Any]] = None, pipeline_class: Optional[Any] = None, **kwargs: Any) -> AnyToAnyPipeline: ... +@overload def pipeline(task: Literal["audio-classification"], model: Optional[Union[str, "PreTrainedModel"]] = None, config: Optional[Union[str, PreTrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, processor: Optional[Union[str, ProcessorMixin]] = None, revision: Optional[str] = None, use_fast: bool = True, token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map: Optional[Union[str, dict[str, Union[int, str]]]] = None, dtype: Optional[Union[str, "torch.dtype"]] = "auto", trust_remote_code: Optional[bool] = None, model_kwargs: Optional[dict[str, Any]] = None, pipeline_class: Optional[Any] = None, **kwargs: Any) -> AudioClassificationPipeline: ... 
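For reviewers who want to sanity-check the new task registration without running generation, a minimal sketch is shown below (it assumes the existing `PIPELINE_REGISTRY.get_supported_tasks()` and `check_task()` helpers keep their current behavior; nothing here is part of the diff itself):

```python
from transformers.pipelines import PIPELINE_REGISTRY
from transformers.pipelines.any_to_any import AnyToAnyPipeline

# The new task should now be listed alongside the existing ones.
assert "any-to-any" in PIPELINE_REGISTRY.get_supported_tasks()

# check_task resolves a task name to its registered definition.
normalized_task, targeted_task, _ = PIPELINE_REGISTRY.check_task("any-to-any")
print(normalized_task)                            # "any-to-any"
print(targeted_task["type"])                      # "multimodal"
print(targeted_task["impl"] is AnyToAnyPipeline)  # True
```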
@overload def pipeline(task: Literal["automatic-speech-recognition"], model: Optional[Union[str, "PreTrainedModel"]] = None, config: Optional[Union[str, PreTrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, processor: Optional[Union[str, ProcessorMixin]] = None, revision: Optional[str] = None, use_fast: bool = True, token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map: Optional[Union[str, dict[str, Union[int, str]]]] = None, dtype: Optional[Union[str, "torch.dtype"]] = "auto", trust_remote_code: Optional[bool] = None, model_kwargs: Optional[dict[str, Any]] = None, pipeline_class: Optional[Any] = None, **kwargs: Any) -> AutomaticSpeechRecognitionPipeline: ... diff --git a/src/transformers/pipelines/any_to_any.py b/src/transformers/pipelines/any_to_any.py new file mode 100644 index 000000000000..e5febf875d7e --- /dev/null +++ b/src/transformers/pipelines/any_to_any.py @@ -0,0 +1,505 @@ +# coding=utf-8 +# Copyright 2025 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import enum +from typing import Any, Optional, Union, overload + +import numpy as np + +from ..audio_utils import AudioInput +from ..generation import GenerationConfig +from ..image_utils import ImageInput +from ..processing_utils import ProcessingKwargs, Unpack +from ..utils import ( + add_end_docstrings, + is_torch_available, + is_vision_available, + logging, + requires_backends, +) +from ..video_utils import VideoInput +from .base import Pipeline, build_pipeline_init_args + + +if is_torch_available(): + import torch + + from ..models.auto.modeling_auto import MODEL_FOR_MULTIMODAL_LM_MAPPING_NAMES + from .pt_utils import KeyDataset + +if is_vision_available(): + from PIL import Image + +logger = logging.get_logger(__name__) + + +class ReturnType(enum.Enum): + TENSORS = 0 + NEW_TEXT = 1 + FULL_TEXT = 2 + + +class Chat: + """This class is intended to just be used internally in this pipeline and not exposed to users. We convert chats + to this format because the rest of the pipeline code tends to assume that lists of messages are + actually a batch of samples rather than messages in the same conversation.""" + + def __init__(self, messages: list[dict]): + for message in messages: + if not ("role" in message and "content" in message): + raise ValueError("When passing chat dicts as input, each dict must have a 'role' and 'content' key.") + self.messages = messages + + +@add_end_docstrings(build_pipeline_init_args(has_processor=True)) +class AnyToAnyPipeline(Pipeline): + """ + Multimodal Generation pipeline using an `AutoModelForMultimodalLM`. 
This pipeline generates text given any + combination of multimodal data and text. When the underlying model is a conversational model, it can also + accept one or more chats, in which case the pipeline will operate in chat mode and will continue the + chat(s) by adding its response(s). Each chat takes the form of a list of dicts, where each dict contains + "role" and "content" keys. + + Unless the model you're using explicitly sets these generation parameters in its configuration files + (`generation_config.json`), the following default values will be used: + - max_new_tokens: 256 + + Example: + + ```python + >>> from transformers import pipeline + + >>> pipe = pipeline(task="any-to-any", model="google/gemma-3n-E4B-it") + >>> pipe("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", text="A photo of") + [{'generated_text': 'a photo of two birds'}] + ``` + + ```python + >>> from transformers import pipeline + + >>> pipe = pipeline("any-to-any", model="google/gemma-3n-E4B-it") + >>> messages = [ + >>> { + >>> "role": "user", + >>> "content": [ + >>> { + >>> "type": "image", + >>> "url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg", + >>> }, + >>> {"type": "text", "text": "Describe this image."}, + >>> ], + >>> }, + >>> { + >>> "role": "assistant", + >>> "content": [ + >>> {"type": "text", "text": "There is a dog and"}, + >>> ], + >>> }, + >>> ] + >>> pipe(text=messages, max_new_tokens=20, return_full_text=False) + [{'input_text': [{'role': 'user', + 'content': [{'type': 'image', + 'url': 'https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg'}, + {'type': 'text', 'text': 'Describe this image.'}]}, + {'role': 'assistant', + 'content': [{'type': 'text', 'text': 'There is a dog and'}]}], + 'generated_text': ' a person in the image. The dog is sitting on the sand, and the person is sitting on'}] + ``` + + Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) + + This multimodal pipeline can currently be loaded from pipeline() using the following task identifier: + "any-to-any". + + See the list of available models on + [huggingface.co/models](https://huggingface.co/models?pipeline_tag=any-to-any).
+ """ + + _load_processor = True + _load_image_processor = False + _load_feature_extractor = False + _load_tokenizer = False + + _pipeline_calls_generate = True + # Make sure the docstring is updated when the default generation config is changed + _default_generation_config = GenerationConfig( + max_new_tokens=256, + ) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + if "image" in self.model.input_modalities or "video" in self.model.input_modalities: + requires_backends(self, "vision") + requires_backends(self, "torchvision") + if "audio" in self.model.input_modalities: + requires_backends(self, "librosa") + self.check_model_type(MODEL_FOR_MULTIMODAL_LM_MAPPING_NAMES) + + def _sanitize_parameters( + self, + max_new_tokens=None, + generate_kwargs=None, + timeout=None, + return_full_text=None, + return_tensors=None, + return_type=None, + clean_up_tokenization_spaces=None, + stop_sequence=None, + continue_final_message=None, + skip_special_tokens=None, + generation_mode=None, + **kwargs: Unpack[ProcessingKwargs], + ): + forward_kwargs = {} + preprocess_params = {} + postprocess_params = {} + + # Preprocess params + preprocess_params.update(kwargs) + if timeout is not None: + preprocess_params["timeout"] = timeout + if continue_final_message is not None: + preprocess_params["continue_final_message"] = continue_final_message + + # Forward kwargs + forward_kwargs["generate_kwargs"] = generate_kwargs or {} + if generation_mode is not None and generation_mode != "text": + forward_kwargs["generate_kwargs"]["generation_mode"] = generation_mode + if kwargs.get("load_audio_from_video"): + forward_kwargs["generate_kwargs"]["use_audio_in_video"] = True + if stop_sequence is not None: + if isinstance(stop_sequence, str): + stop_sequence = [stop_sequence] + forward_kwargs["generate_kwargs"]["stop_strings"] = stop_sequence + forward_kwargs["generate_kwargs"]["tokenizer"] = self.processor.tokenizer + + if max_new_tokens is not None: + if generate_kwargs is not None and "max_new_tokens" in generate_kwargs: + raise ValueError( + "'max_new_tokens' is defined twice, once in 'generate_kwargs' and " + "once as a direct argument. Please use only one." + ) + forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens + + if return_full_text is not None and return_type is None: + if return_tensors is not None: + raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`") + return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT + elif return_tensors is not None and return_type is None: + return_type = ReturnType.TENSORS + # We don't want to set the global default to FULLTEXT at init time. That is why + # `_postprocess_params` is checked before setting the default value + elif return_type is None and generation_mode in [None, "text"] and hasattr(self, "_postprocess_params"): + return_type = ReturnType.FULL_TEXT + + # Postprocess params + if generation_mode not in [None, "text"] and return_type is not None: + raise ValueError( + f"`return_type` cannot be set to {return_type} when generation_mode={generation_mode}. 
" + "Set `return_type=None` or generation_mode='text'" + ) + if generation_mode not in [None, "text", "image", "audio"]: + raise ValueError( + f"`generation_mode` can be only one of the `text`, `audio`, `image` but got generation_mode[={generation_mode}]" + ) + elif generation_mode is not None and generation_mode not in self.model.output_modalities: + raise ValueError( + f"`generation_mode={generation_mode}` is not supported for {self.model.__class__.__name__}. " + f"The model can only output the following modalities: {self.model.output_modalities}" + ) + + if return_type is not None: + postprocess_params["return_type"] = return_type + if continue_final_message is not None: + postprocess_params["continue_final_message"] = continue_final_message + if clean_up_tokenization_spaces is not None: + postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces + if skip_special_tokens is not None: + postprocess_params["skip_special_tokens"] = skip_special_tokens + postprocess_params["generation_mode"] = generation_mode + return preprocess_params, forward_kwargs, postprocess_params + + @overload + def __call__( + self, + text: Optional[str] = None, + images: Optional[Union[str, "Image.Image"]] = None, + videos: Optional[Union[str, "np.ndarray", "torch.Tensor"]] = None, + audio: Optional[Union[str, "np.ndarray"]] = None, + **kwargs: Any, + ) -> list[dict[str, Any]]: ... + + @overload + def __call__( + self, + text: Optional[list[str]] = None, + images: Optional[Union[list[str], list["Image.Image"]]] = None, + videos: Optional[Union[list[str], list["np.ndarray"], list["torch.Tensor"]]] = None, + audio: Optional[Union[list[str], list["np.ndarray"]]] = None, + **kwargs: Any, + ) -> list[list[dict[str, Any]]]: ... + + def __call__( + self, + text: Union[str, list[str], list[dict]], + images: Optional[ + Union[ + str, + list[str], + list[list[str]], + ImageInput, + ] + ] = None, + videos: Optional[Union[str, list[str], VideoInput]] = None, + audio: Optional[Union[str, list[str], AudioInput]] = None, + **kwargs, + ) -> Union[list[dict[str, Any]], list[list[dict[str, Any]]]]: + """ + Generate a text given text and optionally multimodal data passed as inputs. + + Args: + text (`str`, `list[str]`, `list[dict]`): + The text to be used for generation. If a list of strings is passed, the length of the list should be + the same as the number of images. Text can also follow the chat format: a list of dictionaries where + each dictionary represents a message in a conversation. Each dictionary should have two keys: 'role' + and 'content'. 'role' should be one of 'user', 'system' or 'assistant'. 'content' should be a list of + dictionary containing the text of the message and the type of the message. + images (`str`, `list[str]`, `ImageInput`): + The pipeline handles three types of images: + + - A string containing a HTTP(s) link pointing to an image + - A string containing a local path to an image + - An image loaded in PIL directly + + The pipeline accepts either a single image or a batch of images. Finally, this pipeline also supports + the chat format (see `text`) containing images and text in this argument. + videos (`str`, `list[str]`, `VideoInput`): + The pipeline handles three types of videos: + + - A string containing a HTTP(s) link pointing to a video + - A string containing a local path to a video + - A video loaded and decoded to array format + + The pipeline accepts either a single video or a batch of videos. 
Finally, this pipeline also supports + the chat format (see `text`) containing videos and text in this argument. + audio (`str`, `list[str]`, `AudioInput`): + The pipeline handles three types of audio inputs: + + - A string containing an HTTP(s) link pointing to an audio + - A string containing a local path to an audio + - An audio loaded as a numpy array + + The pipeline accepts either a single audio or a batch of audios. Finally, this pipeline also supports + the chat format (see `text`) containing audios and text in this argument. + return_tensors (`bool`, *optional*, defaults to `False`): + Returns the tensors of predictions (as token indices) in the outputs. If set to + `True`, the decoded text is not returned. + return_text (`bool`, *optional*): + Returns the decoded texts in the outputs. + return_full_text (`bool`, *optional*, defaults to `True`): + If set to `False` only added text is returned, otherwise the full text is returned. Cannot be + specified at the same time as `return_text`. + clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`): + Whether or not to clean up the potential extra spaces in the text output. + continue_final_message (`bool`, *optional*): This indicates that you want the model to continue the + last message in the input chat rather than starting a new one, allowing you to "prefill" its response. + By default this is `True` when the final message in the input chat has the `assistant` role and + `False` otherwise, but you can manually override that behaviour by setting this flag. + + Return: + A list or a list of lists of `dict`: Each result comes as a dictionary with the following keys (it cannot + return a combination of both `generated_text` and `generated_token_ids`): + + - **generated_text** (`str`, present when `return_text=True` and `generation_mode="text"`) -- The generated text. + - **generated_audio** (`np.ndarray`, present when `generation_mode="audio"`) -- The generated audio. + - **generated_image** (`PIL.Image.Image`, present when `generation_mode="image"`) -- The generated image. + - **generated_token_ids** (`torch.Tensor`, present when `return_tensors=True` and `generation_mode="text"`) -- The token + ids of the generated text. + - **input_text** (`str`) -- The input text. + """ + if images is None and text is None: + raise ValueError("You must at least provide either text or images.") + + if isinstance(text, (list, tuple, KeyDataset)) and isinstance(text[0], (list, tuple, dict)): + # We have one or more prompts in list-of-dicts format, so this is chat mode + if isinstance(text[0], dict) and "role" in text[0]: + return super().__call__(Chat(text), **kwargs) + elif isinstance(text[0], (list, tuple)) and isinstance(text[0][0], dict) and "role" in text[0][0]: + chats = [Chat(chat) for chat in text] # 🐈 🐈 🐈 + return super().__call__(chats, **kwargs) + + if text is not None and not (isinstance(text, str) or (isinstance(text, list) and isinstance(text[0], str))): + """ + Supports the following formats + - {"text": text, "image": image, "video": video, "audio": audio} + - [{"text": text, "image": image, "video": video, "audio": audio}] + - Generator and datasets + This is a common pattern in other multimodal pipelines, so we support it here as well.
+ """ + return super().__call__(text, **kwargs) + + # encourage the user to use the chat format if supported + if getattr(self.processor, "chat_template", None) is not None: + logger.warning_once( + "The input data was not formatted as a chat with dicts containing 'role' and 'content' keys, even " + "though this model supports chat. Consider using the chat format for better results. For more " + "information, see https://huggingface.co/docs/transformers/en/chat_templating" + ) + + return super().__call__({"text": text, "images": images, "video": videos, "audio": audio}, **kwargs) + + def preprocess(self, inputs=None, timeout=None, continue_final_message=None, **processing_kwargs): + if isinstance(inputs, Chat): + # If the user passes a chat that ends in an assistant message, we treat it as a prefill by default + # because very few models support multiple separate, consecutive assistant messages + if continue_final_message is None: + continue_final_message = inputs.messages[-1]["role"] == "assistant" + + # Handle Mistral tokenizer which does not accept processing kwargs + chat_template_kwargs = {"add_generation_prompt": not continue_final_message, **processing_kwargs} + if self.processor.tokenizer.__class__.__name__ == "MistralCommonTokenizer": + chat_template_kwargs = { + k: v for k, v in chat_template_kwargs.items() if k in ["padding", "truncation", "max_length"] + } + + model_inputs = self.processor.apply_chat_template( + inputs.messages, + continue_final_message=continue_final_message, + return_tensors="pt", + tokenize=True, + return_dict=True, + **chat_template_kwargs, + ).to(dtype=self.dtype) + model_inputs["text"] = inputs + return model_inputs + + # In case we only have text inputs + if isinstance(inputs, (list, tuple, str)): + text = inputs + inputs = {} + else: + inputs = inputs.copy() # avoid in-place changes if users passed dict + text = inputs.pop("text") + + # Feature extractor do not load audio files and expect a decode array + if "audio" in inputs and hasattr(self.processor, "feature_extractor"): + inputs["audio"] = self.processor.feature_extractor.fetch_audio(inputs["audio"]) + + # If batched text inputs, we set padding to True unless specified otherwise + if isinstance(text, (list, tuple)) and len(text) > 1: + processing_kwargs.setdefault("padding", True) + + # Multimodal data is loaded in preprocessors so we pass all ipnuts directly to `self.processor` + model_inputs = self.processor(text=text, **inputs, return_tensors="pt", **processing_kwargs).to( + dtype=self.dtype + ) + model_inputs["text"] = text + return model_inputs + + def _forward(self, model_inputs, generate_kwargs=None): + generate_kwargs = {} if generate_kwargs is None else generate_kwargs + prompt_text = model_inputs.pop("text") + input_ids = model_inputs.get("input_ids", model_inputs.get("decoder_input_ids")) + + # User-defined `generation_config` passed to the pipeline call take precedence + if "generation_config" not in generate_kwargs: + generate_kwargs["generation_config"] = self.generation_config + + generated_sequence = self.model.generate(**model_inputs, **generate_kwargs) + return {"generated_sequence": generated_sequence, "prompt_text": prompt_text, "input_ids": input_ids} + + def postprocess( + self, + model_outputs, + return_type=None, + continue_final_message=None, + skip_special_tokens=None, + **postprocess_kwargs, + ): + input_texts = model_outputs["prompt_text"] + input_texts = [input_texts] if isinstance(input_texts, (str, Chat)) else input_texts + generated_sequence = 
model_outputs["generated_sequence"] + input_ids = model_outputs["input_ids"] + if return_type == ReturnType.TENSORS: + return [ + {"input_text": input_texts[i], "generated_token_ids": generated_sequence[i]} + for i in range(len(input_texts)) + ] + + # Decode inputs and outputs the same way to remove input text from generated text if present + skip_special_tokens = skip_special_tokens if skip_special_tokens is not None else True + generation_mode = postprocess_kwargs["generation_mode"] or "text" + if generation_mode == "image" and hasattr(self.model, "decode_image_tokens"): + generated_sequence = self.model.decode_image_tokens(generated_sequence.to(self.model.device)) + generated_outputs = self.processor.post_process_multimodal_output( + generated_sequence, skip_special_tokens=skip_special_tokens, **postprocess_kwargs + ) + + # Force consistent behavior for including the input text in the output + if return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: + # Remove the input text from the generated text if the generated text starts with the input text + # (accounting for the possibility of a space between the input and generated text) + new_generated_texts = [] + postprocess_kwargs["generation_mode"] = "text" + decoded_inputs = self.processor.post_process_multimodal_output( + input_ids, skip_special_tokens=skip_special_tokens, **postprocess_kwargs + ) + for text_generated, decoded_input in zip(generated_outputs, decoded_inputs): + # There can be added characters before the input text, so we need to find the beginning of the input text in the generated text + index_input_text = text_generated.find(decoded_input) + # Limit the search to 2 residual characters, like spaces or new lines, to avoid removing a large part of the answer + if 0 <= index_input_text <= 2: + # If the input text is found, we remove it + new_generated_texts.append(text_generated[index_input_text + len(decoded_input) :]) + else: + new_generated_texts.append(text_generated) + generated_outputs = new_generated_texts + if return_type == ReturnType.FULL_TEXT: + full_texts = [] + for prompt_text, generated_text in zip(input_texts, generated_outputs): + if isinstance(prompt_text, str): + generated_text = prompt_text + generated_text + elif isinstance(prompt_text, Chat): + if continue_final_message is None: + # If the user passes a chat ending in an assistant message, we treat it as a prefill by + # default because very few models support multiple separate, consecutive assistant messages + continue_final_message = prompt_text.messages[-1]["role"] == "assistant" + if continue_final_message: + # With assistant prefill, concat onto the end of the last message + new_text = dict(prompt_text.messages[-1]["content"][-1].items()) + new_text["text"] += generated_text + generated_text = list(prompt_text.messages)[:-1] + [ + { + "role": prompt_text.messages[-1]["role"], + "content": prompt_text.messages[-1]["content"][:-1] + [new_text], + } + ] + else: + # When we're not starting from a prefill, the output is a new assistant message + generated_text = list(prompt_text.messages) + [ + {"role": "assistant", "content": generated_text} + ] + full_texts.append(generated_text) + generated_outputs = full_texts + + records = [ + { + "input_text": input_text.messages if isinstance(input_text, Chat) else input_text, + f"generated_{generation_mode}": generated_output, + } + for input_text, generated_output in zip(input_texts, generated_outputs) + ] + + return records diff --git a/src/transformers/pipelines/base.py 
b/src/transformers/pipelines/base.py index 1635f379c5d2..58677f862738 100644 --- a/src/transformers/pipelines/base.py +++ b/src/transformers/pipelines/base.py @@ -937,6 +937,17 @@ def __init__( # then we should keep working self.image_processor = self.feature_extractor + def __repr__(self): + pipe_information = { + "model": self.model.__class__.__name__, + "dtype": str(self.dtype).split(".")[-1], + "device": self.device.type, + "input_modalities": self.model.input_modalities, + } + if self.model.can_generate(): + pipe_information["output_modalities"] = self.model.output_modalities + return f"{self.__class__.__name__}: {pipe_information}" + def save_pretrained( self, save_directory: str | os.PathLike, diff --git a/src/transformers/processing_utils.py b/src/transformers/processing_utils.py index 8422294e9773..9d268f93c603 100644 --- a/src/transformers/processing_utils.py +++ b/src/transformers/processing_utils.py @@ -1749,6 +1749,35 @@ def apply_chat_template( return out["input_ids"] return prompt + def post_process_multimodal_output( + self, generated_outputs, skip_special_tokens=True, generation_mode=None, **kwargs + ): + """ + Post-process the output of a multimodal model to return the requested modality output. + If the model cannot generate the requested modality, an error will be raised. + + Args: + generated_outputs (`torch.Tensor` or `np.ndarray`): + The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)` + or `(sequence_length,)`. + skip_special_tokens (`bool`, *optional*, defaults to `True`): + Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method. + generation_mode (`str`, *optional*): + Generation mode indicating which modality to output; can be one of `["text", "image", "audio"]`. + **kwargs: + Additional arguments to be passed to the tokenizer's `batch_decode` method. + + Returns: + `list[str]`: The decoded text. + """ + if generation_mode is not None and generation_mode != "text": + raise ValueError( + f"{self.__class__.__name__} got an unexpected generation_mode={generation_mode}. Supported options are only [`text`]" + ) + return self.post_process_image_text_to_text( + generated_outputs, skip_special_tokens=skip_special_tokens, **kwargs + ) + def post_process_image_text_to_text(self, generated_outputs, skip_special_tokens=True, **kwargs): """ Post-process the output of a vlm to decode the text.
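Processors for models that can emit more than text are expected to override this new hook; the base implementation above only covers the text path. A minimal override sketch is given below (the `MyOmniProcessor` class and its assumption that audio generation returns waveform tensors are illustrative only, not part of this diff):

```python
from transformers.processing_utils import ProcessorMixin


class MyOmniProcessor(ProcessorMixin):  # hypothetical processor, for illustration only
    def post_process_multimodal_output(
        self, generated_outputs, skip_special_tokens=True, generation_mode=None, **kwargs
    ):
        if generation_mode == "audio":
            # Assumption: the model returns waveform tensors in audio mode,
            # so we convert them to numpy arrays instead of decoding token ids.
            return [output.detach().float().cpu().numpy() for output in generated_outputs]
        # Text (and the default `None` mode) falls back to the base implementation,
        # which decodes token ids via `post_process_image_text_to_text`.
        return super().post_process_multimodal_output(
            generated_outputs, skip_special_tokens=skip_special_tokens, generation_mode=generation_mode, **kwargs
        )
```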
diff --git a/src/transformers/tokenization_mistral_common.py b/src/transformers/tokenization_mistral_common.py index 1851b0f183c7..d1385bbcc9a6 100644 --- a/src/transformers/tokenization_mistral_common.py +++ b/src/transformers/tokenization_mistral_common.py @@ -1457,12 +1457,13 @@ def apply_chat_template( def _maybe_adapt_message(message: dict[str, Any]) -> None: """Adapt message to `mistral-common` format and leave validation to `mistral-common`.""" if not isinstance(message, dict): - return + return message maybe_list_content: str | list[dict[str, str | dict[str, Any]]] | None = message.get("content") if not maybe_list_content or isinstance(maybe_list_content, str): - return + return message normalized_content: list[dict[str, str | dict[str, Any]]] = [] + message = message.copy() for content in maybe_list_content: content_type = content.get("type", None) if not content_type: @@ -1498,6 +1499,7 @@ def _maybe_adapt_message(message: dict[str, Any]) -> None: else: normalized_content.append(content) message["content"] = normalized_content + return message outputs = [] images: list[np.ndarray] = [] @@ -1506,7 +1508,7 @@ def _maybe_adapt_message(message: dict[str, Any]) -> None: for conversation in conversations: messages: list[dict[str, str | list[dict[str, str | dict[str, Any]]]]] = [] for message in conversation: - _maybe_adapt_message(message) + message = _maybe_adapt_message(message) messages.append(message) chat_request = ChatCompletionRequest.from_openai( @@ -1758,12 +1760,11 @@ def from_pretrained( if init_inputs: raise ValueError("`init_inputs` are not supported by `MistralCommonTokenizer.from_pretrained`.") - # Handle kwargs and AutoTokenizer case - ignore_subset = {"_from_auto", "trust_remote_code"} - if kwargs and not (set_kwargs := set(kwargs.keys())).issubset(ignore_subset): - raise ValueError( - f"Kwargs {list(set_kwargs - ignore_subset)} are not supported by `MistralCommonTokenizer.from_pretrained`." 
- ) + # Handle kwargs and AutoTokenizer/AutoProcessor case + if kwargs and not set(kwargs.keys()).issubset( + {"trust_remote_code", "_from_pipeline", "_commit_hash", "dtype", "_from_auto"} + ): + raise ValueError(f"Some kwargs in {kwargs} are not supported by `MistralCommonTokenizer.from_pretrained`.") if not os.path.isdir(pretrained_model_name_or_path): tokenizer_path = download_tokenizer_from_hf_hub( diff --git a/tests/models/aya_vision/test_modeling_aya_vision.py b/tests/models/aya_vision/test_modeling_aya_vision.py index ce1975cadd8a..1856ea982a54 100644 --- a/tests/models/aya_vision/test_modeling_aya_vision.py +++ b/tests/models/aya_vision/test_modeling_aya_vision.py @@ -166,6 +166,7 @@ class AyaVisionModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTester pipeline_model_mapping = ( { "image-text-to-text": AyaVisionForConditionalGeneration, + "any-to-any": AyaVisionForConditionalGeneration, } if is_torch_available() else {} diff --git a/tests/models/blip_2/test_modeling_blip_2.py b/tests/models/blip_2/test_modeling_blip_2.py index 8115c2f89ec2..2448d3221cf7 100644 --- a/tests/models/blip_2/test_modeling_blip_2.py +++ b/tests/models/blip_2/test_modeling_blip_2.py @@ -788,6 +788,7 @@ class Blip2ModelTest(ModelTesterMixin, PipelineTesterMixin, GenerationTesterMixi "image-to-text": Blip2ForConditionalGeneration, "visual-question-answering": Blip2ForConditionalGeneration, "image-text-to-text": Blip2ForConditionalGeneration, + "any-to-any": Blip2ForConditionalGeneration, } if is_torch_available() else {} diff --git a/tests/models/chameleon/test_modeling_chameleon.py b/tests/models/chameleon/test_modeling_chameleon.py index 4237db1107d9..50050dd3b469 100644 --- a/tests/models/chameleon/test_modeling_chameleon.py +++ b/tests/models/chameleon/test_modeling_chameleon.py @@ -246,11 +246,12 @@ def prepare_config_and_inputs_for_common(self): @require_torch -class ChameleonVision2SeqModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): +class ChameleonVision2SeqModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (ChameleonModel, ChameleonForConditionalGeneration) if is_torch_available() else () pipeline_model_mapping = ( { "image-text-to-text": ChameleonForConditionalGeneration, + "any-to-any": ChameleonForConditionalGeneration, } if is_torch_available() else {} diff --git a/tests/models/cohere2_vision/test_modeling_cohere2_vision.py b/tests/models/cohere2_vision/test_modeling_cohere2_vision.py index eaffd4368735..0aabcf5f2e6a 100644 --- a/tests/models/cohere2_vision/test_modeling_cohere2_vision.py +++ b/tests/models/cohere2_vision/test_modeling_cohere2_vision.py @@ -153,6 +153,7 @@ class Cohere2ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMi pipeline_model_mapping = ( { "image-text-to-text": Cohere2VisionForConditionalGeneration, + "any-to-any": Cohere2VisionForConditionalGeneration, } if is_torch_available() else {} diff --git a/tests/models/deepseek_vl/test_modeling_deepseek_vl.py b/tests/models/deepseek_vl/test_modeling_deepseek_vl.py index cded2b224c00..9639a25430f4 100644 --- a/tests/models/deepseek_vl/test_modeling_deepseek_vl.py +++ b/tests/models/deepseek_vl/test_modeling_deepseek_vl.py @@ -35,6 +35,7 @@ from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask +from ...test_pipeline_mixin import PipelineTesterMixin if 
is_torch_available(): @@ -127,12 +128,13 @@ def prepare_config_and_inputs_for_common(self): @require_torch -class DeepseekVLModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): +class DeepseekVLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (DeepseekVLModel, DeepseekVLForConditionalGeneration) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": DeepseekVLModel, "image-text-to-text": DeepseekVLForConditionalGeneration, + "any-to-any": DeepseekVLForConditionalGeneration, } if is_torch_available() else {} diff --git a/tests/models/deepseek_vl_hybrid/test_modeling_deepseek_vl_hybrid.py b/tests/models/deepseek_vl_hybrid/test_modeling_deepseek_vl_hybrid.py index 57e74b7af827..e4b74b0620f9 100644 --- a/tests/models/deepseek_vl_hybrid/test_modeling_deepseek_vl_hybrid.py +++ b/tests/models/deepseek_vl_hybrid/test_modeling_deepseek_vl_hybrid.py @@ -35,6 +35,7 @@ from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask +from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): @@ -154,7 +155,7 @@ def prepare_config_and_inputs_for_common(self): @require_torch -class DeepseekVLHybridModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): +class DeepseekVLHybridModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (DeepseekVLHybridModel, DeepseekVLHybridForConditionalGeneration) if is_torch_available() else () ) @@ -162,6 +163,7 @@ class DeepseekVLHybridModelTest(ModelTesterMixin, GenerationTesterMixin, unittes { "feature-extraction": DeepseekVLHybridModel, "image-text-to-text": DeepseekVLHybridForConditionalGeneration, + "any-to-any": DeepseekVLHybridForConditionalGeneration, } if is_torch_available() else {} diff --git a/tests/models/emu3/test_modeling_emu3.py b/tests/models/emu3/test_modeling_emu3.py index 0feaddb02318..5803b0be37b6 100644 --- a/tests/models/emu3/test_modeling_emu3.py +++ b/tests/models/emu3/test_modeling_emu3.py @@ -284,7 +284,11 @@ class Emu3Vision2TextModelTest(ModelTesterMixin, GenerationTesterMixin, Pipeline if is_torch_available() else () ) - pipeline_model_mapping = {} + pipeline_model_mapping = ( + {"any-to-any": Emu3ForConditionalGeneration, "image-text-to-text": Emu3ForConditionalGeneration} + if is_torch_available() + else {} + ) def setUp(self): self.model_tester = Emu3Vision2TextModelTester(self) diff --git a/tests/models/florence2/test_modeling_florence2.py b/tests/models/florence2/test_modeling_florence2.py index 2fe7ff21bd8b..c9ee2e46f873 100644 --- a/tests/models/florence2/test_modeling_florence2.py +++ b/tests/models/florence2/test_modeling_florence2.py @@ -36,6 +36,7 @@ from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor +from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): @@ -221,7 +222,9 @@ def test_sdpa_can_dispatch_on_flash(self): @require_torch -class Florence2ForConditionalGenerationModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): +class Florence2ForConditionalGenerationModelTest( + ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase +): """ Model tester for 
`Florence2ForConditionalGeneration`. """ @@ -231,6 +234,7 @@ class Florence2ForConditionalGenerationModelTest(ModelTesterMixin, GenerationTes { "image-to-text": Florence2ForConditionalGeneration, "image-text-to-text": Florence2ForConditionalGeneration, + "any-to-any": Florence2ForConditionalGeneration, } if is_torch_available() else {} diff --git a/tests/models/git/test_modeling_git.py b/tests/models/git/test_modeling_git.py index 19831b427181..28b4b134e59c 100644 --- a/tests/models/git/test_modeling_git.py +++ b/tests/models/git/test_modeling_git.py @@ -376,6 +376,7 @@ class GitModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, "image-to-text": GitForCausalLM, "text-generation": GitForCausalLM, "image-text-to-text": GitForCausalLM, + "any-to-any": GitForCausalLM, } if is_torch_available() else {} diff --git a/tests/models/got_ocr2/test_modeling_got_ocr2.py b/tests/models/got_ocr2/test_modeling_got_ocr2.py index 79997080f1f5..7cf71c7c0234 100644 --- a/tests/models/got_ocr2/test_modeling_got_ocr2.py +++ b/tests/models/got_ocr2/test_modeling_got_ocr2.py @@ -149,6 +149,7 @@ class GotOcr2ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMi { "image-to-text": GotOcr2ForConditionalGeneration, "image-text-to-text": GotOcr2ForConditionalGeneration, + "any-to-any": GotOcr2ForConditionalGeneration, } if is_torch_available() else {} diff --git a/tests/models/granite_speech/test_modeling_granite_speech.py b/tests/models/granite_speech/test_modeling_granite_speech.py index 66a0545c0ea7..86feec01543b 100644 --- a/tests/models/granite_speech/test_modeling_granite_speech.py +++ b/tests/models/granite_speech/test_modeling_granite_speech.py @@ -43,6 +43,7 @@ floats_tensor, ids_tensor, ) +from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): @@ -211,13 +212,15 @@ def create_and_check_granite_speech_model_fp16_autocast_forward( @require_torch -class GraniteSpeechForConditionalGenerationModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): +class GraniteSpeechForConditionalGenerationModelTest( + ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase +): """ Model tester for `GraniteSpeechForConditionalGeneration`. 
""" all_model_classes = (GraniteSpeechForConditionalGeneration,) if is_torch_available() else () - + pipeline_model_mapping = {"any-to-any": GraniteSpeechForConditionalGeneration} if is_torch_available() else {} _is_composite = True def setUp(self): diff --git a/tests/models/idefics/test_modeling_idefics.py b/tests/models/idefics/test_modeling_idefics.py index eba24af6f51b..2148861e2ad3 100644 --- a/tests/models/idefics/test_modeling_idefics.py +++ b/tests/models/idefics/test_modeling_idefics.py @@ -319,7 +319,11 @@ def prepare_pixel_values(self): class IdeficsModelTest(ModelTesterMixin, PipelineTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (IdeficsModel, IdeficsForVisionText2Text) if is_torch_available() else () pipeline_model_mapping = ( - {"feature-extraction": IdeficsModel, "image-text-to-text": IdeficsForVisionText2Text} + { + "feature-extraction": IdeficsModel, + "image-text-to-text": IdeficsForVisionText2Text, + "any-to-any": IdeficsForVisionText2Text, + } if is_torch_available() else {} ) diff --git a/tests/models/janus/test_modeling_janus.py b/tests/models/janus/test_modeling_janus.py index 72e2a72c265f..84fd2e13cf6b 100644 --- a/tests/models/janus/test_modeling_janus.py +++ b/tests/models/janus/test_modeling_janus.py @@ -45,6 +45,7 @@ from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor +from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): @@ -190,10 +191,14 @@ def prepare_config_and_inputs_for_common(self): @require_torch -class JanusVisionText2TextModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): +class JanusVisionText2TextModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (JanusModel, JanusForConditionalGeneration) if is_torch_available() else () all_generative_model_classes = (JanusForConditionalGeneration,) if is_torch_available() else () - + pipeline_model_mapping = ( + {"any-to-any": JanusForConditionalGeneration, "image-text-to-text": JanusForConditionalGeneration} + if is_torch_available() + else {} + ) _is_composite = True def setUp(self): diff --git a/tests/models/kyutai_speech_to_text/test_modeling_kyutai_speech_to_text.py b/tests/models/kyutai_speech_to_text/test_modeling_kyutai_speech_to_text.py index 83ff0d9ec223..9265375731f0 100644 --- a/tests/models/kyutai_speech_to_text/test_modeling_kyutai_speech_to_text.py +++ b/tests/models/kyutai_speech_to_text/test_modeling_kyutai_speech_to_text.py @@ -248,6 +248,7 @@ class KyutaiSpeechToTextModelTest(ModelTesterMixin, GenerationTesterMixin, Pipel { "feature-extraction": KyutaiSpeechToTextModel, "automatic-speech-recognition": KyutaiSpeechToTextForConditionalGeneration, + "any-to-any": KyutaiSpeechToTextForConditionalGeneration, } if is_torch_available() else {} diff --git a/tests/models/llava/test_modeling_llava.py b/tests/models/llava/test_modeling_llava.py index c8df5a0b93fb..9db92220b888 100644 --- a/tests/models/llava/test_modeling_llava.py +++ b/tests/models/llava/test_modeling_llava.py @@ -42,6 +42,7 @@ from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor +from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): @@ -167,7 +168,9 @@ def prepare_config_and_inputs_for_common(self): 
@require_torch -class LlavaForConditionalGenerationModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): +class LlavaForConditionalGenerationModelTest( + ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase +): """ Model tester for `LlavaForConditionalGeneration`. """ @@ -181,7 +184,11 @@ class LlavaForConditionalGenerationModelTest(ModelTesterMixin, GenerationTesterM else () ) pipeline_model_mapping = ( - {"image-to-text": LlavaForConditionalGeneration, "image-text-to-text": LlavaForConditionalGeneration} + { + "image-to-text": LlavaForConditionalGeneration, + "image-text-to-text": LlavaForConditionalGeneration, + "any-to-any": LlavaForConditionalGeneration, + } if is_torch_available() else {} ) diff --git a/tests/models/llava_next/test_modeling_llava_next.py b/tests/models/llava_next/test_modeling_llava_next.py index 547a37957be9..cb6a6c934803 100644 --- a/tests/models/llava_next/test_modeling_llava_next.py +++ b/tests/models/llava_next/test_modeling_llava_next.py @@ -191,8 +191,14 @@ class LlavaNextForConditionalGenerationModelTest(ModelTesterMixin, GenerationTes if is_torch_available() else () ) - pipeline_model_mapping = {"image-text-to-text": LlavaNextForConditionalGeneration} if is_torch_available() else {} - + pipeline_model_mapping = ( + { + "image-text-to-text": LlavaNextForConditionalGeneration, + "any-to-any": LlavaNextForConditionalGeneration, + } + if is_torch_available() + else {} + ) _is_composite = True def setUp(self): diff --git a/tests/models/llava_onevision/test_modeling_llava_onevision.py b/tests/models/llava_onevision/test_modeling_llava_onevision.py index 0dfa7aa6c7f0..603bd260ad75 100644 --- a/tests/models/llava_onevision/test_modeling_llava_onevision.py +++ b/tests/models/llava_onevision/test_modeling_llava_onevision.py @@ -193,7 +193,12 @@ class LlavaOnevisionForConditionalGenerationModelTest(ModelTesterMixin, Generati else () ) pipeline_model_mapping = ( - {"image-text-to-text": LlavaOnevisionForConditionalGeneration} if is_torch_available() else {} + { + "image-text-to-text": LlavaOnevisionForConditionalGeneration, + "any-to-any": LlavaOnevisionForConditionalGeneration, + } + if is_torch_available() + else {} ) # MP works but offload doesn't work when the MultiheadAttention is offloaded diff --git a/tests/models/ovis2/test_modeling_ovis2.py b/tests/models/ovis2/test_modeling_ovis2.py index 36853299f766..8e6931420390 100644 --- a/tests/models/ovis2/test_modeling_ovis2.py +++ b/tests/models/ovis2/test_modeling_ovis2.py @@ -39,6 +39,7 @@ floats_tensor, ids_tensor, ) +from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): @@ -160,7 +161,7 @@ def prepare_config_and_inputs_for_common(self): @require_torch -class Ovis2ModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): +class Ovis2ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Model tester for `Ovis2ForConditionalGeneration`. 
""" @@ -173,7 +174,11 @@ class Ovis2ModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase) if is_torch_available() else () ) - pipeline_model_mapping = {"image-text-to-text": Ovis2ForConditionalGeneration} if is_torch_available() else {} + pipeline_model_mapping = ( + {"image-text-to-text": Ovis2ForConditionalGeneration, "any-to-any": Ovis2ForConditionalGeneration} + if is_torch_available() + else {} + ) _is_composite = True def setUp(self): diff --git a/tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py b/tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py index e5179f179e30..a2a131fee64f 100644 --- a/tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py +++ b/tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py @@ -49,6 +49,7 @@ floats_tensor, ids_tensor, ) +from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): @@ -249,14 +250,25 @@ def create_and_check_qwenomnithinker_model_fp16_forward(self, config, input_ids, @require_torch -class Qwen2_5OmniThinkerForConditionalGenerationModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): +class Qwen2_5OmniThinkerForConditionalGenerationModelTest( + ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase +): """ Model tester for `Qwen2_5OmniThinkerForConditionalGeneration`. """ all_model_classes = (Qwen2_5OmniThinkerForConditionalGeneration,) if is_torch_available() else () all_generative_model_classes = (Qwen2_5OmniThinkerForConditionalGeneration,) if is_torch_available() else () - + # pipeline_model_mapping = ( + # { + # "any-to-any": Qwen2_5OmniForConditionalGeneration, + # "image-text-to-text": Qwen2_5OmniThinkerForConditionalGeneration, + # } + # if is_torch_available() + # else {} + # ) + # FIXME @raushan Omni tests take ages because the model is big. Try to make it even smaller + pipeline_model_mapping = {} _is_composite = True model_split_percents = [0.5, 0.9] diff --git a/tests/models/qwen2_audio/test_modeling_qwen2_audio.py b/tests/models/qwen2_audio/test_modeling_qwen2_audio.py index 25e1200332de..4df16b9f6f4b 100644 --- a/tests/models/qwen2_audio/test_modeling_qwen2_audio.py +++ b/tests/models/qwen2_audio/test_modeling_qwen2_audio.py @@ -37,6 +37,7 @@ from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor +from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): @@ -134,13 +135,15 @@ def prepare_config_and_inputs_for_common(self): @require_torch -class Qwen2AudioForConditionalGenerationModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): +class Qwen2AudioForConditionalGenerationModelTest( + ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase +): """ Model tester for `Qwen2AudioForConditionalGeneration`. 
""" all_model_classes = (Qwen2AudioForConditionalGeneration,) if is_torch_available() else () - + pipeline_model_mapping = {"any-to-any": Qwen2AudioForConditionalGeneration} if is_torch_available() else {} _is_composite = True def setUp(self): diff --git a/tests/models/qwen2_vl/test_modeling_qwen2_vl.py b/tests/models/qwen2_vl/test_modeling_qwen2_vl.py index e06c2872ed5d..d9dcc335cdba 100644 --- a/tests/models/qwen2_vl/test_modeling_qwen2_vl.py +++ b/tests/models/qwen2_vl/test_modeling_qwen2_vl.py @@ -46,6 +46,7 @@ floats_tensor, ids_tensor, ) +from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): @@ -165,7 +166,7 @@ def prepare_config_and_inputs_for_common(self): @require_torch -class Qwen2VLModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): +class Qwen2VLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Model tester for `Qwen2VLForConditionalGeneration`. """ @@ -178,8 +179,10 @@ class Qwen2VLModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCas if is_torch_available() else () ) - pipeline_model_mapping = {"image-text-to-text": Qwen2VLForConditionalGeneration} - + pipeline_model_mapping = { + "image-text-to-text": Qwen2VLForConditionalGeneration, + "any-to-any": Qwen2VLForConditionalGeneration, + } _is_composite = True def setUp(self): diff --git a/tests/models/smolvlm/test_modeling_smolvlm.py b/tests/models/smolvlm/test_modeling_smolvlm.py index 5d2383ded88d..8005b7f88a87 100644 --- a/tests/models/smolvlm/test_modeling_smolvlm.py +++ b/tests/models/smolvlm/test_modeling_smolvlm.py @@ -38,6 +38,7 @@ from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor +from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): @@ -321,15 +322,23 @@ def test_resize_embeddings_untied(self): @require_torch -class SmolVLMForConditionalGenerationModelTest(GenerationTesterMixin, ModelTesterMixin, unittest.TestCase): +class SmolVLMForConditionalGenerationModelTest( + GenerationTesterMixin, ModelTesterMixin, PipelineTesterMixin, unittest.TestCase +): """ Model tester for `SmolVLMForConditionalGeneration`. 
""" all_model_classes = (SmolVLMForConditionalGeneration,) if is_torch_available() else () all_generative_model_classes = (SmolVLMForConditionalGeneration,) if is_torch_available() else () - pipeline_model_mapping = {"image-text-to-text": SmolVLMForConditionalGeneration} if is_torch_available() else () - + pipeline_model_mapping = ( + { + "image-text-to-text": SmolVLMForConditionalGeneration, + "any-to-any": SmolVLMForConditionalGeneration, + } + if is_torch_available() + else () + ) test_resize_embeddings = True def setUp(self): diff --git a/tests/models/voxtral/test_modeling_voxtral.py b/tests/models/voxtral/test_modeling_voxtral.py index 213c28538c11..0cff2a66779b 100644 --- a/tests/models/voxtral/test_modeling_voxtral.py +++ b/tests/models/voxtral/test_modeling_voxtral.py @@ -33,6 +33,7 @@ from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor +from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): @@ -129,14 +130,16 @@ def prepare_config_and_inputs_for_common(self): @require_torch -class VoxtralForConditionalGenerationModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): +class VoxtralForConditionalGenerationModelTest( + ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase +): """ Model tester for `VoxtralForConditionalGeneration`. """ all_model_classes = (VoxtralForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( - {"text-to-speech": VoxtralForConditionalGeneration, "audio-text-to-text": VoxtralForConditionalGeneration} + {"text-to-speech": VoxtralForConditionalGeneration, "any-to-any": VoxtralForConditionalGeneration} if is_torch_available() else {} ) diff --git a/tests/pipelines/test_pipelines_any_to_any.py b/tests/pipelines/test_pipelines_any_to_any.py new file mode 100644 index 000000000000..e047b6393d68 --- /dev/null +++ b/tests/pipelines/test_pipelines_any_to_any.py @@ -0,0 +1,373 @@ +# Copyright 2025 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import unittest + +import numpy as np + +from transformers import MODEL_FOR_MULTIMODAL_LM_MAPPING, is_vision_available +from transformers.pipelines import AnyToAnyPipeline, pipeline +from transformers.testing_utils import ( + is_pipeline_test, + require_librosa, + require_torch, + require_vision, + slow, +) + +from .test_pipelines_common import ANY + + +sys.path.append(".") +from utils.fetch_hub_objects_for_ci import url_to_local_path + + +if is_vision_available(): + import PIL + + +@is_pipeline_test +@require_vision +@require_librosa +@require_torch +class AnyToAnyPipelineTests(unittest.TestCase): + model_mapping = MODEL_FOR_MULTIMODAL_LM_MAPPING + + # We only need `processor` but the Mixin will pass all possible preprocessing classes for a model. 
+    # So we add them all in signature
+    def get_test_pipeline(
+        self, model, tokenizer, processor, image_processor=None, feature_extractor=None, dtype="float32"
+    ):
+        _is_images_supported = hasattr(processor, "image_processor")
+        _is_videos_supported = hasattr(processor, "video_processor")
+        _is_audios_supported = hasattr(processor, "feature_extractor")
+
+        image_token = getattr(processor.tokenizer, "image_token", "")
+        video_token = getattr(processor.tokenizer, "video_token", "")
+        audio_token = getattr(processor.tokenizer, "audio_token", "")
+
+        images_examples = [
+            {
+                "images": "./tests/fixtures/tests_samples/COCO/000000039769.png",
+                "text": f"{image_token}This is a ",
+            },
+            {
+                "images": "./tests/fixtures/tests_samples/COCO/000000039769.png",
+                "text": f"{image_token}Here I see a ",
+            },
+        ]
+
+        videos_examples = [
+            {
+                "videos": url_to_local_path(
+                    "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/Big_Buck_Bunny_720_10s_10MB.mp4"
+                ),
+                "text": f"{video_token}This video shows a ",
+            },
+            {
+                "video": url_to_local_path(
+                    "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/sample_demo_1.mp4"
+                ),
+                "text": f"{video_token}In the video I see a ",
+            },
+        ]
+
+        audio_examples = [
+            {
+                "audio": url_to_local_path(
+                    "https://huggingface.co/datasets/raushan-testing-hf/audio-test/resolve/main/glass-breaking-151256.mp3"
+                ),
+                "text": f"{audio_token}This is sound of a ",
+            },
+            {
+                "audio": url_to_local_path(
+                    "https://huggingface.co/datasets/raushan-testing-hf/audio-test/resolve/main/f2641_0_throatclearing.wav"
+                ),
+                "text": f"{audio_token}Here I hear a ",
+            },
+        ]
+
+        examples = []
+        if _is_images_supported:
+            examples.extend(images_examples)
+        if _is_videos_supported:
+            examples.extend(videos_examples)
+        if _is_audios_supported:
+            examples.extend(audio_examples)
+
+        pipe = AnyToAnyPipeline(model=model, processor=processor, dtype=dtype, max_new_tokens=10)
+
+        return pipe, examples
+
+    def run_pipeline_test(self, pipe, examples):
+        # Single
+        outputs = pipe(examples[0])
+        self.assertEqual(
+            outputs,
+            [
+                {"input_text": ANY(str), "generated_text": ANY(str)},
+            ],
+        )
+
+        # Batched, but limited to the first 2 examples
+        outputs = pipe(examples[:2])
+        self.assertEqual(
+            outputs,
+            [
+                [
+                    {"input_text": ANY(str), "generated_text": ANY(str)},
+                ],
+                [
+                    {"input_text": ANY(str), "generated_text": ANY(str)},
+                ],
+            ],
+        )
+
+        # `generation_mode` raises errors when it doesn't match with other params
+        with self.assertRaises(ValueError):
+            pipe(examples, generation_mode="video")
+
+        with self.assertRaises(ValueError):
+            pipe(examples, generation_mode="audio", return_full_text=True)
+
+        with self.assertRaises(ValueError):
+            pipe(examples, generation_mode="image", return_type=1)
+
+        # Chat template
+        if getattr(pipe.processor, "chat_template", None) is not None:
+            messages = []
+            for example in examples[:2]:
+                example.pop("text")
+                modality_type, modality_data = list(example.items())[0]
+                message = {
+                    "role": "user",
+                    "content": [
+                        {"type": "text", "text": "This is a "},
+                        {"type": modality_type, "path": modality_data},
+                    ],
+                }
+                messages.append([message])
+            outputs = pipe(messages, return_full_text=True, max_new_tokens=10)
+
+            self.assertEqual(
+                outputs,
+                [
+                    [
+                        {"input_text": ANY(str), "generated_text": ANY(str)},
+                    ],
+                    [
+                        {"input_text": ANY(str), "generated_text": ANY(str)},
+                    ],
+                ],
+            )
+
+    @slow
+    def test_small_model_pt_token_text_only(self):
+        pipe = pipeline("any-to-any", model="google/gemma-3n-E4B-it")
+        text = "What is the capital of France? Assistant:"
+
+        outputs = pipe(text=text, generate_kwargs={"do_sample": False})
+        self.assertEqual(
+            outputs,
+            [
+                {
+                    "input_text": "What is the capital of France? Assistant:",
+                    "generated_text": "What is the capital of France? Assistant: The capital of France is Paris.\n",
+                }
+            ],
+        )
+
+        messages = [
+            [
+                {
+                    "role": "user",
+                    "content": [
+                        {"type": "text", "text": "Write a poem on Hugging Face, the company"},
+                    ],
+                },
+            ],
+            [
+                {
+                    "role": "user",
+                    "content": [
+                        {"type": "text", "text": "What is the capital of France?"},
+                    ],
+                },
+            ],
+        ]
+        outputs = pipe(text=messages, generate_kwargs={"do_sample": False})
+        self.assertEqual(
+            outputs,
+            [
+                [
+                    {
+                        "input_text": [
+                            {
+                                "role": "user",
+                                "content": [{"type": "text", "text": "Write a poem on Hugging Face, the company"}],
+                            }
+                        ],
+                        "generated_text": [
+                            {
+                                "role": "user",
+                                "content": [{"type": "text", "text": "Write a poem on Hugging Face, the company"}],
+                            },
+                            {
+                                "role": "assistant",
+                                "content": "A digital embrace, a friendly face,\nHugging Face rises, setting the pace.\nFor AI's heart, a vibrant core,\nOpen source models, and so much more.\n\nFrom transformers deep, a powerful might,\nNLP's future, shining so bright.\nDatasets curated, a treasure trove found,\nFor researchers and builders, on fertile ground.\n\nA community thriving, a collaborative art,\nSharing knowledge, playing a vital part.\nSpaces to showcase, creations unfold,\nStories in code, bravely told.\n\nWith libraries sleek, and tools so refined,\nDemocratizing AI, for all humankind.\nFrom sentiment analysis to text generation's grace,\nHugging Face empowers, at a rapid pace.\n\nA platform of learning, a place to explore,\nUnlocking potential, and asking for more.\nSo let's give a cheer, for this innovative team,\nHugging Face's vision, a beautiful dream. \n",
+                            },
+                        ],
+                    }
+                ],
+                [
+                    {
+                        "input_text": [
+                            {"role": "user", "content": [{"type": "text", "text": "What is the capital of France?"}]}
+                        ],
+                        "generated_text": [
+                            {"role": "user", "content": [{"type": "text", "text": "What is the capital of France?"}]},
+                            {"role": "assistant", "content": "The capital of France is **Paris**. \n"},
+                        ],
+                    }
+                ],
+            ],
+        )
+
+    @slow
+    def test_small_model_pt_token_audio_input(self):
+        pipe = pipeline("any-to-any", model="google/gemma-3n-E4B-it")
+
+        audio_path = url_to_local_path(
+            "https://huggingface.co/datasets/raushan-testing-hf/audio-test/resolve/main/f2641_0_throatclearing.wav"
+        )
+        messages = [
+            {
+                "role": "user",
+                "content": [
+                    {"type": "text", "text": "What do you hear in this audio?"},
+                    {"type": "audio", "url": audio_path},
+                ],
+            },
+        ]
+        outputs = pipe(text=messages, return_type=1, generate_kwargs={"do_sample": False})  # return new text
+        self.assertEqual(
+            outputs,
+            [
+                {
+                    "input_text": [
+                        {
+                            "role": "user",
+                            "content": [
+                                {"type": "text", "text": "What do you hear in this audio?"},
+                                {
+                                    "type": "audio",
+                                    "url": "https://huggingface.co/datasets/raushan-testing-hf/audio-test/resolve/main/f2641_0_throatclearing.wav",
+                                },
+                            ],
+                        }
+                    ],
+                    "generated_text": "user\nWhat do you hear in this audio?\n\n\n\n\nmodel\nThe audio contains the repeated sound of someone **coughing**. It's a fairly consistent, forceful cough throughout the duration.",
+                }
+            ],
+        )
+
+    @slow
+    def test_small_model_pt_token_audio_gen(self):
+        pipe = pipeline("any-to-any", model="Qwen/Qwen2.5-Omni-3B", dtype="bfloat16")
+
+        video_path = url_to_local_path(
+            "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/Cooking_cake.mp4"
+        )
+        messages = [
+            {
+                "role": "user",
+                "content": [
+                    {"type": "text", "text": "Describe this video."},
+                    {"type": "video", "video": video_path},
+                ],
+            },
+        ]
+        outputs = pipe(
+            text=messages,
+            num_frames=16,
+            max_new_tokens=50,
+            load_audio_from_video=True,
+            generate_kwargs={"use_audio_in_video": True, "talker_do_sample": False, "do_sample": False},
+        )
+        self.assertEqual(
+            outputs,
+            [
+                {
+                    "input_text": [
+                        {
+                            "role": "user",
+                            "content": [
+                                {"type": "text", "text": "Describe this video."},
+                                {
+                                    "type": "video",
+                                    "video": "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/Cooking_cake.mp4",
+                                },
+                            ],
+                        }
+                    ],
+                    "generated_text": [
+                        {
+                            "role": "user",
+                            "content": [
+                                {"type": "text", "text": "Describe this video."},
+                                {
+                                    "type": "video",
+                                    "video": "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/Cooking_cake.mp4",
+                                },
+                            ],
+                        },
+                        {
+                            "role": "assistant",
+                            "content": "system\nYou are a helpful assistant.\nuser\nDescribe this video.\nassistant\nThe video begins with a man standing in a kitchen, wearing a black shirt. He is holding a large glass bowl filled with flour and a spoon. The man starts to mix the flour in the bowl, creating a dough. As he mixes, he continues to talk to the camera, explaining the process. The kitchen has wooden cabinets and a white refrigerator in the background. The man's movements are deliberate and focused as he works with the dough. The video ends with the man still mixing the dough in the bowl. Overall, the video provides a clear and detailed demonstration of how to make dough using flour and a spoon.",
+                        },
+                    ],
+                }
+            ],
+        )
+
+        outputs = pipe(text=messages, generation_mode="audio", num_frames=16, max_new_tokens=20)
+
+        self.assertEqual(len(outputs), len(messages))
+        self.assertIsInstance(outputs[0], dict)
+        for out in outputs:
+            self.assertTrue("input_text" in out)
+            self.assertTrue("generated_audio" in out)
+            self.assertIsInstance(out["generated_audio"], np.ndarray)
+
+    @slow
+    def test_small_model_pt_image_gen(self):
+        pipe = pipeline("any-to-any", model="deepseek-community/Janus-Pro-1B")
+
+        messages = [
+            {
+                "role": "user",
+                "content": [
+                    {"type": "text", "text": "A dog running under the rain."},
+                ],
+            },
+        ]
+        outputs = pipe(text=messages, generation_mode="image")
+
+        self.assertEqual(len(outputs), len(messages))
+        self.assertIsInstance(outputs[0], dict)
+        for out in outputs:
+            self.assertTrue("input_text" in out)
+            self.assertTrue("generated_image" in out)
+            self.assertIsInstance(out["generated_image"], PIL.Image.Image)
diff --git a/tests/test_pipeline_mixin.py b/tests/test_pipeline_mixin.py
index 5002a3e9b946..a99445c487c1 100644
--- a/tests/test_pipeline_mixin.py
+++ b/tests/test_pipeline_mixin.py
@@ -60,6 +60,7 @@
)
from transformers.utils import direct_transformers_import, logging

+from .pipelines.test_pipelines_any_to_any import AnyToAnyPipelineTests
from .pipelines.test_pipelines_audio_classification import AudioClassificationPipelineTests
from .pipelines.test_pipelines_automatic_speech_recognition import AutomaticSpeechRecognitionPipelineTests
from .pipelines.test_pipelines_depth_estimation import DepthEstimationPipelineTests
@@ -105,6 +106,7 @@
    "image-to-image": {"test": ImageToImagePipelineTests},
    "image-to-text": {"test": ImageToTextPipelineTests},
    "mask-generation": {"test": MaskGenerationPipelineTests},
+    "any-to-any": {"test": AnyToAnyPipelineTests},
    "object-detection": {"test": ObjectDetectionPipelineTests},
    "question-answering": {"test": QAPipelineTests},
    "summarization": {"test": SummarizationPipelineTests},
@@ -590,6 +592,18 @@ def test_pipeline_image_text_to_text(self):
    def test_pipeline_image_text_to_text_fp16(self):
        self.run_task_tests(task="image-text-to-text", dtype="float16")

+    @is_pipeline_test
+    @require_vision
+    @require_torch
+    def test_pipeline_any_to_any(self):
+        self.run_task_tests(task="any-to-any")
+
+    @is_pipeline_test
+    @require_vision
+    @require_torch
+    def test_pipeline_any_to_any_fp16(self):
+        self.run_task_tests(task="any-to-any", dtype="float16")
+
    @is_pipeline_test
    @require_vision
    def test_pipeline_image_to_text(self):
diff --git a/tests/utils/tiny_model_summary.json b/tests/utils/tiny_model_summary.json
index 6c36448e5aba..134c6df34b18 100644
--- a/tests/utils/tiny_model_summary.json
+++ b/tests/utils/tiny_model_summary.json
@@ -5000,6 +5000,17 @@
        ],
        "sha": "c40765c382515ae627652d60e9077b6478448d48"
    },
+    "Qwen2_5OmniForConditionalGeneration": {
+        "tokenizer_classes": [
+            "Qwen2Tokenizer",
+            "Qwen2TokenizerFast"
+        ],
+        "processor_classes": ["Qwen2_5OmniProcessor"],
+        "model_classes": [
+            "Qwen2_5OmniForConditionalGeneration"
+        ],
+        "sha": "9bc7a812cc447b430acb994f8d42e8e64c7b61f6"
+    },
    "ReformerForMaskedLM": {
        "tokenizer_classes": [
            "ReformerTokenizer",
diff --git a/utils/check_docstrings.py b/utils/check_docstrings.py
index 0f3a976c32d3..66e3d75266b0 100644
--- a/utils/check_docstrings.py
+++ b/utils/check_docstrings.py
@@ -249,6 +249,7 @@ class DecoratedItem:
    "ImageGPTConfig",
    "ImageSegmentationPipeline",
"ImageTextToTextPipeline", + "AnyToAnyPipeline", "ImageToImagePipeline", "ImageToTextPipeline", "InformerConfig", diff --git a/utils/update_metadata.py b/utils/update_metadata.py index e122b64ce849..f4fda1b78cfb 100755 --- a/utils/update_metadata.py +++ b/utils/update_metadata.py @@ -66,6 +66,7 @@ ("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"), ("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"), ("image-text-to-text", "MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES", "AutoModelForImageTextToText"), + ("any-to-any", "MODEL_FOR_MULTIMODAL_LM_MAPPING_NAMES", "AutoModelForMultimodalLM"), ("image-to-image", "MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES", "AutoModelForImageToImage"), ("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"), ("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),