diff --git a/src/transformers/generation/utils.py b/src/transformers/generation/utils.py index 0d2baea6d85f..9cb5cc1558ac 100644 --- a/src/transformers/generation/utils.py +++ b/src/transformers/generation/utils.py @@ -40,13 +40,6 @@ ) from ..integrations.deepspeed import is_deepspeed_zero3_enabled from ..modeling_outputs import CausalLMOutputWithPast, Seq2SeqLMOutput -from ..models.auto import ( - MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING, - MODEL_FOR_CAUSAL_LM_MAPPING, - MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, - MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, - MODEL_FOR_VISION_2_SEQ_MAPPING, -) from ..pytorch_utils import isin_mps_friendly from ..tokenization_utils import ExtensionsTrie from ..utils import ( @@ -1113,32 +1106,6 @@ def compute_transition_scores( return transition_scores - def _validate_model_class(self): - """ - Confirms that the model class is compatible with generation. If not, raises an exception that points to the - right class to use. - """ - if not is_torchdynamo_compiling() and not self.can_generate(): - generate_compatible_mappings = [ - MODEL_FOR_CAUSAL_LM_MAPPING, - MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING, - MODEL_FOR_VISION_2_SEQ_MAPPING, - MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, - MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, - ] - generate_compatible_classes = set() - for model_mapping in generate_compatible_mappings: - supported_models = model_mapping.get(type(self.config), default=None) - if supported_models is not None: - generate_compatible_classes.add(supported_models.__name__) - exception_message = ( - f"The current model class ({self.__class__.__name__}) is not compatible with `.generate()`, as " - "it doesn't have a language model head." - ) - if generate_compatible_classes: - exception_message += f" Please use one of the following classes instead: {generate_compatible_classes}" - raise TypeError(exception_message) - def _validate_assistant(self, assistant_model): if assistant_model is None: return @@ -1777,7 +1744,6 @@ def generate( - [`~generation.GenerateBeamEncoderDecoderOutput`] """ # 1. Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call - self._validate_model_class() tokenizer = kwargs.pop("tokenizer", None) # Pull this out first, we only use it for stopping criteria generation_config, model_kwargs = self._prepare_generation_config(generation_config, **kwargs) self._validate_model_kwargs(model_kwargs.copy()) diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index b943b5e7989f..b5353995c017 100755 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -212,7 +212,7 @@ def _skip_init(*args, **kwargs): setattr(torch.nn.init, name, init_func) -def get_parameter_device(parameter: Union[nn.Module, GenerationMixin, "ModuleUtilsMixin"]): +def get_parameter_device(parameter: Union[nn.Module, "ModuleUtilsMixin"]): try: return next(parameter.parameters()).device except StopIteration: @@ -227,7 +227,7 @@ def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]: return first_tuple[1].device -def get_first_parameter_dtype(parameter: Union[nn.Module, GenerationMixin, "ModuleUtilsMixin"]): +def get_first_parameter_dtype(parameter: Union[nn.Module, "ModuleUtilsMixin"]): """ Returns the first parameter dtype (can be non-floating) or asserts if none were found. 
""" @@ -245,7 +245,7 @@ def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]: return first_tuple[1].dtype -def get_parameter_dtype(parameter: Union[nn.Module, GenerationMixin, "ModuleUtilsMixin"]): +def get_parameter_dtype(parameter: Union[nn.Module, "ModuleUtilsMixin"]): """ Returns the first found floating dtype in parameters if there is one, otherwise returns the last dtype it found. """ @@ -1295,7 +1295,7 @@ def floating_point_ops( return 6 * self.estimate_tokens(input_dict) * self.num_parameters(exclude_embeddings=exclude_embeddings) -class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMixin, PeftAdapterMixin): +class PreTrainedModel(nn.Module, ModuleUtilsMixin, PushToHubMixin, PeftAdapterMixin): r""" Base class for all models. @@ -1624,11 +1624,7 @@ def can_generate(cls) -> bool: Returns: `bool`: Whether this model can generate sequences with `.generate()`. """ - # Detects whether `prepare_inputs_for_generation` has been overwritten, which is a requirement for generation. - # Alternativelly, the model can also have a custom `generate` function. - if "GenerationMixin" in str(cls.prepare_inputs_for_generation) and "GenerationMixin" in str(cls.generate): - return False - return True + return issubclass(cls, GenerationMixin) @classmethod def _check_and_enable_flash_attn_2( @@ -4716,6 +4712,36 @@ def _is_quantized_training_enabled(self): return self.hf_quantizer.is_trainable + def generate(self, *args, **kwargs): + raise NotImplementedError( + f"{self.__class__.__name__} doesn't have a `generate` method. If you were not " + "expecting this exception, here are some things to check:" + "\n 1. If you are using a custom model (or `trust_remote_code=True`): Load your model with an " + "`AutoModel...` class. If that is not possible, make sure your model class inherits from " + "`GenerationMixin` as its first inherited class. `PreTrainedModel` no longer inherits " + "`GenerationMixin` as of v4.45" + "\n 2. If you are using a transformers model: You might be using a class that is not meant for " + "generation. Your model must be compatible with one of the following auto classes: " + "`AutoModelForCausalLM`, `ForConditionalGeneration`, `AutoModelForSpeechSeq2Seq`, and " + "`AutoModelForVision2Seq`" + "\n 3. If none of the cases above apply, please open an issue on GitHub 🤗" + ) + + def prepare_inputs_for_generation(self, *args, **kwargs): + raise NotImplementedError( + f"{self.__class__.__name__} doesn't have a `prepare_inputs_for_generation` method. If you were not " + "expecting this exception, here are some things to check:" + "\n 1. If you are using a custom model (or `trust_remote_code=True`): Load your model with an " + "`AutoModel...` class. If that is not possible, make sure your model class inherits from " + "`GenerationMixin` as its first inherited class. `PreTrainedModel` no longer inherits " + "`GenerationMixin` as of v4.45" + "\n 2. If you are using a transformers model: You might be using a class that is not meant for " + "generation. Your model must be compatible with one of the following auto classes: " + "`AutoModelForCausalLM`, `ForConditionalGeneration`, `AutoModelForSpeechSeq2Seq`, and " + "`AutoModelForVision2Seq`" + "\n 3. 
If none of the cases above apply, please open an issue on GitHub 🤗" + ) + PreTrainedModel.push_to_hub = copy_func(PreTrainedModel.push_to_hub) if PreTrainedModel.push_to_hub.__doc__ is not None: diff --git a/src/transformers/models/auto/auto_factory.py b/src/transformers/models/auto/auto_factory.py index 6b572b252779..5e2b15bd434c 100644 --- a/src/transformers/models/auto/auto_factory.py +++ b/src/transformers/models/auto/auto_factory.py @@ -30,12 +30,17 @@ extract_commit_hash, find_adapter_config_file, is_peft_available, + is_torch_available, logging, requires_backends, ) from .configuration_auto import AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings +if is_torch_available(): + from ...generation import GenerationMixin + + logger = logging.get_logger(__name__) @@ -432,6 +437,7 @@ def from_config(cls, config, **kwargs): else: cls.register(config.__class__, model_class, exist_ok=True) _ = kwargs.pop("code_revision", None) + model_class = add_generation_mixin_to_remote_model(model_class) return model_class._from_config(config, **kwargs) elif type(config) in cls._model_mapping.keys(): model_class = _get_model_class(config, cls._model_mapping) @@ -556,6 +562,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): model_class.register_for_auto_class(cls.__name__) else: cls.register(config.__class__, model_class, exist_ok=True) + model_class = add_generation_mixin_to_remote_model(model_class) return model_class.from_pretrained( pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs ) @@ -705,6 +712,36 @@ def getattribute_from_module(module, attr): raise ValueError(f"Could not find {attr} in {transformers_module}!") +def add_generation_mixin_to_remote_model(model_class): + """ + Adds `GenerationMixin` to the inheritance of `model_class`, if `model_class` is a PyTorch model. This function is + used for backwards compatibility purposes: prior to v4.45, `PreTrainedModel` inherited from `GenerationMixin`. + Without this function, older models dynamically loaded from the Hub may not have the `generate` method. + """ + # 1. If it is not a PT model (i.e. doesn't inherit Module), do nothing + if "torch.nn.modules.module.Module" not in str(model_class.__mro__): + return model_class + + # 2. If it already inherits from GenerationMixin, do nothing + if issubclass(model_class, GenerationMixin): + return model_class + + # 3. If the class name has a suffix that indicates that it should be able to generate, add `GenerationMixin` to + # the class inheritance. Otherwise, do nothing. + terminations_with_generation_support = [ + "ForCausalLM", + "ForConditionalGeneration", + "ForSpeechSeq2Seq", + "ForVision2Seq", + ] + if any(model_class.__name__.endswith(termination) for termination in terminations_with_generation_support): + model_class_with_generation_mixin = type( + model_class.__name__, (GenerationMixin, model_class), {**model_class.__dict__} + ) + return model_class_with_generation_mixin + return model_class + + class _LazyAutoMapping(OrderedDict): """ " A mapping config to object (model or tokenizer for instance) that will load keys and values when it is accessed. 
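The hunks above define the whole contract: `PreTrainedModel.can_generate()` becomes a plain inheritance check, the new `generate` / `prepare_inputs_for_generation` stubs turn a missing mixin into an actionable error, and `add_generation_mixin_to_remote_model` rebuilds older remote-code classes with `type()` so they keep generating. A minimal sketch of both sides of that contract, under the behaviour introduced by this diff (the `MyConfig`, `MyModelForCausalLM`, and `LegacyModelForCausalLM` names are illustrative, not part of the change):

    # Hypothetical model classes, shown only to illustrate the new inheritance rule.
    from transformers import PretrainedConfig, PreTrainedModel
    from transformers.generation import GenerationMixin

    class MyConfig(PretrainedConfig):
        model_type = "my-model"

    # `GenerationMixin` is listed before the `PreTrainedModel`-derived base so that its
    # `generate` / `prepare_inputs_for_generation` shadow the NotImplementedError stubs.
    class MyModelForCausalLM(GenerationMixin, PreTrainedModel):
        config_class = MyConfig

    # `can_generate()` now reduces to an inheritance check.
    assert MyModelForCausalLM.can_generate()
    assert not PreTrainedModel.can_generate()

    # A legacy remote-code class that predates the explicit-mixin requirement...
    class LegacyModelForCausalLM(PreTrainedModel):
        config_class = MyConfig

    # ...is patched by the auto factory roughly the way `add_generation_mixin_to_remote_model`
    # does above: rebuild the class with `GenerationMixin` injected ahead of it in the MRO.
    patched_class = type(
        LegacyModelForCausalLM.__name__,
        (GenerationMixin, LegacyModelForCausalLM),
        {**LegacyModelForCausalLM.__dict__},
    )
    assert patched_class.can_generate()

Ordering the bases this way is also why every per-model hunk below adds `GenerationMixin` as the first parent: the mixin's methods must sit ahead of the `PreTrainedModel` stubs in the method resolution order.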
diff --git a/src/transformers/models/bark/modeling_bark.py b/src/transformers/models/bark/modeling_bark.py index ac67ef4b37e4..658272048494 100644 --- a/src/transformers/models/bark/modeling_bark.py +++ b/src/transformers/models/bark/modeling_bark.py @@ -22,6 +22,7 @@ from torch import nn from torch.nn import functional as F +from ...generation import GenerationMixin from ...generation.logits_process import ( AlternatingCodebooksLogitsProcessor, BarkEosPrioritizerLogitsProcessor, @@ -546,7 +547,7 @@ def device(self) -> torch.device: # GPT2-like autoregressive model -class BarkCausalModel(BarkPreTrainedModel): +class BarkCausalModel(GenerationMixin, BarkPreTrainedModel): config_class = BarkSubModelConfig def __init__(self, config): @@ -1136,7 +1137,7 @@ def generate( language modeling heads, one for each codebook.""", BARK_MODEL_START_DOCSTRING.format(config="BarkFineConfig"), ) -class BarkFineModel(BarkPreTrainedModel): +class BarkFineModel(GenerationMixin, BarkPreTrainedModel): base_model_prefix = "fine_acoustics" config_class = BarkFineConfig main_input_name = "codebook_idx" @@ -1536,7 +1537,7 @@ def generate( """, BARK_START_DOCSTRING, ) -class BarkModel(BarkPreTrainedModel): +class BarkModel(GenerationMixin, BarkPreTrainedModel): config_class = BarkConfig def __init__(self, config): diff --git a/src/transformers/models/bart/modeling_bart.py b/src/transformers/models/bart/modeling_bart.py index fa928d05caa8..806e5bb4c522 100755 --- a/src/transformers/models/bart/modeling_bart.py +++ b/src/transformers/models/bart/modeling_bart.py @@ -25,6 +25,7 @@ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import ( _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa, @@ -1557,7 +1558,7 @@ def forward( @add_start_docstrings( "The BART Model with a language modeling head. 
Can be used for summarization.", BART_START_DOCSTRING ) -class BartForConditionalGeneration(BartPreTrainedModel): +class BartForConditionalGeneration(GenerationMixin, BartPreTrainedModel): base_model_prefix = "model" _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"] _keys_to_ignore_on_load_missing = ["final_logits_bias"] @@ -2010,7 +2011,7 @@ def forward(self, *args, **kwargs): """, BART_START_DOCSTRING, ) -class BartForCausalLM(BartPreTrainedModel): +class BartForCausalLM(GenerationMixin, BartPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): diff --git a/src/transformers/models/bert/modeling_bert.py b/src/transformers/models/bert/modeling_bert.py index 850e93ca59fb..ddbc9beb2cd7 100755 --- a/src/transformers/models/bert/modeling_bert.py +++ b/src/transformers/models/bert/modeling_bert.py @@ -28,6 +28,7 @@ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import ( _prepare_4d_attention_mask_for_sdpa, _prepare_4d_causal_attention_mask_for_sdpa, @@ -1280,7 +1281,7 @@ def forward( @add_start_docstrings( """Bert Model with a `language modeling` head on top for CLM fine-tuning.""", BERT_START_DOCSTRING ) -class BertLMHeadModel(BertPreTrainedModel): +class BertLMHeadModel(GenerationMixin, BertPreTrainedModel): _tied_weights_keys = ["cls.predictions.decoder.bias", "cls.predictions.decoder.weight"] def __init__(self, config): diff --git a/src/transformers/models/bert_generation/modeling_bert_generation.py b/src/transformers/models/bert_generation/modeling_bert_generation.py index a5fb3d053115..3359a0de0f95 100755 --- a/src/transformers/models/bert_generation/modeling_bert_generation.py +++ b/src/transformers/models/bert_generation/modeling_bert_generation.py @@ -23,6 +23,7 @@ from torch.nn import CrossEntropyLoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer @@ -863,7 +864,7 @@ def _tie_weights(self): """BertGeneration Model with a `language modeling` head on top for CLM fine-tuning.""", BERT_GENERATION_START_DOCSTRING, ) -class BertGenerationDecoder(BertGenerationPreTrainedModel): +class BertGenerationDecoder(GenerationMixin, BertGenerationPreTrainedModel): _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"] def __init__(self, config): diff --git a/src/transformers/models/big_bird/modeling_big_bird.py b/src/transformers/models/big_bird/modeling_big_bird.py index a6b1660d5ae1..5852f96230cf 100755 --- a/src/transformers/models/big_bird/modeling_big_bird.py +++ b/src/transformers/models/big_bird/modeling_big_bird.py @@ -26,6 +26,7 @@ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, @@ -2495,7 +2496,7 @@ def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_ @add_start_docstrings( """BigBird Model with a `language modeling` head on top for CLM fine-tuning.""", BIG_BIRD_START_DOCSTRING ) -class BigBirdForCausalLM(BigBirdPreTrainedModel): +class 
BigBirdForCausalLM(GenerationMixin, BigBirdPreTrainedModel): _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"] def __init__(self, config): diff --git a/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py b/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py index 9f8e3cd19cd8..a2c58d8b5a0e 100755 --- a/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py +++ b/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py @@ -24,6 +24,7 @@ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask from ...modeling_outputs import ( BaseModelOutput, @@ -2436,7 +2437,7 @@ def forward( BIGBIRD_PEGASUS_START_DOCSTRING, ) # Copied from transformers.models.bart.modeling_bart.BartForConditionalGeneration with Bart->BigBirdPegasus, BART->BIGBIRD_PEGASUS -class BigBirdPegasusForConditionalGeneration(BigBirdPegasusPreTrainedModel): +class BigBirdPegasusForConditionalGeneration(GenerationMixin, BigBirdPegasusPreTrainedModel): base_model_prefix = "model" _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"] _keys_to_ignore_on_load_missing = ["final_logits_bias"] @@ -2882,7 +2883,7 @@ def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs) -class BigBirdPegasusForCausalLM(BigBirdPegasusPreTrainedModel): +class BigBirdPegasusForCausalLM(GenerationMixin, BigBirdPegasusPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): diff --git a/src/transformers/models/biogpt/modeling_biogpt.py b/src/transformers/models/biogpt/modeling_biogpt.py index 020f52833d5b..b09909337a68 100755 --- a/src/transformers/models/biogpt/modeling_biogpt.py +++ b/src/transformers/models/biogpt/modeling_biogpt.py @@ -23,6 +23,7 @@ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, @@ -596,7 +597,7 @@ def forward( @add_start_docstrings( """BioGPT Model with a `language modeling` head on top for CLM fine-tuning.""", BIOGPT_START_DOCSTRING ) -class BioGptForCausalLM(BioGptPreTrainedModel): +class BioGptForCausalLM(GenerationMixin, BioGptPreTrainedModel): _tied_weights_keys = ["output_projection.weight"] def __init__(self, config): diff --git a/src/transformers/models/blenderbot/modeling_blenderbot.py b/src/transformers/models/blenderbot/modeling_blenderbot.py index 12d259fde71e..7e42a88d4ddd 100755 --- a/src/transformers/models/blenderbot/modeling_blenderbot.py +++ b/src/transformers/models/blenderbot/modeling_blenderbot.py @@ -26,6 +26,7 @@ from torch.nn import CrossEntropyLoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask from ...modeling_outputs import ( BaseModelOutput, @@ -1196,7 +1197,7 @@ def forward( @add_start_docstrings( "The Blenderbot Model with a language modeling head. 
Can be used for summarization.", BLENDERBOT_START_DOCSTRING ) -class BlenderbotForConditionalGeneration(BlenderbotPreTrainedModel): +class BlenderbotForConditionalGeneration(GenerationMixin, BlenderbotPreTrainedModel): base_model_prefix = "model" _keys_to_ignore_on_load_missing = ["final_logits_bias"] _tied_weights_keys = ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight", "lm_head.weight"] @@ -1397,7 +1398,7 @@ def forward(self, *args, **kwargs): # Copied from transformers.models.bart.modeling_bart.BartForCausalLM with Bart->Blenderbot, facebook/bart-base->facebook/blenderbot-400M-distill -class BlenderbotForCausalLM(BlenderbotPreTrainedModel): +class BlenderbotForCausalLM(GenerationMixin, BlenderbotPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): diff --git a/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py b/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py index aa0e38bd8e91..94ed63dea616 100755 --- a/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py +++ b/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py @@ -24,6 +24,7 @@ from torch.nn import CrossEntropyLoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask from ...modeling_outputs import ( BaseModelOutput, @@ -1163,7 +1164,7 @@ def forward( "The BlenderbotSmall Model with a language modeling head. Can be used for summarization.", BLENDERBOT_SMALL_START_DOCSTRING, ) -class BlenderbotSmallForConditionalGeneration(BlenderbotSmallPreTrainedModel): +class BlenderbotSmallForConditionalGeneration(GenerationMixin, BlenderbotSmallPreTrainedModel): base_model_prefix = "model" _keys_to_ignore_on_load_missing = ["final_logits_bias"] _tied_weights_keys = ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight", "lm_head.weight"] @@ -1349,7 +1350,7 @@ def forward(self, *args, **kwargs): # Copied from transformers.models.bart.modeling_bart.BartForCausalLM with Bart->BlenderbotSmall, facebook/bart-base->facebook/blenderbot_small-90M -class BlenderbotSmallForCausalLM(BlenderbotSmallPreTrainedModel): +class BlenderbotSmallForCausalLM(GenerationMixin, BlenderbotSmallPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): diff --git a/src/transformers/models/blip/modeling_blip.py b/src/transformers/models/blip/modeling_blip.py index 46e3a6005b0a..38e40a5b8c4d 100644 --- a/src/transformers/models/blip/modeling_blip.py +++ b/src/transformers/models/blip/modeling_blip.py @@ -25,6 +25,7 @@ from torch.nn.functional import normalize from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling from ...modeling_utils import PreTrainedModel from ...utils import ( @@ -1027,7 +1028,7 @@ def forward( """, BLIP_START_DOCSTRING, ) -class BlipForConditionalGeneration(BlipPreTrainedModel): +class BlipForConditionalGeneration(GenerationMixin, BlipPreTrainedModel): config_class = BlipConfig _tied_weights_keys = ["text_decoder.cls.predictions.decoder.bias"] main_input_name = "pixel_values" diff --git a/src/transformers/models/blip/modeling_blip_text.py b/src/transformers/models/blip/modeling_blip_text.py index a800ba89825d..2363f6f582eb 100644 --- a/src/transformers/models/blip/modeling_blip_text.py +++ b/src/transformers/models/blip/modeling_blip_text.py @@ -23,6 +23,7 @@ from torch.nn import 
CrossEntropyLoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, @@ -808,7 +809,7 @@ def forward( # Adapted from https://github.com/salesforce/BLIP/blob/main/models/med.py#L811 -class BlipTextLMHeadModel(BlipTextPreTrainedModel): +class BlipTextLMHeadModel(GenerationMixin, BlipTextPreTrainedModel): def __init__(self, config): super().__init__(config) diff --git a/src/transformers/models/blip_2/modeling_blip_2.py b/src/transformers/models/blip_2/modeling_blip_2.py index fba4c98696a0..8ff06604cff9 100644 --- a/src/transformers/models/blip_2/modeling_blip_2.py +++ b/src/transformers/models/blip_2/modeling_blip_2.py @@ -24,6 +24,7 @@ from torch.nn import CrossEntropyLoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, @@ -1997,7 +1998,7 @@ def forward( """, BLIP_2_START_DOCSTRING, ) -class Blip2ForConditionalGeneration(Blip2PreTrainedModel): +class Blip2ForConditionalGeneration(GenerationMixin, Blip2PreTrainedModel): config_class = Blip2Config main_input_name = "pixel_values" diff --git a/src/transformers/models/bloom/modeling_bloom.py b/src/transformers/models/bloom/modeling_bloom.py index 70e748343561..5f6e60c25bcf 100644 --- a/src/transformers/models/bloom/modeling_bloom.py +++ b/src/transformers/models/bloom/modeling_bloom.py @@ -26,6 +26,7 @@ from ...cache_utils import Cache, DynamicCache, StaticCache from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, @@ -856,7 +857,7 @@ def _update_causal_mask( """, BLOOM_START_DOCSTRING, ) -class BloomForCausalLM(BloomPreTrainedModel): +class BloomForCausalLM(GenerationMixin, BloomPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config: BloomConfig): diff --git a/src/transformers/models/camembert/modeling_camembert.py b/src/transformers/models/camembert/modeling_camembert.py index 03a60a2856be..72f62ea0f9a4 100644 --- a/src/transformers/models/camembert/modeling_camembert.py +++ b/src/transformers/models/camembert/modeling_camembert.py @@ -25,6 +25,7 @@ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN, gelu +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import ( _prepare_4d_attention_mask_for_sdpa, _prepare_4d_causal_attention_mask_for_sdpa, @@ -1544,7 +1545,7 @@ def forward( """CamemBERT Model with a `language modeling` head on top for CLM fine-tuning.""", CAMEMBERT_START_DOCSTRING ) # Copied from transformers.models.roberta.modeling_roberta.RobertaForCausalLM with Roberta->Camembert, ROBERTA->CAMEMBERT, FacebookAI/roberta-base->almanach/camembert-base -class CamembertForCausalLM(CamembertPreTrainedModel): +class CamembertForCausalLM(GenerationMixin, CamembertPreTrainedModel): _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"] def __init__(self, config): diff --git a/src/transformers/models/chameleon/modeling_chameleon.py b/src/transformers/models/chameleon/modeling_chameleon.py index 23334311ca95..43fe52bd94c0 100644 --- a/src/transformers/models/chameleon/modeling_chameleon.py +++ 
b/src/transformers/models/chameleon/modeling_chameleon.py @@ -26,6 +26,7 @@ from ...activations import ACT2FN from ...cache_utils import Cache, StaticCache +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_flash_attention_utils import _flash_attention_forward from ...modeling_outputs import ( @@ -1496,7 +1497,7 @@ def _update_causal_mask( "Chameleon Model with a head on top used for outputting logits for next token prediction.", CHAMELEON_START_DOCSTRING, ) -class ChameleonForConditionalGeneration(ChameleonPreTrainedModel): +class ChameleonForConditionalGeneration(GenerationMixin, ChameleonPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): diff --git a/src/transformers/models/clvp/modeling_clvp.py b/src/transformers/models/clvp/modeling_clvp.py index b6d025a0b8e2..81ac5d469397 100644 --- a/src/transformers/models/clvp/modeling_clvp.py +++ b/src/transformers/models/clvp/modeling_clvp.py @@ -26,7 +26,7 @@ from torch.nn import CrossEntropyLoss from ...activations import ACT2FN -from ...generation import GenerationConfig +from ...generation import GenerationConfig, GenerationMixin from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask from ...modeling_outputs import ( BaseModelOutput, @@ -1278,7 +1278,7 @@ def forward( "The CLVP decoder model with a language modelling head on top.", CLVP_START_DOCSTRING, ) -class ClvpForCausalLM(ClvpPreTrainedModel): +class ClvpForCausalLM(GenerationMixin, ClvpPreTrainedModel): def __init__(self, config): super().__init__(config) @@ -1509,7 +1509,7 @@ def _reorder_cache( "together to filter out the best speech_ids.", CLVP_START_DOCSTRING, ) -class ClvpModelForConditionalGeneration(ClvpPreTrainedModel): +class ClvpModelForConditionalGeneration(GenerationMixin, ClvpPreTrainedModel): config_class = ClvpConfig def __init__(self, config: ClvpConfig): diff --git a/src/transformers/models/codegen/modeling_codegen.py b/src/transformers/models/codegen/modeling_codegen.py index bfa591f7bdaf..7668ecda0083 100644 --- a/src/transformers/models/codegen/modeling_codegen.py +++ b/src/transformers/models/codegen/modeling_codegen.py @@ -23,6 +23,7 @@ from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, StaticCache +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from ...modeling_utils import PreTrainedModel @@ -697,7 +698,7 @@ def _update_causal_mask( """, CODEGEN_START_DOCSTRING, ) -class CodeGenForCausalLM(CodeGenPreTrainedModel): +class CodeGenForCausalLM(GenerationMixin, CodeGenPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): diff --git a/src/transformers/models/cohere/modeling_cohere.py b/src/transformers/models/cohere/modeling_cohere.py index 6912a4596370..c7aa0c421f49 100644 --- a/src/transformers/models/cohere/modeling_cohere.py +++ b/src/transformers/models/cohere/modeling_cohere.py @@ -32,6 +32,7 @@ from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, StaticCache +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_outputs import ( BaseModelOutputWithPast, @@ -972,7 +973,7 @@ def _update_causal_mask( # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM with Llama->Cohere -class 
CohereForCausalLM(CoherePreTrainedModel): +class CohereForCausalLM(GenerationMixin, CoherePreTrainedModel): _tied_weights_keys = ["lm_head.weight"] # Ignore copy diff --git a/src/transformers/models/cpmant/modeling_cpmant.py b/src/transformers/models/cpmant/modeling_cpmant.py index c8a313505251..903a851d11fb 100755 --- a/src/transformers/models/cpmant/modeling_cpmant.py +++ b/src/transformers/models/cpmant/modeling_cpmant.py @@ -24,6 +24,7 @@ from torch.nn import CrossEntropyLoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging @@ -736,7 +737,7 @@ def forward( """, CPMANT_START_DOCSTRING, ) -class CpmAntForCausalLM(CpmAntPreTrainedModel): +class CpmAntForCausalLM(GenerationMixin, CpmAntPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config: CpmAntConfig): diff --git a/src/transformers/models/ctrl/modeling_ctrl.py b/src/transformers/models/ctrl/modeling_ctrl.py index bbf3b10a62ec..7aadf5819fa9 100644 --- a/src/transformers/models/ctrl/modeling_ctrl.py +++ b/src/transformers/models/ctrl/modeling_ctrl.py @@ -22,6 +22,7 @@ from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss +from ...generation import GenerationMixin from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_linear_layer @@ -503,7 +504,7 @@ def forward( """, CTRL_START_DOCSTRING, ) -class CTRLLMHeadModel(CTRLPreTrainedModel): +class CTRLLMHeadModel(GenerationMixin, CTRLPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): diff --git a/src/transformers/models/data2vec/modeling_data2vec_text.py b/src/transformers/models/data2vec/modeling_data2vec_text.py index a41fdfb56ed1..19ec77d08c29 100644 --- a/src/transformers/models/data2vec/modeling_data2vec_text.py +++ b/src/transformers/models/data2vec/modeling_data2vec_text.py @@ -23,6 +23,7 @@ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN, gelu +from ...generation import GenerationMixin from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, @@ -866,7 +867,7 @@ def forward( @add_start_docstrings( """Data2VecText Model with a `language modeling` head on top for CLM fine-tuning.""", DATA2VECTEXT_START_DOCSTRING ) -class Data2VecTextForCausalLM(Data2VecTextPreTrainedModel): +class Data2VecTextForCausalLM(GenerationMixin, Data2VecTextPreTrainedModel): _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"] def __init__(self, config): diff --git a/src/transformers/models/dbrx/modeling_dbrx.py b/src/transformers/models/dbrx/modeling_dbrx.py index 9684fd174733..482c0a0e90eb 100644 --- a/src/transformers/models/dbrx/modeling_dbrx.py +++ b/src/transformers/models/dbrx/modeling_dbrx.py @@ -23,6 +23,7 @@ from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, StaticCache +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast from ...modeling_utils import PreTrainedModel @@ 
-1223,7 +1224,7 @@ def _update_causal_mask( @add_start_docstrings("The DBRX Model transformer for causal language modeling.", DBRX_START_DOCSTRING) -class DbrxForCausalLM(DbrxPreTrainedModel): +class DbrxForCausalLM(GenerationMixin, DbrxPreTrainedModel): def __init__(self, config: DbrxConfig): super().__init__(config) self.transformer = DbrxModel(config) diff --git a/src/transformers/models/electra/modeling_electra.py b/src/transformers/models/electra/modeling_electra.py index dd017170bef9..3682ad23ff36 100644 --- a/src/transformers/models/electra/modeling_electra.py +++ b/src/transformers/models/electra/modeling_electra.py @@ -25,6 +25,7 @@ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN, get_activation +from ...generation import GenerationMixin from ...modeling_outputs import ( BaseModelOutputWithCrossAttentions, BaseModelOutputWithPastAndCrossAttentions, @@ -1524,7 +1525,7 @@ def forward( @add_start_docstrings( """ELECTRA Model with a `language modeling` head on top for CLM fine-tuning.""", ELECTRA_START_DOCSTRING ) -class ElectraForCausalLM(ElectraPreTrainedModel): +class ElectraForCausalLM(GenerationMixin, ElectraPreTrainedModel): _tied_weights_keys = ["generator_lm_head.weight"] def __init__(self, config): diff --git a/src/transformers/models/ernie/modeling_ernie.py b/src/transformers/models/ernie/modeling_ernie.py index 6a0a26a5cbe5..11c0abd5fffe 100644 --- a/src/transformers/models/ernie/modeling_ernie.py +++ b/src/transformers/models/ernie/modeling_ernie.py @@ -25,6 +25,7 @@ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, @@ -1081,7 +1082,7 @@ def forward( @add_start_docstrings( """Ernie Model with a `language modeling` head on top for CLM fine-tuning.""", ERNIE_START_DOCSTRING ) -class ErnieForCausalLM(ErniePreTrainedModel): +class ErnieForCausalLM(GenerationMixin, ErniePreTrainedModel): _tied_weights_keys = ["cls.predictions.decoder.bias", "cls.predictions.decoder.weight"] # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel.__init__ with BertLMHeadModel->ErnieForCausalLM,Bert->Ernie,bert->ernie diff --git a/src/transformers/models/falcon/modeling_falcon.py b/src/transformers/models/falcon/modeling_falcon.py index a9acd171c3ae..4c6f8df8c1b8 100644 --- a/src/transformers/models/falcon/modeling_falcon.py +++ b/src/transformers/models/falcon/modeling_falcon.py @@ -25,6 +25,7 @@ from ...activations import get_activation from ...cache_utils import Cache, DynamicCache, StaticCache +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import ( AttentionMaskConverter, ) @@ -1220,7 +1221,7 @@ def _update_causal_mask( "The Falcon Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).", FALCON_START_DOCSTRING, ) -class FalconForCausalLM(FalconPreTrainedModel): +class FalconForCausalLM(GenerationMixin, FalconPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config: FalconConfig): diff --git a/src/transformers/models/falcon_mamba/modeling_falcon_mamba.py b/src/transformers/models/falcon_mamba/modeling_falcon_mamba.py index 07374fe1dfd7..05b8b6ff6117 100644 --- a/src/transformers/models/falcon_mamba/modeling_falcon_mamba.py +++ b/src/transformers/models/falcon_mamba/modeling_falcon_mamba.py @@ -25,6 +25,7 
@@ from ...activations import ACT2FN from ...cache_utils import MambaCache +from ...generation import GenerationMixin from ...modeling_utils import PreTrainedModel from ...utils import ( ModelOutput, @@ -702,7 +703,7 @@ def forward( FALCONMAMBA_START_DOCSTRING, ) # Copied from transformers.models.mamba.modeling_mamba.MambaForCausalLM with MAMBA->FALCONMAMBA,Mamba->FalconMamba,mamba->falcon_mamba,FalconMambaCache->MambaCache -class FalconMambaForCausalLM(FalconMambaPreTrainedModel): +class FalconMambaForCausalLM(GenerationMixin, FalconMambaPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): diff --git a/src/transformers/models/flaubert/modeling_flaubert.py b/src/transformers/models/flaubert/modeling_flaubert.py index 50c6f7ede222..1be2394cb3d7 100644 --- a/src/transformers/models/flaubert/modeling_flaubert.py +++ b/src/transformers/models/flaubert/modeling_flaubert.py @@ -25,6 +25,7 @@ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import gelu +from ...generation import GenerationMixin from ...modeling_outputs import ( BaseModelOutput, MaskedLMOutput, @@ -644,7 +645,7 @@ def forward( FLAUBERT_START_DOCSTRING, ) # Copied transformers.models.xlm.modeling_xlm.XLMWithLMHeadModel with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert -class FlaubertWithLMHeadModel(FlaubertPreTrainedModel): +class FlaubertWithLMHeadModel(GenerationMixin, FlaubertPreTrainedModel): _tied_weights_keys = ["pred_layer.proj.weight"] def __init__(self, config): diff --git a/src/transformers/models/fsmt/modeling_fsmt.py b/src/transformers/models/fsmt/modeling_fsmt.py index 179408aba38e..6cf1ee9a842e 100644 --- a/src/transformers/models/fsmt/modeling_fsmt.py +++ b/src/transformers/models/fsmt/modeling_fsmt.py @@ -35,6 +35,7 @@ from torch.nn import CrossEntropyLoss, LayerNorm from ...activations import ACT2FN +from ...generation import GenerationMixin from ...integrations.deepspeed import is_deepspeed_zero3_enabled from ...modeling_outputs import ( BaseModelOutput, @@ -1173,7 +1174,7 @@ def set_output_embeddings(self, value): @add_start_docstrings( "The FSMT Model with a language modeling head. 
Can be used for summarization.", FSMT_START_DOCSTRING ) -class FSMTForConditionalGeneration(PretrainedFSMTModel): +class FSMTForConditionalGeneration(GenerationMixin, PretrainedFSMTModel): base_model_prefix = "model" _tied_weights_keys = ["decoder.embed_tokens.weight", "decoder.output_projection.weight"] diff --git a/src/transformers/models/fuyu/modeling_fuyu.py b/src/transformers/models/fuyu/modeling_fuyu.py index b4b6330d0d86..82b7e6554f6a 100644 --- a/src/transformers/models/fuyu/modeling_fuyu.py +++ b/src/transformers/models/fuyu/modeling_fuyu.py @@ -20,6 +20,7 @@ import torch.utils.checkpoint from torch import nn +from ...generation import GenerationMixin from ...modeling_outputs import CausalLMOutputWithPast from ...modeling_utils import PreTrainedModel from ...models.auto.modeling_auto import AutoModelForCausalLM @@ -145,7 +146,7 @@ def _init_weights(self, module): "Fuyu Model with a language modeling head on top for causal language model conditioned on image patches and text.", FUYU_START_DOCSTRING, ) -class FuyuForCausalLM(FuyuPreTrainedModel): +class FuyuForCausalLM(GenerationMixin, FuyuPreTrainedModel): def __init__(self, config: FuyuConfig): super().__init__(config) self.padding_idx = config.pad_token_id diff --git a/src/transformers/models/gemma/diff_gemma.py b/src/transformers/models/gemma/diff_gemma.py index fcdb0f0b3d7d..6ec1aad286c2 100644 --- a/src/transformers/models/gemma/diff_gemma.py +++ b/src/transformers/models/gemma/diff_gemma.py @@ -34,6 +34,7 @@ from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, StaticCache +from ...generation import GenerationMixin from ...modeling_flash_attention_utils import _flash_attention_forward from ...modeling_outputs import CausalLMOutputWithPast from ...pytorch_utils import ALL_LAYERNORM_LAYERS @@ -520,7 +521,7 @@ def forward( # Example where we ony modify the docstring and call super -class GemmaForCausalLM(LlamaForCausalLM): +class GemmaForCausalLM(GenerationMixin, LlamaForCausalLM): def forward( self, input_ids: torch.LongTensor = None, diff --git a/src/transformers/models/gemma/modeling_gemma.py b/src/transformers/models/gemma/modeling_gemma.py index 62917c73f332..1549bcf64f3d 100644 --- a/src/transformers/models/gemma/modeling_gemma.py +++ b/src/transformers/models/gemma/modeling_gemma.py @@ -29,6 +29,7 @@ from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, StaticCache +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_flash_attention_utils import _flash_attention_forward from ...modeling_outputs import ( @@ -989,7 +990,7 @@ def _update_causal_mask( return causal_mask -class GemmaForCausalLM(GemmaPreTrainedModel): +class GemmaForCausalLM(GenerationMixin, GemmaPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): diff --git a/src/transformers/models/gemma2/diff_gemma2.py b/src/transformers/models/gemma2/diff_gemma2.py index 0e300c6337e2..c7c18415fed4 100644 --- a/src/transformers/models/gemma2/diff_gemma2.py +++ b/src/transformers/models/gemma2/diff_gemma2.py @@ -33,6 +33,7 @@ ) from ...cache_utils import Cache +from ...generation import GenerationMixin from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from ...utils import is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10, logging @@ -476,7 +477,7 @@ def _update_causal_mask( return causal_mask -class Gemma2ForCausalLM(GemmaForCausalLM): +class Gemma2ForCausalLM(GenerationMixin, 
GemmaForCausalLM): def forward( self, input_ids: torch.LongTensor = None, diff --git a/src/transformers/models/gemma2/modeling_gemma2.py b/src/transformers/models/gemma2/modeling_gemma2.py index bf6ff76189d4..17b5133461d0 100644 --- a/src/transformers/models/gemma2/modeling_gemma2.py +++ b/src/transformers/models/gemma2/modeling_gemma2.py @@ -28,6 +28,7 @@ from ...activations import ACT2FN from ...cache_utils import Cache, HybridCache +from ...generation import GenerationMixin from ...modeling_outputs import ( BaseModelOutputWithPast, CausalLMOutputWithPast, @@ -943,7 +944,7 @@ def _update_causal_mask( return causal_mask -class Gemma2ForCausalLM(Gemma2PreTrainedModel): +class Gemma2ForCausalLM(GenerationMixin, Gemma2PreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): diff --git a/src/transformers/models/git/modeling_git.py b/src/transformers/models/git/modeling_git.py index 4807289c927c..e1510ffd513e 100644 --- a/src/transformers/models/git/modeling_git.py +++ b/src/transformers/models/git/modeling_git.py @@ -27,6 +27,7 @@ from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache from ...file_utils import ModelOutput +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import _prepare_4d_attention_mask from ...modeling_outputs import ( BaseModelOutput, @@ -1318,7 +1319,7 @@ def forward( @add_start_docstrings( """GIT Model with a `language modeling` head on top for autoregressive language modeling.""", GIT_START_DOCSTRING ) -class GitForCausalLM(GitPreTrainedModel): +class GitForCausalLM(GenerationMixin, GitPreTrainedModel): _tied_weights_keys = ["output.weight"] def __init__(self, config): diff --git a/src/transformers/models/gpt2/modeling_gpt2.py b/src/transformers/models/gpt2/modeling_gpt2.py index 8dfbfb906444..02ea192a8aa6 100644 --- a/src/transformers/models/gpt2/modeling_gpt2.py +++ b/src/transformers/models/gpt2/modeling_gpt2.py @@ -28,6 +28,7 @@ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import _prepare_4d_attention_mask_for_sdpa, _prepare_4d_causal_attention_mask_for_sdpa from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, @@ -1182,7 +1183,7 @@ def forward( """, GPT2_START_DOCSTRING, ) -class GPT2LMHeadModel(GPT2PreTrainedModel): +class GPT2LMHeadModel(GenerationMixin, GPT2PreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): @@ -1384,7 +1385,7 @@ def _reorder_cache( """, GPT2_START_DOCSTRING, ) -class GPT2DoubleHeadsModel(GPT2PreTrainedModel): +class GPT2DoubleHeadsModel(GenerationMixin, GPT2PreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): diff --git a/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py b/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py index 0f927a72469d..9caab1f3c47c 100644 --- a/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py +++ b/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py @@ -22,6 +22,7 @@ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, @@ -1040,7 +1041,7 @@ def forward( """, GPT_BIGCODE_START_DOCSTRING, ) -class GPTBigCodeForCausalLM(GPTBigCodePreTrainedModel): +class 
GPTBigCodeForCausalLM(GenerationMixin, GPTBigCodePreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): diff --git a/src/transformers/models/gpt_neo/modeling_gpt_neo.py b/src/transformers/models/gpt_neo/modeling_gpt_neo.py index e59853677f83..ade982415958 100755 --- a/src/transformers/models/gpt_neo/modeling_gpt_neo.py +++ b/src/transformers/models/gpt_neo/modeling_gpt_neo.py @@ -24,6 +24,7 @@ from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, StaticCache +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter, _prepare_4d_causal_attention_mask from ...modeling_outputs import ( BaseModelOutputWithPast, @@ -912,7 +913,7 @@ def _update_causal_mask( """, GPT_NEO_START_DOCSTRING, ) -class GPTNeoForCausalLM(GPTNeoPreTrainedModel): +class GPTNeoForCausalLM(GenerationMixin, GPTNeoPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): diff --git a/src/transformers/models/gpt_neox/modeling_gpt_neox.py b/src/transformers/models/gpt_neox/modeling_gpt_neox.py index 7be35c0d137d..13908a1a14fb 100755 --- a/src/transformers/models/gpt_neox/modeling_gpt_neox.py +++ b/src/transformers/models/gpt_neox/modeling_gpt_neox.py @@ -30,6 +30,7 @@ add_start_docstrings_to_model_forward, replace_return_docstrings, ) +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_outputs import ( BaseModelOutputWithPast, @@ -1079,7 +1080,7 @@ def _update_causal_mask( @add_start_docstrings( """GPTNeoX Model with a `language modeling` head on top for CLM fine-tuning.""", GPT_NEOX_START_DOCSTRING ) -class GPTNeoXForCausalLM(GPTNeoXPreTrainedModel): +class GPTNeoXForCausalLM(GenerationMixin, GPTNeoXPreTrainedModel): _tied_weights_keys = ["embed_out.weight"] def __init__(self, config): diff --git a/src/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py b/src/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py index b9c4cad0fdc5..137afe616d06 100755 --- a/src/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py +++ b/src/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py @@ -23,6 +23,7 @@ from ...activations import ACT2FN from ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings +from ...generation import GenerationMixin from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from ...modeling_utils import PreTrainedModel from ...utils import logging @@ -589,7 +590,7 @@ def forward( """GPTNeoXJapanese Model with a `language modeling` head on top for Classifier Model fine-tuning.""", GPT_NEOX_JAPANESE_START_DOCSTRING, ) -class GPTNeoXJapaneseForCausalLM(GPTNeoXJapanesePreTrainedModel): +class GPTNeoXJapaneseForCausalLM(GenerationMixin, GPTNeoXJapanesePreTrainedModel): _tied_weights_keys = ["embed_out.weight"] def __init__(self, config): diff --git a/src/transformers/models/gptj/modeling_gptj.py b/src/transformers/models/gptj/modeling_gptj.py index 1408bfe8a61d..aae8d0ea615e 100644 --- a/src/transformers/models/gptj/modeling_gptj.py +++ b/src/transformers/models/gptj/modeling_gptj.py @@ -25,6 +25,7 @@ from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, StaticCache +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_outputs import ( BaseModelOutputWithPast, @@ -1006,7 +1007,7 @@ def 
_update_causal_mask( """, GPTJ_START_DOCSTRING, ) -class GPTJForCausalLM(GPTJPreTrainedModel): +class GPTJForCausalLM(GenerationMixin, GPTJPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): diff --git a/src/transformers/models/idefics2/modeling_idefics2.py b/src/transformers/models/idefics2/modeling_idefics2.py index 5d4f8e408eb0..c7c8e9f56d3f 100644 --- a/src/transformers/models/idefics2/modeling_idefics2.py +++ b/src/transformers/models/idefics2/modeling_idefics2.py @@ -23,11 +23,12 @@ from torch import nn from torch.nn import CrossEntropyLoss -from ... import PreTrainedModel from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import _prepare_4d_attention_mask from ...modeling_outputs import BaseModelOutput, ModelOutput +from ...modeling_utils import PreTrainedModel from ...utils import ( add_start_docstrings, add_start_docstrings_to_model_forward, @@ -1438,7 +1439,7 @@ def forward( """The Idefics2 Model with a language modeling head. It is made up a SigLIP vision encoder, with a language modeling head on top. """, IDEFICS2_START_DOCSTRING, ) -class Idefics2ForConditionalGeneration(Idefics2PreTrainedModel): +class Idefics2ForConditionalGeneration(GenerationMixin, Idefics2PreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): diff --git a/src/transformers/models/imagegpt/modeling_imagegpt.py b/src/transformers/models/imagegpt/modeling_imagegpt.py index 5d59a4ed90e4..a202f2dbc9bf 100755 --- a/src/transformers/models/imagegpt/modeling_imagegpt.py +++ b/src/transformers/models/imagegpt/modeling_imagegpt.py @@ -26,6 +26,7 @@ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, @@ -880,7 +881,7 @@ def forward( """, IMAGEGPT_START_DOCSTRING, ) -class ImageGPTForCausalImageModeling(ImageGPTPreTrainedModel): +class ImageGPTForCausalImageModeling(GenerationMixin, ImageGPTPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config: ImageGPTConfig): diff --git a/src/transformers/models/instructblip/modeling_instructblip.py b/src/transformers/models/instructblip/modeling_instructblip.py index f59f72a6699c..beb65d130f8d 100644 --- a/src/transformers/models/instructblip/modeling_instructblip.py +++ b/src/transformers/models/instructblip/modeling_instructblip.py @@ -24,6 +24,7 @@ from torch.nn import CrossEntropyLoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, @@ -1274,7 +1275,7 @@ def forward( """, INSTRUCTBLIP_START_DOCSTRING, ) -class InstructBlipForConditionalGeneration(InstructBlipPreTrainedModel): +class InstructBlipForConditionalGeneration(GenerationMixin, InstructBlipPreTrainedModel): config_class = InstructBlipConfig main_input_name = "pixel_values" diff --git a/src/transformers/models/instructblipvideo/diff_instructblipvideo.py b/src/transformers/models/instructblipvideo/diff_instructblipvideo.py index 506da83c5322..b6ccc67b1e83 100644 --- a/src/transformers/models/instructblipvideo/diff_instructblipvideo.py +++ b/src/transformers/models/instructblipvideo/diff_instructblipvideo.py @@ -45,6 +45,7 @@ InstructBlipVisionModel, ) +from ...generation import GenerationMixin from 
...utils import logging @@ -128,7 +129,7 @@ class InstructBlipVideoQFormerModel(InstructBlipQFormerModel): pass -class InstructBlipVideoForConditionalGeneration(InstructBlipForConditionalGeneration): +class InstructBlipVideoForConditionalGeneration(GenerationMixin, InstructBlipForConditionalGeneration): def forward( self, pixel_values: torch.FloatTensor, diff --git a/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py b/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py index 701402241d4a..57c38bee9ffc 100644 --- a/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py +++ b/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py @@ -30,6 +30,7 @@ from torch.nn import CrossEntropyLoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, @@ -1283,7 +1284,7 @@ def forward( """, INSTRUCTBLIPVIDEO_START_DOCSTRING, ) -class InstructBlipVideoForConditionalGeneration(InstructBlipVideoPreTrainedModel): +class InstructBlipVideoForConditionalGeneration(GenerationMixin, InstructBlipVideoPreTrainedModel): config_class = InstructBlipVideoConfig main_input_name = "pixel_values" diff --git a/src/transformers/models/jamba/modeling_jamba.py b/src/transformers/models/jamba/modeling_jamba.py index 60e1670a3c27..f55e3af49cb7 100755 --- a/src/transformers/models/jamba/modeling_jamba.py +++ b/src/transformers/models/jamba/modeling_jamba.py @@ -30,6 +30,7 @@ from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache # we need __iter__ and __len__ of pkv +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import ( AttentionMaskConverter, ) @@ -1424,7 +1425,7 @@ def _update_mamba_mask(self, attention_mask, cache_position): # Adapted from transformers.models.mixtral.modeling_mixtral.MixtralForCausalLM with MIXTRAL->JAMBA, Mixtral->Jamba -class JambaForCausalLM(JambaPreTrainedModel): +class JambaForCausalLM(GenerationMixin, JambaPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config: JambaConfig): diff --git a/src/transformers/models/jetmoe/modeling_jetmoe.py b/src/transformers/models/jetmoe/modeling_jetmoe.py index 162478a7258c..53853bb9d845 100644 --- a/src/transformers/models/jetmoe/modeling_jetmoe.py +++ b/src/transformers/models/jetmoe/modeling_jetmoe.py @@ -25,6 +25,7 @@ from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, StaticCache +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_outputs import ( MoeCausalLMOutputWithPast, @@ -1195,7 +1196,7 @@ def _update_causal_mask( return causal_mask -class JetMoeForCausalLM(JetMoePreTrainedModel): +class JetMoeForCausalLM(GenerationMixin, JetMoePreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): diff --git a/src/transformers/models/kosmos2/modeling_kosmos2.py b/src/transformers/models/kosmos2/modeling_kosmos2.py index 69641790b2db..625fb777d6e3 100644 --- a/src/transformers/models/kosmos2/modeling_kosmos2.py +++ b/src/transformers/models/kosmos2/modeling_kosmos2.py @@ -24,6 +24,7 @@ from torch.nn import CrossEntropyLoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, @@ -1521,7 +1522,7 @@ def forward( """, KOSMOS2_START_DOCSTRING, ) -class 
Kosmos2TextForCausalLM(Kosmos2PreTrainedModel): +class Kosmos2TextForCausalLM(GenerationMixin, Kosmos2PreTrainedModel): config_class = Kosmos2TextConfig _tied_weights_keys = ["lm_head.weight"] @@ -1864,7 +1865,7 @@ def forward( """, KOSMOS2_START_DOCSTRING, ) -class Kosmos2ForConditionalGeneration(Kosmos2PreTrainedModel): +class Kosmos2ForConditionalGeneration(GenerationMixin, Kosmos2PreTrainedModel): config_class = Kosmos2Config main_input_name = "pixel_values" _tied_weights_keys = ["text_model.lm_head.weight"] diff --git a/src/transformers/models/led/modeling_led.py b/src/transformers/models/led/modeling_led.py index 41b6c0a2bea2..30081e636291 100755 --- a/src/transformers/models/led/modeling_led.py +++ b/src/transformers/models/led/modeling_led.py @@ -25,6 +25,7 @@ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import _create_4d_causal_attention_mask from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, @@ -2298,7 +2299,7 @@ def forward( @add_start_docstrings( "The LED Model with a language modeling head. Can be used for summarization.", LED_START_DOCSTRING ) -class LEDForConditionalGeneration(LEDPreTrainedModel): +class LEDForConditionalGeneration(GenerationMixin, LEDPreTrainedModel): base_model_prefix = "led" _keys_to_ignore_on_load_missing = ["final_logits_bias"] _tied_weights_keys = ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight", "lm_head.weight"] diff --git a/src/transformers/models/llama/modeling_llama.py b/src/transformers/models/llama/modeling_llama.py index 022ae5ce74c4..aa8eb91689c3 100644 --- a/src/transformers/models/llama/modeling_llama.py +++ b/src/transformers/models/llama/modeling_llama.py @@ -28,6 +28,7 @@ from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, StaticCache +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_flash_attention_utils import _flash_attention_forward from ...modeling_outputs import ( @@ -1097,7 +1098,7 @@ def _update_causal_mask( return causal_mask -class LlamaForCausalLM(LlamaPreTrainedModel): +class LlamaForCausalLM(GenerationMixin, LlamaPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): diff --git a/src/transformers/models/llava/modeling_llava.py b/src/transformers/models/llava/modeling_llava.py index 394c80edb540..290fc75517e3 100644 --- a/src/transformers/models/llava/modeling_llava.py +++ b/src/transformers/models/llava/modeling_llava.py @@ -23,6 +23,7 @@ from ... 
import PreTrainedModel from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_outputs import ModelOutput from ...utils import ( add_start_docstrings, @@ -240,7 +241,7 @@ def _supports_sdpa(self): """The LLAVA model which consists of a vision backbone and a language model.""", LLAVA_START_DOCSTRING, ) -class LlavaForConditionalGeneration(LlavaPreTrainedModel): +class LlavaForConditionalGeneration(GenerationMixin, LlavaPreTrainedModel): def __init__(self, config: LlavaConfig): super().__init__(config) self.vision_tower = AutoModel.from_config(config.vision_config) diff --git a/src/transformers/models/llava_next/modeling_llava_next.py b/src/transformers/models/llava_next/modeling_llava_next.py index 723d54c92dd9..460754fa7e91 100644 --- a/src/transformers/models/llava_next/modeling_llava_next.py +++ b/src/transformers/models/llava_next/modeling_llava_next.py @@ -23,10 +23,11 @@ import torch.utils.checkpoint from torch import nn -from ... import PreTrainedModel from ...activations import ACT2FN +from ...generation import GenerationMixin from ...image_processing_utils import select_best_resolution from ...modeling_outputs import ModelOutput +from ...modeling_utils import PreTrainedModel from ...utils import ( add_start_docstrings, add_start_docstrings_to_model_forward, @@ -346,7 +347,7 @@ def _supports_sdpa(self): """The LLAVA-NeXT model which consists of a vision backbone and a language model.""", LLAVA_NEXT_START_DOCSTRING, ) -class LlavaNextForConditionalGeneration(LlavaNextPreTrainedModel): +class LlavaNextForConditionalGeneration(GenerationMixin, LlavaNextPreTrainedModel): def __init__(self, config: LlavaNextConfig): super().__init__(config) self.vision_tower = AutoModel.from_config(config.vision_config) diff --git a/src/transformers/models/llava_next_video/diff_llava_next_video.py b/src/transformers/models/llava_next_video/diff_llava_next_video.py index b4018db586e7..5242eb1ed3a2 100644 --- a/src/transformers/models/llava_next_video/diff_llava_next_video.py +++ b/src/transformers/models/llava_next_video/diff_llava_next_video.py @@ -30,6 +30,7 @@ ) from ...cache_utils import Cache +from ...generation import GenerationMixin from ...utils import ( logging, replace_return_docstrings, @@ -219,7 +220,7 @@ class LlavaNextVideoMultiModalProjector(LlavaNextMultiModalProjector): pass -class LlavaNextVideoForConditionalGeneration(LlavaNextForConditionalGeneration): +class LlavaNextVideoForConditionalGeneration(GenerationMixin, LlavaNextForConditionalGeneration): def __init__(self, config: LlavaNextVideoConfig, **super_kwargs): super().__init__(config, **super_kwargs) self.vision_resampler = LlavaNextVideoPooler(config) diff --git a/src/transformers/models/llava_next_video/modeling_llava_next_video.py b/src/transformers/models/llava_next_video/modeling_llava_next_video.py index 3430fbe590aa..8fecf89f5965 100644 --- a/src/transformers/models/llava_next_video/modeling_llava_next_video.py +++ b/src/transformers/models/llava_next_video/modeling_llava_next_video.py @@ -32,6 +32,7 @@ from ... 
import PreTrainedModel from ...activations import ACT2FN from ...cache_utils import Cache +from ...generation import GenerationMixin from ...image_processing_utils import select_best_resolution from ...modeling_outputs import ModelOutput from ...utils import ( @@ -387,7 +388,7 @@ def _supports_sdpa(self): """The LLAVA-NeXT model which consists of a vision backbone and a language model.""", LLAVA_NEXT_VIDEO_START_DOCSTRING, ) -class LlavaNextVideoForConditionalGeneration(LlavaNextVideoPreTrainedModel): +class LlavaNextVideoForConditionalGeneration(GenerationMixin, LlavaNextVideoPreTrainedModel): def __init__( self, config: LlavaNextVideoConfig, diff --git a/src/transformers/models/longt5/modeling_longt5.py b/src/transformers/models/longt5/modeling_longt5.py index b2a6ed11ca57..b6f3a5a5d33a 100644 --- a/src/transformers/models/longt5/modeling_longt5.py +++ b/src/transformers/models/longt5/modeling_longt5.py @@ -24,6 +24,7 @@ from torch.nn import CrossEntropyLoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, @@ -1900,7 +1901,7 @@ def forward( @add_start_docstrings("""LONGT5 Model with a `language modeling` head on top.""", LONGT5_START_DOCSTRING) -class LongT5ForConditionalGeneration(LongT5PreTrainedModel): +class LongT5ForConditionalGeneration(GenerationMixin, LongT5PreTrainedModel): _keys_to_ignore_on_load_unexpected = [ r"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight", ] diff --git a/src/transformers/models/m2m_100/modeling_m2m_100.py b/src/transformers/models/m2m_100/modeling_m2m_100.py index 23a855fff256..93294e3592c3 100755 --- a/src/transformers/models/m2m_100/modeling_m2m_100.py +++ b/src/transformers/models/m2m_100/modeling_m2m_100.py @@ -22,6 +22,7 @@ from torch.nn import CrossEntropyLoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...integrations.deepspeed import is_deepspeed_zero3_enabled from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask from ...modeling_outputs import ( @@ -1342,7 +1343,7 @@ def forward( @add_start_docstrings( "The M2M100 Model with a language modeling head. 
Can be used for summarization.", M2M_100_START_DOCSTRING ) -class M2M100ForConditionalGeneration(M2M100PreTrainedModel): +class M2M100ForConditionalGeneration(GenerationMixin, M2M100PreTrainedModel): base_model_prefix = "model" _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"] diff --git a/src/transformers/models/mamba/modeling_mamba.py b/src/transformers/models/mamba/modeling_mamba.py index 14a3dea1d1cc..ad96469a9063 100644 --- a/src/transformers/models/mamba/modeling_mamba.py +++ b/src/transformers/models/mamba/modeling_mamba.py @@ -25,6 +25,7 @@ from ...activations import ACT2FN from ...cache_utils import MambaCache +from ...generation import GenerationMixin from ...modeling_utils import PreTrainedModel from ...utils import ( ModelOutput, @@ -657,7 +658,7 @@ def forward( """, MAMBA_START_DOCSTRING, ) -class MambaForCausalLM(MambaPreTrainedModel): +class MambaForCausalLM(GenerationMixin, MambaPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): diff --git a/src/transformers/models/mamba2/modeling_mamba2.py b/src/transformers/models/mamba2/modeling_mamba2.py index 69390ea9ad2b..fd49f6cbf6d2 100644 --- a/src/transformers/models/mamba2/modeling_mamba2.py +++ b/src/transformers/models/mamba2/modeling_mamba2.py @@ -24,6 +24,7 @@ from torch.nn import CrossEntropyLoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_utils import PreTrainedModel from ...utils import ( ModelOutput, @@ -931,7 +932,7 @@ def forward( """, MAMBA2_START_DOCSTRING, ) -class Mamba2ForCausalLM(Mamba2PreTrainedModel): +class Mamba2ForCausalLM(GenerationMixin, Mamba2PreTrainedModel): _tied_weights_keys = [] def __init__(self, config): diff --git a/src/transformers/models/marian/modeling_marian.py b/src/transformers/models/marian/modeling_marian.py index 2045f673540f..7aa1fd60c52a 100755 --- a/src/transformers/models/marian/modeling_marian.py +++ b/src/transformers/models/marian/modeling_marian.py @@ -25,6 +25,7 @@ from torch.nn import CrossEntropyLoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask from ...modeling_outputs import ( BaseModelOutput, @@ -1224,7 +1225,7 @@ def forward( @add_start_docstrings( "The Marian Model with a language modeling head. 
Can be used for summarization.", MARIAN_START_DOCSTRING ) -class MarianMTModel(MarianPreTrainedModel): +class MarianMTModel(GenerationMixin, MarianPreTrainedModel): base_model_prefix = "model" _keys_to_ignore_on_load_missing = [ "final_logits_bias", @@ -1504,7 +1505,7 @@ def forward(self, *args, **kwargs): # Copied from transformers.models.bart.modeling_bart.BartForCausalLM with Bart->Marian, facebook/bart-base->Helsinki-NLP/opus-mt-fr-en -class MarianForCausalLM(MarianPreTrainedModel): +class MarianForCausalLM(GenerationMixin, MarianPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): diff --git a/src/transformers/models/mbart/modeling_mbart.py b/src/transformers/models/mbart/modeling_mbart.py index 6cad7b08f994..c9a72e30cc37 100755 --- a/src/transformers/models/mbart/modeling_mbart.py +++ b/src/transformers/models/mbart/modeling_mbart.py @@ -24,6 +24,7 @@ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask from ...modeling_outputs import ( BaseModelOutput, @@ -1389,7 +1390,7 @@ def forward( "The MBART Model with a language modeling head. Can be used for summarization, after fine-tuning the pretrained models.", MBART_START_DOCSTRING, ) -class MBartForConditionalGeneration(MBartPreTrainedModel): +class MBartForConditionalGeneration(GenerationMixin, MBartPreTrainedModel): base_model_prefix = "model" _keys_to_ignore_on_load_missing = ["final_logits_bias"] _tied_weights_keys = ["model.encoder.embed_tokens.weight", "model.decoder.embed_tokens.weight", "lm_head.weight"] @@ -1830,7 +1831,7 @@ def forward(self, *args, **kwargs): # Copied from transformers.models.bart.modeling_bart.BartForCausalLM with Bart->MBart, facebook/bart-base->facebook/mbart-large-cc25 -class MBartForCausalLM(MBartPreTrainedModel): +class MBartForCausalLM(GenerationMixin, MBartPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): diff --git a/src/transformers/models/megatron_bert/modeling_megatron_bert.py b/src/transformers/models/megatron_bert/modeling_megatron_bert.py index 16641655e203..5c0f920b02fd 100755 --- a/src/transformers/models/megatron_bert/modeling_megatron_bert.py +++ b/src/transformers/models/megatron_bert/modeling_megatron_bert.py @@ -27,6 +27,7 @@ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, @@ -1110,7 +1111,7 @@ def forward( """MegatronBert Model with a `language modeling` head on top for CLM fine-tuning.""", MEGATRON_BERT_START_DOCSTRING, ) -class MegatronBertForCausalLM(MegatronBertPreTrainedModel): +class MegatronBertForCausalLM(GenerationMixin, MegatronBertPreTrainedModel): _tied_weights_keys = ["cls.predictions.decoder"] def __init__(self, config): diff --git a/src/transformers/models/mistral/modeling_mistral.py b/src/transformers/models/mistral/modeling_mistral.py index 240e229e0bb0..ce85cfa25a9a 100644 --- a/src/transformers/models/mistral/modeling_mistral.py +++ b/src/transformers/models/mistral/modeling_mistral.py @@ -29,6 +29,7 @@ from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, SlidingWindowCache, StaticCache +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import 
AttentionMaskConverter from ...modeling_outputs import ( BaseModelOutputWithPast, @@ -947,7 +948,7 @@ def _update_causal_mask( return causal_mask -class MistralForCausalLM(MistralPreTrainedModel): +class MistralForCausalLM(GenerationMixin, MistralPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): diff --git a/src/transformers/models/mixtral/modeling_mixtral.py b/src/transformers/models/mixtral/modeling_mixtral.py index 919f32abc7fc..fa01764c9148 100644 --- a/src/transformers/models/mixtral/modeling_mixtral.py +++ b/src/transformers/models/mixtral/modeling_mixtral.py @@ -30,6 +30,7 @@ from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, StaticCache +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter, _prepare_4d_causal_attention_mask from ...modeling_outputs import ( MoeCausalLMOutputWithPast, @@ -1180,7 +1181,7 @@ def _update_causal_mask( return causal_mask -class MixtralForCausalLM(MixtralPreTrainedModel): +class MixtralForCausalLM(GenerationMixin, MixtralPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): diff --git a/src/transformers/models/mpt/modeling_mpt.py b/src/transformers/models/mpt/modeling_mpt.py index 85579636dcc4..fa9c2362324a 100644 --- a/src/transformers/models/mpt/modeling_mpt.py +++ b/src/transformers/models/mpt/modeling_mpt.py @@ -24,6 +24,7 @@ from torch.nn import functional as F from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, @@ -500,7 +501,7 @@ def forward( """, MPT_START_DOCSTRING, ) -class MptForCausalLM(MptPreTrainedModel): +class MptForCausalLM(GenerationMixin, MptPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config: MptConfig): diff --git a/src/transformers/models/mt5/modeling_mt5.py b/src/transformers/models/mt5/modeling_mt5.py index 54943cf982dd..6d2d99df290b 100644 --- a/src/transformers/models/mt5/modeling_mt5.py +++ b/src/transformers/models/mt5/modeling_mt5.py @@ -25,6 +25,7 @@ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, @@ -1550,7 +1551,7 @@ def forward( @add_start_docstrings("""MT5 Model with a `language modeling` head on top.""", MT5_START_DOCSTRING) -class MT5ForConditionalGeneration(MT5PreTrainedModel): +class MT5ForConditionalGeneration(GenerationMixin, MT5PreTrainedModel): r""" Examples: diff --git a/src/transformers/models/musicgen/modeling_musicgen.py b/src/transformers/models/musicgen/modeling_musicgen.py index f720faac038e..0e6bc8c90a35 100644 --- a/src/transformers/models/musicgen/modeling_musicgen.py +++ b/src/transformers/models/musicgen/modeling_musicgen.py @@ -26,9 +26,14 @@ from torch.nn import CrossEntropyLoss from ...activations import ACT2FN -from ...generation.configuration_utils import GenerationConfig, GenerationMode -from ...generation.logits_process import ClassifierFreeGuidanceLogitsProcessor, LogitsProcessorList -from ...generation.stopping_criteria import StoppingCriteriaList +from ...generation import ( + ClassifierFreeGuidanceLogitsProcessor, + GenerationConfig, + GenerationMixin, + GenerationMode, + 
LogitsProcessorList, + StoppingCriteriaList, +) from ...modeling_attn_mask_utils import ( _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa, @@ -1206,7 +1211,7 @@ def forward( "The MusicGen decoder model with a language modelling head on top.", MUSICGEN_START_DOCSTRING, ) -class MusicgenForCausalLM(MusicgenPreTrainedModel): +class MusicgenForCausalLM(GenerationMixin, MusicgenPreTrainedModel): def __init__(self, config: MusicgenDecoderConfig): super().__init__(config) @@ -1658,7 +1663,7 @@ def generate( "for music generation tasks with one or both of text and audio prompts.", MUSICGEN_START_DOCSTRING, ) -class MusicgenForConditionalGeneration(PreTrainedModel): +class MusicgenForConditionalGeneration(GenerationMixin, PreTrainedModel): config_class = MusicgenConfig base_model_prefix = "encoder_decoder" main_input_name = "input_ids" diff --git a/src/transformers/models/musicgen_melody/modeling_musicgen_melody.py b/src/transformers/models/musicgen_melody/modeling_musicgen_melody.py index a8a8fe960989..8584594b825c 100644 --- a/src/transformers/models/musicgen_melody/modeling_musicgen_melody.py +++ b/src/transformers/models/musicgen_melody/modeling_musicgen_melody.py @@ -26,9 +26,14 @@ from torch.nn import CrossEntropyLoss from ...activations import ACT2FN -from ...generation.configuration_utils import GenerationConfig, GenerationMode -from ...generation.logits_process import ClassifierFreeGuidanceLogitsProcessor, LogitsProcessorList -from ...generation.stopping_criteria import StoppingCriteriaList +from ...generation import ( + ClassifierFreeGuidanceLogitsProcessor, + GenerationConfig, + GenerationMixin, + GenerationMode, + LogitsProcessorList, + StoppingCriteriaList, +) from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa from ...modeling_outputs import ( BaseModelOutputWithPast, @@ -1117,7 +1122,7 @@ def forward( MUSICGEN_MELODY_START_DOCSTRING, ) # Copied from transformers.models.musicgen.modeling_musicgen.MusicgenForCausalLM with MUSICGEN->MUSICGEN_MELODY,Musicgen->MusicgenMelody,MusicGen->Musicgen Melody -class MusicgenMelodyForCausalLM(MusicgenMelodyPreTrainedModel): +class MusicgenMelodyForCausalLM(GenerationMixin, MusicgenMelodyPreTrainedModel): def __init__(self, config: MusicgenMelodyDecoderConfig): super().__init__(config) @@ -1585,7 +1590,7 @@ def generate( decoder (`Optional[MusicgenMelodyForCausalLM]`, *optional*): MusicGen Melody decoder used to generate audio codes. """, ) -class MusicgenMelodyForConditionalGeneration(PreTrainedModel): +class MusicgenMelodyForConditionalGeneration(GenerationMixin, PreTrainedModel): config_class = MusicgenMelodyConfig main_input_name = "input_ids" supports_gradient_checkpointing = True diff --git a/src/transformers/models/mvp/modeling_mvp.py b/src/transformers/models/mvp/modeling_mvp.py index 319f1760cef9..0868af9af081 100644 --- a/src/transformers/models/mvp/modeling_mvp.py +++ b/src/transformers/models/mvp/modeling_mvp.py @@ -24,6 +24,7 @@ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask from ...modeling_outputs import ( BaseModelOutput, @@ -1351,7 +1352,7 @@ def forward( @add_start_docstrings( "The MVP Model with a language modeling head. 
Can be used for various text generation tasks.", MVP_START_DOCSTRING ) -class MvpForConditionalGeneration(MvpPreTrainedModel): +class MvpForConditionalGeneration(GenerationMixin, MvpPreTrainedModel): _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"] def __init__(self, config: MvpConfig): @@ -1791,7 +1792,7 @@ def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs) -class MvpForCausalLM(MvpPreTrainedModel): +class MvpForCausalLM(GenerationMixin, MvpPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): diff --git a/src/transformers/models/nemotron/modeling_nemotron.py b/src/transformers/models/nemotron/modeling_nemotron.py index 548732b371a5..8c986e075785 100644 --- a/src/transformers/models/nemotron/modeling_nemotron.py +++ b/src/transformers/models/nemotron/modeling_nemotron.py @@ -26,6 +26,7 @@ from ...activations import ACT2FN from ...cache_utils import Cache, StaticCache +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_flash_attention_utils import _flash_attention_forward from ...modeling_outputs import ( @@ -979,7 +980,7 @@ def _update_causal_mask( # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM with LLAMA->NEMOTRON,Llama->Nemotron,llama->nemotron -class NemotronForCausalLM(NemotronPreTrainedModel): +class NemotronForCausalLM(GenerationMixin, NemotronPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): diff --git a/src/transformers/models/nllb_moe/modeling_nllb_moe.py b/src/transformers/models/nllb_moe/modeling_nllb_moe.py index 2bec0fb84dce..a419b8eccff3 100644 --- a/src/transformers/models/nllb_moe/modeling_nllb_moe.py +++ b/src/transformers/models/nllb_moe/modeling_nllb_moe.py @@ -22,6 +22,7 @@ from torch.nn import CrossEntropyLoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...integrations.deepspeed import is_deepspeed_zero3_enabled from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask from ...modeling_outputs import ( @@ -1604,7 +1605,7 @@ def forward( @add_start_docstrings( "The NllbMoe Model with a language modeling head. 
Can be used for summarization.", NLLB_MOE_START_DOCSTRING ) -class NllbMoeForConditionalGeneration(NllbMoePreTrainedModel): +class NllbMoeForConditionalGeneration(GenerationMixin, NllbMoePreTrainedModel): base_model_prefix = "model" _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"] diff --git a/src/transformers/models/olmo/modeling_olmo.py b/src/transformers/models/olmo/modeling_olmo.py index 587ef92e4585..61006c1cc43f 100644 --- a/src/transformers/models/olmo/modeling_olmo.py +++ b/src/transformers/models/olmo/modeling_olmo.py @@ -30,6 +30,7 @@ from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, StaticCache +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_outputs import ( BaseModelOutputWithPast, @@ -1018,7 +1019,7 @@ def _update_causal_mask( # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM with LLAMA->OLMO,Llama->Olmo -class OlmoForCausalLM(OlmoPreTrainedModel): +class OlmoForCausalLM(GenerationMixin, OlmoPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): diff --git a/src/transformers/models/openai/modeling_openai.py b/src/transformers/models/openai/modeling_openai.py index 2b24850f3f0c..2b252cfc76d2 100644 --- a/src/transformers/models/openai/modeling_openai.py +++ b/src/transformers/models/openai/modeling_openai.py @@ -26,6 +26,7 @@ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import gelu_new, silu +from ...generation import GenerationMixin from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput from ...modeling_utils import PreTrainedModel, SequenceSummary from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer @@ -524,7 +525,7 @@ def forward( """, OPENAI_GPT_START_DOCSTRING, ) -class OpenAIGPTLMHeadModel(OpenAIGPTPreTrainedModel): +class OpenAIGPTLMHeadModel(GenerationMixin, OpenAIGPTPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): diff --git a/src/transformers/models/opt/modeling_opt.py b/src/transformers/models/opt/modeling_opt.py index 8f058171778e..858f32073454 100644 --- a/src/transformers/models/opt/modeling_opt.py +++ b/src/transformers/models/opt/modeling_opt.py @@ -22,6 +22,7 @@ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask from ...modeling_outputs import ( BaseModelOutputWithPast, @@ -882,7 +883,7 @@ def forward( ) -class OPTForCausalLM(OPTPreTrainedModel): +class OPTForCausalLM(GenerationMixin, OPTPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): diff --git a/src/transformers/models/paligemma/modeling_paligemma.py b/src/transformers/models/paligemma/modeling_paligemma.py index 8eff8cce50cc..31ea0e93fcc7 100644 --- a/src/transformers/models/paligemma/modeling_paligemma.py +++ b/src/transformers/models/paligemma/modeling_paligemma.py @@ -22,6 +22,7 @@ from torch import nn from ...cache_utils import Cache, StaticCache +from ...generation import GenerationMixin from ...modeling_utils import PreTrainedModel from ...utils import ( ModelOutput, @@ -236,7 +237,7 @@ def _supports_sdpa(self): """The PALIGEMMA model which consists of a vision backbone and a language model.""", PALIGEMMA_START_DOCSTRING, ) -class 
PaliGemmaForConditionalGeneration(PaliGemmaPreTrainedModel): +class PaliGemmaForConditionalGeneration(GenerationMixin, PaliGemmaPreTrainedModel): def __init__(self, config: PaliGemmaConfig): super().__init__(config) self.vision_tower = AutoModel.from_config(config=config.vision_config) diff --git a/src/transformers/models/pegasus/modeling_pegasus.py b/src/transformers/models/pegasus/modeling_pegasus.py index 42cef3a63558..69890f162c14 100755 --- a/src/transformers/models/pegasus/modeling_pegasus.py +++ b/src/transformers/models/pegasus/modeling_pegasus.py @@ -25,6 +25,7 @@ from torch.nn import CrossEntropyLoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask from ...modeling_outputs import ( BaseModelOutput, @@ -1244,7 +1245,7 @@ def forward( @add_start_docstrings( "The PEGASUS Model with a language modeling head. Can be used for summarization.", PEGASUS_START_DOCSTRING ) -class PegasusForConditionalGeneration(PegasusPreTrainedModel): +class PegasusForConditionalGeneration(GenerationMixin, PegasusPreTrainedModel): base_model_prefix = "model" _keys_to_ignore_on_load_missing = ["final_logits_bias"] _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"] @@ -1456,7 +1457,7 @@ def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs) -class PegasusForCausalLM(PegasusPreTrainedModel): +class PegasusForCausalLM(GenerationMixin, PegasusPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): diff --git a/src/transformers/models/pegasus_x/modeling_pegasus_x.py b/src/transformers/models/pegasus_x/modeling_pegasus_x.py index 6d9072777bf6..1cc5d44bc2a3 100755 --- a/src/transformers/models/pegasus_x/modeling_pegasus_x.py +++ b/src/transformers/models/pegasus_x/modeling_pegasus_x.py @@ -25,6 +25,7 @@ from torch.nn import CrossEntropyLoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask from ...modeling_outputs import ( BaseModelOutput, @@ -1464,7 +1465,7 @@ def forward( @add_start_docstrings("The PEGASUS-X for conditional generation (e.g. 
summarization).", PEGASUS_X_START_DOCSTRING) -class PegasusXForConditionalGeneration(PegasusXPreTrainedModel): +class PegasusXForConditionalGeneration(GenerationMixin, PegasusXPreTrainedModel): base_model_prefix = "model" _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"] diff --git a/src/transformers/models/persimmon/modeling_persimmon.py b/src/transformers/models/persimmon/modeling_persimmon.py index 90a7f355992e..695e64fe973c 100644 --- a/src/transformers/models/persimmon/modeling_persimmon.py +++ b/src/transformers/models/persimmon/modeling_persimmon.py @@ -29,6 +29,7 @@ from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, StaticCache +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_outputs import ( BaseModelOutputWithPast, @@ -828,7 +829,7 @@ def _update_causal_mask( return causal_mask -class PersimmonForCausalLM(PersimmonPreTrainedModel): +class PersimmonForCausalLM(GenerationMixin, PersimmonPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.__init__ with LLAMA->PERSIMMON,Llama->Persimmon diff --git a/src/transformers/models/phi/modeling_phi.py b/src/transformers/models/phi/modeling_phi.py index 3c647a9d8d81..08d11d9b96f1 100644 --- a/src/transformers/models/phi/modeling_phi.py +++ b/src/transformers/models/phi/modeling_phi.py @@ -26,6 +26,7 @@ from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, StaticCache +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_outputs import ( BaseModelOutputWithPast, @@ -1113,7 +1114,7 @@ def _update_causal_mask( return causal_mask -class PhiForCausalLM(PhiPreTrainedModel): +class PhiForCausalLM(GenerationMixin, PhiPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.__init__ with Llama->Phi,bias=False->bias=True diff --git a/src/transformers/models/phi3/modeling_phi3.py b/src/transformers/models/phi3/modeling_phi3.py index 4652294980fd..ec47e6fa3e69 100644 --- a/src/transformers/models/phi3/modeling_phi3.py +++ b/src/transformers/models/phi3/modeling_phi3.py @@ -26,6 +26,7 @@ from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, StaticCache +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_outputs import ( BaseModelOutputWithPast, @@ -1153,7 +1154,7 @@ def _update_causal_mask( return causal_mask -class Phi3ForCausalLM(Phi3PreTrainedModel): +class Phi3ForCausalLM(GenerationMixin, Phi3PreTrainedModel): _tied_weights_keys = ["lm_head.weight"] # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.__init__ with Llama->Phi3 diff --git a/src/transformers/models/pix2struct/modeling_pix2struct.py b/src/transformers/models/pix2struct/modeling_pix2struct.py index 94d882c80566..df7f84a19f83 100644 --- a/src/transformers/models/pix2struct/modeling_pix2struct.py +++ b/src/transformers/models/pix2struct/modeling_pix2struct.py @@ -22,6 +22,7 @@ from torch import nn from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPooling, @@ -1553,7 +1554,7 @@ def forward( "A conditional generation model with a language modeling head. 
Can be used for sequence generation tasks.", PIX2STRUCT_START_DOCSTRING, ) -class Pix2StructForConditionalGeneration(Pix2StructPreTrainedModel): +class Pix2StructForConditionalGeneration(GenerationMixin, Pix2StructPreTrainedModel): config_class = Pix2StructConfig main_input_name = "flattened_patches" _tied_weights_keys = ["decoder.lm_head.weight"] diff --git a/src/transformers/models/plbart/modeling_plbart.py b/src/transformers/models/plbart/modeling_plbart.py index 93d91e160089..520a994e9afa 100644 --- a/src/transformers/models/plbart/modeling_plbart.py +++ b/src/transformers/models/plbart/modeling_plbart.py @@ -24,6 +24,7 @@ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import ( _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa, @@ -1254,7 +1255,7 @@ def forward( "The PLBART Model with a language modeling head. Can be used for code-to-text, text-to-code and code-to-code.", PLBART_START_DOCSTRING, ) -class PLBartForConditionalGeneration(PLBartPreTrainedModel): +class PLBartForConditionalGeneration(GenerationMixin, PLBartPreTrainedModel): base_model_prefix = "model" _keys_to_ignore_on_load_missing = ["final_logits_bias"] _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"] @@ -1568,7 +1569,7 @@ def forward(self, *args, **kwargs): # Copied from transformers.models.bart.modeling_bart.BartForCausalLM with Bart->PLBart, facebook/bart-base->uclanlp/plbart-base -class PLBartForCausalLM(PLBartPreTrainedModel): +class PLBartForCausalLM(GenerationMixin, PLBartPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): diff --git a/src/transformers/models/pop2piano/modeling_pop2piano.py b/src/transformers/models/pop2piano/modeling_pop2piano.py index c769cff3c454..0ecd4af73edb 100644 --- a/src/transformers/models/pop2piano/modeling_pop2piano.py +++ b/src/transformers/models/pop2piano/modeling_pop2piano.py @@ -25,6 +25,7 @@ from transformers.generation import GenerationConfig from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, @@ -1001,7 +1002,7 @@ def forward(self, feature, index_value, embedding_offset): @add_start_docstrings("""Pop2Piano Model with a `language modeling` head on top.""", Pop2Piano_START_DOCSTRING) -class Pop2PianoForConditionalGeneration(Pop2PianoPreTrainedModel): +class Pop2PianoForConditionalGeneration(GenerationMixin, Pop2PianoPreTrainedModel): _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"] def __init__(self, config: Pop2PianoConfig): diff --git a/src/transformers/models/prophetnet/modeling_prophetnet.py b/src/transformers/models/prophetnet/modeling_prophetnet.py index 96fa2e2c12e5..1acc3e66ec2a 100644 --- a/src/transformers/models/prophetnet/modeling_prophetnet.py +++ b/src/transformers/models/prophetnet/modeling_prophetnet.py @@ -26,6 +26,7 @@ from torch.nn import LayerNorm from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_outputs import BaseModelOutput from ...modeling_utils import PreTrainedModel from ...utils import ( @@ -1856,7 +1857,7 @@ def forward( "The ProphetNet Model with a language modeling head. 
Can be used for sequence generation tasks.", PROPHETNET_START_DOCSTRING, ) -class ProphetNetForConditionalGeneration(ProphetNetPreTrainedModel): +class ProphetNetForConditionalGeneration(GenerationMixin, ProphetNetPreTrainedModel): _tied_weights_keys = ["encoder.word_embeddings.weight", "decoder.word_embeddings.weight", "lm_head.weight"] def __init__(self, config: ProphetNetConfig): @@ -2073,7 +2074,7 @@ def get_decoder(self): " language modeling.", PROPHETNET_START_DOCSTRING, ) -class ProphetNetForCausalLM(ProphetNetPreTrainedModel): +class ProphetNetForCausalLM(GenerationMixin, ProphetNetPreTrainedModel): _tied_weights_keys = [ "prophetnet.word_embeddings.weight", "prophetnet.decoder.word_embeddings.weight", diff --git a/src/transformers/models/qwen2/modeling_qwen2.py b/src/transformers/models/qwen2/modeling_qwen2.py index 59413730ad4a..0772fe6bd626 100644 --- a/src/transformers/models/qwen2/modeling_qwen2.py +++ b/src/transformers/models/qwen2/modeling_qwen2.py @@ -29,6 +29,7 @@ from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, StaticCache +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_outputs import ( BaseModelOutputWithPast, @@ -1018,7 +1019,7 @@ def _update_causal_mask( return causal_mask -class Qwen2ForCausalLM(Qwen2PreTrainedModel): +class Qwen2ForCausalLM(GenerationMixin, Qwen2PreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): diff --git a/src/transformers/models/qwen2_audio/modeling_qwen2_audio.py b/src/transformers/models/qwen2_audio/modeling_qwen2_audio.py index 14235bf0aaf6..489434350111 100644 --- a/src/transformers/models/qwen2_audio/modeling_qwen2_audio.py +++ b/src/transformers/models/qwen2_audio/modeling_qwen2_audio.py @@ -22,10 +22,11 @@ import torch.utils.checkpoint from torch import nn -from ... 
import PreTrainedModel from ...activations import ACT2FN from ...cache_utils import Cache, EncoderDecoderCache, StaticCache +from ...generation import GenerationMixin from ...modeling_outputs import BaseModelOutput, ModelOutput +from ...modeling_utils import PreTrainedModel from ...utils import ( add_start_docstrings, add_start_docstrings_to_model_forward, @@ -855,7 +856,7 @@ def forward(self, audio_features): """The QWEN2AUDIO model which consists of a audio backbone and a language model.""", QWEN2AUDIO_START_DOCSTRING, ) -class Qwen2AudioForConditionalGeneration(Qwen2AudioPreTrainedModel): +class Qwen2AudioForConditionalGeneration(GenerationMixin, Qwen2AudioPreTrainedModel): def __init__(self, config: Qwen2AudioConfig): super().__init__(config) self.audio_tower = AutoModel.from_config(config.audio_config, attn_implementation=config._attn_implementation) diff --git a/src/transformers/models/qwen2_moe/modeling_qwen2_moe.py b/src/transformers/models/qwen2_moe/modeling_qwen2_moe.py index c08735f45345..c3558a5b2851 100644 --- a/src/transformers/models/qwen2_moe/modeling_qwen2_moe.py +++ b/src/transformers/models/qwen2_moe/modeling_qwen2_moe.py @@ -30,6 +30,7 @@ from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, StaticCache +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_outputs import ( MoeCausalLMOutputWithPast, @@ -1191,7 +1192,7 @@ def _update_causal_mask( return causal_mask -class Qwen2MoeForCausalLM(Qwen2MoePreTrainedModel): +class Qwen2MoeForCausalLM(GenerationMixin, Qwen2MoePreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): diff --git a/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py b/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py index 6ab813ad9ade..c6c2cedc7813 100644 --- a/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py +++ b/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py @@ -31,6 +31,7 @@ from ...activations import ACT2FN from ...cache_utils import Cache, StaticCache +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import ( AttentionMaskConverter, ) @@ -1306,7 +1307,7 @@ def _update_causal_mask( """ -class Qwen2VLForConditionalGeneration(Qwen2VLPreTrainedModel): +class Qwen2VLForConditionalGeneration(GenerationMixin, Qwen2VLPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): diff --git a/src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py b/src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py index a8f076fad79c..28055de872ab 100644 --- a/src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py +++ b/src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py @@ -24,6 +24,7 @@ from torch.nn import CrossEntropyLoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_outputs import BaseModelOutputWithNoAttention, CausalLMOutput from ...modeling_utils import PreTrainedModel @@ -777,7 +778,7 @@ def _update_causal_mask(self, attention_mask, input_tensor, cache_position): # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM with LLAMA->RECURRENTGEMMA,Llama->RecurrentGemma,llama->gemma -class RecurrentGemmaForCausalLM(RecurrentGemmaPreTrainedModel): +class RecurrentGemmaForCausalLM(GenerationMixin, RecurrentGemmaPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, 
config): diff --git a/src/transformers/models/reformer/modeling_reformer.py b/src/transformers/models/reformer/modeling_reformer.py index 2e98a07217e6..c1b074d86ca3 100755 --- a/src/transformers/models/reformer/modeling_reformer.py +++ b/src/transformers/models/reformer/modeling_reformer.py @@ -29,6 +29,7 @@ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_outputs import CausalLMOutput, MaskedLMOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward @@ -2183,7 +2184,7 @@ def _pad_to_mult_of_chunk_length( @add_start_docstrings("""Reformer Model with a `language modeling` head on top.""", REFORMER_START_DOCSTRING) -class ReformerModelWithLMHead(ReformerPreTrainedModel): +class ReformerModelWithLMHead(GenerationMixin, ReformerPreTrainedModel): _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"] def __init__(self, config): diff --git a/src/transformers/models/rembert/modeling_rembert.py b/src/transformers/models/rembert/modeling_rembert.py index 31f7e3dce454..0604bdb1d864 100755 --- a/src/transformers/models/rembert/modeling_rembert.py +++ b/src/transformers/models/rembert/modeling_rembert.py @@ -24,6 +24,7 @@ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, @@ -1002,7 +1003,7 @@ def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_ @add_start_docstrings( """RemBERT Model with a `language modeling` head on top for CLM fine-tuning.""", REMBERT_START_DOCSTRING ) -class RemBertForCausalLM(RemBertPreTrainedModel): +class RemBertForCausalLM(GenerationMixin, RemBertPreTrainedModel): _tied_weights_keys = ["cls.predictions.decoder.weight"] def __init__(self, config): diff --git a/src/transformers/models/roberta/modeling_roberta.py b/src/transformers/models/roberta/modeling_roberta.py index f1f83147527d..9ccff336cd6e 100644 --- a/src/transformers/models/roberta/modeling_roberta.py +++ b/src/transformers/models/roberta/modeling_roberta.py @@ -25,6 +25,7 @@ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN, gelu +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import ( _prepare_4d_attention_mask_for_sdpa, _prepare_4d_causal_attention_mask_for_sdpa, @@ -1003,7 +1004,7 @@ def forward( @add_start_docstrings( """RoBERTa Model with a `language modeling` head on top for CLM fine-tuning.""", ROBERTA_START_DOCSTRING ) -class RobertaForCausalLM(RobertaPreTrainedModel): +class RobertaForCausalLM(GenerationMixin, RobertaPreTrainedModel): _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"] def __init__(self, config): diff --git a/src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py b/src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py index 95657c260dc7..a06eae4e4a54 100644 --- a/src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py +++ b/src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py @@ -24,6 +24,7 @@ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN, gelu +from ...generation import 
GenerationMixin from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, @@ -855,7 +856,7 @@ def forward( ROBERTA_PRELAYERNORM_START_DOCSTRING, ) # Copied from transformers.models.roberta.modeling_roberta.RobertaForCausalLM with FacebookAI/roberta-base->andreasmadsen/efficient_mlm_m0.40,ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm, RobertaPreLayerNormTokenizer->RobertaTokenizer -class RobertaPreLayerNormForCausalLM(RobertaPreLayerNormPreTrainedModel): +class RobertaPreLayerNormForCausalLM(GenerationMixin, RobertaPreLayerNormPreTrainedModel): _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"] def __init__(self, config): diff --git a/src/transformers/models/roc_bert/modeling_roc_bert.py b/src/transformers/models/roc_bert/modeling_roc_bert.py index c4efbf16323e..1c641fb4a510 100644 --- a/src/transformers/models/roc_bert/modeling_roc_bert.py +++ b/src/transformers/models/roc_bert/modeling_roc_bert.py @@ -24,6 +24,7 @@ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, @@ -1403,7 +1404,7 @@ def prepare_inputs_for_generation( @add_start_docstrings( """RoCBert Model with a `language modeling` head on top for CLM fine-tuning.""", ROC_BERT_START_DOCSTRING ) -class RoCBertForCausalLM(RoCBertPreTrainedModel): +class RoCBertForCausalLM(GenerationMixin, RoCBertPreTrainedModel): _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"] # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel.__init__ with BertLMHeadModel->RoCBertForCausalLM,Bert->RoCBert,bert->roc_bert diff --git a/src/transformers/models/roformer/modeling_roformer.py b/src/transformers/models/roformer/modeling_roformer.py index 69588ff743a0..fe461fc60475 100644 --- a/src/transformers/models/roformer/modeling_roformer.py +++ b/src/transformers/models/roformer/modeling_roformer.py @@ -25,6 +25,7 @@ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, @@ -1033,7 +1034,7 @@ def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_ @add_start_docstrings( """RoFormer Model with a `language modeling` head on top for CLM fine-tuning.""", ROFORMER_START_DOCSTRING ) -class RoFormerForCausalLM(RoFormerPreTrainedModel): +class RoFormerForCausalLM(GenerationMixin, RoFormerPreTrainedModel): _tied_weights_keys = ["cls.predictions.decoder.bias", "cls.predictions.decoder.weight"] def __init__(self, config): diff --git a/src/transformers/models/rwkv/modeling_rwkv.py b/src/transformers/models/rwkv/modeling_rwkv.py index 7dec1f26e1a3..8acc8bc2f496 100644 --- a/src/transformers/models/rwkv/modeling_rwkv.py +++ b/src/transformers/models/rwkv/modeling_rwkv.py @@ -25,6 +25,7 @@ from torch import nn from torch.nn import CrossEntropyLoss +from ...generation import GenerationMixin from ...modeling_utils import PreTrainedModel from ...utils import ( ModelOutput, @@ -751,7 +752,7 @@ def _bnb_4bit_dequantize_and_rescale(self, target_layer, block_id): """, RWKV_START_DOCSTRING, ) -class RwkvForCausalLM(RwkvPreTrainedModel): +class RwkvForCausalLM(GenerationMixin, 
RwkvPreTrainedModel): _tied_weights_keys = ["head.weight"] def __init__(self, config): diff --git a/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py b/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py index a79d1d4cf2b9..c1e4ad9fedd0 100755 --- a/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py +++ b/src/transformers/models/seamless_m4t/modeling_seamless_m4t.py @@ -26,6 +26,7 @@ from ...activations import ACT2FN from ...deepspeed import is_deepspeed_zero3_enabled +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask from ...modeling_outputs import ( BaseModelOutput, @@ -2150,7 +2151,7 @@ def forward( embed_tokens_decoder (`nn.Embedding`, *optional*): input embedding of the decoder. """, ) -class SeamlessM4TTextToUnitForConditionalGeneration(SeamlessM4TPreTrainedModel): +class SeamlessM4TTextToUnitForConditionalGeneration(GenerationMixin, SeamlessM4TPreTrainedModel): _keys_to_ignore_on_load_missing = [ "vocoder", "speech_encoder", @@ -2656,7 +2657,7 @@ def remove_weight_norm(self): "The text-to-text SeamlessM4T Model transformer which can be used for T2TT.", SEAMLESS_M4T_START_DOCSTRING, ) -class SeamlessM4TForTextToText(SeamlessM4TPreTrainedModel): +class SeamlessM4TForTextToText(GenerationMixin, SeamlessM4TPreTrainedModel): _keys_to_ignore_on_load_missing = ["speech_encoder", "t2u_model", "vocoder"] main_input_name = "input_ids" diff --git a/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py b/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py index a53f544bb34f..fae1eb61ba53 100644 --- a/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py +++ b/src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py @@ -26,6 +26,7 @@ from ...activations import ACT2FN from ...deepspeed import is_deepspeed_zero3_enabled +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask from ...modeling_outputs import ( BaseModelOutput, @@ -2439,7 +2440,7 @@ def forward( embed_tokens_decoder (`nn.Embedding`, *optional*): input embedding of the decoder. 
""", ) -class SeamlessM4Tv2TextToUnitForConditionalGeneration(SeamlessM4Tv2PreTrainedModel): +class SeamlessM4Tv2TextToUnitForConditionalGeneration(GenerationMixin, SeamlessM4Tv2PreTrainedModel): _keys_to_ignore_on_load_missing = [ "vocoder", "speech_encoder", @@ -2914,7 +2915,7 @@ def remove_weight_norm(self): SEAMLESS_M4T_V2_START_DOCSTRING, ) # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForTextToText with SeamlessM4T->SeamlessM4Tv2,SeamlessM4Tv2Tokenizer->SeamlessM4TTokenizer, SeamlessM4Tv2Processor->SeamlessM4TProcessor -class SeamlessM4Tv2ForTextToText(SeamlessM4Tv2PreTrainedModel): +class SeamlessM4Tv2ForTextToText(GenerationMixin, SeamlessM4Tv2PreTrainedModel): _keys_to_ignore_on_load_missing = ["speech_encoder", "t2u_model", "vocoder"] main_input_name = "input_ids" diff --git a/src/transformers/models/speech_to_text/modeling_speech_to_text.py b/src/transformers/models/speech_to_text/modeling_speech_to_text.py index 8353a172b212..f014a00bc387 100755 --- a/src/transformers/models/speech_to_text/modeling_speech_to_text.py +++ b/src/transformers/models/speech_to_text/modeling_speech_to_text.py @@ -22,6 +22,7 @@ from torch.nn import CrossEntropyLoss from ...activations import ACT2FN +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask from ...modeling_outputs import ( BaseModelOutput, @@ -1207,7 +1208,7 @@ def forward( "The Speech2Text Model with a language modeling head. Can be used for summarization.", SPEECH_TO_TEXT_START_DOCSTRING, ) -class Speech2TextForConditionalGeneration(Speech2TextPreTrainedModel): +class Speech2TextForConditionalGeneration(GenerationMixin, Speech2TextPreTrainedModel): base_model_prefix = "model" _tied_weights_keys = ["lm_head.weight"] diff --git a/src/transformers/models/stablelm/modeling_stablelm.py b/src/transformers/models/stablelm/modeling_stablelm.py index 1ec4665fcfb7..489f751a27df 100755 --- a/src/transformers/models/stablelm/modeling_stablelm.py +++ b/src/transformers/models/stablelm/modeling_stablelm.py @@ -29,6 +29,7 @@ from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, StaticCache +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_outputs import ( BaseModelOutputWithPast, @@ -1106,7 +1107,7 @@ def _update_causal_mask( # Copied from transformers.models.persimmon.modeling_persimmon.PersimmonForCausalLM with PERSIMMON->STABLELM,Persimmon->StableLm -class StableLmForCausalLM(StableLmPreTrainedModel): +class StableLmForCausalLM(GenerationMixin, StableLmPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.__init__ with LLAMA->STABLELM,Llama->StableLm diff --git a/src/transformers/models/starcoder2/modeling_starcoder2.py b/src/transformers/models/starcoder2/modeling_starcoder2.py index 90603fd4e51e..f1f322c5d52f 100644 --- a/src/transformers/models/starcoder2/modeling_starcoder2.py +++ b/src/transformers/models/starcoder2/modeling_starcoder2.py @@ -29,6 +29,7 @@ from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, StaticCache +from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_outputs import ( BaseModelOutputWithPast, @@ -993,7 +994,7 @@ def _update_causal_mask( # Copied from transformers.models.qwen2.modeling_qwen2.Qwen2ForCausalLM with 
QWEN2->STARCODER2,Qwen2->Starcoder2
-class Starcoder2ForCausalLM(Starcoder2PreTrainedModel):
+class Starcoder2ForCausalLM(GenerationMixin, Starcoder2PreTrainedModel):
     _tied_weights_keys = ["lm_head.weight"]
 
     def __init__(self, config):
diff --git a/src/transformers/models/switch_transformers/modeling_switch_transformers.py b/src/transformers/models/switch_transformers/modeling_switch_transformers.py
index c5797d4573b7..dcd86d8884af 100644
--- a/src/transformers/models/switch_transformers/modeling_switch_transformers.py
+++ b/src/transformers/models/switch_transformers/modeling_switch_transformers.py
@@ -24,6 +24,7 @@
 from torch.nn import CrossEntropyLoss
 
 from ...activations import ACT2FN
+from ...generation import GenerationMixin
 from ...modeling_outputs import (
     MoEModelOutput,
     MoEModelOutputWithPastAndCrossAttentions,
@@ -1456,7 +1457,7 @@ def forward(
 @add_start_docstrings(
     """SWITCH_TRANSFORMERS Model with a `language modeling` head on top.""", SWITCH_TRANSFORMERS_START_DOCSTRING
 )
-class SwitchTransformersForConditionalGeneration(SwitchTransformersPreTrainedModel):
+class SwitchTransformersForConditionalGeneration(GenerationMixin, SwitchTransformersPreTrainedModel):
     _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"]
 
     def __init__(self, config: SwitchTransformersConfig):
diff --git a/src/transformers/models/t5/modeling_t5.py b/src/transformers/models/t5/modeling_t5.py
index a90101924c5b..3dba75ad8467 100644
--- a/src/transformers/models/t5/modeling_t5.py
+++ b/src/transformers/models/t5/modeling_t5.py
@@ -25,6 +25,7 @@
 from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
 
 from ...activations import ACT2FN
+from ...generation import GenerationMixin
 from ...modeling_outputs import (
     BaseModelOutput,
     BaseModelOutputWithPastAndCrossAttentions,
@@ -1542,7 +1543,7 @@ def forward(
 
 
 @add_start_docstrings("""T5 Model with a `language modeling` head on top.""", T5_START_DOCSTRING)
-class T5ForConditionalGeneration(T5PreTrainedModel):
+class T5ForConditionalGeneration(GenerationMixin, T5PreTrainedModel):
     _keys_to_ignore_on_load_unexpected = [
         "decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight",
     ]
diff --git a/src/transformers/models/trocr/modeling_trocr.py b/src/transformers/models/trocr/modeling_trocr.py
index 04eb40ab2a2f..ad79e3c0ff5e 100644
--- a/src/transformers/models/trocr/modeling_trocr.py
+++ b/src/transformers/models/trocr/modeling_trocr.py
@@ -23,6 +23,7 @@
 from torch.nn import CrossEntropyLoss
 
 from ...activations import ACT2FN
+from ...generation import GenerationMixin
 from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
 from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions
 from ...modeling_utils import PreTrainedModel
@@ -736,7 +737,7 @@ def forward(self, *args, **kwargs):
     " [`VisionEncoderDecoder`].",
     TROCR_START_DOCSTRING,
 )
-class TrOCRForCausalLM(TrOCRPreTrainedModel):
+class TrOCRForCausalLM(GenerationMixin, TrOCRPreTrainedModel):
     _tied_weights_keys = ["output_projection.weight"]
 
     def __init__(self, config):
diff --git a/src/transformers/models/udop/modeling_udop.py b/src/transformers/models/udop/modeling_udop.py
index 972248daaae5..f2ea13ffa4fe 100644
--- a/src/transformers/models/udop/modeling_udop.py
+++ b/src/transformers/models/udop/modeling_udop.py
@@ -34,6 +34,7 @@
 )
 
 from ...activations import ACT2FN
+from ...generation import GenerationMixin
 from ...modeling_utils import PreTrainedModel
 from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
 from ...utils import (
@@ -1679,7 +1680,7 @@ def forward(
     This class is based on [`T5ForConditionalGeneration`], extended to deal with images and layout (2D) data.""",
     UDOP_START_DOCSTRING,
 )
-class UdopForConditionalGeneration(UdopPreTrainedModel):
+class UdopForConditionalGeneration(GenerationMixin, UdopPreTrainedModel):
     _tied_weights_keys = [
         "encoder.embed_tokens.weight",
         "decoder.embed_tokens.weight",
diff --git a/src/transformers/models/umt5/modeling_umt5.py b/src/transformers/models/umt5/modeling_umt5.py
index 3271689540b9..26ed40a5a17a 100644
--- a/src/transformers/models/umt5/modeling_umt5.py
+++ b/src/transformers/models/umt5/modeling_umt5.py
@@ -23,6 +23,7 @@
 from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
 
 from ...activations import ACT2FN
+from ...generation import GenerationMixin
 from ...modeling_outputs import (
     BaseModelOutput,
     BaseModelOutputWithPastAndCrossAttentions,
@@ -1101,7 +1102,7 @@ def forward(
 
 
 @add_start_docstrings("""UMT5 Model with a `language modeling` head on top.""", UMT5_START_DOCSTRING)
-class UMT5ForConditionalGeneration(UMT5PreTrainedModel):
+class UMT5ForConditionalGeneration(GenerationMixin, UMT5PreTrainedModel):
     r"""
     Examples:
 
diff --git a/src/transformers/models/video_llava/modeling_video_llava.py b/src/transformers/models/video_llava/modeling_video_llava.py
index 425d46bd7741..0cc4ec27d4db 100644
--- a/src/transformers/models/video_llava/modeling_video_llava.py
+++ b/src/transformers/models/video_llava/modeling_video_llava.py
@@ -21,9 +21,10 @@
 import torch.utils.checkpoint
 from torch import nn
 
-from ... import PreTrainedModel
 from ...activations import ACT2FN
+from ...generation import GenerationMixin
 from ...modeling_outputs import BaseModelOutputWithPooling, ModelOutput
+from ...modeling_utils import PreTrainedModel
 from ...utils import (
     add_start_docstrings,
     add_start_docstrings_to_model_forward,
@@ -238,7 +239,7 @@ def _supports_sdpa(self):
     """The VideoLlava model which consists of a vision backbone and a language model.""",
     VIDEO_LLAVA_START_DOCSTRING,
 )
-class VideoLlavaForConditionalGeneration(VideoLlavaPreTrainedModel):
+class VideoLlavaForConditionalGeneration(GenerationMixin, VideoLlavaPreTrainedModel):
     def __init__(self, config: VideoLlavaConfig):
         super().__init__(config)
         self.video_tower = AutoModel.from_config(config.vision_config)
diff --git a/src/transformers/models/vipllava/modeling_vipllava.py b/src/transformers/models/vipllava/modeling_vipllava.py
index b1df10fdb3dc..008afd6de7f6 100644
--- a/src/transformers/models/vipllava/modeling_vipllava.py
+++ b/src/transformers/models/vipllava/modeling_vipllava.py
@@ -21,9 +21,10 @@
 import torch.utils.checkpoint
 from torch import nn
 
-from ... import PreTrainedModel
 from ...activations import ACT2FN
+from ...generation import GenerationMixin
 from ...modeling_outputs import ModelOutput
+from ...modeling_utils import PreTrainedModel
 from ...utils import (
     add_start_docstrings,
     add_start_docstrings_to_model_forward,
@@ -242,7 +243,7 @@ def _supports_sdpa(self):
     VIPLLAVA_START_DOCSTRING,
 )
 # Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration with LLAVA->VIPLLAVA,Llava->VipLlava
-class VipLlavaForConditionalGeneration(VipLlavaPreTrainedModel):
+class VipLlavaForConditionalGeneration(GenerationMixin, VipLlavaPreTrainedModel):
     def __init__(self, config: VipLlavaConfig):
         super().__init__(config)
         self.vision_tower = AutoModel.from_config(config.vision_config)
diff --git a/src/transformers/models/whisper/generation_whisper.py b/src/transformers/models/whisper/generation_whisper.py
index 7da4e49de98c..1073bb040cab 100644
--- a/src/transformers/models/whisper/generation_whisper.py
+++ b/src/transformers/models/whisper/generation_whisper.py
@@ -25,7 +25,7 @@
 
 from transformers.cache_utils import EncoderDecoderCache
 
-from ...generation.configuration_utils import GenerationConfig
+from ...generation import GenerationConfig, GenerationMixin
 from ...generation.logits_process import (
     LogitsProcessorList,
     SuppressTokensAtBeginLogitsProcessor,
@@ -172,7 +172,7 @@ def _pad_to_max_length(
     return sequences
 
 
-class WhisperGenerationMixin:
+class WhisperGenerationMixin(GenerationMixin):
     def _extract_token_timestamps(self, generate_outputs, alignment_heads, time_precision=0.02, num_frames=None):
         """
         Calculates token-level timestamps using the encoder-decoder cross-attentions and dynamic time-warping (DTW) to
diff --git a/src/transformers/models/whisper/modeling_whisper.py b/src/transformers/models/whisper/modeling_whisper.py
index 81f60edbfa98..bb24b68df620 100644
--- a/src/transformers/models/whisper/modeling_whisper.py
+++ b/src/transformers/models/whisper/modeling_whisper.py
@@ -25,6 +25,7 @@
 
 from ...activations import ACT2FN
 from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache, StaticCache
+from ...generation import GenerationMixin
 from ...modeling_attn_mask_utils import AttentionMaskConverter
 from ...modeling_outputs import (
     BaseModelOutput,
@@ -1910,7 +1911,7 @@ def forward(self, *args, **kwargs):
     """,
     WHISPER_START_DOCSTRING,
 )
-class WhisperForCausalLM(WhisperPreTrainedModel):
+class WhisperForCausalLM(GenerationMixin, WhisperPreTrainedModel):
     _tied_weights_keys = ["proj_out.weight"]
     main_input_name = "input_ids"
 
diff --git a/src/transformers/models/xglm/modeling_xglm.py b/src/transformers/models/xglm/modeling_xglm.py
index 4f1693583494..191834a9eac5 100755
--- a/src/transformers/models/xglm/modeling_xglm.py
+++ b/src/transformers/models/xglm/modeling_xglm.py
@@ -23,6 +23,7 @@
 from torch.nn import CrossEntropyLoss
 
 from ...activations import ACT2FN
+from ...generation import GenerationMixin
 from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
 from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions
 from ...modeling_utils import PreTrainedModel
@@ -696,7 +697,7 @@ def forward(
     """,
     XGLM_START_DOCSTRING,
 )
-class XGLMForCausalLM(XGLMPreTrainedModel):
+class XGLMForCausalLM(GenerationMixin, XGLMPreTrainedModel):
     base_model_prefix = "model"
     _tied_weights_keys = ["lm_head.weight"]
 
diff --git a/src/transformers/models/xlm/modeling_xlm.py b/src/transformers/models/xlm/modeling_xlm.py
index 280383630987..ee36c1a6008b 100755
--- a/src/transformers/models/xlm/modeling_xlm.py
+++ b/src/transformers/models/xlm/modeling_xlm.py
@@ -27,6 +27,7 @@
 from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
 
 from ...activations import gelu
+from ...generation import GenerationMixin
 from ...modeling_outputs import (
     BaseModelOutput,
     MaskedLMOutput,
@@ -657,7 +658,7 @@ def forward(self, x, y=None):
     """,
     XLM_START_DOCSTRING,
 )
-class XLMWithLMHeadModel(XLMPreTrainedModel):
+class XLMWithLMHeadModel(GenerationMixin, XLMPreTrainedModel):
     _tied_weights_keys = ["pred_layer.proj.weight"]
 
     def __init__(self, config):
diff --git a/src/transformers/models/xlm_roberta/modeling_xlm_roberta.py b/src/transformers/models/xlm_roberta/modeling_xlm_roberta.py
index 3ac94e75f92f..ee5036dbedd1 100644
--- a/src/transformers/models/xlm_roberta/modeling_xlm_roberta.py
+++ b/src/transformers/models/xlm_roberta/modeling_xlm_roberta.py
@@ -25,6 +25,7 @@
 from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
 
 from ...activations import ACT2FN, gelu
+from ...generation import GenerationMixin
 from ...modeling_attn_mask_utils import (
     _prepare_4d_attention_mask_for_sdpa,
     _prepare_4d_causal_attention_mask_for_sdpa,
@@ -1006,7 +1007,7 @@ def forward(
     XLM_ROBERTA_START_DOCSTRING,
 )
 # Copied from transformers.models.roberta.modeling_roberta.RobertaForCausalLM with Roberta->XLMRoberta, ROBERTA->XLM_ROBERTA
-class XLMRobertaForCausalLM(XLMRobertaPreTrainedModel):
+class XLMRobertaForCausalLM(GenerationMixin, XLMRobertaPreTrainedModel):
     _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"]
 
     def __init__(self, config):
diff --git a/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py b/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py
index f66a32291794..a677c61cba1c 100644
--- a/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py
+++ b/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py
@@ -24,6 +24,7 @@
 from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
 
 from ...activations import ACT2FN, gelu
+from ...generation import GenerationMixin
 from ...modeling_attn_mask_utils import (
     _prepare_4d_attention_mask_for_sdpa,
     _prepare_4d_causal_attention_mask_for_sdpa,
@@ -986,7 +987,7 @@ def forward(
     """XLM-RoBERTa-XL Model with a `language modeling` head on top for CLM fine-tuning.""",
     XLM_ROBERTA_XL_START_DOCSTRING,
 )
-class XLMRobertaXLForCausalLM(XLMRobertaXLPreTrainedModel):
+class XLMRobertaXLForCausalLM(GenerationMixin, XLMRobertaXLPreTrainedModel):
     _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"]
 
     def __init__(self, config):
diff --git a/src/transformers/models/xlnet/modeling_xlnet.py b/src/transformers/models/xlnet/modeling_xlnet.py
index 5d424ebe12dd..5d7e59c215f9 100755
--- a/src/transformers/models/xlnet/modeling_xlnet.py
+++ b/src/transformers/models/xlnet/modeling_xlnet.py
@@ -26,6 +26,7 @@
 from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
 
 from ...activations import ACT2FN
+from ...generation import GenerationMixin
 from ...modeling_utils import PoolerAnswerClass, PoolerEndLogits, PoolerStartLogits, PreTrainedModel, SequenceSummary
 from ...pytorch_utils import apply_chunking_to_forward
 from ...utils import (
@@ -1286,7 +1287,7 @@ def forward(
     """,
     XLNET_START_DOCSTRING,
 )
-class XLNetLMHeadModel(XLNetPreTrainedModel):
+class XLNetLMHeadModel(GenerationMixin, XLNetPreTrainedModel):
     _tied_weights_keys = ["lm_loss.weight"]
 
     def __init__(self, config):
diff --git a/src/transformers/models/xmod/modeling_xmod.py b/src/transformers/models/xmod/modeling_xmod.py
index b1ca8116a72a..0586b725f7d4 100644
--- a/src/transformers/models/xmod/modeling_xmod.py
+++ b/src/transformers/models/xmod/modeling_xmod.py
@@ -23,6 +23,7 @@
 from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
 
 from ...activations import ACT2FN, gelu
+from ...generation import GenerationMixin
 from ...modeling_outputs import (
     BaseModelOutputWithPastAndCrossAttentions,
     BaseModelOutputWithPoolingAndCrossAttentions,
@@ -956,7 +957,7 @@ def forward(
     "X-MOD Model with a `language modeling` head on top for CLM fine-tuning.",
     XMOD_START_DOCSTRING,
 )
-class XmodForCausalLM(XmodPreTrainedModel):
+class XmodForCausalLM(GenerationMixin, XmodPreTrainedModel):
     _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"]
 
     # Copied from transformers.models.roberta.modeling_roberta.RobertaForCausalLM.__init__ with Roberta->Xmod
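The hunks above all apply the same two-line pattern per model: import the mixin with `from ...generation import GenerationMixin`, and list `GenerationMixin` explicitly ahead of the model's `*PreTrainedModel` base on any class that should keep `.generate()` support. For a custom model loaded outside the library (for example with `trust_remote_code=True`), the same adjustment looks roughly like the sketch below. This is only an illustration, not part of the patch: `ToyConfig`, `ToyForCausalLM`, and their fields are invented names.

```python
# Minimal sketch (hypothetical ToyConfig / ToyForCausalLM) of the inheritance pattern
# applied throughout this diff: GenerationMixin is listed first, before the
# PreTrainedModel base, so the class exposes the mixin's `generate` machinery.
from torch import nn

from transformers import PretrainedConfig, PreTrainedModel
from transformers.generation import GenerationMixin
from transformers.modeling_outputs import CausalLMOutputWithPast


class ToyConfig(PretrainedConfig):
    model_type = "toy-causal-lm"  # invented model type, for illustration only

    def __init__(self, vocab_size=32, hidden_size=8, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        super().__init__(**kwargs)


class ToyForCausalLM(GenerationMixin, PreTrainedModel):  # mixin first, mirroring the hunks above
    config_class = ToyConfig

    def __init__(self, config):
        super().__init__(config)
        self.embed = nn.Embedding(config.vocab_size, config.hidden_size)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

    def forward(self, input_ids, attention_mask=None, **kwargs):
        # Next-token logits from a trivial embedding -> projection stack.
        hidden = self.embed(input_ids)
        return CausalLMOutputWithPast(logits=self.lm_head(hidden))
```

Whether such a class generates sensible output still depends on its `forward` signature and cache handling; the point of the sketch is only the base-class order and the import location that this diff standardizes.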