diff --git a/docs/contributing/model/multimodal.md b/docs/contributing/model/multimodal.md
index bed6d4e653d6..9da12642920c 100644
--- a/docs/contributing/model/multimodal.md
+++ b/docs/contributing/model/multimodal.md
@@ -69,7 +69,7 @@ Further update the model as follows:
         # model as one of the requirements of basic vLLM model implementation.
         inputs_embeds = self.language_model.get_input_embeddings(input_ids)

-        if multimodal_embeddings is not None:
+        if multimodal_embeddings:
             inputs_embeds = merge_multimodal_embeddings(
                 input_ids=input_ids,
                 inputs_embeds=inputs_embeds,
diff --git a/vllm/model_executor/models/aria.py b/vllm/model_executor/models/aria.py
index 4fe6a7b9e938..e544b83dc19f 100644
--- a/vllm/model_executor/models/aria.py
+++ b/vllm/model_executor/models/aria.py
@@ -620,7 +620,7 @@ def get_input_embeddings(
         multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
     ) -> torch.Tensor:
         inputs_embeds = self.language_model.get_input_embeddings(input_ids)
-        if multimodal_embeddings is not None:
+        if multimodal_embeddings:
             inputs_embeds = merge_multimodal_embeddings(
                 input_ids, inputs_embeds, multimodal_embeddings,
                 self.config.image_token_index)
diff --git a/vllm/model_executor/models/aya_vision.py b/vllm/model_executor/models/aya_vision.py
index 7c02d245db8b..327204978a2d 100644
--- a/vllm/model_executor/models/aya_vision.py
+++ b/vllm/model_executor/models/aya_vision.py
@@ -430,7 +430,7 @@ def get_input_embeddings(
         multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
     ) -> torch.Tensor:
         inputs_embeds = self.language_model.get_input_embeddings(input_ids)
-        if multimodal_embeddings is not None:
+        if multimodal_embeddings:
             inputs_embeds = merge_multimodal_embeddings(
                 input_ids=input_ids,
                 inputs_embeds=inputs_embeds,
diff --git a/vllm/model_executor/models/blip2.py b/vllm/model_executor/models/blip2.py
index 87fc6b5b0240..36a6fbb202a7 100644
--- a/vllm/model_executor/models/blip2.py
+++ b/vllm/model_executor/models/blip2.py
@@ -641,7 +641,7 @@ def get_input_embeddings(
         multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
     ) -> torch.Tensor:
         inputs_embeds = self.language_model.get_input_embeddings(input_ids)
-        if multimodal_embeddings is not None:
+        if multimodal_embeddings:
             inputs_embeds = merge_multimodal_embeddings(
                 input_ids, inputs_embeds, multimodal_embeddings,
                 _IMAGE_TOKEN_ID)
diff --git a/vllm/model_executor/models/chameleon.py b/vllm/model_executor/models/chameleon.py
index 21f29dc43c26..dcce7b8d95f9 100644
--- a/vllm/model_executor/models/chameleon.py
+++ b/vllm/model_executor/models/chameleon.py
@@ -1005,7 +1005,7 @@ def get_input_embeddings(
     ) -> torch.Tensor:
         inputs_embeds = self.model.get_input_embeddings(input_ids)

-        if multimodal_embeddings is not None:
+        if multimodal_embeddings:
             inputs_embeds = merge_multimodal_embeddings(
                 input_ids, inputs_embeds, multimodal_embeddings,
                 self.model.vocabulary_mapping.image_token_id)
diff --git a/vllm/model_executor/models/deepseek_vl2.py b/vllm/model_executor/models/deepseek_vl2.py
index 6341c65a5d4c..b4e3b175f76d 100644
--- a/vllm/model_executor/models/deepseek_vl2.py
+++ b/vllm/model_executor/models/deepseek_vl2.py
@@ -600,7 +600,7 @@ def get_input_embeddings(
         multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
     ) -> torch.Tensor:
         inputs_embeds = self.language_model.get_input_embeddings(input_ids)
-        if multimodal_embeddings is not None:
+        if multimodal_embeddings:
             inputs_embeds = merge_multimodal_embeddings(
                 input_ids, inputs_embeds, multimodal_embeddings,
                 self.image_token_id)
diff --git a/vllm/model_executor/models/florence2.py b/vllm/model_executor/models/florence2.py
index 4b220ea483e8..a36a6299d6a4 100644
--- a/vllm/model_executor/models/florence2.py
+++ b/vllm/model_executor/models/florence2.py
@@ -1046,7 +1046,7 @@ def get_input_embeddings(
         multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
     ) -> torch.Tensor:
         inputs_embeds = self.language_model.get_input_embeddings(input_ids)
-        if multimodal_embeddings is not None:
+        if multimodal_embeddings:
             inputs_embeds = merge_multimodal_embeddings(
                 input_ids, inputs_embeds, multimodal_embeddings,
                 self.pad_token_id)
diff --git a/vllm/model_executor/models/fuyu.py b/vllm/model_executor/models/fuyu.py
index 9692899f7b99..46876dc4bbd8 100644
--- a/vllm/model_executor/models/fuyu.py
+++ b/vllm/model_executor/models/fuyu.py
@@ -345,7 +345,7 @@ def get_input_embeddings(
         multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
     ) -> torch.Tensor:
         inputs_embeds = self.language_model.get_input_embeddings(input_ids)
-        if multimodal_embeddings is not None:
+        if multimodal_embeddings:
             inputs_embeds = merge_multimodal_embeddings(
                 input_ids,
                 inputs_embeds,
diff --git a/vllm/model_executor/models/gemma3_mm.py b/vllm/model_executor/models/gemma3_mm.py
index 415a8dbdcf87..f40bfff57dee 100644
--- a/vllm/model_executor/models/gemma3_mm.py
+++ b/vllm/model_executor/models/gemma3_mm.py
@@ -592,7 +592,7 @@ def get_input_embeddings(
         multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
     ) -> torch.Tensor:
         inputs_embeds = self.language_model.get_input_embeddings(input_ids)
-        if multimodal_embeddings is not None:
+        if multimodal_embeddings:
             inputs_embeds = merge_multimodal_embeddings(
                 input_ids,
                 inputs_embeds,
diff --git a/vllm/model_executor/models/glm4v.py b/vllm/model_executor/models/glm4v.py
index e9271367a472..ddcba0fcf48c 100644
--- a/vllm/model_executor/models/glm4v.py
+++ b/vllm/model_executor/models/glm4v.py
@@ -609,7 +609,7 @@ def get_input_embeddings(
     ) -> torch.Tensor:
         inputs_embeds = self.transformer.get_input_embeddings(input_ids)

-        if multimodal_embeddings is not None:
+        if multimodal_embeddings:
             inputs_embeds = merge_multimodal_embeddings(
                 input_ids=input_ids,
                 inputs_embeds=inputs_embeds,
diff --git a/vllm/model_executor/models/idefics3.py b/vllm/model_executor/models/idefics3.py
index be04ad0422df..56569406b21e 100644
--- a/vllm/model_executor/models/idefics3.py
+++ b/vllm/model_executor/models/idefics3.py
@@ -720,7 +720,7 @@ def get_input_embeddings(
         multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
     ) -> torch.Tensor:
         inputs_embeds = self.model.get_input_embeddings(input_ids)
-        if multimodal_embeddings is not None:
+        if multimodal_embeddings:
             inputs_embeds = merge_multimodal_embeddings(
                 input_ids,
                 inputs_embeds,
diff --git a/vllm/model_executor/models/internvl.py b/vllm/model_executor/models/internvl.py
index 9d5cceccff2f..83075c5f0170 100644
--- a/vllm/model_executor/models/internvl.py
+++ b/vllm/model_executor/models/internvl.py
@@ -1336,7 +1336,7 @@ def get_input_embeddings(
         multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
     ) -> torch.Tensor:
         inputs_embeds = self.language_model.get_input_embeddings(input_ids)
-        if multimodal_embeddings is not None:
+        if multimodal_embeddings:
             context_token_ids = [
                 token_id for token_id in (self.img_context_token_id,
                                           self.video_context_token_id)
diff --git a/vllm/model_executor/models/kimi_vl.py b/vllm/model_executor/models/kimi_vl.py
index 351d1fbdc744..7f21bd1dfe64 100644
--- a/vllm/model_executor/models/kimi_vl.py
+++ b/vllm/model_executor/models/kimi_vl.py
@@ -393,7 +393,7 @@ def get_input_embeddings(
         # model as one of the requirements of basic vLLM model implementation.
         inputs_embeds = self.language_model.get_input_embeddings(input_ids)

-        if multimodal_embeddings is not None:
+        if multimodal_embeddings:
             inputs_embeds = merge_multimodal_embeddings(
                 input_ids=input_ids,
                 inputs_embeds=inputs_embeds,
diff --git a/vllm/model_executor/models/llava.py b/vllm/model_executor/models/llava.py
index f70ad37a3d3a..e901d5718b1c 100644
--- a/vllm/model_executor/models/llava.py
+++ b/vllm/model_executor/models/llava.py
@@ -683,7 +683,7 @@ def get_input_embeddings(
         multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
     ) -> torch.Tensor:
         inputs_embeds = self.language_model.get_input_embeddings(input_ids)
-        if multimodal_embeddings is not None:
+        if multimodal_embeddings:
             inputs_embeds = merge_multimodal_embeddings(
                 input_ids,
                 inputs_embeds,
diff --git a/vllm/model_executor/models/llava_next_video.py b/vllm/model_executor/models/llava_next_video.py
index c13e8e9b2414..5f26bbc497d2 100644
--- a/vllm/model_executor/models/llava_next_video.py
+++ b/vllm/model_executor/models/llava_next_video.py
@@ -426,7 +426,7 @@ def get_input_embeddings(
         multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
     ) -> torch.Tensor:
         inputs_embeds = self.language_model.get_input_embeddings(input_ids)
-        if multimodal_embeddings is not None:
+        if multimodal_embeddings:
             inputs_embeds = merge_multimodal_embeddings(
                 input_ids, inputs_embeds, multimodal_embeddings,
                 self.config.video_token_index)
diff --git a/vllm/model_executor/models/llava_onevision.py b/vllm/model_executor/models/llava_onevision.py
index 373b0a2a7d5e..de06b27baece 100644
--- a/vllm/model_executor/models/llava_onevision.py
+++ b/vllm/model_executor/models/llava_onevision.py
@@ -881,7 +881,7 @@ def get_input_embeddings(
         multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
     ) -> torch.Tensor:
         inputs_embeds = self.language_model.get_input_embeddings(input_ids)
-        if multimodal_embeddings is not None:
+        if multimodal_embeddings:
             inputs_embeds = merge_multimodal_embeddings(
                 input_ids, inputs_embeds, multimodal_embeddings,
                 [self.config.image_token_index, self.config.video_token_index])
diff --git a/vllm/model_executor/models/minicpmv.py b/vllm/model_executor/models/minicpmv.py
index b923287dca3e..86fcab1c20a5 100644
--- a/vllm/model_executor/models/minicpmv.py
+++ b/vllm/model_executor/models/minicpmv.py
@@ -892,7 +892,7 @@ def get_input_embeddings(
         multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
     ) -> torch.Tensor:
         inputs_embeds = self.llm.get_input_embeddings(input_ids)
-        if multimodal_embeddings is not None:
+        if multimodal_embeddings:
             assert len(self.mm_token_ids) > 0
             inputs_embeds = merge_multimodal_embeddings(
                 input_ids,
diff --git a/vllm/model_executor/models/minimax_vl_01.py b/vllm/model_executor/models/minimax_vl_01.py
index bc00af2ec6b9..3d96d3683064 100644
--- a/vllm/model_executor/models/minimax_vl_01.py
+++ b/vllm/model_executor/models/minimax_vl_01.py
@@ -201,7 +201,7 @@ def get_input_embeddings(
         multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
     ) -> torch.Tensor:
         inputs_embeds = self.language_model.get_input_embeddings(input_ids)
-        if multimodal_embeddings is not None:
+        if multimodal_embeddings:
             inputs_embeds = merge_multimodal_embeddings(
                 input_ids,
                 inputs_embeds,
diff --git a/vllm/model_executor/models/mistral3.py b/vllm/model_executor/models/mistral3.py
index ebc176e2c724..4040f4830d7c 100644
--- a/vllm/model_executor/models/mistral3.py
+++ b/vllm/model_executor/models/mistral3.py
@@ -521,7 +521,7 @@ def get_input_embeddings(
         multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
     ) -> torch.Tensor:
         inputs_embeds = self.language_model.get_input_embeddings(input_ids)
-        if multimodal_embeddings is not None:
+        if multimodal_embeddings:
             inputs_embeds = merge_multimodal_embeddings(
                 input_ids,
                 inputs_embeds,
diff --git a/vllm/model_executor/models/mllama4.py b/vllm/model_executor/models/mllama4.py
index bf4bd309eea2..e103f37676af 100644
--- a/vllm/model_executor/models/mllama4.py
+++ b/vllm/model_executor/models/mllama4.py
@@ -808,7 +808,7 @@ def get_input_embeddings(
     ) -> torch.Tensor:
         inputs_embeds = self.language_model.get_input_embeddings(input_ids)

-        if multimodal_embeddings is not None:
+        if multimodal_embeddings:
             inputs_embeds = merge_multimodal_embeddings(
                 input_ids,
                 inputs_embeds,
diff --git a/vllm/model_executor/models/molmo.py b/vllm/model_executor/models/molmo.py
index 70c60c6d528b..1958ab75b043 100644
--- a/vllm/model_executor/models/molmo.py
+++ b/vllm/model_executor/models/molmo.py
@@ -1487,7 +1487,7 @@ def get_input_embeddings(
         multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
     ) -> torch.Tensor:
         inputs_embeds = self.model.get_input_embeddings(input_ids)
-        if multimodal_embeddings is not None:
+        if multimodal_embeddings:
             assert self.img_patch_id is not None

             inputs_embeds = merge_multimodal_embeddings(
diff --git a/vllm/model_executor/models/ovis.py b/vllm/model_executor/models/ovis.py
index 900a1f5de458..096e550eb9ba 100644
--- a/vllm/model_executor/models/ovis.py
+++ b/vllm/model_executor/models/ovis.py
@@ -515,7 +515,7 @@ def get_input_embeddings(
         multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
     ) -> torch.Tensor:
         inputs_embeds = self.llm.get_input_embeddings(input_ids)
-        if multimodal_embeddings is not None:
+        if multimodal_embeddings:
             inputs_embeds = merge_multimodal_embeddings(
                 input_ids, inputs_embeds, multimodal_embeddings,
                 self.image_pad_token_id)
diff --git a/vllm/model_executor/models/paligemma.py b/vllm/model_executor/models/paligemma.py
index 103a267c41f5..b4540159f77c 100644
--- a/vllm/model_executor/models/paligemma.py
+++ b/vllm/model_executor/models/paligemma.py
@@ -364,7 +364,7 @@ def get_input_embeddings(
         multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
     ) -> torch.Tensor:
         inputs_embeds = self.language_model.get_input_embeddings(input_ids)
-        if multimodal_embeddings is not None:
+        if multimodal_embeddings:
             inputs_embeds = merge_multimodal_embeddings(
                 input_ids, inputs_embeds, multimodal_embeddings,
                 self.config.image_token_index)
diff --git a/vllm/model_executor/models/phi4mm.py b/vllm/model_executor/models/phi4mm.py
index a3ca72d1f5cf..092f472c7838 100644
--- a/vllm/model_executor/models/phi4mm.py
+++ b/vllm/model_executor/models/phi4mm.py
@@ -1148,7 +1148,7 @@ def get_input_embeddings(
         multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
     ) -> torch.Tensor:
         inputs_embeds = self.model.embed_tokens(input_ids)
-        if multimodal_embeddings is not None:
+        if multimodal_embeddings:
             inputs_embeds = merge_multimodal_embeddings(
                 input_ids, inputs_embeds, multimodal_embeddings,
                 [_IMAGE_PLACEHOLDER_TOKEN_ID, _AUDIO_PLACEHOLDER_TOKEN_ID])
diff --git a/vllm/model_executor/models/pixtral.py b/vllm/model_executor/models/pixtral.py
index 320c0e10d06a..5fc4f2f80e1a 100644
--- a/vllm/model_executor/models/pixtral.py
+++ b/vllm/model_executor/models/pixtral.py
@@ -423,7 +423,7 @@ def get_input_embeddings(
         multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
     ) -> torch.Tensor:
         inputs_embeds = self.language_model.get_input_embeddings(input_ids)
-        if multimodal_embeddings is not None:
+        if multimodal_embeddings:
             inputs_embeds = merge_multimodal_embeddings(
                 input_ids,
                 inputs_embeds,
diff --git a/vllm/model_executor/models/qwen2_5_omni_thinker.py b/vllm/model_executor/models/qwen2_5_omni_thinker.py
index ad1e8fcb39d5..2bf49c4dea8c 100644
--- a/vllm/model_executor/models/qwen2_5_omni_thinker.py
+++ b/vllm/model_executor/models/qwen2_5_omni_thinker.py
@@ -805,7 +805,7 @@ def get_input_embeddings(
         multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
     ) -> torch.Tensor:
         inputs_embeds = self.language_model.get_input_embeddings(input_ids)
-        if multimodal_embeddings is not None:
+        if multimodal_embeddings:

             # TODO (ywang96): support overlapping modalitiy embeddings so that
             # `use_audio_in_video` will work on V1.
diff --git a/vllm/model_executor/models/qwen2_5_vl.py b/vllm/model_executor/models/qwen2_5_vl.py
index 202cd5e860d1..4c81a419938b 100644
--- a/vllm/model_executor/models/qwen2_5_vl.py
+++ b/vllm/model_executor/models/qwen2_5_vl.py
@@ -1046,7 +1046,7 @@ def get_input_embeddings(
         multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
     ) -> torch.Tensor:
         inputs_embeds = self.language_model.get_input_embeddings(input_ids)
-        if multimodal_embeddings is not None:
+        if multimodal_embeddings:
             inputs_embeds = merge_multimodal_embeddings(
                 input_ids, inputs_embeds, multimodal_embeddings,
                 [self.config.image_token_id, self.config.video_token_id])
diff --git a/vllm/model_executor/models/qwen2_audio.py b/vllm/model_executor/models/qwen2_audio.py
index e77a8e05d200..287c13074eb1 100644
--- a/vllm/model_executor/models/qwen2_audio.py
+++ b/vllm/model_executor/models/qwen2_audio.py
@@ -364,7 +364,7 @@ def get_input_embeddings(
         multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
     ) -> torch.Tensor:
         inputs_embeds = self.language_model.get_input_embeddings(input_ids)
-        if multimodal_embeddings is not None:
+        if multimodal_embeddings:
             inputs_embeds = merge_multimodal_embeddings(
                 input_ids, inputs_embeds, multimodal_embeddings,
                 self.config.audio_token_index)
diff --git a/vllm/model_executor/models/qwen2_vl.py b/vllm/model_executor/models/qwen2_vl.py
index 49b709069cd2..5294345bb4b0 100644
--- a/vllm/model_executor/models/qwen2_vl.py
+++ b/vllm/model_executor/models/qwen2_vl.py
@@ -1289,7 +1289,7 @@ def get_input_embeddings(
         multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
     ) -> torch.Tensor:
         inputs_embeds = self.language_model.get_input_embeddings(input_ids)
-        if multimodal_embeddings is not None:
+        if multimodal_embeddings:
             inputs_embeds = merge_multimodal_embeddings(
                 input_ids, inputs_embeds, multimodal_embeddings,
                 [self.config.image_token_id, self.config.video_token_id])
diff --git a/vllm/model_executor/models/qwen_vl.py b/vllm/model_executor/models/qwen_vl.py
index 546737621a7c..08d5415a54dd 100644
--- a/vllm/model_executor/models/qwen_vl.py
+++ b/vllm/model_executor/models/qwen_vl.py
@@ -754,7 +754,7 @@ def get_input_embeddings(
     ) -> torch.Tensor:
         inputs_embeds = self.transformer.get_input_embeddings(input_ids)

-        if multimodal_embeddings is not None:
+        if multimodal_embeddings:
             inputs_embeds = merge_multimodal_embeddings(
                 input_ids, inputs_embeds, multimodal_embeddings,
                 self.transformer.visual.image_pad_id)
diff --git a/vllm/model_executor/models/skyworkr1v.py b/vllm/model_executor/models/skyworkr1v.py
index 9fba24ac5cec..1ba71e1e8bfc 100644
--- a/vllm/model_executor/models/skyworkr1v.py
+++ b/vllm/model_executor/models/skyworkr1v.py
@@ -883,7 +883,7 @@ def get_input_embeddings(
         multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
     ) -> torch.Tensor:
         inputs_embeds = self.language_model.get_input_embeddings(input_ids)
-        if multimodal_embeddings is not None:
+        if multimodal_embeddings:
             assert self.img_context_token_id is not None
             self._set_visual_token_mask(input_ids)
             inputs_embeds = merge_multimodal_embeddings(
diff --git a/vllm/model_executor/models/tarsier.py b/vllm/model_executor/models/tarsier.py
index 2645e700fcda..f962641984f2 100644
--- a/vllm/model_executor/models/tarsier.py
+++ b/vllm/model_executor/models/tarsier.py
@@ -598,7 +598,7 @@ def get_input_embeddings(
         multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
     ) -> torch.Tensor:
         inputs_embeds = self.language_model.get_input_embeddings(input_ids)
-        if multimodal_embeddings is not None:
+        if multimodal_embeddings:
             inputs_embeds = merge_multimodal_embeddings(
                 input_ids,
                 inputs_embeds,
diff --git a/vllm/model_executor/models/ultravox.py b/vllm/model_executor/models/ultravox.py
index f6b9d19694ef..51052eebfe93 100644
--- a/vllm/model_executor/models/ultravox.py
+++ b/vllm/model_executor/models/ultravox.py
@@ -560,7 +560,7 @@ def get_input_embeddings(
         multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
     ) -> torch.Tensor:
         inputs_embeds = self.language_model.get_input_embeddings(input_ids)
-        if multimodal_embeddings is not None:
+        if multimodal_embeddings:

             # TODO(ywang96): remove this block after v0 is deprecated.
             if not envs.VLLM_USE_V1:
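
Every hunk above makes the same semantic change: `multimodal_embeddings` may be an empty list or tuple rather than `None` (e.g. a text-only request), and in that case `merge_multimodal_embeddings` should be skipped just as it is for `None`. The following is a minimal, self-contained sketch of the difference between the two conditions; the `would_merge` helper and the placeholder string standing in for an embedding tensor are illustrative only, not vLLM code.

```python
from typing import Optional, Sequence


def would_merge(multimodal_embeddings: Optional[Sequence]) -> tuple[bool, bool]:
    """Return (old condition, new condition) for a given embeddings value."""
    old = multimodal_embeddings is not None  # before this diff
    new = bool(multimodal_embeddings)        # after this diff
    return old, new


print(would_merge(None))     # (False, False): both checks skip the merge
print(would_merge([]))       # (True, False): only the new check skips an empty list
print(would_merge(["emb"]))  # (True, True): both merge when embeddings are present
```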