17 changes: 13 additions & 4 deletions tests/conftest.py
@@ -405,6 +405,7 @@ def get_inputs(
         images: PromptImageInput | None = None,
         videos: PromptVideoInput | None = None,
         audios: PromptAudioInput | None = None,
+        tokenization_kwargs: dict[str, Any] | None = None,
     ) -> list[BatchFeature | BatchEncoding | dict[str, torch.Tensor]]:
         if images is not None:
             assert len(prompts) == len(images)
@@ -418,10 +419,18 @@ def get_inputs(
         all_inputs: list[BatchFeature | BatchEncoding | dict[str, torch.Tensor]] = []
         for i, prompt in enumerate(prompts):
             if isinstance(prompt, str):
-                processor_kwargs: dict[str, Any] = {
-                    "text": prompt,
-                    "return_tensors": "pt",
-                }
+                # Create a copy to avoid modifying the original dict
+                processor_kwargs = (
+                    tokenization_kwargs.copy()
+                    if tokenization_kwargs is not None
+                    else {}
+                )
+                processor_kwargs.update(
+                    {
+                        "text": prompt,
+                        "return_tensors": "pt",
+                    }
+                )
                 if images is not None and (image := images[i]) is not None:
                     processor_kwargs["images"] = image
                 if videos is not None and (video := videos[i]) is not None:
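
Note: the copy-then-update pattern above keeps the caller's kwargs dict intact across prompts while letting the per-prompt keys win. A minimal standalone sketch of those merge semantics (the helper name is hypothetical, not part of this PR):

```python
from typing import Any


def build_processor_kwargs(
    prompt: str, tokenization_kwargs: dict[str, Any] | None
) -> dict[str, Any]:
    # Copy first so the caller's dict is never mutated between prompts.
    processor_kwargs = (
        tokenization_kwargs.copy() if tokenization_kwargs is not None else {}
    )
    # "text" and "return_tensors" are applied last, so they always win.
    processor_kwargs.update({"text": prompt, "return_tensors": "pt"})
    return processor_kwargs


shared = {"padding": "max_length", "max_length": 64}
kwargs = build_processor_kwargs("a photo of a cat", shared)
assert kwargs["padding"] == "max_length"
assert shared == {"padding": "max_length", "max_length": 64}  # caller dict unchanged
```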
18 changes: 16 additions & 2 deletions tests/models/multimodal/pooling/test_siglip.py
@@ -1,6 +1,8 @@
 # SPDX-License-Identifier: Apache-2.0
 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
 
+from typing import Any
+
 import pytest
 from transformers import SiglipModel
 
@@ -35,7 +37,11 @@ def _run_test(
     model: str,
     *,
     dtype: str,
+    tokenization_kwargs: dict[str, Any] | None = None,
 ) -> None:
+    if tokenization_kwargs is None:
+        tokenization_kwargs = {}
+
     with vllm_runner(
         model,
         runner="pooling",
@@ -44,10 +50,14 @@ def _run_test(
         max_model_len=64,
         gpu_memory_utilization=0.7,
     ) as vllm_model:
-        vllm_outputs = vllm_model.embed(input_texts, images=input_images)
+        vllm_outputs = vllm_model.embed(
+            input_texts, images=input_images, tokenization_kwargs=tokenization_kwargs
+        )
 
     with hf_runner(model, dtype=dtype, auto_cls=SiglipModel) as hf_model:
-        all_inputs = hf_model.get_inputs(input_texts, images=input_images)
+        all_inputs = hf_model.get_inputs(
+            input_texts, images=input_images, tokenization_kwargs=tokenization_kwargs
+        )
 
     all_outputs = []
     for inputs in all_inputs:
@@ -94,6 +104,10 @@ def test_models_text(
         input_images,  # type: ignore
         model,
         dtype=dtype,
+        tokenization_kwargs={
+            "padding": "max_length",
+            "max_length": 64,
+        },  # siglip2 was trained with this padding setting.
     )
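
Background for the test change: SigLIP2 text encoders are trained with fixed-length padding, so embeddings can drift if the tokenizer pads dynamically instead. A hedged illustration of the difference using Hugging Face transformers (the checkpoint name is an assumption for demonstration; the test parametrizes its own models):

```python
from transformers import AutoTokenizer

# Assumed checkpoint, chosen only to illustrate the padding behavior.
tok = AutoTokenizer.from_pretrained("google/siglip-base-patch16-224")

dynamic = tok("a photo of a cat", return_tensors="pt")
fixed = tok(
    "a photo of a cat",
    padding="max_length",
    max_length=64,
    return_tensors="pt",
)

print(dynamic["input_ids"].shape)  # short, e.g. torch.Size([1, 6])
print(fixed["input_ids"].shape)  # torch.Size([1, 64])
```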
22 changes: 20 additions & 2 deletions vllm/entrypoints/llm.py
@@ -1076,6 +1076,7 @@ def encode(
             params=pooling_params,
             use_tqdm=use_tqdm,
             lora_request=lora_request,
+            tokenization_kwargs=tokenization_kwargs,
         )
 
         outputs = self._run_engine(use_tqdm=use_tqdm)
@@ -1113,6 +1114,7 @@ def embed(
         use_tqdm: bool | Callable[..., tqdm] = True,
         pooling_params: PoolingParams | Sequence[PoolingParams] | None = None,
         lora_request: list[LoRARequest] | LoRARequest | None = None,
+        tokenization_kwargs: dict[str, Any] | None = None,
     ) -> list[EmbeddingRequestOutput]:
         """
         Generate an embedding vector for each prompt.
@@ -1150,6 +1152,7 @@
             pooling_params=pooling_params,
             lora_request=lora_request,
             pooling_task="embed",
+            tokenization_kwargs=tokenization_kwargs,
         )
 
         return [EmbeddingRequestOutput.from_base(item) for item in items]
@@ -1161,6 +1164,7 @@ def classify(
         use_tqdm: bool | Callable[..., tqdm] = True,
         pooling_params: PoolingParams | Sequence[PoolingParams] | None = None,
         lora_request: list[LoRARequest] | LoRARequest | None = None,
+        tokenization_kwargs: dict[str, Any] | None = None,
     ) -> list[ClassificationRequestOutput]:
         """
         Generate class logits for each prompt.
@@ -1196,6 +1200,7 @@
             pooling_params=pooling_params,
             lora_request=lora_request,
             pooling_task="classify",
+            tokenization_kwargs=tokenization_kwargs,
         )
 
         return [ClassificationRequestOutput.from_base(item) for item in items]
@@ -1209,6 +1214,7 @@ def reward(
         use_tqdm: bool | Callable[..., tqdm] = True,
         pooling_params: PoolingParams | Sequence[PoolingParams] | None = None,
         lora_request: list[LoRARequest] | LoRARequest | None = None,
+        tokenization_kwargs: dict[str, Any] | None = None,
     ) -> list[PoolingRequestOutput]:
         """
         Generate rewards for each prompt.
@@ -1236,6 +1242,7 @@
             pooling_params=pooling_params,
             truncate_prompt_tokens=truncate_prompt_tokens,
             pooling_task="token_classify",
+            tokenization_kwargs=tokenization_kwargs,
         )
 
     def _embedding_score(
@@ -1247,6 +1254,7 @@ def _embedding_score(
         use_tqdm: bool | Callable[..., tqdm] = True,
         pooling_params: PoolingParams | None = None,
         lora_request: list[LoRARequest] | LoRARequest | None = None,
+        tokenization_kwargs: dict[str, Any] | None = None,
     ) -> list[ScoringRequestOutput]:
         encoded_output: list[PoolingRequestOutput] = self.encode(
             text_1 + text_2,
@@ -1255,6 +1263,7 @@
             lora_request=lora_request,
             pooling_params=pooling_params,
             pooling_task="embed",
+            tokenization_kwargs=tokenization_kwargs,
         )
 
         encoded_output_1: list[PoolingRequestOutput] = encoded_output[0 : len(text_1)]
@@ -1279,6 +1288,7 @@ def _cross_encoding_score(
         use_tqdm: bool | Callable[..., tqdm] = True,
         pooling_params: PoolingParams | None = None,
         lora_request: list[LoRARequest] | LoRARequest | None = None,
+        tokenization_kwargs: dict[str, Any] | None = None,
     ) -> list[ScoringRequestOutput]:
         model_config = self.model_config
 
@@ -1294,7 +1304,8 @@
         pooling_params.verify("score", model_config)
         pooling_params_list = list[PoolingParams]()
 
-        tokenization_kwargs: dict[str, Any] = {}
+        local_kwargs = tokenization_kwargs or {}
+        tokenization_kwargs = local_kwargs.copy()
 
         _validate_truncation_size(
             model_config.max_model_len, truncate_prompt_tokens, tokenization_kwargs
@@ -1557,6 +1568,7 @@ def _validate_and_add_requests(
         use_tqdm: bool | Callable[..., tqdm] = True,
         lora_request: Sequence[LoRARequest] | LoRARequest | None,
         priority: list[int] | None = None,
+        tokenization_kwargs: dict[str, Any] | None = None,
     ) -> None:
         if isinstance(prompts, (str, dict)):
             # Convert a single prompt to a list.
@@ -1602,6 +1614,7 @@
                     if isinstance(lora_request, Sequence)
                     else lora_request,
                     priority=priority[i] if priority else 0,
+                    tokenization_kwargs=tokenization_kwargs,
                 )
                 added_request_ids.append(request_id)
             except Exception as e:
@@ -1665,9 +1678,12 @@ def _process_inputs(
         *,
         lora_request: LoRARequest | None,
         priority: int,
+        tokenization_kwargs: dict[str, Any] | None = None,
     ) -> tuple[EngineCoreRequest, dict[str, Any]]:
         """Use the Processor to process inputs for LLMEngine."""
-        tokenization_kwargs: dict[str, Any] = {}
+
+        local_kwargs = tokenization_kwargs or {}
+        tokenization_kwargs = local_kwargs.copy()
         _validate_truncation_size(
             self.model_config.max_model_len,
             params.truncate_prompt_tokens,
@@ -1690,6 +1706,7 @@ def _add_request(
         params: SamplingParams | PoolingParams,
         lora_request: LoRARequest | None = None,
         priority: int = 0,
+        tokenization_kwargs: dict[str, Any] | None = None,
     ) -> str:
         prompt_text, _, _ = get_prompt_components(prompt)
         request_id = str(next(self.request_counter))
@@ -1700,6 +1717,7 @@
             params,
             lora_request=lora_request,
             priority=priority,
+            tokenization_kwargs=tokenization_kwargs,
         )
 
         self.llm_engine.add_request(
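
Taken together, the llm.py changes thread tokenization_kwargs from the public pooling entrypoints (embed, classify, reward, and the score helpers) through _validate_and_add_requests and _add_request into _process_inputs. A hedged end-to-end sketch of the resulting API (the model name is an assumption; runner="pooling" mirrors the test setup above):

```python
from vllm import LLM

llm = LLM(model="google/siglip-base-patch16-224", runner="pooling")

# New in this PR: per-call tokenizer settings for pooling requests.
outputs = llm.embed(
    ["a photo of a cat", "a photo of a dog"],
    tokenization_kwargs={"padding": "max_length", "max_length": 64},
)
print(len(outputs[0].outputs.embedding))  # embedding dimensionality
```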