diff --git a/vllm/entrypoints/openai/run_batch.py b/vllm/entrypoints/openai/run_batch.py
index d4121e710dde..c65fefba86f7 100644
--- a/vllm/entrypoints/openai/run_batch.py
+++ b/vllm/entrypoints/openai/run_batch.py
@@ -54,6 +54,7 @@
     ScoreResponse,
 )
 from vllm.entrypoints.utils import create_error_response
+from vllm.exceptions import VLLMValidationError
 from vllm.logger import init_logger
 from vllm.reasoning import ReasoningParserManager
 from vllm.utils import random_uuid
@@ -86,9 +87,10 @@ class BatchTranscriptionRequest(TranscriptionRequest):
     def validate_no_file(cls, data: Any):
         """Ensure file field is not provided in batch requests."""
         if isinstance(data, dict) and "file" in data:
-            raise ValueError(
+            raise VLLMValidationError(
                 "The 'file' field is not supported in batch requests. "
-                "Use 'file_url' instead."
+                "Use 'file_url' instead.",
+                parameter="file",
             )
         return data

@@ -116,9 +118,10 @@ class BatchTranslationRequest(TranslationRequest):
     def validate_no_file(cls, data: Any):
         """Ensure file field is not provided in batch requests."""
         if isinstance(data, dict) and "file" in data:
-            raise ValueError(
+            raise VLLMValidationError(
                 "The 'file' field is not supported in batch requests. "
-                "Use 'file_url' instead."
+                "Use 'file_url' instead.",
+                parameter="file",
             )
         return data

diff --git a/vllm/entrypoints/pooling/base/protocol.py b/vllm/entrypoints/pooling/base/protocol.py
index 2f547df8d043..2ce89e4bf2fc 100644
--- a/vllm/entrypoints/pooling/base/protocol.py
+++ b/vllm/entrypoints/pooling/base/protocol.py
@@ -11,6 +11,7 @@
     ChatTemplateContentFormatOption,
 )
 from vllm.entrypoints.openai.engine.protocol import OpenAIBaseModel
+from vllm.exceptions import VLLMValidationError
 from vllm.renderers import ChatParams, merge_kwargs
 from vllm.utils import random_uuid
 from vllm.utils.serial_utils import EmbedDType, EncodingFormat, Endianness
@@ -147,9 +148,9 @@ class ChatRequestMixin(OpenAIBaseModel):
     @classmethod
     def check_generation_prompt(cls, data):
         if data.get("continue_final_message") and data.get("add_generation_prompt"):
-            raise ValueError(
+            raise VLLMValidationError(
                 "Cannot set both `continue_final_message` and "
-                "`add_generation_prompt` to True."
+                "`add_generation_prompt` to True.",
             )
         return data

diff --git a/vllm/entrypoints/serve/tokenize/protocol.py b/vllm/entrypoints/serve/tokenize/protocol.py
index f430ae3e8165..66c122da87de 100644
--- a/vllm/entrypoints/serve/tokenize/protocol.py
+++ b/vllm/entrypoints/serve/tokenize/protocol.py
@@ -17,6 +17,7 @@
 from vllm.entrypoints.openai.engine.protocol import (
     OpenAIBaseModel,
 )
+from vllm.exceptions import VLLMValidationError
 from vllm.renderers import ChatParams, TokenizeParams, merge_kwargs

@@ -120,9 +121,9 @@ class TokenizeChatRequest(OpenAIBaseModel):
     @classmethod
     def check_generation_prompt(cls, data):
         if data.get("continue_final_message") and data.get("add_generation_prompt"):
-            raise ValueError(
+            raise VLLMValidationError(
                 "Cannot set both `continue_final_message` and "
-                "`add_generation_prompt` to True."
+                "`add_generation_prompt` to True.",
             )
         return data
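
Note: this diff only touches the call sites; the definition of VLLMValidationError in vllm/exceptions.py is not shown. The sketch below is an assumption about what such a class could look like, inferred solely from the calls above (a positional message plus an optional `parameter` keyword), and it assumes the exception subclasses ValueError so that Pydantic model validators still surface it as a request validation error. The `_validate_no_file` helper is hypothetical and only mirrors the batch-request validators for illustration.

# Sketch only: the real VLLMValidationError lives in vllm/exceptions.py and
# may carry additional fields or behavior not shown here.
class VLLMValidationError(ValueError):
    """Validation error that records which request parameter was invalid."""

    def __init__(self, message: str, *, parameter: str | None = None) -> None:
        super().__init__(message)
        self.parameter = parameter


def _validate_no_file(data: dict) -> dict:
    """Hypothetical helper mirroring the batch-request validators above."""
    if "file" in data:
        raise VLLMValidationError(
            "The 'file' field is not supported in batch requests. "
            "Use 'file_url' instead.",
            parameter="file",
        )
    return data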