diff --git a/tests/entrypoints/openai/test_completion_error.py b/tests/entrypoints/openai/test_completion_error.py
index a7f6a75e0e72..e48cc32e5400 100644
--- a/tests/entrypoints/openai/test_completion_error.py
+++ b/tests/entrypoints/openai/test_completion_error.py
@@ -219,3 +219,23 @@ async def mock_generate(*args, **kwargs):
             f"Expected error message in chunks: {chunks}"
         )
         assert chunks[-1] == "data: [DONE]\n\n"
+
+
+def test_negative_prompt_token_ids_nested():
+    """Negative token IDs in prompt (nested list) should raise validation error."""
+    with pytest.raises(Exception, match="greater than or equal to 0"):
+        CompletionRequest(
+            model=MODEL_NAME,
+            prompt=[[-1]],
+            max_tokens=10,
+        )
+
+
+def test_negative_prompt_token_ids_flat():
+    """Negative token IDs in prompt (flat list) should raise validation error."""
+    with pytest.raises(Exception, match="greater than or equal to 0"):
+        CompletionRequest(
+            model=MODEL_NAME,
+            prompt=[-1],
+            max_tokens=10,
+        )
diff --git a/vllm/entrypoints/openai/completion/protocol.py b/vllm/entrypoints/openai/completion/protocol.py
index aec1a0a95de6..531de984b92b 100644
--- a/vllm/entrypoints/openai/completion/protocol.py
+++ b/vllm/entrypoints/openai/completion/protocol.py
@@ -42,7 +42,13 @@ class CompletionRequest(OpenAIBaseModel):
     # Ordered by official OpenAI API documentation
     # https://platform.openai.com/docs/api-reference/completions/create
     model: str | None = None
-    prompt: list[int] | list[list[int]] | str | list[str] | None = None
+    prompt: (
+        list[Annotated[int, Field(ge=0)]]
+        | list[list[Annotated[int, Field(ge=0)]]]
+        | str
+        | list[str]
+        | None
+    ) = None
     echo: bool | None = False
     frequency_penalty: float | None = 0.0
     logit_bias: dict[str, float] | None = None