Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 20 additions & 0 deletions pkgs/development/python-modules/kserve/default.nix
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,18 @@ buildPythonPackage rec {
hash = "sha256-f6ILZMLxfckEpy7wSgCqUx89JWSnn0DbQiqRSHcQHms=";
};

# Fix compatibility with vllm >= 0.12.0 (entrypoints.openai pooling types moved to entrypoints.pooling)
# Patch submitted upstream: https://github.com/kserve/kserve/pull/4882
postPatch = ''
substituteInPlace kserve/protocol/rest/openai/types/__init__.py \
--replace-fail \
"from vllm.entrypoints.openai.protocol import EmbeddingRequest, EmbeddingResponse as Embedding, EmbeddingResponseData, EmbeddingCompletionRequest" \
"from vllm.entrypoints.pooling.embed.protocol import EmbeddingRequest, EmbeddingResponse as Embedding, EmbeddingResponseData, EmbeddingCompletionRequest" \
--replace-fail \
"from vllm.entrypoints.openai.protocol import RerankRequest, RerankResponse as Rerank" \
"from vllm.entrypoints.pooling.score.protocol import RerankRequest, RerankResponse as Rerank"
'';

sourceRoot = "${src.name}/python/kserve";

pythonRelaxDeps = [
Expand Down Expand Up @@ -161,6 +173,14 @@ buildPythonPackage rec {
];

disabledTests = [
# Started failing since vllm was updated to 0.13.0
# pydantic_core._pydantic_core.ValidationError: 1 validation error for RerankResponse
# usage.prompt_tokens
# Field required [type=missing, input_value={'total_tokens': 100}, input_type=dict]
# For further information visit https://errors.pydantic.dev/2.11/v/missing
"test_create_rerank"
"test_create_embedding"

# AssertionError: assert CompletionReq...lm_xargs=None) == CompletionReq...lm_xargs=None)
"test_convert_params"

Expand Down
13 changes: 6 additions & 7 deletions pkgs/development/python-modules/vllm/0005-drop-intel-reqs.patch
Original file line number Diff line number Diff line change
@@ -1,14 +1,13 @@
diff --git a/requirements/cpu.txt b/requirements/cpu.txt
index d11787df4..71575d707 100644
index 21571be47..e91b378e2 100644
--- a/requirements/cpu.txt
+++ b/requirements/cpu.txt
@@ -20,9 +20,6 @@ torchvision; platform_machine != "ppc64le" and platform_machine != "s390x"
torchvision==0.23.0; platform_machine == "ppc64le"
datasets # for benchmark scripts
@@ -14,8 +14,5 @@ torchaudio; platform_machine != "s390x"
# required for the image processor of phi3v, this must be updated alongside torch
torchvision; platform_machine != "s390x"

-# Intel Extension for PyTorch, only for x86_64 CPUs
-intel-openmp==2024.2.1; platform_machine == "x86_64"
-intel_extension_for_pytorch==2.8.0; platform_machine == "x86_64"
triton==3.2.0; platform_machine == "x86_64" # Triton is required for torch 2.6+cpu, as it is imported in torch.compile.

-
# Use this to gather CPU info and optimize based on ARM Neoverse cores
py-cpuinfo; platform_machine == "aarch64"
28 changes: 19 additions & 9 deletions pkgs/development/python-modules/vllm/default.nix
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
symlinkJoin,
autoAddDriverRunpath,

# build system
# build-system
cmake,
jinja2,
ninja,
Expand Down Expand Up @@ -52,6 +52,8 @@
importlib-metadata,
partial-json-parser,
compressed-tensors,
mcp,
ijson,
mistral-common,
msgspec,
model-hosting-container-standards,
Expand Down Expand Up @@ -154,6 +156,15 @@ let
'';
};

# grep for DEFAULT_TRITON_KERNELS_TAG in the following file
# https://github.com/vllm-project/vllm/blob/v${version}/cmake/external_projects/triton_kernels.cmake
triton-kernels = fetchFromGitHub {
owner = "triton-lang";
repo = "triton";
tag = "v3.5.0";
hash = "sha256-F6T0n37Lbs+B7UHNYzoIQHjNNv3TcMtoXjNrT8ZUlxY=";
};

# grep for GIT_TAG in the following file
# https://github.com/vllm-project/vllm/blob/v${version}/cmake/external_projects/qutlass.cmake
qutlass = fetchFromGitHub {
Expand All @@ -175,8 +186,8 @@ let
name = "flash-attention-source";
owner = "vllm-project";
repo = "flash-attention";
rev = "58e0626a692f09241182582659e3bf8f16472659";
hash = "sha256-ewdZd7LuBKBV0y3AaGRWISJzjg6cu59D2OtgqoDjrbM=";
rev = "86f8f157cf82aa2342743752b97788922dd7de43";
hash = "sha256-+h43jMte/29kraNtPiloSQFfCay4W3NNIlzvs47ygyM=";
};

patches = [
Expand Down Expand Up @@ -304,7 +315,7 @@ in

buildPythonPackage rec {
pname = "vllm";
version = "0.11.2";
version = "0.13.0";
pyproject = true;

stdenv = torch.stdenv;
Expand All @@ -313,7 +324,7 @@ buildPythonPackage rec {
owner = "vllm-project";
repo = "vllm";
tag = "v${version}";
hash = "sha256-DoSlkFmR3KKEtfSfdRB++0CZeeXgxmM3zZjONlxbe8U=";
hash = "sha256-pI9vQBhjRPlKOjZp6kH+n8Y0Q4t9wLYM7SnLftSfYgs=";
};

patches = [
Expand Down Expand Up @@ -345,10 +356,6 @@ buildPythonPackage rec {
--replace-fail \
'set(PYTHON_SUPPORTED_VERSIONS' \
'set(PYTHON_SUPPORTED_VERSIONS "${lib.versions.majorMinor python.version}"'

# Pass build environment PYTHONPATH to vLLM's Python configuration scripts
substituteInPlace CMakeLists.txt \
--replace-fail '$PYTHONPATH' '$ENV{PYTHONPATH}'
'';

nativeBuildInputs = [
Expand Down Expand Up @@ -412,8 +419,10 @@ buildPythonPackage rec {
cbor2
depyf
fastapi
ijson
llguidance
lm-format-enforcer
mcp
numpy
openai
opencv-python-headless
Expand Down Expand Up @@ -500,6 +509,7 @@ buildPythonPackage rec {
lib.optionalAttrs cudaSupport {
VLLM_TARGET_DEVICE = "cuda";
CUDA_HOME = "${lib.getDev cudaPackages.cuda_nvcc}";
TRITON_KERNELS_SRC_DIR = "${lib.getDev triton-kernels}/python/triton_kernels/triton_kernels";
}
// lib.optionalAttrs rocmSupport {
VLLM_TARGET_DEVICE = "rocm";
Expand Down
Loading