From bc4e59413e03dd64cdd4193ed2ffbff6d232dd1d Mon Sep 17 00:00:00 2001 From: Gaetan Lepage Date: Thu, 27 Nov 2025 22:10:34 +0000 Subject: [PATCH] python3Packages.llama-cpp-python: disable pythonImportsCheck when cudaSupport is enabled --- .../llama-cpp-python/default.nix | 22 ++++++++++++++----- 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/pkgs/development/python-modules/llama-cpp-python/default.nix b/pkgs/development/python-modules/llama-cpp-python/default.nix index 08597d833e768..27781bd0c2c2e 100644 --- a/pkgs/development/python-modules/llama-cpp-python/default.nix +++ b/pkgs/development/python-modules/llama-cpp-python/default.nix @@ -9,6 +9,7 @@ # nativeBuildInputs cmake, ninja, + autoAddDriverRunpath, # build-system pathspec, @@ -73,13 +74,13 @@ buildPythonPackage rec { # -mcpu, breaking linux build as follows: # # cc1: error: unknown value ‘native+nodotprod+noi8mm+nosve’ for ‘-mcpu’ - "-DGGML_NATIVE=off" - "-DGGML_BUILD_NUMBER=1" + (lib.cmakeBool "GGML_NATIVE" false) + (lib.cmakeFeature "GGML_BUILD_NUMBER" "1") ] ++ lib.optionals cudaSupport [ - "-DGGML_CUDA=on" - "-DCUDAToolkit_ROOT=${lib.getDev cudaPackages.cuda_nvcc}" - "-DCMAKE_CUDA_COMPILER=${lib.getExe cudaPackages.cuda_nvcc}" + (lib.cmakeBool "GGML_CUDA" true) + (lib.cmakeFeature "CUDAToolkit_ROOT" "${lib.getDev cudaPackages.cuda_nvcc}") + (lib.cmakeFeature "CMAKE_CUDA_COMPILER" "${lib.getExe cudaPackages.cuda_nvcc}") ]; enableParallelBuilding = true; @@ -87,6 +88,9 @@ buildPythonPackage rec { nativeBuildInputs = [ cmake ninja + ] + ++ lib.optionals cudaSupport [ + autoAddDriverRunpath ]; build-system = [ @@ -125,7 +129,13 @@ buildPythonPackage rec { "test_real_llama" ]; - pythonImportsCheck = [ "llama_cpp" ]; + pythonImportsCheck = lib.optionals (!cudaSupport) [ + # `libllama.so` is loaded at import time, and fails when cudaSupport is enabled because the CUDA + # driver is missing in the sandbox: + # RuntimeError: Failed to load shared library 
'/nix/store/...-python3.13-llama-cpp-python-0.3.16/lib/python3.13/site-packages/llama_cpp/lib/libllama.so': + # libcuda.so.1: cannot open shared object file: No such file or directory + "llama_cpp" + ]; passthru = { updateScript = gitUpdater {