From 3bfbf82ef219882f38acbc5ac4a1e34aee63eed5 Mon Sep 17 00:00:00 2001
From: Silvio Traversaro
Date: Tue, 16 Sep 2025 10:23:57 +0200
Subject: [PATCH] Enable Vulkan support

---
 recipe/recipe.yaml | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)

diff --git a/recipe/recipe.yaml b/recipe/recipe.yaml
index e8bee54e..1805be9c 100644
--- a/recipe/recipe.yaml
+++ b/recipe/recipe.yaml
@@ -1,7 +1,7 @@
 context:
   name: llama.cpp
   version: "6441"
-  build: 0
+  build: 1
 
 package:
   name: ${{ name|lower }}
@@ -38,6 +38,12 @@ build:
       ${{ cmake_args("BUILD_SHARED_LIBS=ON") }}
       ${{ llama_args("CURL=ON") }}
 
+      # Vulkan is only useful on Linux, as on macOS
+      # metal is used
+      {%- if linux %}
+      ${{ ggml_args("VULKAN=ON") }}
+      {%- endif %}
+
       {%- if osx and arm64 %}
       ${{ ggml_args("NATIVE=OFF") }}
       ${{ ggml_args("AVX=OFF") }}
@@ -67,6 +73,11 @@ build:
 
       {%- endif %}
 
+      {%- if build_platform != target_platform %}
+      ${{ cmake_args("HOST_C_COMPILER=$CC_FOR_BUILD") }}
+      ${{ cmake_args("HOST_CXX_COMPILER=$CXX_FOR_BUILD") }}
+      {%- endif %}
+
       echo $LLAMA_ARGS
       cmake -S . -B build -G Ninja ${CMAKE_ARGS} ${LLAMA_ARGS}
       cmake --build build
@@ -93,6 +104,8 @@ build:
 
       ${{ ggml_args("NATIVE=OFF") }}
 
+      ${{ ggml_args("VULKAN=ON") }}
+
       {%- if cuda_compiler_version != "None" %}
 
       ${{ ggml_args("CUDA=ON") }}
@@ -137,6 +150,9 @@ requirements:
     - pkgconfig
   host:
     - libcurl
+    - libvulkan-headers
+    - libvulkan-loader
+    - shaderc
    - if: cuda_compiler_version != "None"
      then:
        # NOTE: Without cuda-version, we are installing cuda-toolkit 11.8 instead of 11.2!
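
For context, below is a minimal sketch of the configure step this recipe is assumed to produce on Linux once the patch is applied. It presumes that the cmake_args/llama_args/ggml_args helpers defined elsewhere in the feedstock expand to plain -D, -DLLAMA_ and -DGGML_ CMake cache entries collected into LLAMA_ARGS; the exact expansion is not shown in this diff, so treat the command as illustrative rather than the literal build script.

# Hypothetical expansion of ${CMAKE_ARGS} ${LLAMA_ARGS} on Linux with the new
# Vulkan flag; -DGGML_VULKAN=ON is what ggml_args("VULKAN=ON") is assumed to add.
cmake -S . -B build -G Ninja ${CMAKE_ARGS} \
    -DBUILD_SHARED_LIBS=ON \
    -DLLAMA_CURL=ON \
    -DGGML_VULKAN=ON
cmake --build build

The added host dependencies support this configuration: libvulkan-headers and libvulkan-loader provide the Vulkan API headers and the library to link against, while shaderc supplies glslc, which ggml's Vulkan backend uses to compile its compute shaders to SPIR-V at build time.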