diff --git a/docker/unified/Dockerfile b/docker/unified/Dockerfile
index 95b7a8e8..3d2425c6 100644
--- a/docker/unified/Dockerfile
+++ b/docker/unified/Dockerfile
@@ -143,10 +143,9 @@ COPY --from=sd-build /install/bin/sd-server /usr/local/bin/
 COPY --from=sd-build /install/bin/sd-cli /usr/local/bin/
 COPY --from=sd-build /install/lib/ /usr/local/lib/
 
-# Copy llama.cpp binaries and libraries
+# Copy llama.cpp binaries (statically linked)
 COPY --from=llama-build /install/bin/llama-server /usr/local/bin/
 COPY --from=llama-build /install/bin/llama-cli /usr/local/bin/
-COPY --from=llama-build /install/lib/ /usr/local/lib/
 
 # Copy llama-swap binary
 COPY --from=llama-swap-download /install/bin/llama-swap /usr/local/bin/
diff --git a/docker/unified/install-llama.sh b/docker/unified/install-llama.sh
index 64ca43ca..f73460e4 100755
--- a/docker/unified/install-llama.sh
+++ b/docker/unified/install-llama.sh
@@ -6,7 +6,7 @@ set -e
 COMMIT_HASH="${1:-master}"
 BACKEND="${BACKEND:-cuda}"
 
-mkdir -p /install/bin /install/lib
+mkdir -p /install/bin
 
 # Clone and checkout (init-based so cache-mounted /src/llama.cpp/build dir doesn't break clone)
 echo "=== Cloning llama.cpp at ${COMMIT_HASH} ==="
@@ -22,6 +22,7 @@ git checkout FETCH_HEAD
 # Common cmake flags
 CMAKE_FLAGS=(
     -DGGML_NATIVE=OFF
+    -DBUILD_SHARED_LIBS=OFF
     -DCMAKE_BUILD_TYPE=Release
     -DCMAKE_C_COMPILER_LAUNCHER=ccache
     -DCMAKE_CXX_COMPILER_LAUNCHER=ccache
@@ -35,7 +36,6 @@ if [ "$BACKEND" = "cuda" ]; then
         "-DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES:-60;61;75;86;89}"
         "-DCMAKE_CUDA_FLAGS=-allow-unsupported-compiler"
         "-DCMAKE_EXE_LINKER_FLAGS=-Wl,-rpath-link,/usr/local/cuda/lib64/stubs -lcuda"
-        "-DCMAKE_SHARED_LINKER_FLAGS=-Wl,-rpath-link,/usr/local/cuda/lib64/stubs -lcuda"
     )
 elif [ "$BACKEND" = "vulkan" ]; then
     CMAKE_FLAGS+=(
@@ -59,7 +59,5 @@ for bin in "${TARGETS[@]}"; do
     fi
     cp "build/bin/$bin" "/install/bin/"
 done
-find build -name "*.so*" -type f -exec cp {} /install/lib/ \;
-
 echo "=== llama.cpp build complete ==="
 ls -la /install/bin/