diff --git a/java/build.sh b/java/build.sh
index 7d2628d7e5..cbb1b9ed48 100755
--- a/java/build.sh
+++ b/java/build.sh
@@ -7,10 +7,18 @@ set -e -u -o pipefail
ARGS="$*"
NUMARGS=$#
-CURDIR=$(cd "$(dirname "$0")"; pwd)
VERSION="25.10.0" # Note: The version is updated automatically when ci/release/update-version.sh is invoked
GROUP_ID="com.nvidia.cuvs"
+# Identify CUDA major version.
+CUDA_VERSION_FROM_NVCC=$(nvcc --version | grep -oP 'release [0-9]+' | awk '{print $2}')
+CUDA_MAJOR_VERSION=${CUDA_VERSION_FROM_NVCC:-12}
+
+# Identify architecture.
+ARCH=$(uname -m)
+
+BUILD_PROFILE="$ARCH-cuda$CUDA_MAJOR_VERSION"
+
if [ -z "${CMAKE_PREFIX_PATH:=}" ]; then
CMAKE_PREFIX_PATH="$(pwd)/../cpp/build"
export CMAKE_PREFIX_PATH
@@ -33,12 +41,12 @@ fi
# Build the java layer
if [ -z ${LD_LIBRARY_PATH+x} ]
-then export LD_LIBRARY_PATH=${CURDIR}/../cpp/build
-else export LD_LIBRARY_PATH=${CURDIR}/../cpp/build:${LD_LIBRARY_PATH}
+then export LD_LIBRARY_PATH=$CMAKE_PREFIX_PATH
+else export LD_LIBRARY_PATH=$CMAKE_PREFIX_PATH:${LD_LIBRARY_PATH}
fi
-export LD_LIBRARY_PATH=${CURDIR}/../cpp/build:${LD_LIBRARY_PATH}
cd cuvs-java
-mvn verify "${MAVEN_VERIFY_ARGS[@]}" \
+mvn clean verify "${MAVEN_VERIFY_ARGS[@]}" -P "$BUILD_PROFILE" \
&& mvn install:install-file -Dfile=./target/cuvs-java-$VERSION.jar -DgroupId=$GROUP_ID -DartifactId=cuvs-java -Dversion=$VERSION -Dpackaging=jar \
+ && mvn install:install-file -Dfile=./target/cuvs-java-$VERSION-"$BUILD_PROFILE".jar -DgroupId=$GROUP_ID -DartifactId=cuvs-java -Dversion=$VERSION -Dclassifier="$BUILD_PROFILE" -Dpackaging=jar \
&& cp pom.xml ./target/
diff --git a/java/cuvs-java/pom.xml b/java/cuvs-java/pom.xml
index b3b862d291..ca27c67635 100644
--- a/java/cuvs-java/pom.xml
+++ b/java/cuvs-java/pom.xml
@@ -62,6 +62,7 @@
22
UTF-8
UTF-8
+ ${project.build.directory}/../../../cpp/build
@@ -108,10 +109,11 @@
org.apache.maven.plugins
maven-compiler-plugin
- 3.13.0
+ 3.11.0
default-compile
+ compile
compile
@@ -134,6 +136,9 @@
${project.basedir}/src/main/java22
true
+
+ module-info.java
+
@@ -155,29 +160,6 @@
-
- org.apache.maven.plugins
- maven-assembly-plugin
- 3.4.2
-
-
- jar-with-dependencies
-
- merge
-
- add
-
-
-
-
- assemble-all
- package
-
- single
-
-
-
-
org.apache.maven.plugins
maven-jar-plugin
@@ -277,4 +259,172 @@
+
+
+
+ x86_64-cuda12
+
+
+ cuda.version
+ 12
+
+
+
+ x86_64-cuda12
+ ${native.build.path}/cuda12
+
+
+
+
+ org.apache.maven.plugins
+ maven-resources-plugin
+ 3.3.1
+
+
+ copy-native-libs
+ prepare-package
+
+ copy-resources
+
+
+ true
+ ${project.build.directory}/native-libs/${os.arch}/${os.name}
+
+
+ ${native.lib.path}
+
+ libcuvs.so
+ libcuvs_c.so
+
+
+
+ ${native.lib.path}/_deps/rmm-build
+
+ librmm.so
+
+
+
+ ${native.lib.path}/_deps/rapids_logger-build
+
+ librapids_logger.so
+
+
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-assembly-plugin
+ 3.4.2
+
+
+ src/assembly/native-with-deps.xml
+
+
+
+ true
+ true
+ com.nvidia.cuvs.examples.CagraExample
+
+
+
+
+
+ assemble-native
+ package
+
+ single
+
+
+
+
+
+
+
+
+
+ x86_64-cuda13
+
+
+ cuda.version
+ 13
+
+
+
+ x86_64-cuda13
+ ${native.build.path}/cuda13
+
+
+
+
+ org.apache.maven.plugins
+ maven-resources-plugin
+ 3.3.1
+
+
+ copy-native-libs
+ prepare-package
+
+ copy-resources
+
+
+ true
+ ${project.build.directory}/native-libs/${os.arch}/${os.name}
+
+
+ ${native.lib.path}
+
+ libcuvs.so
+ libcuvs_c.so
+
+
+
+ ${native.lib.path}/_deps/rmm-build
+
+ librmm.so
+
+
+
+ ${native.lib.path}/_deps/rapids_logger-build
+
+ librapids_logger.so
+
+
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-assembly-plugin
+ 3.4.2
+
+
+ src/assembly/native-with-deps.xml
+
+
+
+ true
+ true
+ com.nvidia.cuvs.examples.CagraExample
+
+
+
+
+
+ assemble-native
+ package
+
+ single
+
+
+
+
+
+
+
+
diff --git a/java/cuvs-java/src/assembly/native-with-deps.xml b/java/cuvs-java/src/assembly/native-with-deps.xml
new file mode 100644
index 0000000000..91e1ad85de
--- /dev/null
+++ b/java/cuvs-java/src/assembly/native-with-deps.xml
@@ -0,0 +1,52 @@
+
+
+
+
+ ${native.classifier}
+
+ jar
+
+
+ false
+
+
+
+
+ /
+ true
+ true
+ runtime
+
+
+
+
+
+
+ ${project.build.directory}/native-libs
+ /
+
+ **/*
+
+
+
+
+
diff --git a/java/cuvs-java/src/main/java22/com/nvidia/cuvs/spi/JDKProvider.java b/java/cuvs-java/src/main/java22/com/nvidia/cuvs/spi/JDKProvider.java
index 0ee0cf1d10..cfbc8acefa 100644
--- a/java/cuvs-java/src/main/java22/com/nvidia/cuvs/spi/JDKProvider.java
+++ b/java/cuvs-java/src/main/java22/com/nvidia/cuvs/spi/JDKProvider.java
@@ -31,6 +31,10 @@
final class JDKProvider implements CuVSProvider {
+ static {
+ OptionalNativeDependencyLoader.loadLibraries();
+ }
+
private static final MethodHandle createNativeDataset$mh = createNativeDatasetBuilder();
static MethodHandle createNativeDatasetBuilder() {
diff --git a/java/cuvs-java/src/main/java22/com/nvidia/cuvs/spi/OptionalNativeDependencyLoader.java b/java/cuvs-java/src/main/java22/com/nvidia/cuvs/spi/OptionalNativeDependencyLoader.java
new file mode 100644
index 0000000000..facbb670a3
--- /dev/null
+++ b/java/cuvs-java/src/main/java22/com/nvidia/cuvs/spi/OptionalNativeDependencyLoader.java
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2025, NVIDIA CORPORATION.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.nvidia.cuvs.spi;
+
+import java.io.*;
+import java.net.URL;
+import java.util.stream.*;
+
+/**
+ * A class that loads native dependencies if they are available in the jar.
+ */
+public class OptionalNativeDependencyLoader {
+
+ private static final ClassLoader loader = JDKProvider.class.getClassLoader();
+
+ private static boolean loaded = false;
+
+ private static final String[] FILES_TO_LOAD = {
+ "rapids_logger", "rmm", "cuvs", "cuvs_c",
+ };
+
+ public static void loadLibraries() {
+ if (!loaded) {
+ String os = System.getProperty("os.name");
+ String arch = System.getProperty("os.arch");
+
+ Stream.of(FILES_TO_LOAD)
+ .forEach(
+ file -> {
+ // Uncomment the following line to trace the loading of native dependencies.
+ // System.out.println("Loading native dependency: " + file);
+ try {
+ System.load(createFile(os, arch, file).getAbsolutePath());
+ } catch (Throwable t) {
+ System.err.println(
+ "Continuing despite failure to load native dependency: "
+ + System.mapLibraryName(file)
+ + ".so: "
+ + t.getMessage());
+ }
+ });
+
+ loaded = true;
+ }
+ }
+
+ /** Extract the contents of a library resource into a temporary file */
+ private static File createFile(String os, String arch, String baseName) throws IOException {
+ String path = arch + "/" + os + "/" + System.mapLibraryName(baseName);
+ File loc;
+ URL resource = loader.getResource(path);
+ if (resource == null) {
+ throw new FileNotFoundException("Could not locate native dependency " + path);
+ }
+ try (InputStream in = resource.openStream()) {
+ loc = File.createTempFile(baseName, ".so");
+ loc.deleteOnExit();
+ try (OutputStream out = new FileOutputStream(loc)) {
+ byte[] buffer = new byte[1024 * 16];
+ int read = 0;
+ while ((read = in.read(buffer)) >= 0) {
+ out.write(buffer, 0, read);
+ }
+ }
+ }
+ return loc;
+ }
+}
diff --git a/java/docker-build/Dockerfile b/java/docker-build/Dockerfile
new file mode 100644
index 0000000000..84e3d1f712
--- /dev/null
+++ b/java/docker-build/Dockerfile
@@ -0,0 +1,104 @@
+#
+# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+###
+# Build the image for cuVS Java API development environment.
+#
+# Arguments: CUDA_VERSION=[12.X.Y], OS_RELEASE=[8, 9], TARGETPLATFORM=[linux/amd64, linux/arm64]
+#
+###
+ARG CUDA_VERSION=12.9.1
+ARG OS_RELEASE=9
+ARG TARGETPLATFORM=linux/amd64
+
+# multi-platform build with: docker buildx build --platform linux/arm64,linux/amd64 on either amd64 or arm64 host
+# check available official arm-based docker images at https://hub.docker.com/r/nvidia/cuda/tags (OS/ARCH)
+FROM --platform=$TARGETPLATFORM nvidia/cuda:$CUDA_VERSION-devel-rockylinux$OS_RELEASE
+
+# Re-declare ARGs after FROM since they don't persist across FROM boundary
+ARG CUDA_VERSION=12.9.1
+ARG OS_RELEASE=9
+ARG TARGETPLATFORM=linux/amd64
+
+# If DEV_BUILD is ON, the gcc-toolset will be enabled by default for bash shell
+ARG DEV_BUILD=OFF
+
+# Dependency versions
+# Act as default GCC toolset in the image
+ARG TOOLSET_VERSION=14
+ARG CMAKE_VERSION=3.30.4
+ARG CCACHE_VERSION=4.11.2
+
+# Default to the x86_64 CMake binary for x86 builds; pass aarch64 for arm builds
+ARG CMAKE_ARCH=x86_64
+
+### Install basic requirements
+RUN if [ "$OS_RELEASE" = "9" ]; then \
+ dnf --enablerepo=crb install -y scl-utils gcc-toolset-${TOOLSET_VERSION} python39 \
+ zlib-devel maven tar wget patch ninja-build git zip; \
+ elif [ "$OS_RELEASE" = "8" ]; then \
+ dnf --enablerepo=devel install -y scl-utils gcc-toolset-${TOOLSET_VERSION} python39 \
+ zlib-devel maven tar wget patch ninja-build git zip; \
+ else \
+ echo "Unsupported OS version: $OS_RELEASE"; \
+ fi
+
+# Enable the gcc-toolset by default for bash shell if DEV_BUILD is ON
+RUN if [ "$DEV_BUILD" = "ON" ]; then \
+ echo "source scl_source enable gcc-toolset-${TOOLSET_VERSION}" >> /etc/bashrc; \
+ fi
+
+# Execute every time a new non-interactive bash shell is started
+ENV BASH_ENV=/etc/bashrc
+
+## pre-create the CMAKE_INSTALL_PREFIX folder, set writable by any user for Jenkins
+RUN mkdir -m 777 /usr/local/rapids /rapids
+
+# Fetch and install CMake.
+RUN cd /usr/local && wget --quiet https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-linux-${CMAKE_ARCH}.tar.gz && \
+ tar zxf cmake-${CMAKE_VERSION}-linux-${CMAKE_ARCH}.tar.gz && \
+ rm cmake-${CMAKE_VERSION}-linux-${CMAKE_ARCH}.tar.gz
+
+# Make version-less alias for external reference such as when cmake is called by an IDE outside of the container
+RUN ln -s /usr/local/cmake-${CMAKE_VERSION}-linux-${CMAKE_ARCH}/bin/cmake /usr/local/bin/cmake
+
+# ccache for interactive builds
+RUN cd /tmp && wget --quiet https://github.com/ccache/ccache/releases/download/v${CCACHE_VERSION}/ccache-${CCACHE_VERSION}.tar.gz && \
+ tar zxf ccache-${CCACHE_VERSION}.tar.gz && \
+ rm ccache-${CCACHE_VERSION}.tar.gz && \
+ cd ccache-${CCACHE_VERSION} && \
+ mkdir build && \
+ cd build && \
+ scl enable gcc-toolset-${TOOLSET_VERSION} \
+ "cmake .. \
+ -DCMAKE_BUILD_TYPE=Release \
+ -DZSTD_FROM_INTERNET=ON \
+ -DREDIS_STORAGE_BACKEND=OFF && \
+ cmake --build . --parallel ${PARALLEL_LEVEL} --target install" && \
+ cd ../.. && \
+ rm -rf ccache-${CCACHE_VERSION}
+
+# Fetch and install JDK 22.
+RUN cd /usr/local && wget --quiet https://download.oracle.com/java/22/archive/jdk-22.0.2_linux-x64_bin.tar.gz && \
+ tar zxf jdk-22.0.2_linux-x64_bin.tar.gz && \
+ rm jdk-22.0.2_linux-x64_bin.tar.gz
+
+# Make Java22 available in path.
+RUN ln -sf /usr/local/jdk-22.0.2/bin/java /usr/local/bin/java
+ENV JAVA_HOME="/usr/local/jdk-22.0.2"
+
+# disable cuda container constraints to allow running w/ older drivers on datacenter GPUs
+ENV NVIDIA_DISABLE_REQUIRE="true"
diff --git a/java/docker-build/README.md b/java/docker-build/README.md
new file mode 100644
index 0000000000..92cb616bf1
--- /dev/null
+++ b/java/docker-build/README.md
@@ -0,0 +1,152 @@
+# Docker Build Environment for cuVS Java API
+
+This directory contains the Docker-based build system for the cuVS Java API, providing a containerized environment with all necessary dependencies for building the project across different CUDA versions and architectures.
+
+## Overview
+
+The Docker build system provides:
+- Consistent build environment across different host systems
+- Support for multiple CUDA versions (12.x and 13.x)
+- Cross-platform builds (x86_64 and aarch64)
+- Pre-configured development tools (GCC toolset, CMake, Maven, JDK 22)
+
+## Quick Start
+
+### Default Build (CUDA 12, Rocky Linux 9, All GPU Architectures)
+
+```bash
+./build-in-docker
+```
+
+This builds using the defaults:
+- CUDA version: 12.9.1
+- OS version: Rocky Linux 9
+- Target: libcuvs java with all GPU architectures (`--allgpuarch`)
+
+### Building for CUDA 13, All GPU Architectures
+
+```bash
+CUDA_VERSION=13.0.0 ./build-in-docker
+```
+
+### Building for Local GPU Architecture Only
+
+```bash
+./build-in-docker libcuvs java
+```
+
+## Environment Variables
+
+### Core Configuration
+
+- **`CUDA_VERSION`**: CUDA toolkit version to use (default: `12.9.1`)
+ - Examples: `12.9.1`, `13.0.0`, `13.1.0`
+- **`OS_VERSION`**: Rocky Linux OS version to use (default: `9`)
+ - Supported values: `8`, `9`
+- **`CMAKE_GENERATOR`**: CMake generator to use (default: `Ninja`)
+- **`LOCAL_MAVEN_REPO`**: Local Maven repository path (default: `$HOME/.m2/repository`)
+
+### Docker Configuration
+
+- **`JNI_DOCKER_DEV_BUILD`**: Set to `ON` for development builds with gcc-toolset enabled by default (default: `OFF`)
+- **`DOCKER_CMD`**: Docker command to use (default: `docker`)
+- **`DOCKER_BUILD_EXTRA_ARGS`**: Additional arguments for `docker build`
+- **`DOCKER_RUN_EXTRA_ARGS`**: Additional arguments for `docker run`
+- **`DOCKER_GPU_OPTS`**: GPU options for Docker (default: `--gpus all`)
+
+### Build Optimization
+
+- **`LOCAL_CCACHE_DIR`**: ccache directory for build acceleration (default: `$HOME/.ccache`)
+- **`PARALLEL_LEVEL`**: Number of parallel build jobs
+- **`VERBOSE`**: Enable verbose build output
+
+## Architecture Support
+
+The build system automatically detects the host architecture:
+- **x86_64**: Uses `linux/amd64` platform and x86_64 CMake binaries
+- **aarch64**: Uses `linux/arm64` platform and aarch64 CMake binaries
+
+## Files in This Directory
+
+- **`build-in-docker`**: Main entry point script for Docker-based builds
+- **`run-in-docker`**: Lower-level script that handles Docker container execution
+- **`Dockerfile`**: Multi-stage Docker image definition with CUDA, development tools, and dependencies
+- **`env.sh`**: Environment configuration script
+
+## Examples
+
+### Development Build with Custom CUDA Version (Default: All GPU Architectures)
+
+```bash
+CUDA_VERSION=13.1.0 JNI_DOCKER_DEV_BUILD=ON ./build-in-docker
+```
+
+### Production Build with Rocky Linux 8
+
+```bash
+OS_VERSION=8 ./build-in-docker
+```
+
+### Build with Custom Maven Repository and ccache
+
+```bash
+LOCAL_MAVEN_REPO=/custom/maven/repo LOCAL_CCACHE_DIR=/custom/ccache CUDA_VERSION=13.0.0 ./build-in-docker
+```
+
+### Build with Additional Docker Arguments
+
+```bash
+DOCKER_BUILD_EXTRA_ARGS="--no-cache" ./build-in-docker
+```
+
+### Interactive Development Session
+
+```bash
+CUDA_VERSION=12.9.1 JNI_DOCKER_DEV_BUILD=ON ./run-in-docker
+```
+
+This starts an interactive bash shell in the container for development work.
+
+## Docker Image Details
+
+The Docker image is based on `nvidia/cuda:{CUDA_VERSION}-devel-rockylinux{OS_VERSION}` and includes:
+
+- **CUDA Development Tools**: Complete CUDA toolkit for the specified version
+- **GCC Toolset 14**: Modern C++ compiler with C++20 support
+- **CMake 3.30.4**: Build system generator
+- **JDK 22**: Java Development Kit with Panama FFM support
+- **Maven**: Java project management and build tool
+- **ccache**: Compiler cache for faster incremental builds
+- **Additional Tools**: git, ninja-build, wget, tar, zip, patch
+
+## Troubleshooting
+
+### CUDA Version Compatibility
+
+Ensure your target CUDA version is supported by checking available tags at:
+- [NVIDIA CUDA Docker Hub](https://hub.docker.com/r/nvidia/cuda/tags)
+
+### GPU Access Issues
+
+If you encounter GPU access problems, verify:
+1. NVIDIA Docker runtime is installed (`nvidia-docker2` or `nvidia-container-toolkit`)
+2. Your user has permissions to access Docker
+3. GPU drivers are compatible with the CUDA version
+
+### Build Performance
+
+For tweaking build performance:
+1. Use ccache: Ensure `LOCAL_CCACHE_DIR` is set to a persistent directory. (By default, the build uses ccache.)
+2. Change parallelism: Set `PARALLEL_LEVEL` to match your CPU cores. (If unset, a default is chosen automatically.)
+3. Use development builds: Set `JNI_DOCKER_DEV_BUILD=ON` for development
+
+### Memory Requirements
+
+The Docker build process requires significant memory. For large projects, ensure:
+- At least 8GB of available RAM
+- Sufficient disk space for Docker images and build artifacts
+
+## Related Documentation
+
+- [cuVS Java API README](../README.md): Main Java API documentation
+- [cuVS Build Instructions](https://docs.rapids.ai/api/cuvs/stable/build/): Native build documentation
diff --git a/java/docker-build/build-in-docker b/java/docker-build/build-in-docker
new file mode 100755
index 0000000000..1c61429fa8
--- /dev/null
+++ b/java/docker-build/build-in-docker
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+#
+# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Build the cuVS Java API artifact in a Docker container with devtoolset
+
+set -e
+
+# Base paths relative to this script's location
+SCRIPTDIR=$(cd "$(dirname "$0")"; pwd)
+
+LOCAL_MAVEN_REPO=${LOCAL_MAVEN_REPO:-"$HOME/.m2/repository"}
+export CMAKE_GENERATOR=${CMAKE_GENERATOR:-"Ninja"}
+export CUDA_VERSION=${CUDA_VERSION:-12.9.1}
+# CUDA_CLASSIFIER=cuda${CUDA_VERSION%%.*} # Currently unused, but may be needed for future CUDA version handling
+
+# Set env for arm64 builds. Possible values of 'uname -m' include: x86_64, i386, aarch64, mips, ...
+if [ "$(uname -m)" == "aarch64" ]; then
+ # ARM-specific settings.
+ echo "Set ARM Specific settings"
+fi
+
+# TODO: Settings to build for different CUDA versions, different GPU archs, CPU archs, etc.
+
+# Default to building libcuvs java with all GPU architectures if no arguments provided
+if [ $# -eq 0 ]; then
+ "$SCRIPTDIR/run-in-docker" "./build.sh" "libcuvs" "java" "--allgpuarch"
+else
+ "$SCRIPTDIR/run-in-docker" "./build.sh" "$@"
+fi
diff --git a/java/docker-build/run-in-docker b/java/docker-build/run-in-docker
new file mode 100755
index 0000000000..9c102b64a3
--- /dev/null
+++ b/java/docker-build/run-in-docker
@@ -0,0 +1,161 @@
+#!/bin/bash
+
+#
+# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Run a command in a Docker container with devtoolset
+
+set -e
+
+# Convert sclCMD to array for proper argument handling
+if [[ -n "${sclCMD:-}" ]]; then
+ # Convert existing string to array by splitting on whitespace
+ read -ra sclCMD <<< "$sclCMD"
+else
+ sclCMD=(scl enable gcc-toolset-14)
+fi
+
+# Set this environment variable to ON to build the docker image for local development,
+# with gcc-toolset enabled by default.
+# This is necessary especially if the docker container is used for local development with an IDE.
+JNI_DOCKER_DEV_BUILD=${JNI_DOCKER_DEV_BUILD:-OFF}
+
+REPODIR_REL=$(git rev-parse --show-toplevel)
+REPODIR=$(realpath "$REPODIR_REL")
+GIT_COMMON_DIR_REL=$(git rev-parse --git-common-dir)
+GIT_COMMON_DIR=$(realpath "$GIT_COMMON_DIR_REL")
+WORKDIR=${WORKDIR:-$REPODIR}
+TZ=${TZ:-UTC}
+
+CUDA_VERSION=${CUDA_VERSION:-12.9.1}
+CUDA_CLASSIFIER=cuda${CUDA_VERSION%%.*}
+OS_VERSION=${OS_VERSION:-9} # Default to 9
+
+DOCKER_CMD=${DOCKER_CMD:-docker}
+
+# Initialize DOCKER_BUILD_EXTRA_ARGS as an array
+if [[ -n "${DOCKER_BUILD_EXTRA_ARGS:-}" ]]; then
+ # Convert existing string to array by splitting on whitespace
+ read -ra DOCKER_BUILD_EXTRA_ARGS <<< "$DOCKER_BUILD_EXTRA_ARGS"
+else
+ DOCKER_BUILD_EXTRA_ARGS=()
+fi
+
+if [ "$(uname -m)" == "aarch64" ]; then
+ DOCKER_BUILD_EXTRA_ARGS=(--build-arg TARGETPLATFORM=linux/arm64 --build-arg CMAKE_ARCH=aarch64 "${DOCKER_BUILD_EXTRA_ARGS[@]}")
+else
+ DOCKER_BUILD_EXTRA_ARGS=(--build-arg TARGETPLATFORM=linux/amd64 --build-arg CMAKE_ARCH=x86_64 "${DOCKER_BUILD_EXTRA_ARGS[@]}")
+fi
+# Initialize DOCKER_RUN_EXTRA_ARGS as an array
+if [[ -n "${DOCKER_RUN_EXTRA_ARGS:-}" ]]; then
+ # Convert existing string to array by splitting on whitespace
+ read -ra DOCKER_RUN_EXTRA_ARGS <<< "$DOCKER_RUN_EXTRA_ARGS"
+else
+ DOCKER_RUN_EXTRA_ARGS=()
+fi
+LOCAL_CCACHE_DIR=${LOCAL_CCACHE_DIR:-"$HOME/.ccache"}
+LOCAL_MAVEN_REPO=${LOCAL_MAVEN_REPO:-"$HOME/.m2/repository"}
+
+if [ "$JNI_DOCKER_DEV_BUILD" == "ON" ]; then
+ echo "Building docker image for local development, gcc-toolset is enabled by default..."
+ JNI_DOCKER_IMAGE="cuvs-jni-build:${CUDA_VERSION}-devel-rockylinux${OS_VERSION}"
+else
+ echo "Building docker image for production, gcc-toolset is NOT enabled by default..."
+ JNI_DOCKER_IMAGE="cuvs-jni-build:${CUDA_VERSION}-rockylinux${OS_VERSION}"
+fi
+
+# ensure directories exist
+mkdir -p "$LOCAL_CCACHE_DIR" "$LOCAL_MAVEN_REPO"
+
+$DOCKER_CMD build "${DOCKER_BUILD_EXTRA_ARGS[@]}" -f "$REPODIR/java/docker-build/Dockerfile" \
+ --build-arg CUDA_VERSION="$CUDA_VERSION" \
+ --build-arg OS_RELEASE="$OS_VERSION" \
+ --build-arg DEV_BUILD="$JNI_DOCKER_DEV_BUILD" \
+ -t "$JNI_DOCKER_IMAGE" \
+ "$REPODIR/java/docker-build"
+
+if [[ "$DOCKER_CMD" == "docker" ]]; then
+ # Initialize DOCKER_GPU_OPTS as an array
+ if [[ -n "${DOCKER_GPU_OPTS:-}" ]]; then
+ # Convert existing string to array by splitting on whitespace
+ read -ra DOCKER_GPU_OPTS <<< "$DOCKER_GPU_OPTS"
+ else
+ DOCKER_GPU_OPTS=(--gpus all)
+ fi
+else
+ DOCKER_GPU_OPTS=()
+fi
+
+# Initialize DOCKER_OPTS as an array
+if [[ -n "${DOCKER_OPTS:-}" ]]; then
+ # Convert existing string to array by splitting on whitespace
+ read -ra DOCKER_OPTS <<< "$DOCKER_OPTS"
+else
+ DOCKER_OPTS=()
+fi
+
+if (( $# == 0 )); then
+ # no arguments gets an interactive shell
+ DOCKER_OPTS+=(-it)
+ RUN_CMD=("/bin/bash")
+else
+ RUN_CMD=("$@")
+fi
+
+MNT_ARGS=()
+
+RO_SRC=(
+ "/etc/group"
+ "/etc/passwd"
+ "/etc/shadow"
+ "/etc/sudoers.d"
+)
+RO_DST=("${RO_SRC[@]}")
+if [[ "$HOST_CUDA_PATH" != "" ]]; then
+ RO_SRC+=("$HOST_CUDA_PATH")
+ RO_DST+=("/usr/local/cuda")
+fi
+for (( i=0; i<${#RO_SRC[@]}; i++)); do
+ MNT_ARGS+=(--mount "type=bind,src=${RO_SRC[$i]},dst=${RO_DST[$i]},ro")
+done
+
+RW_SRC=(
+ "$GIT_COMMON_DIR"
+ "$WORKDIR"
+ "$LOCAL_CCACHE_DIR"
+ "$LOCAL_MAVEN_REPO"
+)
+for (( i=0; i<${#RW_SRC[@]}; i++)); do
+ MNT_ARGS+=(--mount "type=bind,src=${RW_SRC[$i]},dst=${RW_SRC[$i]}")
+done
+
+$DOCKER_CMD run "${DOCKER_GPU_OPTS[@]}" "${DOCKER_RUN_EXTRA_ARGS[@]}" -u "$(id -u):$(id -g)" --rm \
+ "${MNT_ARGS[@]}" \
+ --workdir "$WORKDIR" \
+ -e CCACHE_DIR="$LOCAL_CCACHE_DIR" \
+ -e CMAKE_C_COMPILER_LAUNCHER="ccache" \
+ -e CMAKE_CXX_COMPILER_LAUNCHER="ccache" \
+ -e CMAKE_CUDA_COMPILER_LAUNCHER="ccache" \
+ -e CMAKE_CXX_LINKER_LAUNCHER="ccache" \
+ -e CMAKE_PREFIX_PATH="${WORKDIR}/cpp/build/$CUDA_CLASSIFIER" \
+ -e LIBCUVS_BUILD_DIR="${WORKDIR}/cpp/build/$CUDA_CLASSIFIER" \
+ -e CMAKE_GENERATOR \
+ -e CUDA_VISIBLE_DEVICES \
+ -e PARALLEL_LEVEL \
+ -e VERBOSE \
+ "${DOCKER_OPTS[@]}" \
+ "$JNI_DOCKER_IMAGE" \
+ "${sclCMD[@]}" -- "${RUN_CMD[@]}"