From e3a67aae3ca8a9c3a1bc4b0a82544e9ceea6be2c Mon Sep 17 00:00:00 2001 From: Ettore Di Giacinto Date: Thu, 8 Jan 2026 18:28:30 +0100 Subject: [PATCH] chore(ci): use latest jetpack image for l4t This image is for HW prior to Jetpack 7. Jetpack 7 broke compatibility with older devices (which are still in use) such as AGX Orin or Jetsons. While we do have l4t-cuda-13 images with sbsa support for new Nvidia devices (Thor, DGX, etc.), for older HW we are forced to keep old images around as 24.04 does not seem to be supported. Signed-off-by: Ettore Di Giacinto --- .github/workflows/backend.yml | 44 +++++++++++++++++------------------ .github/workflows/image.yml | 8 +++---- 2 files changed, 26 insertions(+), 26 deletions(-) diff --git a/.github/workflows/backend.yml b/.github/workflows/backend.yml index 87392e81562e..e47ca8be1ed5 100644 --- a/.github/workflows/backend.yml +++ b/.github/workflows/backend.yml @@ -41,17 +41,17 @@ jobs: include: - build-type: 'l4t' cuda-major-version: "12" - cuda-minor-version: "9" + cuda-minor-version: "0" platforms: 'linux/arm64' tag-latest: 'auto' tag-suffix: '-nvidia-l4t-diffusers' runs-on: 'ubuntu-24.04-arm' - base-image: "ubuntu:24.04" + base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0" skip-drivers: 'true' backend: "diffusers" dockerfile: "./backend/Dockerfile.python" context: "./" - ubuntu-version: '2404' + ubuntu-version: '2204' - build-type: '' cuda-major-version: "" cuda-minor-version: "" @@ -766,12 +766,12 @@ jobs: tag-latest: 'auto' tag-suffix: '-nvidia-l4t-vibevoice' runs-on: 'ubuntu-24.04-arm' - base-image: "ubuntu:24.04" + base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0" skip-drivers: 'true' backend: "vibevoice" dockerfile: "./backend/Dockerfile.python" context: "./" - ubuntu-version: '2404' + ubuntu-version: '2204' - build-type: 'l4t' cuda-major-version: "12" cuda-minor-version: "0" platforms: 'linux/arm64' tag-latest: 'auto' tag-suffix: '-nvidia-l4t-kokoro' runs-on: 'ubuntu-24.04-arm' - base-image: "ubuntu:24.04" + base-image: 
"nvcr.io/nvidia/l4t-jetpack:r36.4.0" skip-drivers: 'true' backend: "kokoro" dockerfile: "./backend/Dockerfile.python" context: "./" - ubuntu-version: '2404' + ubuntu-version: '2204' # SYCL additional backends - build-type: 'intel' cuda-major-version: "" @@ -894,17 +894,17 @@ jobs: ubuntu-version: '2404' - build-type: 'cublas' cuda-major-version: "12" - cuda-minor-version: "9" + cuda-minor-version: "0" platforms: 'linux/arm64' skip-drivers: 'false' tag-latest: 'auto' tag-suffix: '-nvidia-l4t-arm64-llama-cpp' - base-image: "ubuntu:24.04" + base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0" runs-on: 'ubuntu-24.04-arm' backend: "llama-cpp" dockerfile: "./backend/Dockerfile.llama-cpp" context: "./" - ubuntu-version: '2404' + ubuntu-version: '2204' - build-type: 'vulkan' cuda-major-version: "" cuda-minor-version: "" @@ -973,17 +973,17 @@ jobs: ubuntu-version: '2404' - build-type: 'cublas' cuda-major-version: "12" - cuda-minor-version: "9" + cuda-minor-version: "0" platforms: 'linux/arm64' skip-drivers: 'false' tag-latest: 'auto' tag-suffix: '-nvidia-l4t-arm64-stablediffusion-ggml' - base-image: "ubuntu:24.04" + base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0" runs-on: 'ubuntu-24.04-arm' backend: "stablediffusion-ggml" dockerfile: "./backend/Dockerfile.golang" context: "./" - ubuntu-version: '2404' + ubuntu-version: '2204' # whisper - build-type: '' cuda-major-version: "" @@ -1039,17 +1039,17 @@ jobs: ubuntu-version: '2404' - build-type: 'cublas' cuda-major-version: "12" - cuda-minor-version: "9" + cuda-minor-version: "0" platforms: 'linux/arm64' skip-drivers: 'false' tag-latest: 'auto' tag-suffix: '-nvidia-l4t-arm64-whisper' - base-image: "ubuntu:24.04" + base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0" runs-on: 'ubuntu-24.04-arm' backend: "whisper" dockerfile: "./backend/Dockerfile.golang" context: "./" - ubuntu-version: '2404' + ubuntu-version: '2204' - build-type: 'hipblas' cuda-major-version: "" cuda-minor-version: "" @@ -1139,12 +1139,12 @@ jobs: skip-drivers: 'true' 
tag-latest: 'auto' tag-suffix: '-nvidia-l4t-arm64-rfdetr' - base-image: "ubuntu:24.04" + base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0" runs-on: 'ubuntu-24.04-arm' backend: "rfdetr" dockerfile: "./backend/Dockerfile.python" context: "./" - ubuntu-version: '2404' + ubuntu-version: '2204' # exllama2 - build-type: '' cuda-major-version: "" @@ -1192,12 +1192,12 @@ jobs: skip-drivers: 'true' tag-latest: 'auto' tag-suffix: '-nvidia-l4t-arm64-chatterbox' - base-image: "ubuntu:24.04" + base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0" runs-on: 'ubuntu-24.04-arm' backend: "chatterbox" dockerfile: "./backend/Dockerfile.python" context: "./" - ubuntu-version: '2404' + ubuntu-version: '2204' # runs out of space on the runner # - build-type: 'hipblas' # cuda-major-version: "" @@ -1259,12 +1259,12 @@ jobs: skip-drivers: 'true' tag-latest: 'auto' tag-suffix: '-nvidia-l4t-arm64-neutts' - base-image: "ubuntu:24.04" + base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0" runs-on: 'ubuntu-24.04-arm' backend: "neutts" dockerfile: "./backend/Dockerfile.python" context: "./" - ubuntu-version: '2404' + ubuntu-version: '2204' - build-type: '' cuda-major-version: "" cuda-minor-version: "" diff --git a/.github/workflows/image.yml b/.github/workflows/image.yml index 4fde110dc238..ce571006e510 100644 --- a/.github/workflows/image.yml +++ b/.github/workflows/image.yml @@ -162,16 +162,16 @@ include: - build-type: 'cublas' cuda-major-version: "12" - cuda-minor-version: "9" + cuda-minor-version: "0" platforms: 'linux/arm64' tag-latest: 'auto' tag-suffix: '-nvidia-l4t-arm64' - base-image: "ubuntu:24.04" + base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0" runs-on: 'ubuntu-24.04-arm' makeflags: "--jobs=4 --output-sync=target" skip-drivers: 'true' - ubuntu-version: "2404" - ubuntu-codename: 'noble' + ubuntu-version: "2204" + ubuntu-codename: 'jammy' - build-type: 'cublas' cuda-major-version: "13" cuda-minor-version: "0"