From fd913dc858bf35f0a3ff5c5c09488fd470d08ffb Mon Sep 17 00:00:00 2001
From: youkaichao
Date: Thu, 21 Aug 2025 22:00:23 +0800
Subject: [PATCH 1/3] add cuda 129 backend
---
crates/uv-torch/src/backend.rs | 16 ++++++++++++++--
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/crates/uv-torch/src/backend.rs b/crates/uv-torch/src/backend.rs
index 5407d6fbe548d..6f050e3a57622 100644
--- a/crates/uv-torch/src/backend.rs
+++ b/crates/uv-torch/src/backend.rs
@@ -59,6 +59,8 @@ pub enum TorchMode {
Auto,
/// Use the CPU-only PyTorch index.
Cpu,
+ /// Use the PyTorch index for CUDA 12.9.
+ Cu129,
/// Use the PyTorch index for CUDA 12.8.
Cu128,
/// Use the PyTorch index for CUDA 12.6.
@@ -208,6 +210,7 @@ impl TorchStrategy {
None => Ok(Self::Backend(TorchBackend::Cpu)),
},
TorchMode::Cpu => Ok(Self::Backend(TorchBackend::Cpu)),
+ TorchMode::Cu129 => Ok(Self::Backend(TorchBackend::Cu129)),
TorchMode::Cu128 => Ok(Self::Backend(TorchBackend::Cu128)),
TorchMode::Cu126 => Ok(Self::Backend(TorchBackend::Cu126)),
TorchMode::Cu125 => Ok(Self::Backend(TorchBackend::Cu125)),
@@ -380,6 +383,7 @@ impl TorchStrategy {
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum TorchBackend {
Cpu,
+ Cu129,
Cu128,
Cu126,
Cu125,
@@ -428,6 +432,7 @@ impl TorchBackend {
fn index_url(self) -> &'static IndexUrl {
match self {
Self::Cpu => &CPU_INDEX_URL,
+ Self::Cu129 => &CU129_INDEX_URL,
Self::Cu128 => &CU128_INDEX_URL,
Self::Cu126 => &CU126_INDEX_URL,
Self::Cu125 => &CU125_INDEX_URL,
@@ -491,6 +496,7 @@ impl TorchBackend {
pub fn cuda_version(&self) -> Option<Version> {
match self {
Self::Cpu => None,
+ Self::Cu129 => Some(Version::new([12, 9])),
Self::Cu128 => Some(Version::new([12, 8])),
Self::Cu126 => Some(Version::new([12, 6])),
Self::Cu125 => Some(Version::new([12, 5])),
@@ -539,6 +545,7 @@ impl TorchBackend {
pub fn rocm_version(&self) -> Option<Version> {
match self {
Self::Cpu => None,
+ Self::Cu129 => None,
Self::Cu128 => None,
Self::Cu126 => None,
Self::Cu125 => None,
@@ -590,6 +597,7 @@ impl FromStr for TorchBackend {
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"cpu" => Ok(Self::Cpu),
+ "cu129" => Ok(Self::Cu129),
"cu128" => Ok(Self::Cu128),
"cu126" => Ok(Self::Cu126),
"cu125" => Ok(Self::Cu125),
@@ -639,10 +647,11 @@ impl FromStr for TorchBackend {
/// Linux CUDA driver versions and the corresponding CUDA versions.
///
/// See: <https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html>
-static LINUX_CUDA_DRIVERS: LazyLock<[(TorchBackend, Version); 24]> = LazyLock::new(|| {
+static LINUX_CUDA_DRIVERS: LazyLock<[(TorchBackend, Version); 25]> = LazyLock::new(|| {
[
// Table 2 from
// https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html
+ (TorchBackend::Cu129, Version::new([525, 60, 13])),
(TorchBackend::Cu128, Version::new([525, 60, 13])),
(TorchBackend::Cu126, Version::new([525, 60, 13])),
(TorchBackend::Cu125, Version::new([525, 60, 13])),
@@ -677,10 +686,11 @@ static LINUX_CUDA_DRIVERS: LazyLock<[(TorchBackend, Version); 24]> = LazyLock::n
/// Windows CUDA driver versions and the corresponding CUDA versions.
///
/// See: <https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html>
-static WINDOWS_CUDA_VERSIONS: LazyLock<[(TorchBackend, Version); 24]> = LazyLock::new(|| {
+static WINDOWS_CUDA_VERSIONS: LazyLock<[(TorchBackend, Version); 25]> = LazyLock::new(|| {
[
// Table 2 from
// https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html
+ (TorchBackend::Cu129, Version::new([528, 33])),
(TorchBackend::Cu128, Version::new([528, 33])),
(TorchBackend::Cu126, Version::new([528, 33])),
(TorchBackend::Cu125, Version::new([528, 33])),
@@ -781,6 +791,8 @@ static LINUX_AMD_GPU_DRIVERS: LazyLock<[(TorchBackend, AmdGpuArchitecture); 44]>
static CPU_INDEX_URL: LazyLock<IndexUrl> =
LazyLock::new(|| IndexUrl::from_str("https://download.pytorch.org/whl/cpu").unwrap());
+static CU129_INDEX_URL: LazyLock<IndexUrl> =
+ LazyLock::new(|| IndexUrl::from_str("https://download.pytorch.org/whl/cu129").unwrap());
static CU128_INDEX_URL: LazyLock<IndexUrl> =
LazyLock::new(|| IndexUrl::from_str("https://download.pytorch.org/whl/cu128").unwrap());
static CU126_INDEX_URL: LazyLock<IndexUrl> =
From 90cd9b90e173e7556d7857ab94cba02afac4bbba Mon Sep 17 00:00:00 2001
From: youkaichao
Date: Thu, 21 Aug 2025 22:24:46 +0800
Subject: [PATCH 2/3] update schema.json
Signed-off-by: youkaichao
---
uv.schema.json | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/uv.schema.json b/uv.schema.json
index a86155c326cb4..6deddd4be8442 100644
--- a/uv.schema.json
+++ b/uv.schema.json
@@ -2479,6 +2479,11 @@
"type": "string",
"const": "cpu"
},
+ {
+ "description": "Use the PyTorch index for CUDA 12.9.",
+ "type": "string",
+ "const": "cu129"
+ },
{
"description": "Use the PyTorch index for CUDA 12.8.",
"type": "string",
From 5d767224560f6efa6b666bf3bf5f83186f12bf7c Mon Sep 17 00:00:00 2001
From: Charlie Marsh
Date: Thu, 21 Aug 2025 15:44:21 +0100
Subject: [PATCH 3/3] Run cargo dev generate-all
---
docs/reference/cli.md | 3 +++
1 file changed, 3 insertions(+)
diff --git a/docs/reference/cli.md b/docs/reference/cli.md
index 80277154d32b9..8b6ea73ddec39 100644
--- a/docs/reference/cli.md
+++ b/docs/reference/cli.md
@@ -3701,6 +3701,7 @@ by --python-version.
auto: Select the appropriate PyTorch index based on the operating system and CUDA driver version
cpu: Use the CPU-only PyTorch index
+cu129: Use the PyTorch index for CUDA 12.9
cu128: Use the PyTorch index for CUDA 12.8
cu126: Use the PyTorch index for CUDA 12.6
cu125: Use the PyTorch index for CUDA 12.5
@@ -3973,6 +3974,7 @@ be used with caution, as it can modify the system Python installation.
auto: Select the appropriate PyTorch index based on the operating system and CUDA driver version
cpu: Use the CPU-only PyTorch index
+cu129: Use the PyTorch index for CUDA 12.9
cu128: Use the PyTorch index for CUDA 12.8
cu126: Use the PyTorch index for CUDA 12.6
cu125: Use the PyTorch index for CUDA 12.5
@@ -4271,6 +4273,7 @@ should be used with caution, as it can modify the system Python installation.
auto: Select the appropriate PyTorch index based on the operating system and CUDA driver version
cpu: Use the CPU-only PyTorch index
+cu129: Use the PyTorch index for CUDA 12.9
cu128: Use the PyTorch index for CUDA 12.8
cu126: Use the PyTorch index for CUDA 12.6
cu125: Use the PyTorch index for CUDA 12.5