diff --git a/README.md b/README.md
index 727a86cb5..087919d54 100644
--- a/README.md
+++ b/README.md
@@ -26,7 +26,7 @@ Compilation quickstart:
 git clone https://github.com/timdettmers/bitsandbytes.git
 cd bitsandbytes
 
-# CUDA_VERSIONS in {110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 120}
+# CUDA_VERSIONS in {110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121}
 # make argument in {cuda110, cuda11x, cuda12x}
 # if you do not know what CUDA you have, try looking at the output of: python -m bitsandbytes
 CUDA_VERSION=117 make cuda11x
@@ -83,7 +83,7 @@ Hardware requirements:
 - LLM.int8(): NVIDIA Turing (RTX 20xx; T4) or Ampere GPU (RTX 30xx; A4-A100); (a GPU from 2018 or older).
 - 8-bit optimizers and quantization: NVIDIA Kepler GPU or newer (>=GTX 78X).
 
-Supported CUDA versions: 10.2 - 12.0
+Supported CUDA versions: 10.2 - 12.1
 
 The bitsandbytes library is currently only supported on Linux distributions. Windows is not supported at the moment.
 
diff --git a/bitsandbytes/cuda_setup/main.py b/bitsandbytes/cuda_setup/main.py
index e7901d82e..1e1cf5c2a 100644
--- a/bitsandbytes/cuda_setup/main.py
+++ b/bitsandbytes/cuda_setup/main.py
@@ -31,7 +31,7 @@
 # libcudart.so is missing by default for a conda install with PyTorch 2.0 and instead
 # we have libcudart.so.11.0 which causes a lot of errors before
 # not sure if libcudart.so.12.0 exists in pytorch installs, but it does not hurt
-CUDA_RUNTIME_LIBS: list = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0']
+CUDA_RUNTIME_LIBS: list = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0', 'libcudart.so.12.1']
 
 # this is a order list of backup paths to search CUDA in, if it cannot be found in the main environmental paths
 backup_paths = []
@@ -75,6 +75,8 @@ def generate_instructions(self):
             make_cmd += ' make cuda110'
         elif self.cuda_version_string[:2] == '11' and int(self.cuda_version_string[2]) > 0:
             make_cmd += ' make cuda11x'
+        elif self.cuda_version_string[:2] == '12' and 1 >= int(self.cuda_version_string[2]) >= 0:
+            make_cmd += ' make cuda12x'
         elif self.cuda_version_string == '100':
             self.add_log_entry('CUDA SETUP: CUDA 10.0 not supported. Please use a different CUDA version.')
             self.add_log_entry('CUDA SETUP: Before you try again running bitsandbytes, make sure old CUDA 10.0 versions are uninstalled and removed from $LD_LIBRARY_PATH variables.')
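
Note (not part of the patch): a minimal sketch of the version-string branching that the new `make cuda12x` branch adds to generate_instructions(). The standalone helper name select_make_target is hypothetical and only mirrors the elif chain above, including the chained comparison that accepts CUDA 12.0 and 12.1.

# Hypothetical helper mirroring the elif chain in generate_instructions();
# not part of the bitsandbytes API, shown only to illustrate the branching.
def select_make_target(cuda_version_string: str) -> str:
    """Map a CUDA version string like '117' or '121' to a make target."""
    if cuda_version_string == '110':
        return 'cuda110'
    elif cuda_version_string[:2] == '11' and int(cuda_version_string[2]) > 0:
        return 'cuda11x'
    elif cuda_version_string[:2] == '12' and 1 >= int(cuda_version_string[2]) >= 0:
        # chained comparison: true only for '120' and '121'
        return 'cuda12x'
    raise ValueError(f'unsupported CUDA version string: {cuda_version_string}')

assert select_make_target('117') == 'cuda11x'
assert select_make_target('120') == 'cuda12x'
assert select_make_target('121') == 'cuda12x'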