Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
57 changes: 10 additions & 47 deletions easybuild/easyconfigs/n/NVHPC/NVHPC-25.1-CUDA-12.6.0.eb
Original file line number Diff line number Diff line change
Expand Up @@ -3,14 +3,15 @@ version = '25.1'
versionsuffix = '-CUDA-%(cudaver)s'

homepage = 'https://developer.nvidia.com/hpc-sdk/'
description = """C, C++ and Fortran compilers included with the NVIDIA HPC SDK (previously: PGI)"""
description = """Complete toolchain based on NVIDIA HPC SDK. Includes C, C++ and FORTRAN
compilers (nvidia-compilers), an MPI implementation based on OpenMPI (NVHPCX)
and math libraries based on OpenBLAS and ScaLAPACK."""

toolchain = SYSTEM

local_tarball_tmpl = 'nvhpc_2025_%%(version_major)s%%(version_minor)s_Linux_%s_cuda_multi.tar.gz'
# By downloading, you accept the HPC SDK Software License Agreement
# https://docs.nvidia.com/hpc-sdk/eula/index.html
# accept_eula = True
local_tarball_tmpl = 'nvhpc_2025_%%(version_major)s%%(version_minor)s_Linux_%s_cuda_multi.tar.gz'
source_urls = ['https://developer.download.nvidia.com/hpc-sdk/%(version)s/']
sources = [local_tarball_tmpl % '%(arch)s']
checksums = [
Expand All @@ -22,52 +23,14 @@ checksums = [
}
]

local_gccver = '13.3.0'
dependencies = [
('GCCcore', local_gccver),
('binutils', '2.42', '', ('GCCcore', local_gccver)),
# This is necessary to avoid cases where just libnuma.so.1 is present in the system and -lnuma fails
('numactl', '2.0.18', '', ('GCCcore', local_gccver)),
('CUDA', '12.6.0', '', SYSTEM),
('CUDA', '12.6.0'),
('nvidia-compilers', version, versionsuffix),
]

module_add_cuda = False

# specify default CUDA version that should be used by NVHPC
# should match one of the CUDA versions that are included with this NVHPC version
# (see install_components/Linux_x86_64/$version/cuda/) where $version is the NVHPC version
# this version can be tweaked from the EasyBuild command line with
# --try-amend=default_cuda_version="11.0" (for example)
default_cuda_version = '%(cudaver)s'
# NVHPC needs CUDA to work. This easyconfig uses external CUDA from EasyBuild.
# The default CUDA version used by NVHPC will be the version of loaded CUDA module

# NVHPC EasyBlock supports some features, which can be set via CLI or this easyconfig.
# The following list gives examples for the easyconfig
#
# NVHPC needs CUDA to work. Two options are available: 1) Use NVHPC-bundled CUDA, 2) use system CUDA
# 1) Bundled CUDA
# If no easybuild dependency to CUDA is present, the bundled CUDA is taken. A version needs to be specified with
# default_cuda_version = "11.0"
# in this easyconfig file; alternatively, it can be specified through the command line during installation with
# --try-amend=default_cuda_version="10.2"
# 2) CUDA provided via EasyBuild
# Use CUDA as a dependency, for example
# dependencies = [('CUDA', '11.5.0')]
# The parameter default_cuda_version still can be set as above.
# If not set, it will be deduced from the CUDA module (via $EBVERSIONCUDA)
#
# Define a NVHPC-default Compute Capability
# cuda_compute_capabilities = "8.0"
# Can also be specified on the EasyBuild command line via --cuda-compute-capabilities=8.0
# Only single values supported, not lists of values!
#
# Options to add/remove things to/from environment module (defaults shown)
# module_byo_compilers = False # Remove compilers from PATH (Bring-your-own compilers)
# module_nvhpc_own_mpi = False # Add NVHPC's own pre-compiled OpenMPI
# module_add_math_libs = False # Add NVHPC's math libraries (which should be there from CUDA anyway)
# module_add_profilers = False # Add NVHPC's NVIDIA Profilers
# module_add_nccl = False # Add NVHPC's NCCL library
# module_add_nvshmem = False # Add NVHPC's NVSHMEM library
# module_add_cuda = False # Add NVHPC's bundled CUDA
# The default Compute Capability used by NVHPC is the one defined in nvidia-compilers (if any)

# this bundle serves as a compiler-only toolchain, so it should be marked as compiler (important for HMNS)
moduleclass = 'compiler'
moduleclass = 'toolchain'
34 changes: 34 additions & 0 deletions easybuild/easyconfigs/n/NVHPC/NVHPC-25.1.eb
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
# EasyBuild easyconfig (Python syntax) for the NVHPC 25.1 toolchain bundle.
# No -CUDA versionsuffix: this variant relies on the CUDA toolkit bundled
# inside the NVIDIA HPC SDK tarball (see the notes near the bottom).
name = 'NVHPC'
version = '25.1'

homepage = 'https://developer.nvidia.com/hpc-sdk/'
description = """Complete toolchain based on NVIDIA HPC SDK. Includes C, C++ and FORTRAN
compilers (nvidia-compilers), an MPI implementation based on OpenMPI (NVHPCX)
and math libraries based on OpenBLAS and ScaLAPACK."""

# built on top of the system toolchain: the SDK ships pre-built binaries
toolchain = SYSTEM

# By downloading, you accept the HPC SDK Software License Agreement
# https://docs.nvidia.com/hpc-sdk/eula/index.html
# '%%(...)s' escapes EasyBuild templating; the bare %s is filled in with the CPU arch below
local_tarball_tmpl = 'nvhpc_2025_%%(version_major)s%%(version_minor)s_Linux_%s_cuda_multi.tar.gz'
source_urls = ['https://developer.download.nvidia.com/hpc-sdk/%(version)s/']
sources = [local_tarball_tmpl % '%(arch)s']
# per-architecture SHA256 checksums, keyed by the arch-specific tarball name
checksums = [
{
local_tarball_tmpl % 'aarch64':
'0e1d694d54d44559155024d5bab4ca6764eba52d3f27b89f5c252416976e0360',
local_tarball_tmpl % 'x86_64':
'0813791f8363f4c493db7891b00396ce522cb73910279b8f18a440aedda6727c',
}
]

# the actual compilers come from the matching nvidia-compilers easyconfig
dependencies = [
('nvidia-compilers', version),
]

# NVHPC needs CUDA to work. This easyconfig uses CUDA bundled in NVHPC
# The default CUDA version used by NVHPC is the one defined in nvidia-compilers

# The default Compute Capability used by NVHPC is the one defined in nvidia-compilers (if any)

moduleclass = 'toolchain'
57 changes: 10 additions & 47 deletions easybuild/easyconfigs/n/NVHPC/NVHPC-25.3-CUDA-12.8.0.eb
Original file line number Diff line number Diff line change
Expand Up @@ -3,14 +3,15 @@ version = '25.3'
versionsuffix = '-CUDA-%(cudaver)s'

homepage = 'https://developer.nvidia.com/hpc-sdk/'
description = """C, C++ and Fortran compilers included with the NVIDIA HPC SDK (previously: PGI)"""
description = """Complete toolchain based on NVIDIA HPC SDK. Includes C, C++ and FORTRAN
compilers (nvidia-compilers), an MPI implementation based on OpenMPI (NVHPCX)
and math libraries based on OpenBLAS and ScaLAPACK."""

toolchain = SYSTEM

local_tarball_tmpl = 'nvhpc_2025_%%(version_major)s%%(version_minor)s_Linux_%s_cuda_multi.tar.gz'
# By downloading, you accept the HPC SDK Software License Agreement
# https://docs.nvidia.com/hpc-sdk/eula/index.html
# accept_eula = True
local_tarball_tmpl = 'nvhpc_2025_%%(version_major)s%%(version_minor)s_Linux_%s_cuda_multi.tar.gz'
source_urls = ['https://developer.download.nvidia.com/hpc-sdk/%(version)s/']
sources = [local_tarball_tmpl % '%(arch)s']
checksums = [
Expand All @@ -22,52 +23,14 @@ checksums = [
}
]

local_gccver = '14.2.0'
dependencies = [
('GCCcore', local_gccver),
('binutils', '2.42', '', ('GCCcore', local_gccver)),
# This is necessary to avoid cases where just libnuma.so.1 is present in the system and -lnuma fails
('numactl', '2.0.19', '', ('GCCcore', local_gccver)),
('CUDA', '12.8.0', '', SYSTEM),
('CUDA', '12.8.0'),
('nvidia-compilers', version, versionsuffix),
]

module_add_cuda = False

# specify default CUDA version that should be used by NVHPC
# should match one of the CUDA versions that are included with this NVHPC version
# (see install_components/Linux_x86_64/$version/cuda/) where $version is the NVHPC version
# this version can be tweaked from the EasyBuild command line with
# --try-amend=default_cuda_version="11.0" (for example)
default_cuda_version = '%(cudaver)s'
# NVHPC needs CUDA to work. This easyconfig uses external CUDA from EasyBuild.
# The default CUDA version used by NVHPC will be the version of loaded CUDA module

# NVHPC EasyBlock supports some features, which can be set via CLI or this easyconfig.
# The following list gives examples for the easyconfig
#
# NVHPC needs CUDA to work. Two options are available: 1) Use NVHPC-bundled CUDA, 2) use system CUDA
# 1) Bundled CUDA
# If no easybuild dependency to CUDA is present, the bundled CUDA is taken. A version needs to be specified with
# default_cuda_version = "11.0"
# in this easyconfig file; alternatively, it can be specified through the command line during installation with
# --try-amend=default_cuda_version="10.2"
# 2) CUDA provided via EasyBuild
# Use CUDA as a dependency, for example
# dependencies = [('CUDA', '11.5.0')]
# The parameter default_cuda_version still can be set as above.
# If not set, it will be deduced from the CUDA module (via $EBVERSIONCUDA)
#
# Define a NVHPC-default Compute Capability
# cuda_compute_capabilities = "8.0"
# Can also be specified on the EasyBuild command line via --cuda-compute-capabilities=8.0
# Only single values supported, not lists of values!
#
# Options to add/remove things to/from environment module (defaults shown)
# module_byo_compilers = False # Remove compilers from PATH (Bring-your-own compilers)
# module_nvhpc_own_mpi = False # Add NVHPC's own pre-compiled OpenMPI
# module_add_math_libs = False # Add NVHPC's math libraries (which should be there from CUDA anyway)
# module_add_profilers = False # Add NVHPC's NVIDIA Profilers
# module_add_nccl = False # Add NVHPC's NCCL library
# module_add_nvshmem = False # Add NVHPC's NVSHMEM library
# module_add_cuda = False # Add NVHPC's bundled CUDA
# The default Compute Capability used by NVHPC is the one defined in nvidia-compilers (if any)

# this bundle serves as a compiler-only toolchain, so it should be marked as compiler (important for HMNS)
moduleclass = 'compiler'
moduleclass = 'toolchain'
34 changes: 34 additions & 0 deletions easybuild/easyconfigs/n/NVHPC/NVHPC-25.3.eb
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
# EasyBuild easyconfig (Python syntax) for the NVHPC 25.3 toolchain bundle.
# No -CUDA versionsuffix: this variant relies on the CUDA toolkit bundled
# inside the NVIDIA HPC SDK tarball (see the notes near the bottom).
name = 'NVHPC'
version = '25.3'

homepage = 'https://developer.nvidia.com/hpc-sdk/'
description = """Complete toolchain based on NVIDIA HPC SDK. Includes C, C++ and FORTRAN
compilers (nvidia-compilers), an MPI implementation based on OpenMPI (NVHPCX)
and math libraries based on OpenBLAS and ScaLAPACK."""

# built on top of the system toolchain: the SDK ships pre-built binaries
toolchain = SYSTEM

# By downloading, you accept the HPC SDK Software License Agreement
# https://docs.nvidia.com/hpc-sdk/eula/index.html
# '%%(...)s' escapes EasyBuild templating; the bare %s is filled in with the CPU arch below
local_tarball_tmpl = 'nvhpc_2025_%%(version_major)s%%(version_minor)s_Linux_%s_cuda_multi.tar.gz'
source_urls = ['https://developer.download.nvidia.com/hpc-sdk/%(version)s/']
sources = [local_tarball_tmpl % '%(arch)s']
# per-architecture SHA256 checksums, keyed by the arch-specific tarball name
checksums = [
{
local_tarball_tmpl % 'aarch64':
'a2b86cf5141c0a9b0925999521693981451a8d2403367c36c46238163be6f2bb',
local_tarball_tmpl % 'x86_64':
'e2b2c911478a5db6a15d1fd258a8c4004dbfccf6f32f4132fe142a24fb7e6f8f',
}
]

# the actual compilers come from the matching nvidia-compilers easyconfig
dependencies = [
('nvidia-compilers', version),
]

# NVHPC needs CUDA to work. This easyconfig uses CUDA bundled in NVHPC
# The default CUDA version used by NVHPC is the one defined in nvidia-compilers

# The default Compute Capability used by NVHPC is the one defined in nvidia-compilers (if any)

moduleclass = 'toolchain'
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
# EasyBuild easyconfig for nvidia-compilers 25.1 using an external,
# EasyBuild-provided CUDA module (hence the -CUDA versionsuffix).
name = 'nvidia-compilers'
version = '25.1'
versionsuffix = '-CUDA-%(cudaver)s'

homepage = 'https://developer.nvidia.com/hpc-sdk/'
description = "C, C++ and Fortran compilers included with the NVIDIA HPC SDK"

# built on top of the system toolchain: the SDK ships pre-built binaries
toolchain = SYSTEM

# By downloading, you accept the HPC SDK Software License Agreement
# https://docs.nvidia.com/hpc-sdk/eula/index.html
# '%%(...)s' escapes EasyBuild templating; the bare %s is filled in with the CPU arch below
local_tarball_tmpl = 'nvhpc_2025_%%(version_major)s%%(version_minor)s_Linux_%s_cuda_multi.tar.gz'
source_urls = ['https://developer.download.nvidia.com/hpc-sdk/%(version)s/']
sources = [local_tarball_tmpl % '%(arch)s']
# per-architecture SHA256 checksums, keyed by the arch-specific tarball name
checksums = [
{
local_tarball_tmpl % 'aarch64':
'0e1d694d54d44559155024d5bab4ca6764eba52d3f27b89f5c252416976e0360',
local_tarball_tmpl % 'x86_64':
'0813791f8363f4c493db7891b00396ce522cb73910279b8f18a440aedda6727c',
}
]

# GCCcore/binutils pairing used for the GCC-compatibility back-end
local_gccver = '13.3.0'
dependencies = [
('CUDA', '12.6.0'),
('GCCcore', local_gccver),
('binutils', '2.42', '', ('GCCcore', local_gccver)),
# This is necessary to avoid cases where just libnuma.so.1 is present in the system and -lnuma fails
('numactl', '2.0.18', '', ('GCCcore', local_gccver)),
]

# nvidia-compilers (NVHPC) needs CUDA to work. This easyconfig uses external CUDA from EasyBuild.
# The default CUDA version used by nvidia-compilers (NVHPC) will be the version of loaded CUDA module.

# (optional) Define default CUDA Compute Capability used by all easyconfigs in this toolchain
# cuda_compute_capabilities = ['5.2', '6.0', '7.0', '7.5', '8.0', '8.6', '9.0']

# this bundle serves as a compiler-only toolchain, so it should be marked as compiler (important for HMNS)
moduleclass = 'compiler'
41 changes: 41 additions & 0 deletions easybuild/easyconfigs/n/nvidia-compilers/nvidia-compilers-25.1.eb
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
# EasyBuild easyconfig for nvidia-compilers 25.1 using the CUDA toolkit
# bundled inside the NVIDIA HPC SDK (no -CUDA versionsuffix, no CUDA dependency).
name = 'nvidia-compilers'
version = '25.1'

homepage = 'https://developer.nvidia.com/hpc-sdk/'
description = "C, C++ and Fortran compilers included with the NVIDIA HPC SDK"

# built on top of the system toolchain: the SDK ships pre-built binaries
toolchain = SYSTEM

# By downloading, you accept the HPC SDK Software License Agreement
# https://docs.nvidia.com/hpc-sdk/eula/index.html
# '%%(...)s' escapes EasyBuild templating; the bare %s is filled in with the CPU arch below
local_tarball_tmpl = 'nvhpc_2025_%%(version_major)s%%(version_minor)s_Linux_%s_cuda_multi.tar.gz'
source_urls = ['https://developer.download.nvidia.com/hpc-sdk/%(version)s/']
sources = [local_tarball_tmpl % '%(arch)s']
# per-architecture SHA256 checksums, keyed by the arch-specific tarball name
checksums = [
{
local_tarball_tmpl % 'aarch64':
'0e1d694d54d44559155024d5bab4ca6764eba52d3f27b89f5c252416976e0360',
local_tarball_tmpl % 'x86_64':
'0813791f8363f4c493db7891b00396ce522cb73910279b8f18a440aedda6727c',
}
]

# GCCcore/binutils pairing used for the GCC-compatibility back-end
local_gccver = '13.3.0'
dependencies = [
('GCCcore', local_gccver),
('binutils', '2.42', '', ('GCCcore', local_gccver)),
# This is necessary to avoid cases where just libnuma.so.1 is present in the system and -lnuma fails
('numactl', '2.0.18', '', ('GCCcore', local_gccver)),
]

# nvidia-compilers (NVHPC) needs CUDA to work. This easyconfig uses CUDA bundled in NVHPC
# Specify default CUDA version that should be used by nvidia-compilers (NVHPC),
# it should match one of the CUDA versions that are included with this version of NVHPC
# (see install_components/Linux_x86_64/*/cuda/)
default_cuda_version = '12.6'

# (optional) Define default CUDA Compute Capability used by all easyconfigs in this toolchain
# cuda_compute_capabilities = ['5.2', '6.0', '7.0', '7.5', '8.0', '8.6', '9.0']

# this bundle serves as a compiler-only toolchain, so it should be marked as compiler (important for HMNS)
moduleclass = 'compiler'
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
# EasyBuild easyconfig for nvidia-compilers 25.3 using an external,
# EasyBuild-provided CUDA module (hence the -CUDA versionsuffix).
name = 'nvidia-compilers'
version = '25.3'
versionsuffix = '-CUDA-%(cudaver)s'

homepage = 'https://developer.nvidia.com/hpc-sdk/'
description = "C, C++ and Fortran compilers included with the NVIDIA HPC SDK"

# built on top of the system toolchain: the SDK ships pre-built binaries
toolchain = SYSTEM

# By downloading, you accept the HPC SDK Software License Agreement
# https://docs.nvidia.com/hpc-sdk/eula/index.html
# '%%(...)s' escapes EasyBuild templating; the bare %s is filled in with the CPU arch below
local_tarball_tmpl = 'nvhpc_2025_%%(version_major)s%%(version_minor)s_Linux_%s_cuda_multi.tar.gz'
source_urls = ['https://developer.download.nvidia.com/hpc-sdk/%(version)s/']
sources = [local_tarball_tmpl % '%(arch)s']
# per-architecture SHA256 checksums, keyed by the arch-specific tarball name
checksums = [
    {
        local_tarball_tmpl % 'aarch64':
            'a2b86cf5141c0a9b0925999521693981451a8d2403367c36c46238163be6f2bb',
        local_tarball_tmpl % 'x86_64':
            'e2b2c911478a5db6a15d1fd258a8c4004dbfccf6f32f4132fe142a24fb7e6f8f',
    }
]

# GCCcore/binutils pairing used for the GCC-compatibility back-end
local_gccver = '14.2.0'
dependencies = [
    ('CUDA', '12.8.0'),
    ('GCCcore', local_gccver),
    ('binutils', '2.42', '', ('GCCcore', local_gccver)),
    # This is necessary to avoid cases where just libnuma.so.1 is present in the system and -lnuma fails
    ('numactl', '2.0.19', '', ('GCCcore', local_gccver)),
]

# nvidia-compilers (NVHPC) needs CUDA to work. This easyconfig uses external CUDA from EasyBuild.
# The default CUDA version used by nvidia-compilers (NVHPC) will be the version of loaded CUDA module.

# (optional) Define default CUDA Compute Capability used by all easyconfigs in this toolchain
# cuda_compute_capabilities = ['5.2', '6.0', '7.0', '7.5', '8.0', '8.6', '9.0']

# this bundle serves as a compiler-only toolchain, so it should be marked as compiler (important for HMNS)
moduleclass = 'compiler'
42 changes: 42 additions & 0 deletions easybuild/easyconfigs/n/nvidia-compilers/nvidia-compilers-25.3.eb
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
# EasyBuild easyconfig for nvidia-compilers 25.3 using the CUDA toolkit
# bundled inside the NVIDIA HPC SDK (no -CUDA versionsuffix, no CUDA dependency).
name = 'nvidia-compilers'
version = '25.3'

homepage = 'https://developer.nvidia.com/hpc-sdk/'
description = "C, C++ and Fortran compilers included with the NVIDIA HPC SDK"

# built on top of the system toolchain: the SDK ships pre-built binaries
toolchain = SYSTEM

# By downloading, you accept the HPC SDK Software License Agreement
# https://docs.nvidia.com/hpc-sdk/eula/index.html
# '%%(...)s' escapes EasyBuild templating; the bare %s is filled in with the CPU arch below
local_tarball_tmpl = 'nvhpc_2025_%%(version_major)s%%(version_minor)s_Linux_%s_cuda_multi.tar.gz'
source_urls = ['https://developer.download.nvidia.com/hpc-sdk/%(version)s/']
sources = [local_tarball_tmpl % '%(arch)s']
# per-architecture SHA256 checksums, keyed by the arch-specific tarball name
checksums = [
    {
        local_tarball_tmpl % 'aarch64':
            'a2b86cf5141c0a9b0925999521693981451a8d2403367c36c46238163be6f2bb',
        local_tarball_tmpl % 'x86_64':
            'e2b2c911478a5db6a15d1fd258a8c4004dbfccf6f32f4132fe142a24fb7e6f8f',
    }
]

# GCCcore/binutils pairing used for the GCC-compatibility back-end
local_gccver = '14.2.0'
dependencies = [
    ('GCCcore', local_gccver),
    ('binutils', '2.42', '', ('GCCcore', local_gccver)),
    # This is necessary to avoid cases where just libnuma.so.1 is present in the system and -lnuma fails
    ('numactl', '2.0.19', '', ('GCCcore', local_gccver)),
]

# nvidia-compilers (NVHPC) needs CUDA to work. This easyconfig uses CUDA bundled in NVHPC
# Specify default CUDA version that should be used by nvidia-compilers (NVHPC),
# it should match one of the CUDA versions that are included with this version of NVHPC
# (see install_components/Linux_x86_64/*/cuda/)
default_cuda_version = '12.8'

# (optional) Define default CUDA Compute Capability used by all easyconfigs in this toolchain
# cuda_compute_capabilities = ['5.2', '6.0', '7.0', '7.5', '8.0', '8.6', '9.0']

# this bundle serves as a compiler-only toolchain, so it should be marked as compiler (important for HMNS)
moduleclass = 'compiler'
Loading
Loading