new file:   n/NVHPC/NVHPC-22.2-test.eb
new file:   n/NVHPC/NVHPC-23.5.eb
new file:   s/Spack/Spack-0.20.0.eb
Lukas Krupcik 2023-06-08 14:50:40 +02:00
parent 899b6c8a63
commit 82ee550f5c
3 changed files with 163 additions and 0 deletions

n/NVHPC/NVHPC-22.2-test.eb Normal file

@@ -0,0 +1,67 @@
# IT4Innovations
# LK 2022
name = 'NVHPC'
version = '22.2'
versionsuffix = '-test'
homepage = 'https://developer.nvidia.com/hpc-sdk/'
description = """C, C++ and Fortran compilers included with the NVIDIA HPC SDK (previously: PGI)"""
toolchain = SYSTEM
# By downloading, you accept the HPC SDK Software License Agreement (https://docs.nvidia.com/hpc-sdk/eula/index.html)
accept_eula = True
source_urls = ['https://developer.download.nvidia.com/hpc-sdk/%(version)s/']
local_tarball_tmpl = 'nvhpc_2022_%%(version_major)s%%(version_minor)s_Linux_%s_cuda_multi.tar.gz'
sources = [local_tarball_tmpl % '%(arch)s']
checksums = ['8dfb4007d6912b2722946358ac69409592c1f03426d81971ffbcb6fc5fea2cb8']
local_gccver = '10.2.0'
dependencies = [
('GCCcore', local_gccver),
('binutils', '2.35', '', ('GCCcore', local_gccver)),
# This is necessary to avoid cases where just libnuma.so.1 is present in the system and -lnuma fails
('numactl', '2.0.13', '', ('GCCcore', local_gccver))
]
# specify default CUDA version that should be used by NVHPC
# should match one of the CUDA versions that are included with this NVHPC version
# (the bundled CUDA versions can be found under install_components/Linux_x86_64/22.2/cuda/ in the unpacked tarball);
# this version can be tweaked from the EasyBuild command line with
# --try-amend=default_cuda_version="10.2" (for example)
default_cuda_version = '11.6'
# NVHPC EasyBlock supports some features, which can be set via CLI or this easyconfig.
# The following list gives examples for the easyconfig
#
# NVHPC needs CUDA to work. Two options are available: 1) Use NVHPC-bundled CUDA, 2) use system CUDA
# 1) Bundled CUDA
# If no EasyBuild dependency on CUDA is present, the bundled CUDA is used. A version needs to be specified with
# default_cuda_version = "11.0"
# in this easyconfig file; alternatively, it can be specified through the command line during installation with
# --try-amend=default_cuda_version="10.2"
# 2) CUDA provided via EasyBuild
# Use CUDAcore as a dependency, for example
# dependencies = [('CUDAcore', '11.0.2')]
# The parameter default_cuda_version still can be set as above.
# If not set, it will be deduced from the CUDA module (via $EBVERSIONCUDA)
#
# Define an NVHPC-default Compute Capability
# cuda_compute_capabilities = "8.0"
cuda_compute_capabilities = '7.0'  # V100 GPU
# Can also be specified on the EasyBuild command line via --cuda-compute-capabilities=8.0
# Only single values supported, not lists of values!
#
# Options to add/remove things to/from environment module (defaults shown)
# module_byo_compilers = False  # Remove compilers from PATH (Bring-your-own compilers)
# module_nvhpc_own_mpi = False  # Add NVHPC's own pre-compiled OpenMPI
# module_add_math_libs = False  # Add NVHPC's math libraries (which should be there from CUDA anyway)
# module_add_profilers = False  # Add NVHPC's NVIDIA Profilers
# module_add_nccl = False  # Add NVHPC's NCCL library
# module_add_nvshmem = False  # Add NVHPC's NVSHMEM library
# module_add_cuda = False  # Add NVHPC's bundled CUDA
# this bundle serves as a compiler-only toolchain, so it should be marked as compiler (important for HMNS)
moduleclass = 'compiler'
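
A minimal install sketch for this easyconfig, assuming EasyBuild's eb command is on the PATH and the file is in the current directory or robot search path; the overrides simply reuse the default_cuda_version and cuda_compute_capabilities options described in the comments above:

    eb NVHPC-22.2-test.eb --robot
    # or, overriding the CUDA defaults from the command line:
    eb NVHPC-22.2-test.eb --robot --try-amend=default_cuda_version="11.6" --cuda-compute-capabilities=7.0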

n/NVHPC/NVHPC-23.5.eb Normal file

@@ -0,0 +1,66 @@
# IT4Innovations
# LK 2023
name = 'NVHPC'
version = '23.5'
homepage = 'https://developer.nvidia.com/hpc-sdk/'
description = """C, C++ and Fortran compilers included with the NVIDIA HPC SDK (previously: PGI)"""
toolchain = SYSTEM
# By downloading, you accept the HPC SDK Software License Agreement (https://docs.nvidia.com/hpc-sdk/eula/index.html)
accept_eula = True
source_urls = ['https://developer.download.nvidia.com/hpc-sdk/%(version)s/']
local_tarball_tmpl = 'nvhpc_2023_%%(version_major)s%%(version_minor)s_Linux_%s_cuda_multi.tar.gz'
sources = [local_tarball_tmpl % '%(arch)s']
checksums = ['071d7119006cb1d7ac22cb91338c20133a02d394efe14931dfa6f5d7dfa54c81']
local_gccver = '12.2.0'
dependencies = [
('GCCcore', local_gccver),
('binutils', '2.39', '', ('GCCcore', local_gccver)),
# This is necessary to avoid cases where just libnuma.so.1 is present in the system and -lnuma fails
('numactl', '2.0.16', '', ('GCCcore', local_gccver))
]
# specify default CUDA version that should be used by NVHPC
# should match one of the CUDA versions that are included with this NVHPC version
# (the bundled CUDA versions can be found under install_components/Linux_x86_64/23.5/cuda/ in the unpacked tarball);
# this version can be tweaked from the EasyBuild command line with
# --try-amend=default_cuda_version="10.2" (for example)
default_cuda_version = '12.0'
# NVHPC EasyBlock supports some features, which can be set via CLI or this easyconfig.
# The following list gives examples for the easyconfig
#
# NVHPC needs CUDA to work. Two options are available: 1) Use NVHPC-bundled CUDA, 2) use system CUDA
# 1) Bundled CUDA
# If no EasyBuild dependency on CUDA is present, the bundled CUDA is used. A version needs to be specified with
# default_cuda_version = "11.0"
# in this easyconfig file; alternatively, it can be specified through the command line during installation with
# --try-amend=default_cuda_version="10.2"
# 2) CUDA provided via EasyBuild
# Use CUDAcore as a dependency, for example
# dependencies = [('CUDAcore', '11.0.2')]
# The parameter default_cuda_version still can be set as above.
# If not set, it will be deduced from the CUDA module (via $EBVERSIONCUDA)
#
# Define an NVHPC-default Compute Capability
cuda_compute_capabilities = "8.0"
# cuda_compute_capabilities = '7.0'  # V100 GPU
# Can also be specified on the EasyBuild command line via --cuda-compute-capabilities=8.0
# Only single values supported, not lists of values!
#
# Options to add/remove things to/from environment module (defaults shown)
# module_byo_compilers = False  # Remove compilers from PATH (Bring-your-own compilers)
# module_nvhpc_own_mpi = False  # Add NVHPC's own pre-compiled OpenMPI
# module_add_math_libs = False  # Add NVHPC's math libraries (which should be there from CUDA anyway)
# module_add_profilers = False  # Add NVHPC's NVIDIA Profilers
# module_add_nccl = False  # Add NVHPC's NCCL library
# module_add_nvshmem = False  # Add NVHPC's NVSHMEM library
# module_add_cuda = False  # Add NVHPC's bundled CUDA
# this bundle serves as a compiler-only toolchain, so it should be marked as compiler (important for HMNS)
moduleclass = 'compiler'
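
Because the module is installed with moduleclass 'compiler', downstream easyconfigs can build on it as a compiler-only toolchain. A minimal, hypothetical sketch, assuming an NVHPC toolchain definition for this version is available in the EasyBuild installation; the package name, version, and URL below are made up for illustration:

    easyblock = 'ConfigureMake'
    name = 'example-app'          # hypothetical package, for illustration only
    version = '1.0.0'             # hypothetical version
    homepage = 'https://example.org'
    description = "Toy package built with NVHPC as a compiler-only toolchain"
    toolchain = {'name': 'NVHPC', 'version': '23.5'}
    source_urls = ['https://example.org/downloads/']
    sources = [SOURCE_TAR_GZ]
    moduleclass = 'tools'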

s/Spack/Spack-0.20.0.eb Normal file

@@ -0,0 +1,30 @@
# IT4Innovations
# LK 2023
easyblock = 'Tarball'
name = 'Spack'
version = '0.20.0'
homepage = 'https://spack.io/'
description = """Spack is a package manager for superlocal_computers, Linux, and macOS. It makes installing scientific
software easy. With Spack, you can build a package with multiple versions, configurations, platforms, and local_compilers,
and all of these builds can coexist on the same machine."""
toolchain = SYSTEM
source_urls = ['https://github.com/LLNL/spack/archive/']
sources = ['v%(version)s.tar.gz']
checksums = ['a189b4e8173eefdf76617445125b329d912f730767048846c38c8a2637396a7d']
sanity_check_paths = {
'files': ['bin/spack'],
'dirs': ['etc/spack/defaults', 'lib/spack', 'share/spack', 'var/spack'],
}
sanity_check_commands = [
"spack list",
"spack versions gcc",
]
moduleclass = 'devel'
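
A short post-install usage sketch; the module name Spack/0.20.0 is assumed from the name and version above, and the first two commands mirror the sanity checks in this easyconfig:

    module load Spack/0.20.0
    spack list              # list packages Spack can install
    spack versions gcc      # show gcc versions known to Spack
    spack install zlib      # hypothetical first install to exercise the setup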