Mirror of https://code.it4i.cz/sccs/easyconfigs-it4i.git (synced 2025-04-08 07:52:11 +01:00)
new file: n/NVHPC/NVHPC-21.1-CUDA-11.0-OpenMPI-4.0.5-UCX-1.9.0.eb
new file: n/NVHPC/NVHPC-21.1.eb
new file: "n/NVHPC/\\"
commit 1a4ee791ac (parent e785c779a6)
n/NVHPC/NVHPC-21.1-CUDA-11.0-OpenMPI-4.0.5-UCX-1.9.0.eb (new file, 87 lines)
@@ -0,0 +1,87 @@
# IT4Innovations 2021
# LK
# adjust mpirun - LD_PRELOAD=$EBROOTUCX/lib/libucp.so.0:$EBROOTUCX/lib/libucs.so.0

name = 'NVHPC'
version = '21.1'
versionsuffix = '-CUDA-11.0-OpenMPI-4.0.5-UCX-1.9.0'

homepage = 'https://developer.nvidia.com/hpc-sdk/'
description = """C, C++ and Fortran compilers included with the NVIDIA HPC SDK (previously: PGI)"""

toolchain = SYSTEM

# NVHPC can be downloaded freely from NVIDIA's website at https://developer.nvidia.com/hpc-sdk
# It requires accepting the HPC SDK Software License Agreement (https://docs.nvidia.com/hpc-sdk/eula/index.html)
# If you accept the License Agreement, you may also uncomment the following line to automatically download the sources
source_urls = ['https://developer.download.nvidia.com/hpc-sdk/%(version)s/']
sources = ['nvhpc_2021_%(version_major)s%(version_minor)s_Linux_x86_64_cuda_multi.tar.gz']
checksums = ['9dd990249af8460dafb10c5951ee259c']
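# (note: the 32-character checksum above is an MD5 digest; EasyBuild infers the
# checksum type from the length of a plain checksum string)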

local_gccver = '10.2.0'
dependencies = [
    ('GCCcore', local_gccver),
    ('binutils', '2.35', '', ('GCCcore', local_gccver)),
    # This is necessary to avoid cases where just libnuma.so.1 is present in the system and -lnuma fails
    ('numactl', '2.0.13', '', ('GCCcore', local_gccver)),
    ('UCX', '1.9.0', '-CUDA-11.1.1', ('GCCcore', local_gccver)),
]

# specify default CUDA version that should be used by NVHPC
# should match one of the CUDA versions that are included with this NVHPC version
# (see install_components/Linux_x86_64/21.1/cuda/ for the versions bundled with this release)
# this version can be tweaked from the EasyBuild command line with --try-amend=default_cuda_version="10.2" (for example)
default_cuda_version = '11.0'  # Driver Version: 450.80.02
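# For example (hypothetical invocation, adjust the easyconfig name to your setup):
#   eb NVHPC-21.1-CUDA-11.0-OpenMPI-4.0.5-UCX-1.9.0.eb --try-amend=default_cuda_version="10.2"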

# NVHPC EasyBlock supports some features, which can be set via CLI or this easyconfig.
# The following list gives examples for the easyconfig
#
# NVHPC needs CUDA to work. Two options are available: 1) use the NVHPC-bundled CUDA, 2) use CUDA provided via EasyBuild
# 1) Bundled CUDA
#    If no EasyBuild dependency on CUDA is present, the bundled CUDA is used. A version needs to be specified with
#      default_cuda_version = "11.0"
#    in this easyconfig file; alternatively, it can be specified through the command line during installation with
#      --try-amend=default_cuda_version="10.2"
# 2) CUDA provided via EasyBuild
#    Use CUDAcore as a dependency, for example
#      dependencies = [('CUDAcore', '11.0.2')]
#    The parameter default_cuda_version can still be set as above.
#    If not set, it will be deduced from the CUDA module (via $EBVERSIONCUDA)
#
# Define an NVHPC-default compute capability
#   cuda_compute_capabilities = "7.0"
# This can also be specified on the EasyBuild command line via --cuda-compute-capabilities=7.0
# Only single values are supported, not lists of values!
cuda_compute_capabilities = '7.0'  # V100 GPU

# Options to add/remove things to/from environment module (defaults shown)
# module_byo_compilers = False   # Remove compilers from PATH (Bring-your-own compilers)
# module_nvhpc_own_mpi = False   # Add NVHPC's own pre-compiled OpenMPI
# module_add_math_libs = False   # Add NVHPC's math libraries (which should be there from CUDA anyway)
# module_add_profilers = False   # Add NVHPC's NVIDIA Profilers
# module_add_nccl = False        # Add NVHPC's NCCL library
# module_add_nvshmem = False     # Add NVHPC's NVSHMEM library
# module_add_cuda = False        # Add NVHPC's bundled CUDA

local_openmpi_path = "Linux_x86_64/%(version)s/comm_libs/openmpi4/openmpi-4.0.5"

postinstallcmds = ['sed -i "s|\$MY_DIR|LD_PRELOAD=\$EBROOTUCX/lib/libucp.so.0:\$EBROOTUCX/lib/libucs.so.0 \$MY_DIR|" %(installdir)s/Linux_x86_64/%(version)s/comm_libs/openmpi4/openmpi-4.0.5/bin/mpirun']
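# The bundled mpirun is a shell wrapper that hands off to the real launcher via
# a $MY_DIR variable; after the sed edit the wrapper is expected to read roughly
# as follows (a sketch; the exact wrapper layout is an assumption about NVHPC's
# OpenMPI build):
#   LD_PRELOAD=$EBROOTUCX/lib/libucp.so.0:$EBROOTUCX/lib/libucs.so.0 $MY_DIR/...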

modextrapaths = {
    'PATH': "%s/bin" % local_openmpi_path,
    'LD_LIBRARY_PATH': "%s/lib" % local_openmpi_path,
    'LIBRARY_PATH': "%s/lib" % local_openmpi_path,
    'CPATH': "%s/include" % local_openmpi_path,
    'MANPATH': "%s/share/man" % local_openmpi_path,
    'PKG_CONFIG_PATH': "%s/lib/pkgconfig" % local_openmpi_path,
}
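# (relative paths in modextrapaths are prepended with the installation directory,
# so these entries expose the bundled OpenMPI build through the module)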

modextravars = {
    'OMPI_MCA_btl_openib_if_include': 'mlx5_0',
    'OMPI_MCA_btl_tcp_if_include': '10.33.4.0/24',
    'OMPI_MCA_orte_base_help_aggregate': '0',
}
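# The OMPI_MCA_* variables pin Open MPI to the mlx5_0 InfiniBand adapter and the
# 10.33.4.0/24 TCP network, and disable help-message aggregation so each rank
# reports errors individually (site-specific defaults for this cluster).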

# this bundle serves as a compiler-only toolchain, so it should be marked as compiler (important for HMNS)
moduleclass = 'compiler'
n/NVHPC/NVHPC-21.1.eb (new file, 70 lines)
@@ -0,0 +1,70 @@
# IT4Innovations 2021
# LK

name = 'NVHPC'
version = '21.1'

homepage = 'https://developer.nvidia.com/hpc-sdk/'
description = """C, C++ and Fortran compilers included with the NVIDIA HPC SDK (previously: PGI)"""

toolchain = SYSTEM

# By downloading, you accept the HPC SDK Software License Agreement (https://docs.nvidia.com/hpc-sdk/eula/index.html)
accept_eula = True
source_urls = ['https://developer.download.nvidia.com/hpc-sdk/%(version)s/']
local_tarball_tmpl = 'nvhpc_2021_%%(version_major)s%%(version_minor)s_Linux_%s_cuda_multi.tar.gz'
sources = [local_tarball_tmpl % '%(arch)s']
checksums = [
    {
        local_tarball_tmpl % 'x86_64': '9dd990249af8460dafb10c5951ee259c',
    }
]
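# Note on the templating: the doubled %% survives the Python % formatting applied
# to local_tarball_tmpl, leaving %(version_major)s / %(version_minor)s for
# EasyBuild to resolve; for 21.1 on x86_64 this should yield
# nvhpc_2021_211_Linux_x86_64_cuda_multi.tar.gz.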

local_gccver = '9.3.0'
dependencies = [
    ('GCCcore', local_gccver),
    ('binutils', '2.34', '', ('GCCcore', local_gccver)),
    # This is necessary to avoid cases where just libnuma.so.1 is present in the system and -lnuma fails
    ('numactl', '2.0.13', '', ('GCCcore', local_gccver)),
]

# specify default CUDA version that should be used by NVHPC
# should match one of the CUDA versions that are included with this NVHPC version
# (see install_components/Linux_x86_64/21.1/cuda/ for the versions bundled with this release)
# this version can be tweaked from the EasyBuild command line with
# --try-amend=default_cuda_version="10.2" (for example)
default_cuda_version = '11.0'

# NVHPC EasyBlock supports some features, which can be set via CLI or this easyconfig.
# The following list gives examples for the easyconfig
#
# NVHPC needs CUDA to work. Two options are available: 1) use the NVHPC-bundled CUDA, 2) use CUDA provided via EasyBuild
# 1) Bundled CUDA
#    If no EasyBuild dependency on CUDA is present, the bundled CUDA is used. A version needs to be specified with
#      default_cuda_version = "11.0"
#    in this easyconfig file; alternatively, it can be specified through the command line during installation with
#      --try-amend=default_cuda_version="10.2"
# 2) CUDA provided via EasyBuild
#    Use CUDAcore as a dependency, for example
#      dependencies = [('CUDAcore', '11.0.2')]
#    The parameter default_cuda_version can still be set as above.
#    If not set, it will be deduced from the CUDA module (via $EBVERSIONCUDA)
#
# Define an NVHPC-default compute capability
#   cuda_compute_capabilities = "8.0"
# This can also be specified on the EasyBuild command line via --cuda-compute-capabilities=8.0
# Only single values are supported, not lists of values!
#
# Options to add/remove things to/from environment module (defaults shown)
# module_byo_compilers = False   # Remove compilers from PATH (Bring-your-own compilers)
# module_nvhpc_own_mpi = False   # Add NVHPC's own pre-compiled OpenMPI
# module_add_math_libs = False   # Add NVHPC's math libraries (which should be there from CUDA anyway)
# module_add_profilers = False   # Add NVHPC's NVIDIA Profilers
# module_add_nccl = False        # Add NVHPC's NCCL library
# module_add_nvshmem = False     # Add NVHPC's NVSHMEM library
# module_add_cuda = False        # Add NVHPC's bundled CUDA

# this bundle serves as a compiler-only toolchain, so it should be marked as compiler (important for HMNS)
moduleclass = 'compiler'
n/NVHPC/\ (new file, 72 lines)
@@ -0,0 +1,72 @@
# IT4Innovations 2021
# LK

name = 'NVHPC'
version = '20.11'

homepage = 'https://developer.nvidia.com/hpc-sdk/'
description = """C, C++ and Fortran compilers included with the NVIDIA HPC SDK (previously: PGI)"""

toolchain = SYSTEM

# By downloading, you accept the HPC SDK Software License Agreement (https://docs.nvidia.com/hpc-sdk/eula/index.html)
accept_eula = True
source_urls = ['https://developer.download.nvidia.com/hpc-sdk/%(version)s/']
local_tarball_tmpl = 'nvhpc_2020_%%(version_major)s%%(version_minor)s_Linux_%s_cuda_multi.tar.gz'
sources = [local_tarball_tmpl % '%(arch)s']
checksums = [
    {
        local_tarball_tmpl % 'x86_64':
            'c80fc26e5ba586696f7030f03054c1aaca0752a891c7923faf47eb23b66857ec',
        local_tarball_tmpl % 'ppc64le':
            '99e5a5437e82f3914e0fe81feb761a5b599a3fe8b31f3c2cac8ae47e8cdc7b0f',
    }
]
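# (one checksum dict covers both architectures: the key matching the
# %(arch)s-resolved tarball name is the SHA256 digest EasyBuild verifies against)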

local_gccver = '9.3.0'
dependencies = [
    ('GCCcore', local_gccver),
    ('binutils', '2.34', '', ('GCCcore', local_gccver)),
    # This is necessary to avoid cases where just libnuma.so.1 is present in the system and -lnuma fails
    ('numactl', '2.0.13', '', ('GCCcore', local_gccver)),
]

# specify default CUDA version that should be used by NVHPC
# should match one of the CUDA versions that are included with this NVHPC version
# (see install_components/Linux_x86_64/20.11/cuda/)
# for NVHPC 20.11, those are: 11.1, 11.0, 10.2;
# this version can be tweaked from the EasyBuild command line with
# --try-amend=default_cuda_version="10.2" (for example)
default_cuda_version = '11.0'

# NVHPC EasyBlock supports some features, which can be set via CLI or this easyconfig.
# The following list gives examples for the easyconfig
#
# NVHPC needs CUDA to work. Two options are available: 1) use the NVHPC-bundled CUDA, 2) use CUDA provided via EasyBuild
# 1) Bundled CUDA
#    If no EasyBuild dependency on CUDA is present, the bundled CUDA is used. A version needs to be specified with
#      default_cuda_version = "11.0"
#    in this easyconfig file; alternatively, it can be specified through the command line during installation with
#      --try-amend=default_cuda_version="10.2"
# 2) CUDA provided via EasyBuild
#    Use CUDAcore as a dependency, for example
#      dependencies = [('CUDAcore', '11.0.2')]
#    The parameter default_cuda_version can still be set as above.
#    If not set, it will be deduced from the CUDA module (via $EBVERSIONCUDA)
#
# Define an NVHPC-default compute capability
#   cuda_compute_capabilities = "8.0"
# This can also be specified on the EasyBuild command line via --cuda-compute-capabilities=8.0
# Only single values are supported, not lists of values!
#
# Options to add/remove things to/from environment module (defaults shown)
# module_byo_compilers = False   # Remove compilers from PATH (Bring-your-own compilers)
# module_nvhpc_own_mpi = False   # Add NVHPC's own pre-compiled OpenMPI
# module_add_math_libs = False   # Add NVHPC's math libraries (which should be there from CUDA anyway)
# module_add_profilers = False   # Add NVHPC's NVIDIA Profilers
# module_add_nccl = False        # Add NVHPC's NCCL library
# module_add_nvshmem = False     # Add NVHPC's NVSHMEM library
# module_add_cuda = False        # Add NVHPC's bundled CUDA

# this bundle serves as a compiler-only toolchain, so it should be marked as compiler (important for HMNS)
moduleclass = 'compiler'