new file:   c/CUDA/CUDA-11.0.2-GCC-9.3.0.eb
new file:   n/NVHPC/NVHPC-20.11-CUDA-11.0-OpenMPI-3.1.5.eb
new file:   n/NVHPC/NVHPC-20.11-CUDA-11.0-OpenMPI-4.0.5.eb
deleted:    c/CP2K/CP2K-7.1-intel-2020a.eb
Lukáš Krupčík 2021-01-05 12:24:04 +01:00
parent 0712d24af4
commit 93934e68ca
4 changed files with 182 additions and 41 deletions

c/CP2K/CP2K-7.1-intel-2020a.eb (deleted)
@@ -1,41 +0,0 @@
# IT4Innovations 2020
name = 'CP2K'
version = '7.1'
homepage = 'http://www.cp2k.org/'
description = """CP2K is a freely available (GPL) program, written in Fortran 95, to perform atomistic and molecular
simulations of solid state, liquid, molecular and biological systems. It provides a general framework for different
methods such as e.g. density functional theory (DFT) using a mixed Gaussian and plane waves approach (GPW), and
classical pair and many-body potentials. """
toolchain = {'name': 'intel', 'version': '2020a'}
toolchainopts = {'pic': True}
source_urls = ['https://github.com/cp2k/cp2k/releases/download/v%(version)s.0/']
sources = [SOURCELOWER_TAR_BZ2]
#patches = [
# 'CP2K-2.4.0-fix_compile_date_lastsvn.patch',
#]
dependencies = [
    ('Libint', '1.1.6'),
    ('libxc', '4.3.4'),
    ('libxsmm', '1.8.3'),
    ('FFTW', '3.3.8'),
    ('PLUMED', '2.4.0'),
]
builddependencies = [
    ('flex', '2.6.4', '', True),
    ('Bison', '3.0.5', '', True),
]
# values to use here are +1 those used for building Libint
# see https://github.com/cp2k/cp2k/blob/master/cp2k/tools/hfx_tools/libint_tools/README_LIBINT
extradflags = "-D__LIBINT_MAX_AM=6 -D__LIBDERIV_MAX_AM1=5"
# regression test reports failures
ignore_regtest_fails = True
moduleclass = 'chem'
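To make the "+1" relation in the comments above concrete, here is a small plain-Python sketch (not part of any easyconfig). The Libint limits of 5 and 4 are assumptions inferred from the flags used here, not values read from the actual Libint 1.1.6 easyconfig:

# Hypothetical illustration: derive CP2K's extradflags from the angular-momentum
# limits Libint was built with (assumed to be 5 and 4).
libint_max_am = 5
libderiv_max_am1 = 4
extradflags = "-D__LIBINT_MAX_AM=%d -D__LIBDERIV_MAX_AM1=%d" % (libint_max_am + 1, libderiv_max_am1 + 1)
print(extradflags)  # -D__LIBINT_MAX_AM=6 -D__LIBDERIV_MAX_AM1=5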

c/CUDA/CUDA-11.0.2-GCC-9.3.0.eb (new file)
@@ -0,0 +1,20 @@
easyblock = 'Bundle'
name = 'CUDA'
version = '11.0.2'
homepage = 'https://developer.nvidia.com/cuda-toolkit'
description = """CUDA (formerly Compute Unified Device Architecture) is a parallel
computing platform and programming model created by NVIDIA and implemented by the
graphics processing units (GPUs) that they produce. CUDA gives developers access
to the virtual instruction set and memory of the parallel computational elements in CUDA GPUs."""
toolchain = {'name': 'GCC', 'version': '9.3.0'}
dependencies = [
    ('CUDAcore', '11.0.2', '', True),
    ('Qt5', '5.14.1'),
]
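# derive this bundle's installation root ($EBROOT*) from the CUDAcore dependency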
altroot = 'CUDAcore'
moduleclass = 'system'

n/NVHPC/NVHPC-20.11-CUDA-11.0-OpenMPI-3.1.5.eb (new file)
@@ -0,0 +1,81 @@
# JH 2020
name = 'NVHPC'
version = '20.11'
versionsuffix = '-CUDA-11.0-OpenMPI-3.1.5'
homepage = 'https://developer.nvidia.com/hpc-sdk/'
description = """C, C++ and Fortran compilers included with the NVIDIA HPC SDK (previously: PGI)"""
toolchain = SYSTEM
# NVHPC can be downloaded freely from NVIDIA's website at https://developer.nvidia.com/hpc-sdk
# It requires accepting the HPC SDK Software License Agreement (https://docs.nvidia.com/hpc-sdk/eula/index.html)
# If you accept the License Agreement, the source_urls entry below allows EasyBuild to download the sources automatically
source_urls = ['https://developer.download.nvidia.com/hpc-sdk/%(version)s/']
sources = ['nvhpc_2020_%(version_major)s%(version_minor)s_Linux_x86_64_cuda_multi.tar.gz']
checksums = ['c80fc26e5ba586696f7030f03054c1aaca0752a891c7923faf47eb23b66857ec']
local_gccver = '9.3.0'
dependencies = [
    ('GCCcore', local_gccver),
    ('binutils', '2.34', '', ('GCCcore', local_gccver)),
    # This is necessary to avoid cases where just libnuma.so.1 is present in the system and -lnuma fails
    ('numactl', '2.0.13', '', ('GCCcore', local_gccver)),
]
# specify default CUDA version that should be used by NVHPC
# should match one of the CUDA versions that are included with this NVHPC version
# (see install_components/Linux_x86_64/20.11/cuda/)
# for NVHPC 20.11, those are: 11.1, 11.0, 10.2;
# this version can be tweaked from the EasyBuild command line with --try-amend=default_cuda_version="10.2" (for example)
default_cuda_version = '11.0' # Driver Version: 450.80.02
# The NVHPC easyblock supports several options that can be set on the EasyBuild command line or in this easyconfig.
# The following comments give examples for the easyconfig.
#
# NVHPC needs CUDA to work. Two options are available: 1) use the NVHPC-bundled CUDA, 2) use CUDA provided via EasyBuild
# 1) Bundled CUDA
# If no EasyBuild dependency on CUDA is present, the bundled CUDA is used. A version needs to be specified with
# default_cuda_version = "11.0"
# in this easyconfig file; alternatively, it can be specified through the command line during installation with
# --try-amend=default_cuda_version="10.2"
# 2) CUDA provided via EasyBuild
# Use CUDAcore as a dependency, for example
# dependencies = [('CUDAcore', '11.0.2')]
# The parameter default_cuda_version still can be set as above.
# If not set, it will be deduced from the CUDA module (via $EBVERSIONCUDA)
#
# Define an NVHPC-default Compute Capability
# cuda_compute_capabilities = "7.0"
# Can also be specified on the EasyBuild command line via --cuda-compute-capabilities=8.0
# Only single values supported, not lists of values!
cuda_compute_capabilities = '7.0' # V100 GPU
# Options to add/remove things to/from environment module (defaults shown)
# module_byo_compilers = Yes # Remove compilers from PATH (Bring-your-own compilers)
# module_nvhpc_own_mpi = Yes # Add NVHPC's own pre-compiled OpenMPI
# module_add_math_libs = Yes # Add NVHPC's math libraries (which should be there from CUDA anyway)
# module_add_profilers = Yes # Add NVHPC's NVIDIA Profilers
# module_add_nccl = Yes # Add NVHPC's NCCL library
# module_add_nvshmem = Yes # Add NVHPC's NVSHMEM library
# module_add_cuda = Yes # Add NVHPC's bundled CUDA
local_openmpi_path = "Linux_x86_64/%(version)s/comm_libs/openmpi/openmpi-3.1.5"
modextrapaths = {
    'PATH': "%s/bin" % (local_openmpi_path),
    'LD_LIBRARY_PATH': "%s/lib" % (local_openmpi_path),
    'LIBRARY_PATH': "%s/lib" % (local_openmpi_path),
    'CPATH': "%s/include" % (local_openmpi_path),
    'MANPATH': "%s/share/man" % (local_openmpi_path),
    'PKG_CONFIG_PATH': "%s/pkgconfig" % (local_openmpi_path),
}
modextravars = {
    'OMPI_MCA_btl_openib_if_include': 'mlx5_0',
    'OMPI_MCA_btl_tcp_if_include': '10.33.4.0/24',
    'OMPI_MCA_orte_base_help_aggregate': '0',
}
# this bundle serves as a compiler-only toolchain, so it should be marked as compiler (important for HMNS)
moduleclass = 'compiler'
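To illustrate option 2 from the comments in this file (CUDA provided via EasyBuild rather than taken from the NVHPC installer), here is a hedged sketch of how the dependencies block could look; whether a system-toolchain CUDAcore 11.0.2 module is actually available is an assumption:

local_gccver = '9.3.0'
dependencies = [
    ('GCCcore', local_gccver),
    ('binutils', '2.34', '', ('GCCcore', local_gccver)),
    ('numactl', '2.0.13', '', ('GCCcore', local_gccver)),
    # CUDA supplied by EasyBuild; default_cuda_version could then be omitted,
    # since the easyblock deduces it from $EBVERSIONCUDA of the loaded module.
    ('CUDAcore', '11.0.2', '', True),
]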

n/NVHPC/NVHPC-20.11-CUDA-11.0-OpenMPI-4.0.5.eb (new file)
@@ -0,0 +1,81 @@
# JH 2020
name = 'NVHPC'
version = '20.11'
versionsuffix = '-CUDA-11.0-OpenMPI-4.0.5'
homepage = 'https://developer.nvidia.com/hpc-sdk/'
description = """C, C++ and Fortran compilers included with the NVIDIA HPC SDK (previously: PGI)"""
toolchain = SYSTEM
# NVHPC can be downloaded freely from NVIDIA's website at https://developer.nvidia.com/hpc-sdk
# It requires accepting the HPC SDK Software License Agreement (https://docs.nvidia.com/hpc-sdk/eula/index.html)
# If you accept the License Agreement, the source_urls entry below allows EasyBuild to download the sources automatically
source_urls = ['https://developer.download.nvidia.com/hpc-sdk/%(version)s/']
sources = ['nvhpc_2020_%(version_major)s%(version_minor)s_Linux_x86_64_cuda_multi.tar.gz']
checksums = ['c80fc26e5ba586696f7030f03054c1aaca0752a891c7923faf47eb23b66857ec']
local_gccver = '9.3.0'
dependencies = [
    ('GCCcore', local_gccver),
    ('binutils', '2.34', '', ('GCCcore', local_gccver)),
    # This is necessary to avoid cases where just libnuma.so.1 is present in the system and -lnuma fails
    ('numactl', '2.0.13', '', ('GCCcore', local_gccver)),
]
# specify default CUDA version that should be used by NVHPC
# should match one of the CUDA versions that are included with this NVHPC version
# (see install_components/Linux_x86_64/20.11/cuda/)
# for NVHPC 20.11, those are: 11.1, 11.0, 10.2;
# this version can be tweaked from the EasyBuild command line with --try-amend=default_cuda_version="10.2" (for example)
default_cuda_version = '11.0' # Driver Version: 450.80.02
# The NVHPC easyblock supports several options that can be set on the EasyBuild command line or in this easyconfig.
# The following comments give examples for the easyconfig.
#
# NVHPC needs CUDA to work. Two options are available: 1) use the NVHPC-bundled CUDA, 2) use CUDA provided via EasyBuild
# 1) Bundled CUDA
# If no EasyBuild dependency on CUDA is present, the bundled CUDA is used. A version needs to be specified with
# default_cuda_version = "11.0"
# in this easyconfig file; alternatively, it can be specified through the command line during installation with
# --try-amend=default_cuda_version="10.2"
# 2) CUDA provided via EasyBuild
# Use CUDAcore as a dependency, for example
# dependencies = [('CUDAcore', '11.0.2')]
# The parameter default_cuda_version still can be set as above.
# If not set, it will be deduced from the CUDA module (via $EBVERSIONCUDA)
#
# Define an NVHPC-default Compute Capability
# cuda_compute_capabilities = "7.0"
# Can also be specified on the EasyBuild command line via --cuda-compute-capabilities=8.0
# Only single values supported, not lists of values!
cuda_compute_capabilities = '7.0' # V100 GPU
# Options to add/remove things to/from environment module (defaults shown)
# module_byo_compilers = Yes # Remove compilers from PATH (Bring-your-own compilers)
# module_nvhpc_own_mpi = Yes # Add NVHPC's own pre-compiled OpenMPI
# module_add_math_libs = Yes # Add NVHPC's math libraries (which should be there from CUDA anyway)
# module_add_profilers = Yes # Add NVHPC's NVIDIA Profilers
# module_add_nccl = Yes # Add NVHPC's NCCL library
# module_add_nvshmem = Yes # Add NVHPC's NVSHMEM library
# module_add_cuda = Yes # Add NVHPC's bundled CUDA
local_openmpi_path = "Linux_x86_64/%(version)s/comm_libs/openmpi4/openmpi-4.0.5"
modextrapaths = {
    'PATH': "%s/bin" % (local_openmpi_path),
    'LD_LIBRARY_PATH': "%s/lib" % (local_openmpi_path),
    'LIBRARY_PATH': "%s/lib" % (local_openmpi_path),
    'CPATH': "%s/include" % (local_openmpi_path),
    'MANPATH': "%s/share/man" % (local_openmpi_path),
    'PKG_CONFIG_PATH': "%s/pkgconfig" % (local_openmpi_path),
}
modextravars = {
    'OMPI_MCA_btl_openib_if_include': 'mlx5_0',
    'OMPI_MCA_btl_tcp_if_include': '10.33.4.0/24',
    'OMPI_MCA_orte_base_help_aggregate': '0',
}
# this bundle serves as a compiler-only toolchain, so it should be marked as compiler (important for HMNS)
moduleclass = 'compiler'
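For reference, a short plain-Python sketch (not part of the easyconfig) showing how the %(version)s template in local_openmpi_path resolves for version 20.11; the snippet merely mimics EasyBuild's template substitution:

# Resolve the template the same way EasyBuild would for NVHPC 20.11.
local_openmpi_path = "Linux_x86_64/%(version)s/comm_libs/openmpi4/openmpi-4.0.5"
resolved = local_openmpi_path % {'version': '20.11'}
print(resolved)            # Linux_x86_64/20.11/comm_libs/openmpi4/openmpi-4.0.5
print(resolved + "/bin")   # the directory prepended to PATH via modextrapaths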