# IT4Innovations 2021
# LK
name = 'NVHPC'
version = '21.9'
homepage = 'https://developer.nvidia.com/hpc-sdk/'
description = """C, C++ and Fortran compilers included with the NVIDIA HPC SDK (previously: PGI)"""
toolchain = SYSTEM
# By downloading, you accept the HPC SDK Software License Agreement (https://docs.nvidia.com/hpc-sdk/eula/index.html)
accept_eula = True
source_urls = ['https://developer.download.nvidia.com/hpc-sdk/%(version)s/']
local_tarball_tmpl = 'nvhpc_2021_%%(version_major)s%%(version_minor)s_Linux_%s_cuda_multi.tar.gz'
sources = [local_tarball_tmpl % '%(arch)s']
checksums = ['7de6a6880fd7e59afe0dee51f1fae4d3bff1ca0fb8ee234b24e1f2fdff23ffc9']
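# For illustration: the Python %-substitution above fills in the architecture (the doubled %%
# collapses to a single %), after which EasyBuild's template expansion resolves the version;
# on an x86_64 host this yields 'nvhpc_2021_219_Linux_x86_64_cuda_multi.tar.gz'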
# local_gccver = '10.2.0'
# dependencies = [
#     ('GCCcore', local_gccver),
#     ('binutils', '2.35', '', ('GCCcore', local_gccver)),
#     # This is necessary to avoid cases where just libnuma.so.1 is present in the system and -lnuma fails
#     ('numactl', '2.0.13', '', ('GCCcore', local_gccver)),
# ]
# specify default CUDA version that should be used by NVHPC
# should match one of the CUDA versions that are included with this NVHPC version
# (see install_components/Linux_x86_64/21.9/cuda/)
# for NVHPC 21.9, those are: 11.4, 11.0, 10.2;
# this version can be tweaked from the EasyBuild command line with
# --try-amend=default_cuda_version="10.2" (for example)
default_cuda_version = '11.4'
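# For example, to switch the default to another bundled CUDA (illustrative invocation):
#   eb NVHPC-21.9.eb --try-amend=default_cuda_version="11.0"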
# The NVHPC EasyBlock supports several features that can be set via the command line or in this easyconfig.
# The following comments give examples for the easyconfig.
#
# NVHPC needs CUDA to work. Two options are available: 1) use the NVHPC-bundled CUDA, 2) use CUDA provided via EasyBuild
# 1) Bundled CUDA
# If no EasyBuild dependency on CUDA is present, the bundled CUDA is used. A version needs to be specified with
# default_cuda_version = "11.0"
# in this easyconfig file; alternatively, it can be specified through the command line during installation with
# --try-amend=default_cuda_version="10.2"
# 2) CUDA provided via EasyBuild
# Use CUDAcore as a dependency, for example
# dependencies = [('CUDAcore', '11.0.2')]
# The parameter default_cuda_version can still be set as above.
# If not set, it will be deduced from the CUDA module (via $EBVERSIONCUDA)
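# A minimal option-2 sketch (the CUDAcore version is illustrative; use one you have an easyconfig for):
# dependencies = [('CUDAcore', '11.4.1')]
# (default_cuda_version omitted; it is then deduced via $EBVERSIONCUDA as noted above)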
#
# Define an NVHPC-default Compute Capability
# cuda_compute_capabilities = "8.0"
cuda_compute_capabilities = '7.0'  # V100 GPU
# Can also be specified on the EasyBuild command line via --cuda-compute-capabilities=8.0
# Only a single value is supported, not a list of values!
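# For example (illustrative invocation; 8.0 corresponds to NVIDIA A100 GPUs):
#   eb NVHPC-21.9.eb --cuda-compute-capabilities=8.0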
#
# Options to add/remove things to/from environment module (defaults shown)
# module_byo_compilers = False  # Remove compilers from PATH (Bring-your-own compilers)
# module_nvhpc_own_mpi = False  # Add NVHPC's own pre-compiled OpenMPI
# module_add_math_libs = False  # Add NVHPC's math libraries (which should be there from CUDA anyway)
# module_add_profilers = False  # Add NVHPC's NVIDIA Profilers
# module_add_nccl = False  # Add NVHPC's NCCL library
# module_add_nvshmem = False  # Add NVHPC's NVSHMEM library
# module_add_cuda = False  # Add NVHPC's bundled CUDA
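# For example, to also expose NVHPC's pre-compiled OpenMPI in the generated module,
# uncomment and flip the option above:
# module_nvhpc_own_mpi = True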
# this bundle serves as a compiler-only toolchain, so it should be marked as compiler (important for HMNS)
moduleclass = 'compiler'