# IT4Innovations 2021
# LK

name = 'NVHPC'
version = '21.2'

homepage = 'https://developer.nvidia.com/hpc-sdk/'
description = """C, C++ and Fortran compilers included with the NVIDIA HPC SDK (previously: PGI)"""

toolchain = SYSTEM

# By downloading, you accept the HPC SDK Software License Agreement
# (https://docs.nvidia.com/hpc-sdk/eula/index.html)
accept_eula = True

source_urls = ['https://developer.download.nvidia.com/hpc-sdk/%(version)s/']
local_tarball_tmpl = 'nvhpc_2021_%%(version_major)s%%(version_minor)s_Linux_%s_cuda_multi.tar.gz'
sources = [local_tarball_tmpl % '%(arch)s']
checksums = [
    {
        local_tarball_tmpl % 'x86_64': '8905e96a063baeca1da35f7fd20e3376',
    }
]

local_gccver = '9.3.0'
dependencies = [
    ('GCCcore', local_gccver),
    ('binutils', '2.34', '', ('GCCcore', local_gccver)),
    # This is necessary to avoid cases where just libnuma.so.1 is present in the system and -lnuma fails
    ('numactl', '2.0.13', '', ('GCCcore', local_gccver)),
]

# Specify the default CUDA version that should be used by NVHPC.
# It should match one of the CUDA versions that are included with this NVHPC version
# (see install_components/Linux_x86_64/21.2/cuda/ in the unpacked tarball for the bundled versions).
# This version can be tweaked from the EasyBuild command line with
# --try-amend=default_cuda_version="10.2" (for example).
default_cuda_version = '11.0'

# The NVHPC easyblock supports several custom options, which can be set on the EasyBuild
# command line or in this easyconfig. The following list gives examples for the easyconfig.
#
# NVHPC needs CUDA to work. Two options are available: 1) use the NVHPC-bundled CUDA,
# 2) use a CUDA module provided via EasyBuild.
# 1) Bundled CUDA
#    If no EasyBuild dependency on CUDA is present, the bundled CUDA is used. A version needs to be specified with
#        default_cuda_version = "11.0"
#    in this easyconfig file; alternatively, it can be specified on the command line during installation with
#        --try-amend=default_cuda_version="10.2"
# 2) CUDA provided via EasyBuild
#    Use CUDAcore as a dependency, for example
#        dependencies = [('CUDAcore', '11.0.2')]
#    The parameter default_cuda_version can still be set as above.
#    If it is not set, it will be deduced from the CUDA module (via $EBVERSIONCUDA).
#
# Define an NVHPC-default compute capability:
#     cuda_compute_capabilities = "8.0"
cuda_compute_capabilities = '7.0'  # V100 GPU
# It can also be specified on the EasyBuild command line via --cuda-compute-capabilities=8.0
# Only single values are supported, not lists of values!
#
# Options to add/remove things to/from the environment module (defaults shown):
#     module_byo_compilers = True   # Remove compilers from PATH (Bring-your-own compilers)
#     module_nvhpc_own_mpi = False  # Add NVHPC's own pre-compiled OpenMPI
#     module_add_math_libs = True   # Add NVHPC's math libraries (which should be there from CUDA anyway)
#     module_add_profilers = True   # Add NVHPC's NVIDIA profilers
#     module_add_nccl = True        # Add NVHPC's NCCL library
#     module_add_nvshmem = True     # Add NVHPC's NVSHMEM library
#     module_add_cuda = True        # Add NVHPC's bundled CUDA

# This bundle serves as a compiler-only toolchain, so it should be marked as a compiler (important for HMNS).
moduleclass = 'compiler'
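
# A minimal usage sketch, assuming this easyconfig is saved as NVHPC-21.2.eb (the usual
# <name>-<version>.eb naming; the filename is an assumption, not fixed by this file).
# It shows how the defaults set above can be overridden at install time using the two
# EasyBuild options referenced in the comments:
#     eb NVHPC-21.2.eb --try-amend=default_cuda_version="10.2" --cuda-compute-capabilities=7.0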