# JH 2020
name = 'NVHPC'
version = '20.11'
versionsuffix = '-CUDA-11.0-OpenMPI-3.1.5'

homepage = 'https://developer.nvidia.com/hpc-sdk/'
description = """C, C++ and Fortran compilers included with the NVIDIA HPC SDK (previously: PGI)"""

toolchain = SYSTEM

# NVHPC can be downloaded freely from NVIDIA's website at https://developer.nvidia.com/hpc-sdk
# It requires accepting the HPC SDK Software License Agreement (https://docs.nvidia.com/hpc-sdk/eula/index.html)
# If you accept the License Agreement, the source_urls entry below lets EasyBuild download the sources automatically
source_urls = ['https://developer.download.nvidia.com/hpc-sdk/%(version)s/']
sources = ['nvhpc_2020_%(version_major)s%(version_minor)s_Linux_x86_64_cuda_multi.tar.gz']
checksums = ['c80fc26e5ba586696f7030f03054c1aaca0752a891c7923faf47eb23b66857ec']

local_gccver = '9.3.0'
dependencies = [
    ('GCCcore', local_gccver),
    ('binutils', '2.34', '', ('GCCcore', local_gccver)),
    # This is necessary to avoid cases where just libnuma.so.1 is present in the system and -lnuma fails
    ('numactl', '2.0.13', '', ('GCCcore', local_gccver)),
]

# Specify the default CUDA version that should be used by NVHPC.
# It should match one of the CUDA versions that are included with this NVHPC version
# (see install_components/Linux_x86_64/20.11/cuda/);
# for NVHPC 20.11, those are: 11.1, 11.0, 10.2.
# This version can be tweaked from the EasyBuild command line with --try-amend=default_cuda_version="10.2" (for example)
default_cuda_version = '11.0'  # Driver Version: 450.80.02

# The NVHPC EasyBlock supports several features, which can be set via the command line or in this easyconfig.
# The following list gives examples for the easyconfig.
#
# NVHPC needs CUDA to work. Two options are available: 1) use the NVHPC-bundled CUDA, 2) use CUDA provided via EasyBuild.
# 1) Bundled CUDA
#    If no EasyBuild dependency on CUDA is present, the bundled CUDA is used. A version needs to be specified with
#      default_cuda_version = "11.0"
#    in this easyconfig file; alternatively, it can be specified through the command line during installation with
#      --try-amend=default_cuda_version="10.2"
# 2) CUDA provided via EasyBuild
#    Use CUDAcore as a dependency, for example
#      dependencies = [('CUDAcore', '11.0.2')]
#    The parameter default_cuda_version can still be set as above.
#    If not set, it will be deduced from the CUDA module (via $EBVERSIONCUDA).
#
# Define an NVHPC-default compute capability:
#   cuda_compute_capabilities = "7.0"
# It can also be specified on the EasyBuild command line via --cuda-compute-capabilities=8.0.
# Only single values are supported, not lists of values!
cuda_compute_capabilities = '7.0'  # V100 GPU

# Options to add/remove things to/from the environment module (defaults shown)
# module_byo_compilers = Yes   # Remove compilers from PATH (Bring-your-own compilers)
# module_nvhpc_own_mpi = Yes   # Add NVHPC's own pre-compiled OpenMPI
# module_add_math_libs = Yes   # Add NVHPC's math libraries (which should be there from CUDA anyway)
# module_add_profilers = Yes   # Add NVHPC's NVIDIA Profilers
# module_add_nccl = Yes        # Add NVHPC's NCCL library
# module_add_nvshmem = Yes     # Add NVHPC's NVSHMEM library
# module_add_cuda = Yes        # Add NVHPC's bundled CUDA

local_openmpi_path = "Linux_x86_64/%(version)s/comm_libs/openmpi/openmpi-3.1.5"
modextrapaths = {
    'PATH': "%s/bin" % local_openmpi_path,
    'LD_LIBRARY_PATH': "%s/lib" % local_openmpi_path,
    'LIBRARY_PATH': "%s/lib" % local_openmpi_path,
    'CPATH': "%s/include" % local_openmpi_path,
    'MANPATH': "%s/share/man" % local_openmpi_path,
    'PKG_CONFIG_PATH': "%s/pkgconfig" % local_openmpi_path,
}

modextravars = {
    'OMPI_MCA_btl_openib_if_include': 'mlx5_0',
    'OMPI_MCA_btl_tcp_if_include': '10.33.4.0/24',
    'OMPI_MCA_orte_base_help_aggregate': '0',
}

# This bundle serves as a compiler-only toolchain, so it should be marked as compiler (important for HMNS)
moduleclass = 'compiler'
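
# Example usage (an assumption based on standard EasyBuild conventions, not part of the original easyconfig):
# with this file saved as NVHPC-20.11-CUDA-11.0-OpenMPI-3.1.5.eb (name-version+versionsuffix.eb),
# it could typically be installed with
#   eb NVHPC-20.11-CUDA-11.0-OpenMPI-3.1.5.eb --robot
# and the default CUDA version could be overridden at install time with
#   eb NVHPC-20.11-CUDA-11.0-OpenMPI-3.1.5.eb --robot --try-amend=default_cuda_version="10.2"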