# IT4Innovations
# LK 2024

name = 'NVHPC'
version = '24.3'
versionsuffix = '-CUDA-%(cudaver)s'

homepage = 'https://developer.nvidia.com/hpc-sdk/'
description = """C, C++ and Fortran compilers included with the NVIDIA HPC SDK (previously: PGI)"""

toolchain = SYSTEM

# NVIDIA ships one multi-CUDA tarball per architecture;
# %% keeps %(version_major)s/%(version_minor)s intact through the Python %-formatting below,
# so EasyBuild can resolve those templates later
local_tarball_tmpl = 'nvhpc_2024_%%(version_major)s%%(version_minor)s_Linux_%s_cuda_multi.tar.gz'

# By downloading, you accept the HPC SDK Software License Agreement
# https://docs.nvidia.com/hpc-sdk/eula/index.html
# accept_eula = True
source_urls = ['https://developer.download.nvidia.com/hpc-sdk/%(version)s/']
sources = [local_tarball_tmpl % '%(arch)s']
checksums = [
    {
        local_tarball_tmpl % 'aarch64': '8c2ce561d5901a03eadce7f07dce5fbc55e8e88c87b74cf60e01e2eca231c41c',
        local_tarball_tmpl % 'ppc64le': 'e7330eb35e23dcd9b0b3bedc67c0d5443c4fd76b59caa894a76ecb0d17f71f43',
        local_tarball_tmpl % 'x86_64': 'a9fe5ec878e9c4cc332de732c6739f97ac064ce76ad3d0af6d282658d27124cb',
    }
]

local_gccver = '12.2.0'

dependencies = [
    ('GCCcore', local_gccver),
    ('binutils', '2.39', '', ('GCCcore', local_gccver)),
    # This is necessary to avoid cases where just libnuma.so.1 is present in the system and -lnuma fails
    ('numactl', '2.0.16', '', ('GCCcore', local_gccver)),
    ('CUDA', '12.3.0', '', SYSTEM),
]

# Don't expose NVHPC's bundled CUDA in the generated module;
# the separate CUDA dependency above is used instead, and its version
# is set as the default for NVHPC's multi-CUDA installation
module_add_cuda = False
default_cuda_version = '%(cudaver)s'

# Pick the default CUDA compute capability per cluster:
# Karolina's accelerated nodes have NVIDIA A100 GPUs (CC 8.0);
# other IT4Innovations clusters default to CC 7.0 (e.g. V100)
import os
if os.environ.get('CLUSTERNAME') == 'KAROLINA':
    cuda_compute_capabilities = '8.0'
else:
    cuda_compute_capabilities = '7.0'

moduleclass = 'compiler'
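
# Usage sketch (assumptions: this file is saved as NVHPC-24.3-CUDA-12.3.0.eb,
# EasyBuild's `eb` command is on PATH, and the EULA has been accepted by
# uncommenting accept_eula above or passing --accept-eula-for=NVHPC):
#
#   eb NVHPC-24.3-CUDA-12.3.0.eb --robot
#
# --robot resolves the GCCcore/binutils/numactl/CUDA dependencies and builds
# them as needed; with the default flat module naming scheme, the resulting
# module can then be loaded as:
#
#   module load NVHPC/24.3-CUDA-12.3.0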