Mirror of https://code.it4i.cz/sccs/easyconfigs-it4i.git (synced 2025-04-16 19:50:50 +01:00)
new file: a/Automake/Automake-1.17-GCCcore-14.2.0.eb
deleted: c/CASTEP/CASTEP-25.12-foss-2023b.eb
new file: c/CASTEP/CASTEP-25.12-intel-2024a.eb
new file: n/NCCL/NCCL-2.21.5-GCCcore-14.2.0-CUDA-12.8.0.eb
modified: n/NVHPC/NVHPC-24.9-CUDA-12.6.0.eb
new file: n/NVHPC/NVHPC-25.3-CUDA-12.8.0.eb
new file: o/OpenMPI/OpenMPI-5.0.7-25.3-CUDA-12.8.0.eb
new file: o/OpenMPI/OpenMPI-5.0.7-GCC-14.2.0.eb
new file: u/UCC-CUDA/UCC-CUDA-1.3.0-GCCcore-14.2.0-CUDA-12.8.0.eb
deleted: u/UCC/UCC-1.3.0-GCCcore-14.2.0.eb
new file: v/VASP/VASP-6.5.1-NVHPC-24.3-CUDA-12.3.0-adjust-makefile.patch
new file: v/VASP/VASP-6.5.1-NVHPC-24.3-CUDA-12.3.0.eb
new file: v/VASP/VASP-6.5.1-intel-2024a.eb
new file: v/VASP/VASP-6.5.1-intel-hdf5.patch
parent bb451554fc
commit 64aa7c8083
a/Automake/Automake-1.17-GCCcore-14.2.0.eb (new file, 46 lines)

@@ -0,0 +1,46 @@
# IT4Innovations
# LK 2025

easyblock = 'ConfigureMake'

name = 'Automake'
version = '1.17'

homepage = 'https://www.gnu.org/software/automake/automake.html'

description = "Automake: GNU Standards-compliant Makefile generator"

toolchain = {'name': 'GCCcore', 'version': '14.2.0'}

source_urls = [GNU_SOURCE]
sources = [SOURCELOWER_TAR_GZ]
patches = ['Automake-%(version)s_perl_env_space.patch']
checksums = [
    {'automake-1.17.tar.gz': '397767d4db3018dd4440825b60c64258b636eaf6bf99ac8b0897f06c89310acd'},
    {'Automake-1.17_perl_env_space.patch': 'a416eeb854df009f0cdec0484282a3cf7ff6b2637a59e1188932d946625196ab'},
]

builddependencies = [
    ('binutils', '2.42'),
]

dependencies = [
    ('Autoconf', '2.72'),
    # non-standard Perl modules are required,
    # see https://github.com/easybuilders/easybuild-easyconfigs/issues/1822
    ('Perl', '5.38.2'),
]

preconfigopts = "export PERL='/usr/bin/env perl' && "

sanity_check_paths = {
    'files': ['bin/aclocal', 'bin/automake'],
    'dirs': []
}

sanity_check_commands = [
    "aclocal --help",
    "automake --help",
]

moduleclass = 'devel'
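For context, a minimal sketch of how the preconfigopts line above takes effect, assuming the usual ConfigureMake behaviour of prepending preconfigopts to the configure command; the install prefix shown is hypothetical and not part of the commit:

# Sketch only; installdir value is illustrative.
preconfigopts = "export PERL='/usr/bin/env perl' && "
installdir = '/apps/all/Automake/1.17-GCCcore-14.2.0'  # hypothetical prefix
configure_cmd = preconfigopts + './configure --prefix=' + installdir
# -> "export PERL='/usr/bin/env perl' && ./configure --prefix=/apps/all/Automake/1.17-GCCcore-14.2.0"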
c/CASTEP/CASTEP-25.12-foss-2023b.eb (deleted, 52 lines)

@@ -1,52 +0,0 @@
# IT4Innovations
# PH 2025

easyblock = 'ConfigureMake'

name = 'CASTEP'
version = '25.12'

homepage = 'http://www.castep.org'
description = """
CASTEP is an electronic structure materials modelling code based on density
functional theory (DFT), with functionality including geometry optimization,
molecular dynamics, phonons, NMR chemical shifts and much more.
"""

toolchain = {'name': 'foss', 'version': '2023b'}

download_instructions = """CASTEP is proprietary software, available under a free-of-charge license for academic use
only. Visit http://www.castep.org and navigate to "Getting Castep" to apply for a license."""

sources = [SOURCE_TAR_GZ]
checksums = ['e21177bfe4cb3f3d098b666c90771e3da2826503b002b8e325e3ca1e230cfc7d']

dependencies = [
    ('Perl', '5.38.0'),
    ('Python', '3.11.5'),
    ('SciPy-bundle', '2023.11'),  # for elastic constants and castepconv utility
]

skipsteps = ['configure']

_generic_opts = ' COMMS_ARCH=mpi FFT=fftw3 MATH_LIBS="-lflexiblas" '

buildopts = _generic_opts + 'FFTLIBDIR=$FFT_LIB_DIR MATHLIBDIR=$BLAS_LIB_DIR'
buildopts += ' castep tools utilities'

preinstallopts = 'mkdir -p %(installdir)s/bin &&'
installopts = _generic_opts + 'INSTALL_DIR="%(installdir)s/bin"'
installopts += ' install-castep install-tools install-utilities'

sanity_check_paths = {
    'files': ['bin/%s' % x for x in ['castep.mpi', 'optados.mpi', 'orbitals2bands', 'dispersion.pl',
                                     'elastics.py', 'ceteprouts.pm']],
    'dirs': [],
}

sanity_check_commands = [
    'castep.mpi --help',
    'optados.mpi --help',
]

moduleclass = 'phys'
c/CASTEP/CASTEP-25.12-intel-2024a.eb (new file, 64 lines)

@@ -0,0 +1,64 @@
# IT4Innovations
# PH 2025

easyblock = 'ConfigureMake'

name = 'CASTEP'
version = '25.12'

homepage = 'http://www.castep.org'
description = """CASTEP is an electronic structure materials modelling code based on density functional theory (DFT),
with functionality including geometry optimization, molecular dynamics, phonons, NMR chemical shifts and much more."""

toolchain = {'name': 'intel', 'version': '2024a'}

# CASTEP is proprietary software, available under a free-of-charge license for academic use only.
# Visit http://www.castep.org and navigate to "Getting Castep" to apply for a license.
# local_patch_ver = version.split('.')[-1]

import os
if os.environ.get("CLUSTERNAME") in ["KAROLINA"]:
    toolchainopts = {'usempi': True, 'openmp': True, 'pic': True, 'lowopt': True, 'optarch': 'march=core-avx2'}
else:
    toolchainopts = {'usempi': True, 'openmp': True, 'pic': True}

sources = [SOURCE_TAR_GZ]

checksums = ['e21177bfe4cb3f3d098b666c90771e3da2826503b002b8e325e3ca1e230cfc7d']

# Python and Perl really ought to be build and test-suite dependencies, as well as
# run-time dependencies for various auxiliary utilities.
# In practice the system ones work fine.

dependencies = [
    # ('Perl', '5.38.0'),
    ('Python', '3.12.3'),
    # ('SciPy-bundle', '2024.05', '-gfbf-2024a', True),  # for elastic constants and castepconv utility
    ('imkl', '2024.2.0', '', True),
]

skipsteps = ['configure']

# local_buildopts = 'ARCH=linux_x86_64_gfortran COMMS_ARCH=mpi FFT=mkl MATHLIBS=mkl FFTLIBDIR=$MKLROOT/lib/intel64 MATHLIBDIR=$MKLROOT/lib/intel64'
local_buildopts = 'ARCH=linux_x86_64_ifx COMMS_ARCH=mpi SUBARCH=mpi FFT=mkl MATHLIBS=mkl'

buildopts = local_buildopts + ' castep tools utilities'

preinstallopts = 'mkdir -p %(installdir)s/bin &&'
installopts = local_buildopts + ' INSTALL_DIR="%(installdir)s/bin" install-castep install-tools install-utilities'

sanity_check_paths = {
    'files': ['bin/%s' % x for x in ['castep.mpi', 'orbitals2bands', 'dispersion.pl',
                                     'elastics.py', 'ceteprouts.pm']],
    'dirs': [],
}

sanity_check_commands = [
    'mpirun -n 1 castep.mpi --help',
    # 'optados.mpi --help',
]

# Run the "quick" set of regression tests
runtest = local_buildopts + ' check-quick'

moduleclass = 'chem'
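As a hedged aside (my reading of EasyBuild's usual toolchain-option handling for the intel toolchain, not something stated in this commit), the KAROLINA branch of toolchainopts roughly corresponds to:

# Not part of the commit; approximate mapping per EasyBuild toolchain-option defaults.
# usempi                    -> build with the toolchain's MPI compiler wrappers
# openmp                    -> add the OpenMP compiler flag
# pic                       -> add -fPIC
# lowopt                    -> lower the optimisation level (roughly -O1 instead of the -O2 default)
# optarch='march=core-avx2' -> use -march=core-avx2 instead of the default -xHost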
n/NCCL/NCCL-2.21.5-GCCcore-14.2.0-CUDA-12.8.0.eb (new file, 29 lines)

@@ -0,0 +1,29 @@
# IT4Innovations
# LK 2025

name = 'NCCL'
version = '2.21.5'
versionsuffix = '-CUDA-%(cudaver)s'

homepage = 'https://developer.nvidia.com/nccl'
description = """The NVIDIA Collective Communications Library (NCCL) implements multi-GPU and multi-node collective
communication primitives that are performance optimized for NVIDIA GPUs."""

toolchain = {'name': 'GCCcore', 'version': '14.2.0'}

github_account = 'NVIDIA'
source_urls = [GITHUB_SOURCE]
sources = ['v%(version)s-1.tar.gz']
checksums = ['1923596984d85e310b5b6c52b2c72a1b93da57218f2bc5a5c7ac3d59297a3303']

builddependencies = [('binutils', '2.42')]

dependencies = [
    ('CUDA', '12.8.0', '', SYSTEM),
    ('UCX-CUDA', '1.18.0', versionsuffix),
]

# default CUDA compute capabilities to use (override via --cuda-compute-capabilities)
cuda_compute_capabilities = ['8.0']

moduleclass = 'lib'
n/NVHPC/NVHPC-24.9-CUDA-12.6.0.eb (modified)

@@ -33,41 +33,12 @@ dependencies = [

module_add_cuda = False

# specify default CUDA version that should be used by NVHPC
# should match one of the CUDA versions that are included with this NVHPC version
# (see install_components/Linux_x86_64/$version/cuda/, where $version is the NVHPC version)
# this version can be tweaked from the EasyBuild command line with
# --try-amend=default_cuda_version="11.0" (for example)
default_cuda_version = '%(cudaver)s'

# The NVHPC easyblock supports some features, which can be set via CLI or this easyconfig.
# The following list gives examples for the easyconfig
#
# NVHPC needs CUDA to work. Two options are available: 1) use NVHPC-bundled CUDA, 2) use CUDA provided via EasyBuild
# 1) Bundled CUDA
#    If no EasyBuild dependency on CUDA is present, the bundled CUDA is taken. A version needs to be specified with
#      default_cuda_version = "11.0"
#    in this easyconfig file; alternatively, it can be specified through the command line during installation with
#      --try-amend=default_cuda_version="10.2"
# 2) CUDA provided via EasyBuild
#    Use CUDA as a dependency, for example
#      dependencies = [('CUDA', '11.5.0')]
#    The parameter default_cuda_version can still be set as above.
#    If not set, it will be deduced from the CUDA module (via $EBVERSIONCUDA)
#
# Define an NVHPC-default compute capability
#   cuda_compute_capabilities = "8.0"
# It can also be specified on the EasyBuild command line via --cuda-compute-capabilities=8.0
# Only single values are supported, not lists of values!
#
# Options to add/remove things to/from the environment module (defaults shown)
#   module_byo_compilers = False   # Remove compilers from PATH (Bring-your-own compilers)
#   module_nvhpc_own_mpi = False   # Add NVHPC's own pre-compiled OpenMPI
#   module_add_math_libs = False   # Add NVHPC's math libraries (which should be there from CUDA anyway)
#   module_add_profilers = False   # Add NVHPC's NVIDIA Profilers
#   module_add_nccl = False        # Add NVHPC's NCCL library
#   module_add_nvshmem = False     # Add NVHPC's NVSHMEM library
#   module_add_cuda = False        # Add NVHPC's bundled CUDA
import os
if os.environ.get("CLUSTERNAME") in ["KAROLINA"]:
    cuda_compute_capabilities = "8.0"
else:
    cuda_compute_capabilities = "7.0"

# this bundle serves as a compiler-only toolchain, so it should be marked as compiler (important for HMNS)
moduleclass = 'compiler'
n/NVHPC/NVHPC-25.3-CUDA-12.8.0.eb (new file, 47 lines)

@@ -0,0 +1,47 @@
# IT4Innovations
# LK 2025

name = 'NVHPC'
version = '25.3'
versionsuffix = '-CUDA-%(cudaver)s'

homepage = 'https://developer.nvidia.com/hpc-sdk/'
description = """C, C++ and Fortran compilers included with the NVIDIA HPC SDK (previously: PGI)"""

toolchain = SYSTEM

local_tarball_tmpl = 'nvhpc_2025_%%(version_major)s%%(version_minor)s_Linux_%s_cuda_multi.tar.gz'
# By downloading, you accept the HPC SDK Software License Agreement
# https://docs.nvidia.com/hpc-sdk/eula/index.html
# accept_eula = True
source_urls = ['https://developer.download.nvidia.com/hpc-sdk/%(version)s/']
sources = [local_tarball_tmpl % '%(arch)s']
checksums = [
    {
        local_tarball_tmpl % 'aarch64':
            'a2b86cf5141c0a9b0925999521693981451a8d2403367c36c46238163be6f2bb',
        local_tarball_tmpl % 'x86_64':
            'e2b2c911478a5db6a15d1fd258a8c4004dbfccf6f32f4132fe142a24fb7e6f8f',
    }
]

local_gccver = '14.2.0'
dependencies = [
    ('GCCcore', local_gccver),
    ('binutils', '2.42', '', ('GCCcore', local_gccver)),
    # This is necessary to avoid cases where just libnuma.so.1 is present in the system and -lnuma fails
    ('numactl', '2.0.19', '', ('GCCcore', local_gccver)),
    ('CUDA', '12.8.0', '', SYSTEM),
]

module_add_cuda = False

default_cuda_version = '%(cudaver)s'

import os
if os.environ.get("CLUSTERNAME") in ["KAROLINA"]:
    cuda_compute_capabilities = "8.0"
else:
    cuda_compute_capabilities = "7.0"

moduleclass = 'compiler'
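As a side note, a sketch of how the doubled %% in local_tarball_tmpl resolves: the first stage is plain Python %-formatting with the architecture, and the remaining %(...)s placeholders are filled in by EasyBuild's template resolution (a plain dict substitution gives the same result). This sketch is illustrative and not part of the commit.

# Sketch only, not part of the commit.
local_tarball_tmpl = 'nvhpc_2025_%%(version_major)s%%(version_minor)s_Linux_%s_cuda_multi.tar.gz'
step1 = local_tarball_tmpl % 'x86_64'
# -> 'nvhpc_2025_%(version_major)s%(version_minor)s_Linux_x86_64_cuda_multi.tar.gz'
step2 = step1 % {'version_major': '25', 'version_minor': '3'}
# -> 'nvhpc_2025_253_Linux_x86_64_cuda_multi.tar.gz', the actual tarball name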
o/OpenMPI/OpenMPI-5.0.7-25.3-CUDA-12.8.0.eb (new file, 93 lines)

@@ -0,0 +1,93 @@
# IT4Innovations
# LK 2025

name = 'OpenMPI'
version = '5.0.7'

homepage = 'https://www.open-mpi.org/'
description = """The Open MPI Project is an open source MPI-3 implementation."""

toolchain = {'name': 'NVHPC', 'version': '25.3-CUDA-12.8.0'}

source_urls = ['https://www.open-mpi.org/software/ompi/v%(version_major_minor)s/downloads']
sources = [SOURCELOWER_TAR_BZ2]
patches = [
    ('OpenMPI-5.0.7_fix-sshmem-build-failure.patch'),
]
checksums = [
    {'openmpi-5.0.7.tar.bz2': '119f2009936a403334d0df3c0d74d5595a32d99497f9b1d41e90019fee2fc2dd'},
    {'OpenMPI-5.0.7_fix-sshmem-build-failure.patch':
     '7382a5bbe44c6eff9ab05c8f315a8911d529749655126d4375e44e809bfedec7'},
]

builddependencies = [
    ('pkgconf', '2.3.0'),
    ('Autotools', '20240712'),
]

dependencies = [
    ('CUDA', '12.8.0', '', True),
    ('zlib', '1.3.1'),
    ('hwloc', '2.11.2'),
    ('libevent', '2.1.12'),
    ('UCX', '1.18.0'),
    ('UCX-CUDA', '1.18.0', '-CUDA-%(cudaver)s'),
    ('libfabric', '2.0.0'),
    ('PMIx', '5.0.6'),
    ('UCC', '1.3.0'),
    # ('UCC-CUDA', '1.3.0', '-CUDA-%(cudaver)s'),
    ('PRRTE', '3.0.8'),
]

# The CUDA-related patches and custom configure options can be removed if CUDA support isn't wanted.
configopts = '--with-cuda=/usr/local/cuda '
configopts += ' CC=pgcc CXX=pgc++ FC=pgfortran'
configopts += ' CXXFLAGS="-fPIC"'

# IT4I-specific settings
#
# configopts += '--enable-shared '
configopts += ' --enable-mpi-thread-multiple'
configopts += ' --with-verbs'
configopts += ' --enable-mpirun-prefix-by-default'
configopts += ' --with-hwloc=$EBROOTHWLOC'  # hwloc support
configopts += ' --with-slurm'  # enable Slurm support
configopts += ' --with-ucx=$EBROOTUCX'

import os
if os.environ.get("CLUSTERNAME") in ["BARBORA"]:
    modextravars = {'OMPI_MCA_btl_openib_if_include': 'mlx5_0',
                    'OMPI_MCA_btl_tcp_if_include': '10.33.4.0/24',
                    'OMPI_MCA_orte_base_help_aggregate': '0',
                    'SLURM_MPI_TYPE': 'pmix_v4',
                    }
elif os.environ.get("CLUSTERNAME") in ["KAROLINA"]:
    modextravars = {'OMPI_MCA_btl_openib_if_include': 'mlx5_0',
                    'OMPI_MCA_orte_base_help_aggregate': '0',
                    'SLURM_MPI_TYPE': 'pmix_v4',
                    }
else:
    modextravars = {'OMPI_MCA_btl_openib_if_include': 'mlx4_0',
                    'OMPI_MCA_oob_tcp_if_include': '10.0.0.0/8',
                    'SLURM_MPI_TYPE': 'pmix_v4',
                    }

osdependencies = [('libibverbs-dev', 'libibverbs-devel', 'rdma-core-devel')]

postinstallcmds = [
    'echo "# By default, for Open MPI 4.0 and later, infiniband ports on a device are not used by default." >> %(installdir)s/etc/openmpi-mca-params.conf',
    'echo "btl_openib_allow_ib = true" >> %(installdir)s/etc/openmpi-mca-params.conf',
]

local_libs = ["mpi_mpifh", "mpi", "ompitrace", "open-pal", "open-rte"]

sanity_check_paths = {
    'files': ["bin/%s" % binfile for binfile in ["ompi_info", "opal_wrapper"]] +
             ["include/%s.h" % x for x in ["mpi-ext", "mpif-config", "mpif", "mpi", "mpi_portable_platform"]],
    'dirs': [],
}

moduleclass = 'mpi'
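For readability, the comprehensions in sanity_check_paths above expand to the following file list (a plain restatement, not part of the commit):

# Sketch only: the files checked by the sanity step.
expected_files = [
    'bin/ompi_info', 'bin/opal_wrapper',
    'include/mpi-ext.h', 'include/mpif-config.h', 'include/mpif.h',
    'include/mpi.h', 'include/mpi_portable_platform.h',
]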
o/OpenMPI/OpenMPI-5.0.7-GCC-14.2.0.eb (new file, 69 lines)

@@ -0,0 +1,69 @@
# IT4Innovations
# LK 2025

name = 'OpenMPI'
version = '5.0.7'

homepage = 'https://www.open-mpi.org/'
description = """The Open MPI Project is an open source MPI-3 implementation."""

toolchain = {'name': 'GCC', 'version': '14.2.0'}

source_urls = ['https://www.open-mpi.org/software/ompi/v%(version_major_minor)s/downloads']
sources = [SOURCELOWER_TAR_BZ2]
patches = [
    ('OpenMPI-5.0.6_build-with-internal-cuda-header.patch', 1),
    ('OpenMPI-5.0.7_fix-sshmem-build-failure.patch'),
]
checksums = [
    {'openmpi-5.0.7.tar.bz2': '119f2009936a403334d0df3c0d74d5595a32d99497f9b1d41e90019fee2fc2dd'},
    {'OpenMPI-5.0.6_build-with-internal-cuda-header.patch':
     '4821f0740ae4b97f3ff5259f7bac67a11d8cdeede3b1425825c241cf6a2864bb'},
    {'OpenMPI-5.0.7_fix-sshmem-build-failure.patch':
     '7382a5bbe44c6eff9ab05c8f315a8911d529749655126d4375e44e809bfedec7'},
]

builddependencies = [
    ('pkgconf', '2.3.0'),
    ('Autotools', '20240712'),
]

dependencies = [
    ('zlib', '1.3.1'),
    ('hwloc', '2.11.2'),
    ('libevent', '2.1.12'),
    ('UCX', '1.18.0'),
    ('libfabric', '2.0.0'),
    ('PMIx', '5.0.6'),
    ('UCC', '1.3.0'),
    ('PRRTE', '3.0.8'),
]

# The CUDA-related patches and custom configure options can be removed if CUDA support isn't wanted.
preconfigopts = 'gcc -Iopal/mca/cuda/include -shared opal/mca/cuda/lib/cuda.c -o opal/mca/cuda/lib/libcuda.so && '
configopts = '--with-cuda=%(start_dir)s/opal/mca/cuda --with-show-load-errors=no '
configopts += '--enable-shared --enable-mpi-thread-multiple --with-verbs '
configopts += '--enable-mpirun-prefix-by-default '
configopts += '--with-hwloc=$EBROOTHWLOC '  # hwloc support
configopts += '--with-slurm '  # enable Slurm support
configopts += '--with-ucx=$EBROOTUCX '

import os
if os.environ.get("CLUSTERNAME") in ["BARBORA"]:
    modextravars = {'OMPI_MCA_btl_openib_if_include': 'mlx5_0',
                    'OMPI_MCA_btl_tcp_if_include': '10.33.4.0/24',
                    'OMPI_MCA_orte_base_help_aggregate': '0',
                    'SLURM_MPI_TYPE': 'pmix_v4',
                    }
elif os.environ.get("CLUSTERNAME") in ["KAROLINA"]:
    modextravars = {'OMPI_MCA_btl_openib_if_include': 'mlx5_0',
                    'OMPI_MCA_orte_base_help_aggregate': '0',
                    'SLURM_MPI_TYPE': 'pmix_v4',
                    }
else:
    modextravars = {'OMPI_MCA_btl_openib_if_include': 'mlx4_0',
                    'OMPI_MCA_oob_tcp_if_include': '10.0.0.0/8',
                    'SLURM_MPI_TYPE': 'pmix_v4',
                    }

moduleclass = 'mpi'
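A hedged note on the GCC variant (my reading of the easyconfig, not text from the commit):

# The preconfigopts line compiles a stub libcuda.so from Open MPI's bundled CUDA header
# (made available by OpenMPI-5.0.6_build-with-internal-cuda-header.patch), so that
# --with-cuda support can be compiled in without CUDA as a build dependency; at run time
# the real libcuda.so from the NVIDIA driver is used instead, and --with-show-load-errors=no
# presumably keeps component load warnings quiet on nodes without GPUs.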
u/UCC-CUDA/UCC-CUDA-1.3.0-GCCcore-14.2.0-CUDA-12.8.0.eb (new file, 59 lines)

@@ -0,0 +1,59 @@
# IT4Innovations
# LK 2025

easyblock = 'ConfigureMake'

name = 'UCC-CUDA'
version = '1.3.0'
versionsuffix = '-CUDA-%(cudaver)s'

homepage = 'https://www.openucx.org/'
description = """UCC (Unified Collective Communication) is a collective
communication operations API and library that is flexible, complete, and
feature-rich for current and emerging programming models and runtimes.

This module adds the UCC CUDA support.
"""

toolchain = {'name': 'GCCcore', 'version': '14.2.0'}
toolchainopts = {'pic': True}

source_urls = ['https://github.com/openucx/ucc/archive/refs/tags']
sources = ['v%(version)s.tar.gz']
# patches = [
#     '%(name)s-1.3.0_link_against_existing_UCC_libs.patch',
#     '%(name)s-1.3.0_cuda_12_mem_ops.patch',
# ]
checksums = [
    {'v1.3.0.tar.gz': 'b56379abe5f1c125bfa83be305d78d81a64aa271b7b5fff0ac17b86725ff3acf'},
    {'UCC-CUDA-1.3.0_link_against_existing_UCC_libs.patch':
     '328e0f7e4de76a9dc6ecc07427581df661c27f6c0ace24f49a7b3289a39777c7'},
    {'UCC-CUDA-1.3.0_cuda_12_mem_ops.patch':
     'fc3ea1487d29dc626db2363ef5a79e7f0906f6a7507a363fa6167a812b143eb6'},
]

builddependencies = [
    ('binutils', '2.42'),
    ('Autotools', '20240712'),
]

dependencies = [
    ('UCC', '1.3.0'),
    ('CUDA', '12.8.0', '', SYSTEM),
    ('UCX-CUDA', '1.18.0', '-CUDA-%(cudaver)s'),
    ('NCCL', '2.21.5', '-CUDA-%(cudaver)s'),
]

preconfigopts = "./autogen.sh && "

buildopts = '-C src/components/mc/cuda V=1 && make -C src/components/tl/nccl V=1'
installopts = '-C src/components/mc/cuda && make -C src/components/tl/nccl install'

sanity_check_paths = {
    'files': ['lib/ucc/libucc_mc_cuda.%s' % SHLIB_EXT, 'lib/ucc/libucc_tl_nccl.%s' % SHLIB_EXT],
    'dirs': ['lib']
}

sanity_check_commands = ["ucc_info -c"]

modextrapaths = {'EB_UCC_EXTRA_COMPONENT_PATH': 'lib/ucc'}

moduleclass = 'lib'
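A hedged sketch of how the build and install options above are consumed, assuming the usual ConfigureMake behaviour of appending buildopts/installopts to the make command:

# Sketch only, not part of the commit; the effective commands are approximately:
#   make <parallel flags> -C src/components/mc/cuda V=1 && make -C src/components/tl/nccl V=1
#   make install -C src/components/mc/cuda && make -C src/components/tl/nccl install
# i.e. only the CUDA memory component and the NCCL TL are built and installed on top of the
# existing UCC module listed in dependencies.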
u/UCC/UCC-1.3.0-GCCcore-14.2.0.eb (deleted, 44 lines)

@@ -1,44 +0,0 @@
# IT4Innovations
# LK 2024

easyblock = 'ConfigureMake'

name = 'UCC'
version = '1.3.0'

homepage = 'https://www.openucx.org/'
description = """UCC (Unified Collective Communication) is a collective
communication operations API and library that is flexible, complete, and
feature-rich for current and emerging programming models and runtimes.
"""

toolchain = {'name': 'GCCcore', 'version': '14.2.0'}
toolchainopts = {'pic': True}

source_urls = ['https://github.com/openucx/ucc/archive/refs/tags']
sources = ['v%(version)s.tar.gz']
patches = ['UCC-1.1.0-multiple_component_paths.patch']
checksums = [
    {'v1.3.0.tar.gz': 'b56379abe5f1c125bfa83be305d78d81a64aa271b7b5fff0ac17b86725ff3acf'},
    {'UCC-1.1.0-multiple_component_paths.patch': '3081d0f694331daa4a88a0fa3fb54b9a918015248ae5eb7b3157b924abd31bee'},
]

builddependencies = [
    ('binutils', '2.42'),
    ('Autotools', '20231222'),
]

dependencies = [
    ('UCX', '1.16.0'),
]

preconfigopts = "./autogen.sh && "

sanity_check_paths = {
    'files': ['bin/ucc_info'],
    'dirs': ['include', 'lib']
}

sanity_check_commands = ["ucc_info -c"]

moduleclass = 'lib'
v/VASP/VASP-6.5.1-NVHPC-24.3-CUDA-12.3.0-adjust-makefile.patch (new file, 64 lines)

@@ -0,0 +1,64 @@
--- arch/makefile.include.nvhpc_acc.orig	2025-04-09 12:20:02.277358084 +0200
+++ arch/makefile.include.nvhpc_acc	2025-04-09 12:22:34.890663227 +0200
@@ -17,8 +17,8 @@
 # N.B.: you might need to change the cuda-version here
 # to one that comes with your NVIDIA-HPC SDK
 CC = mpicc -acc -gpu=cc60,cc70,cc80,cuda11.8
-FC = mpif90 -acc -gpu=cc60,cc70,cc80,cuda11.8
-FCL = mpif90 -acc -gpu=cc60,cc70,cc80,cuda11.8 -c++libs
+FC = mpif90 -acc -gpu=cc60,cc70,cc80,cuda12.3
+FCL = mpif90 -acc -gpu=cc60,cc70,cc80,cuda12.3 -c++libs

 FREE = -Mfree

@@ -59,19 +59,19 @@

 # Specify your NV HPC-SDK installation (mandatory)
 #... first try to set it automatically
-NVROOT =$(shell which nvfortran | awk -F /compilers/bin/nvfortran '{ print $$1 }')
+#NVROOT =$(shell which nvfortran | awk -F /compilers/bin/nvfortran '{ print $$1 }')

 # If the above fails, then NVROOT needs to be set manually
-#NVHPC ?= /opt/nvidia/hpc_sdk
-#NVVERSION = 21.11
-#NVROOT = $(NVHPC)/Linux_x86_64/$(NVVERSION)
+NVHPC ?= ${EBROOTQD}
+NVVERSION = ${EBVERSIONNVHPC}
+NVROOT = $(NVHPC)/Linux_x86_64/$(NVVERSION)

 ## Improves performance when using NV HPC-SDK >=21.11 and CUDA >11.2
-#OFLAG_IN = -fast -Mwarperf
-#SOURCE_IN := nonlr.o
+OFLAG_IN = -fast -Mwarperf
+SOURCE_IN := nonlr.o

 # Software emulation of quadruple precsion (mandatory)
-QD ?= $(NVROOT)/compilers/extras/qd
+QD ?= ${EBROOTQD}
 LLIBS += -L$(QD)/lib -lqdmod -lqd
 INCS += -I$(QD)/include/qd

@@ -87,7 +87,7 @@
 LLIBS += $(SCALAPACK) $(LAPACK) $(BLAS)

 # FFTW (mandatory)
-FFTW_ROOT ?= /path/to/your/fftw/installation
+FFTW_ROOT ?= ${EBROOTFFTWMPI}
 LLIBS += -L$(FFTW_ROOT)/lib -lfftw3
 INCS += -I$(FFTW_ROOT)/include

@@ -97,10 +97,10 @@
 #LLIBS += -cudalib=cusolvermp,cublasmp -lnvhpcwrapcal

 # HDF5-support (optional but strongly recommended, and mandatory for some features)
-#CPP_OPTIONS+= -DVASP_HDF5
-#HDF5_ROOT ?= /path/to/your/hdf5/installation
-#LLIBS += -L$(HDF5_ROOT)/lib -lhdf5_fortran
-#INCS += -I$(HDF5_ROOT)/include
+CPP_OPTIONS+= -DVASP_HDF5
+HDF5_ROOT ?= ${EBROOTHDF5}
+LLIBS += -L$(HDF5_ROOT)/lib -lhdf5_fortran
+INCS += -I$(HDF5_ROOT)/include

 # For the VASP-2-Wannier90 interface (optional)
 #CPP_OPTIONS += -DVASP2WANNIER90
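A hedged note on the patch above (based on EasyBuild's standard module conventions, not on text in this commit):

# The substituted variables are environment variables set by the corresponding EasyBuild modules:
#   EBVERSIONNVHPC -> version string of the loaded NVHPC module (e.g. '24.3')
#   EBROOTQD       -> installation prefix of the QD dependency
#   EBROOTFFTWMPI  -> installation prefix of the FFTW.MPI dependency
#   EBROOTHDF5     -> installation prefix of the HDF5 dependency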
v/VASP/VASP-6.5.1-NVHPC-24.3-CUDA-12.3.0.eb (new file, 80 lines)

@@ -0,0 +1,80 @@
# IT4Innovations
# LK 2025

easyblock = 'MakeCp'

name = 'VASP'
version = '6.5.1'
local_cudaversion = '12.3.0'
versionsuffix = '-CUDA-%s' % local_cudaversion

homepage = 'https://www.vasp.at'
docurls = 'https://www.vasp.at/wiki/index.php/The_VASP_Manual'
description = """
The Vienna Ab initio Simulation Package (VASP) is a computer program for atomic scale
materials modelling, e.g. electronic structure calculations and quantum-mechanical molecular dynamics,
from first principles.

This is a GPU-enabled build.

To use VASP, you need an academic license from the University of Vienna.
Follow the instructions at https://www.vasp.at/index.php/faqs.

Please send us the ID of your VASP license, the list of authorized users for whom you require access,
and the e-mail addresses associated with your license (use only https://support.it4i.cz/rt).
We are responsible for verifying your licenses.
"""

toolchain = {'name': 'NVHPC', 'version': '24.3-CUDA-12.3.0'}
toolchainopts = {'pic': True}

# VASP is proprietary software, see http://www.vasp.at/index.php/faqs on
# how to get access to the code
sources = ['%(namelower)s.%(version)s.tgz']
patches = ['VASP-%(version)s-NVHPC-24.3-CUDA-12.3.0-adjust-makefile.patch']
checksums = [
    {'vasp.%(version)s.tgz': 'a53fd9dd2a66472a4aa30074dbda44634fc663ea2628377fc01d870e37136f61'},
    {'VASP-%(version)s-NVHPC-24.3-CUDA-12.3.0-adjust-makefile.patch':
     '80c166c2039ea83e00291091b77ec013adfc4b0e09d9cb1b71ef73bdaa56df96'},
]

dependencies = [
    ('OpenMPI', '4.1.6'),
    ('FFTW.MPI', '3.3.10'),
    ('FFTW', '3.3.10'),
    ('imkl', '2022.2.1', '', True),
    ('ScaLAPACK', '3.0'),
    ('HDF5', '1.14.3'),
    ('QD', '2.3.17'),
]

prebuildopts = 'cp arch/makefile.include.nvhpc_acc ./makefile.include && '

# VASP uses LIBS as a list of folders
prebuildopts += 'unset LIBS && '

# AMD/Intel CPU switch - we set -xHost by default; change it to -march=core-avx2 when necessary
import os
if os.environ.get("CLUSTERNAME") in ["KAROLINA"]:
    prebuildopts += 'sed -i "s|-xHOST|-march=core-avx2|" makefile.include && '
    prebuildopts += 'sed -i "s|-march=xHost|-march=core-avx2|" makefile.include && '

prebuildopts += 'sed -i "s|cuda12.1|cuda12.3|g" makefile.include && '

buildopts = 'std gam ncl '

parallel = 1

files_to_copy = [(['bin/vasp_std', 'bin/vasp_gam', 'bin/vasp_ncl'], 'bin')]

sanity_check_paths = {
    'files': ['bin/vasp_std', 'bin/vasp_gam', 'bin/vasp_ncl'],
    'dirs': []
}

modluafooter = """
add_property('state','license')
add_property('arch', 'gpu')
"""

moduleclass = 'chem'
v/VASP/VASP-6.5.1-intel-2024a.eb (new file, 65 lines)

@@ -0,0 +1,65 @@
# IT4Innovations
# LK 2025

easyblock = 'MakeCp'

name = 'VASP'
version = '6.5.1'

homepage = 'http://www.vasp.at'
docurls = 'https://www.vasp.at/wiki/index.php/The_VASP_Manual'
description = """The Vienna Ab initio Simulation Package (VASP) is a computer program for atomic scale
materials modelling, e.g. electronic structure calculations and quantum-mechanical molecular dynamics,
from first principles.

To use VASP, you need an academic license from the University of Vienna.
Follow the instructions at https://www.vasp.at/index.php/faqs.

Please send us a list of authorized users and their IDs for whom you need access
(use only http://support.it4i.cz/rt). We are responsible for verifying your licenses."""

toolchain = {'name': 'intel', 'version': '2024a'}

# VASP is proprietary software, see http://www.vasp.at/index.php/faqs on
# how to get access to the code
sources = ['%(namelower)s.%(version)s.tgz']
patches = ['VASP-%(version)s-intel-hdf5.patch']
checksums = [
    {'vasp.%(version)s.tgz': 'a53fd9dd2a66472a4aa30074dbda44634fc663ea2628377fc01d870e37136f61'},
    {'VASP-%(version)s-intel-hdf5.patch': '1289ed84508c9e655ba00da623231e48cd773edda1344bdf3ea13e08ae910d9b'},
]

# use serial compilation of W90, see https://www.vasp.at/wiki/index.php/Makefile.include#Wannier90_.28optional.29
# Important: in case of Wannier90 3.x, you should compile a serial version by removing COMMS=mpi in the make.inc of Wannier90.
dependencies = [
    ('HDF5', '1.14.3'),
    ('Wannier90', '3.1.0', '-serial'),
]

prebuildopts = 'cp arch/makefile.include.intel ./makefile.include && '

# AMD/Intel CPU switch - we set -xHost by default; change it to -march=core-avx2 when necessary
import os
if os.environ.get("CLUSTERNAME") in ["KAROLINA"]:
    prebuildopts += 'sed -i "s|-xHOST|-march=core-avx2|" makefile.include && '
    prebuildopts += 'sed -i "s|-march=xHost|-march=core-avx2|" makefile.include && '

# Fix icc vs icx
prebuildopts += 'sed -i "s|CC_LIB = icc|CC_LIB = icx|" makefile.include && '

# Fix icpc vs icpx
prebuildopts += 'sed -i "s|CXX_PARS = icpc|CXX_PARS = icpx|" makefile.include && '

# VASP uses LIBS as a list of folders
prebuildopts += 'unset LIBS && '

buildopts = 'std gam ncl '

max_parallel = 1

files_to_copy = [(['bin/vasp_std', 'bin/vasp_gam', 'bin/vasp_ncl'], 'bin')]
sanity_check_paths = {
    'files': ['bin/vasp_std', 'bin/vasp_gam', 'bin/vasp_ncl'],
    'dirs': []
}
modluafooter = 'add_property("state","license")'

moduleclass = 'chem'
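A hedged note on the two VASP easyconfigs (based on general EasyBuild parameter naming, not on text in this commit):

# Both builds restrict build parallelism to a single make job: the NVHPC easyconfig uses the
# older 'parallel = 1' parameter, while the intel easyconfig uses 'max_parallel = 1', the name
# favoured by more recent EasyBuild versions; the practical effect is assumed to be the same.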
v/VASP/VASP-6.5.1-intel-hdf5.patch (new file, 31 lines)

@@ -0,0 +1,31 @@
--- arch/makefile.include.intel.orig	2025-04-09 10:56:34.041874570 +0200
+++ arch/makefile.include.intel	2025-04-09 10:58:31.197543756 +0200
@@ -49,20 +49,20 @@
 # Intel MKL (FFTW, BLAS, LAPACK, and scaLAPACK)
 # (Note: for Intel Parallel Studio's MKL use -mkl instead of -qmkl)
 FCL += -qmkl=sequential
-MKLROOT ?= /path/to/your/mkl/installation
+MKLROOT ?= ${MKLROOT}
 LLIBS += -L$(MKLROOT)/lib/intel64 -lmkl_scalapack_lp64 -lmkl_blacs_intelmpi_lp64
 INCS =-I$(MKLROOT)/include/fftw

 # HDF5-support (optional but strongly recommended, and mandatory for some features)
-#CPP_OPTIONS+= -DVASP_HDF5
-#HDF5_ROOT ?= /path/to/your/hdf5/installation
-#LLIBS += -L$(HDF5_ROOT)/lib -lhdf5_fortran
-#INCS += -I$(HDF5_ROOT)/include
+CPP_OPTIONS+= -DVASP_HDF5
+HDF5_ROOT ?= ${EBROOTHDF5}
+LLIBS += -L$(HDF5_ROOT)/lib -lhdf5_fortran
+INCS += -I$(HDF5_ROOT)/include

 # For the VASP-2-Wannier90 interface (optional)
-#CPP_OPTIONS += -DVASP2WANNIER90
-#WANNIER90_ROOT ?= /path/to/your/wannier90/installation
-#LLIBS += -L$(WANNIER90_ROOT)/lib -lwannier
+CPP_OPTIONS += -DVASP2WANNIER90
+WANNIER90_ROOT ?= {EBROOTWANNIER90}
+LLIBS += -L$(WANNIER90_ROOT)/lib -lwannier

 # For machine learning library vaspml (experimental)
 #CPP_OPTIONS += -Dlibvaspml