# easyconfigs-it4i/o/OpenMPI/OpenMPI-5.0.5-NVHPC-24.3-CUDA-12.3.0.eb
# Repository page capture — git metadata, not part of the easyconfig itself:
#   Lukas Krupcik 2524ea09c8 new file: o/OpenMPI/OpenMPI-5.0.3-GCC-13.3.0.eb
#   new file: o/OpenMPI/OpenMPI-5.0.5-NVHPC-24.3-CUDA-12.3.0.eb
#   new file: p/PMIx/PMIx-5.0.2-GCCcore-12.2.0.eb
#   new file: p/PRRTE/PRRTE-3.0.5-GCCcore-12.2.0.eb
#   2024-10-18 11:03:56 +02:00 — 120 lines, 4.1 KiB, Plaintext

# IT4Innovations
# LK 2024

name = 'OpenMPI'
version = '5.0.5'

homepage = 'https://www.open-mpi.org/'
description = """The Open MPI Project is an open source MPI-3 implementation."""

toolchain = {'name': 'NVHPC', 'version': '24.3-CUDA-12.3.0'}

source_urls = ['https://www.open-mpi.org/software/ompi/v%(version_major_minor)s/downloads']
sources = [SOURCELOWER_TAR_BZ2]
# The OpenMPI 4.1.1 CUDA patches are intentionally disabled for this 5.x build;
# their names and sha256 sums are kept here for reference only:
#   OpenMPI-4.1.1_build-with-internal-cuda-header.patch
#     63eac52736bdf7644c480362440a7f1f0ae7c7cae47b7565f5635c41793f8c83
#   OpenMPI-4.1.1_opal-datatype-cuda-performance.patch
#     b767c7166cf0b32906132d58de5439c735193c9fd09ec3c5c11db8d5fa68750e
checksums = ['6588d57c0a4bd299a24103f4e196051b29e8b55fbda49e11d5b3d32030a32776']  # openmpi-5.0.5.tar.bz2
builddependencies = [
('pkgconf', '1.9.3'),
('Perl', '5.36.0'),
('Autotools', '20220317'),
]
dependencies = [
('zlib', '1.2.12'),
('CUDA', '12.3.0', '', True),
('hwloc', '2.8.0'),
('libevent', '2.1.12'),
('UCX', '1.16.0'),
('UCX-CUDA', '1.16.0', '-CUDA-%(cudaver)s'),
('libfabric', '1.16.1'),
('PMIx', '5.0.2'),
('UCC', '1.3.0'),
('UCC-CUDA', '1.3.0', '-CUDA-%(cudaver)s'),
('PRRTE', '3.0.5'),
]
# Notes carried over from the 4.1.x recipe: 'configure' used to be regenerated
# with a subset of autogen.pl after applying the internal-cuda patch, and CUDA
# could be built in via '--with-cuda=internal'.  Neither is needed here since
# CUDA is taken directly from /usr/local/cuda, so those steps stay disabled.
# '--enable-mpi-cxx' is likewise omitted: the C++ bindings are gone in 5.x.
configopts = ' '.join([
    '--with-cuda=/usr/local/cuda ',   # trailing space kept (historical double space)
    'CC=pgcc CXX=pgc++ FC=pgfortran',  # NVHPC compiler drivers
    'CXXFLAGS="-fPIC"',
    # IT4I-specific settings
    '--enable-mpi-thread-multiple',
    '--with-verbs',
    '--enable-mpirun-prefix-by-default',
    '--with-hwloc=$EBROOTHWLOC',       # hwloc support
    '--with-slurm',                    # Enable slurm
    '--with-ucx=$EBROOTUCX',
])
# '--without-prrte' (srun only, no mpirun) deliberately not set.

import os

# Site-specific KNEM (kernel-assisted intra-node copy) locations.
_cluster = os.environ.get("CLUSTERNAME")
if _cluster == "BARBORA":
    configopts += ' --with-knem=/opt/knem-1.1.4.90mlnx1'
if _cluster == "KAROLINA":
    configopts += ' --with-knem=/opt/knem-1.1.4.90mlnx3'
# libibverbs development files must come from the OS.
osdependencies = [('libibverbs-dev', 'libibverbs-devel', 'rdma-core-devel')]

# Append MCA defaults so IB ports are usable again (the openib BTL stopped
# using them by default in Open MPI >= 4.0).
_mca_params = '%(installdir)s/etc/openmpi-mca-params.conf'
postinstallcmds = [
    'echo "%s" >> %s' % (_entry, _mca_params)
    for _entry in [
        '# By default, for Open MPI 4.0 and later, infiniband ports on a device are not used by default.',
        'btl_openib_allow_ib = true',
    ]
]
# NOTE(review): local_libs is not referenced anywhere in this file — possibly
# leftover from a template; kept for parity with related recipes.
local_libs = ["mpi_mpifh", "mpi", "ompitrace", "open-pal", "open-rte"]

sanity_check_paths = {
    'files': ['bin/ompi_info', 'bin/opal_wrapper'] + [
        'include/%s.h' % header
        for header in ['mpi-ext', 'mpif-config', 'mpif', 'mpi', 'mpi_portable_platform']
    ],
    'dirs': [],
}
# Per-cluster module environment: select the IB device / network ranges for
# the machine this module was built on.  All variants use PMIx v4 with Slurm.
_clustername = os.environ.get("CLUSTERNAME")
if _clustername == "BARBORA":
    modextravars = {
        'OMPI_MCA_btl_openib_if_include': 'mlx5_0',
        'OMPI_MCA_btl_tcp_if_include': '10.33.4.0/24',
        'OMPI_MCA_orte_base_help_aggregate': '0',
        'SLURM_MPI_TYPE': 'pmix_v4',
    }
elif _clustername == "KAROLINA":
    modextravars = {
        'OMPI_MCA_btl_openib_if_include': 'mlx5_0',
        'OMPI_MCA_orte_base_help_aggregate': '0',
        'SLURM_MPI_TYPE': 'pmix_v4',
    }
else:
    # Fallback for any other (older) cluster.
    modextravars = {
        'OMPI_MCA_btl_openib_if_include': 'mlx4_0',
        'OMPI_MCA_oob_tcp_if_include': '10.0.0.0/8',
        'SLURM_MPI_TYPE': 'pmix_v4',
    }

moduleclass = 'mpi'