# IT4Innovations
# LK 2025
name = 'OpenMPI'
version = '5.0.7'
homepage = 'https://www.open-mpi.org/'
description = """The Open MPI Project is an open source MPI-3 implementation."""
toolchain = {'name': 'GCC', 'version': '14.2.0'}
source_urls = ['https://www.open-mpi.org/software/ompi/v%(version_major_minor)s/downloads']
sources = [SOURCELOWER_TAR_BZ2]
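# SOURCELOWER_TAR_BZ2 expands to the lower-cased source name, here openmpi-5.0.7.tar.bz2.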
patches = [
    ('OpenMPI-5.0.6_build-with-internal-cuda-header.patch', 1),
    'OpenMPI-5.0.7_fix-sshmem-build-failure.patch',
]
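# The tuple form pins the patch level explicitly (-p1); for the plain string entry,
# EasyBuild determines the patch level itself.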
checksums = [
    {'openmpi-5.0.7.tar.bz2': '119f2009936a403334d0df3c0d74d5595a32d99497f9b1d41e90019fee2fc2dd'},
    {'OpenMPI-5.0.6_build-with-internal-cuda-header.patch':
     '4821f0740ae4b97f3ff5259f7bac67a11d8cdeede3b1425825c241cf6a2864bb'},
    {'OpenMPI-5.0.7_fix-sshmem-build-failure.patch':
     '7382a5bbe44c6eff9ab05c8f315a8911d529749655126d4375e44e809bfedec7'},
]
builddependencies = [
    ('pkgconf', '2.3.0'),
    ('Autotools', '20240712'),
]
dependencies = [
    ('zlib', '1.3.1'),
    ('hwloc', '2.11.2'),
    ('libevent', '2.1.12'),
    ('UCX', '1.18.0'),
    ('libfabric', '2.0.0'),
    ('PMIx', '5.0.6'),
    ('UCC', '1.3.0'),
    ('PRRTE', '3.0.8'),
]
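# UCX and UCC supply the point-to-point transport and collective layers that the
# configure options below wire into this build.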
# The CUDA-related patches and custom configure options can be removed if CUDA support is not wanted.
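# The preconfigure step compiles the patched-in internal CUDA header sources into a stub
# libcuda.so, so that configure's CUDA checks pass on build hosts without a GPU driver.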
preconfigopts = 'gcc -Iopal/mca/cuda/include -shared opal/mca/cuda/lib/cuda.c -o opal/mca/cuda/lib/libcuda.so && '
configopts = '--with-cuda=%(start_dir)s/opal/mca/cuda --with-show-load-errors=no '
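# %(start_dir)s expands to the unpacked source directory, so --with-cuda points at the
# internal CUDA header added by the patch above rather than at a full CUDA toolkit.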
configopts += '--enable-shared --enable-mpi-thread-multiple --with-verbs '
configopts += '--enable-mpirun-prefix-by-default '
configopts += '--with-hwloc=$EBROOTHWLOC '  # hwloc support
configopts += '--with-slurm '  # enable Slurm support
configopts += '--with-ucx=$EBROOTUCX '
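# Easyconfigs are parsed as Python, so the module variables below can be chosen per
# cluster from the CLUSTERNAME variable, which is assumed to be set on IT4I build hosts.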
import os
if os.environ.get("CLUSTERNAME") in ["BARBORA"]:
    modextravars = {
        'OMPI_MCA_btl_openib_if_include': 'mlx5_0',
        'OMPI_MCA_btl_tcp_if_include': '10.33.4.0/24',
        'OMPI_MCA_orte_base_help_aggregate': '0',
        'SLURM_MPI_TYPE': 'pmix_v4',
    }
elif os.environ.get("CLUSTERNAME") in ["KAROLINA"]:
    modextravars = {
        'OMPI_MCA_btl_openib_if_include': 'mlx5_0',
        'OMPI_MCA_orte_base_help_aggregate': '0',
        'SLURM_MPI_TYPE': 'pmix_v4',
    }
else:
    modextravars = {
        'OMPI_MCA_btl_openib_if_include': 'mlx4_0',
        'OMPI_MCA_oob_tcp_if_include': '10.0.0.0/8',
        'SLURM_MPI_TYPE': 'pmix_v4',
    }
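# A minimal usage sketch, assuming a working EasyBuild installation:
#   eb OpenMPI-5.0.7-GCC-14.2.0.eb --robot
# where --robot resolves and builds any missing dependencies automatically.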
moduleclass = 'mpi'