From 7406a1d55cadf6fa01e61f4e190e87b63054711d Mon Sep 17 00:00:00 2001
From: Pavel Holba
Date: Thu, 18 Jul 2024 15:45:07 +0200
Subject: [PATCH] Add new file, not yet done

---
 ...penMPI-4.1.6-NVHPC-23.5-CUDA-12.2.0-dgx.eb | 114 ++++++++++++++++++
 1 file changed, 114 insertions(+)
 create mode 100644 o/OpenMPI/OpenMPI-4.1.6-NVHPC-23.5-CUDA-12.2.0-dgx.eb

diff --git a/o/OpenMPI/OpenMPI-4.1.6-NVHPC-23.5-CUDA-12.2.0-dgx.eb b/o/OpenMPI/OpenMPI-4.1.6-NVHPC-23.5-CUDA-12.2.0-dgx.eb
new file mode 100644
index 00000000..55bcd8cd
--- /dev/null
+++ b/o/OpenMPI/OpenMPI-4.1.6-NVHPC-23.5-CUDA-12.2.0-dgx.eb
@@ -0,0 +1,114 @@
+# IT4Innovations
+# LK 2023
+
+name = 'OpenMPI'
+version = '4.1.6'
+versionsuffix = '-CUDA-12.2.0'
+
+homepage = 'https://www.open-mpi.org/'
+description = """The Open MPI Project is an open source MPI-3 implementation."""
+
+toolchain = {'name': 'NVHPC', 'version': '23.5'}
+
+source_urls = ['https://www.open-mpi.org/software/ompi/v%(version_major_minor)s/downloads']
+sources = [SOURCELOWER_TAR_BZ2]
+patches = [
+    'OpenMPI-4.1.1_build-with-internal-cuda-header.patch',
+    'OpenMPI-4.1.1_opal-datatype-cuda-performance.patch',
+]
+checksums = [
+    'f740994485516deb63b5311af122c265179f5328a0d857a567b85db00b11e415',  # openmpi-4.1.6.tar.bz2
+    # OpenMPI-4.1.1_build-with-internal-cuda-header.patch
+    '63eac52736bdf7644c480362440a7f1f0ae7c7cae47b7565f5635c41793f8c83',
+    # OpenMPI-4.1.1_opal-datatype-cuda-performance.patch
+    'b767c7166cf0b32906132d58de5439c735193c9fd09ec3c5c11db8d5fa68750e',
+]
+
+builddependencies = [
+    ('pkgconf', '1.9.3'),
+    ('Perl', '5.36.0'),
+    ('Autotools', '20220317'),
+]
+
+dependencies = [
+    ('zlib', '1.2.12'),
+    ('CUDA', '12.2.0', '', True),
+    ('hwloc', '2.8.0'),
+    ('libevent', '2.1.12'),
+    ('UCX', '1.14.1'),
+    ('UCX-CUDA', '1.14.1', '-CUDA-%(cudaver)s'),
+    ('libfabric', '1.16.1'),
+    ('PMIx', '4.2.2'),
+    ('UCC', '1.1.0'),
+    ('UCC-CUDA', '1.1.0', '-CUDA-%(cudaver)s'),
+]
+
+# Update configure to include changes from the "internal-cuda" patch
+# by running a subset of autogen.pl sufficient to achieve this
+# without doing the full, long-running regeneration.
+preconfigopts = ' && '.join([
+    'cd config',
+    'autom4te --language=m4sh opal_get_version.m4sh -o opal_get_version.sh',
+    'cd ..',
+    'autoconf',
+    'autoheader',
+    'aclocal',
+    'automake',
+    ''
+])
+
+# The CUDA-related patches and the custom configure option can be removed if CUDA support isn't wanted.
+configopts = ' --with-cuda=internal'
+configopts += ' CC=pgcc CXX=pgc++ FC=pgfortran'
+configopts += ' CXXFLAGS="-fPIC"'
+
+# IT4I-specific settings
+
+# configopts += ' --enable-shared'
+configopts += ' --enable-mpi-thread-multiple'
+configopts += ' --with-verbs'
+configopts += ' --enable-mpirun-prefix-by-default'
+configopts += ' --with-hwloc=$EBROOTHWLOC'  # hwloc support
+configopts += ' --with-slurm'  # enable Slurm support
+configopts += ' --enable-mpi-cxx'  # enable building the C++ MPI bindings
+configopts += ' --with-ucx=$EBROOTUCX'
+configopts += ' --with-pmix=/opt/it4i-libs/PMIx/4.2.6'
+
+osdependencies = [('libibverbs-dev', 'libibverbs-devel', 'rdma-core-devel')]
+
+postinstallcmds = [
+    'echo "# By default, for Open MPI 4.0 and later, infiniband ports on a device are not used by default." >> %(installdir)s/etc/openmpi-mca-params.conf',
+    'echo "btl_openib_allow_ib = true" >> %(installdir)s/etc/openmpi-mca-params.conf',
+]
+
+local_libs = ["mpi_mpifh", "mpi", "ompitrace", "open-pal", "open-rte"]
+sanity_check_paths = {
+    'files': ["bin/%s" % binfile for binfile in ["ompi_info", "opal_wrapper", "orterun"]] +
+             ["lib/lib%s.%s" % (libfile, SHLIB_EXT) for libfile in local_libs] +
+             ["include/%s.h" % x for x in ["mpi-ext", "mpif-config", "mpif", "mpi",
+                                           "mpi_portable_platform"]],
+    'dirs': [],
+}
+
+import os
+if os.environ.get("CLUSTERNAME") in ["BARBORA"]:
+    modextravars = {
+        'OMPI_MCA_btl_openib_if_include': 'mlx5_0',
+        'OMPI_MCA_btl_tcp_if_include': '10.33.4.0/24',
+        'OMPI_MCA_orte_base_help_aggregate': '0',
+        'SLURM_MPI_TYPE': 'pmix_v4',
+    }
+elif os.environ.get("CLUSTERNAME") in ["KAROLINA"]:
+    modextravars = {
+        'OMPI_MCA_btl_openib_if_include': 'mlx5_0',
+        'OMPI_MCA_orte_base_help_aggregate': '0',
+        'SLURM_MPI_TYPE': 'pmix_v4',
+    }
+else:
+    modextravars = {
+        'OMPI_MCA_btl_openib_if_include': 'mlx4_0',
+        'OMPI_MCA_oob_tcp_if_include': '10.0.0.0/8',
+        'SLURM_MPI_TYPE': 'pmix_v4',
+    }
+
+moduleclass = 'mpi'
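
Usage sketch (appended note, not part of the patch itself): once merged, this easyconfig
could be exercised with the standard `eb` command roughly as follows, assuming EasyBuild
is installed and this repository is on the robot search path; both are assumptions about
the deployment, not something the patch configures.

    # dry run: check that all dependencies and build dependencies resolve
    eb o/OpenMPI/OpenMPI-4.1.6-NVHPC-23.5-CUDA-12.2.0-dgx.eb --robot --dry-run
    # full build and install
    eb o/OpenMPI/OpenMPI-4.1.6-NVHPC-23.5-CUDA-12.2.0-dgx.eb --robot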