# IT4Innovations 2021
# LK JK

# Software name/version as they will appear in the generated module.
name = 'OpenMPI'
version = '4.0.5'
# Appended to the module version; marks this as a CUDA-enabled test build.
versionsuffix = '-CUDA-11.2.2-test'

homepage = 'https://www.open-mpi.org/'
description = """The Open MPI Project is an open source MPI-3 implementation."""

# Build with the NVIDIA HPC SDK (formerly PGI) 21.2 toolchain.
toolchain = {'name': 'NVHPC', 'version': '21.2'}

# %(version_major_minor)s is an EasyBuild template, expands to '4.0' here.
source_urls = ['https://www.open-mpi.org/software/ompi/v%(version_major_minor)s/downloads']
# EasyBuild template constant: lower-cased name + version -> 'openmpi-4.0.5.tar.gz'.
sources = [SOURCELOWER_TAR_GZ]

# Dependency tuples: (name, version[, versionsuffix[, toolchain]]).
# A 4th element of True selects the system toolchain for that dependency.
dependencies = [
    ('UCX', '1.9.0', '-CUDA-11.2.2'),
    ('CUDAcore', '11.2.2', '', True),
    ('GDRCopy', '2.1', '-CUDA-11.2.2'),
]

# Force the NVHPC/PGI compilers for configure; -noswitcherror keeps pgcc/pgc++
# from aborting on GCC-style flags that Open MPI's configure probes with.
preconfigopts = (
    'export CC="pgcc -noswitcherror" && '
    'export CXX="pgc++ -noswitcherror" && '
    'export FC="pgfortran -noswitcherror" && '
)

# Configure options, assembled via implicit string concatenation.
configopts = (
    '--enable-shared --enable-mpi-thread-multiple --without-verbs '
    '--with-hwloc=$EBROOTHWLOC '            # hwloc support
    '--enable-mpirun-prefix-by-default '
    '--with-slurm '                         # Enable slurm
    '--enable-mpi-cxx '                     # Enable building the C++ MPI bindings
    '--with-ucx=$EBROOTUCX '
    '--with-cuda=$EBROOTCUDACORE '
    # NOTE(review): leading space reproduces the original's doubled separator;
    # harmless to configure, kept for byte-identical behavior.
    ' --with-pmix=/opt/it4i-libs/PMIx/4.2.6'
)

import os

# Select cluster-specific environment variables to export from the generated
# module file: Open MPI MCA interface/subnet settings plus the Slurm PMIx
# plugin selection. Fixes vs. original: the CLUSTERNAME lookup is done once
# (was repeated per branch), single-value comparisons use '==' instead of
# membership in a one-element list, and indentation is normalized to 4 spaces.
# The '_' prefix marks this as a local variable per EasyBuild's convention.
_cluster = os.environ.get("CLUSTERNAME")

if _cluster == "BARBORA":
    modextravars = {
        'OMPI_MCA_btl_openib_if_include': 'mlx5_0',
        'OMPI_MCA_btl_tcp_if_include': '10.33.4.0/24',
        'OMPI_MCA_orte_base_help_aggregate': '0',
        'SLURM_MPI_TYPE': 'pmix_v4',
    }
elif _cluster == "KAROLINA":
    modextravars = {
        'OMPI_MCA_btl_openib_if_include': 'mlx5_0',
        'OMPI_MCA_orte_base_help_aggregate': '0',
        'SLURM_MPI_TYPE': 'pmix_v4',
    }
else:
    # Default for any other (or unset) cluster name: older mlx4 HCA and a
    # wider TCP subnet for out-of-band communication.
    modextravars = {
        'OMPI_MCA_btl_openib_if_include': 'mlx4_0',
        'OMPI_MCA_oob_tcp_if_include': '10.0.0.0/8',
        'SLURM_MPI_TYPE': 'pmix_v4',
    }

moduleclass = 'mpi'