easyconfigs-it4i/m/MPI_NET/mpi.net-1.2.0-unsafe.patch

diff --git a/MPI/CustomUnsafe.cs b/MPI/CustomUnsafe.cs
new file mode 100644
index 0000000..3ce52f2
--- /dev/null
+++ b/MPI/CustomUnsafe.cs
@@ -0,0 +1,2614 @@
+/* Copyright (C) 2007 The Trustees of Indiana University
+ *
+ * Use, modification and distribution is subject to the Boost Software
+ * License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Authors: Douglas Gregor
+ * Andrew Lumsdaine
+ *
+ * This file provides an interface to the native C MPI from within C#,
+ * exposing all of the various functions and constants that are part
+ * of MPI. Since the MPI standard provides an API and not an ABI, the
+ * contents of this file are tied specifically to Microsoft's MPI
+ * implementation, MS-MPI, for which this file can be used in 32- or
+ * 64-bit Windows environments without any additional customization,
+ * built with the provided Visual Studio projects.
+ *
+ * For all other environments and MPI implementations, the data types
+ * and constants must be changed to suit the environment. These
+ * changes occur in three places. First, each of the MPI.NET source
+ * files contains a block of MPI data type definitions that are
+ * controlled by various preprocessor defines (e.g.,
+ * MPI_HANDLES_ARE_POINTERS); the configure script will select the
+ * appropriate set of defines when it runs. Second, when using the
+ * configure script, this file will be translated at build time into
+ * CustomUnsafe.cs by Unsafe.pl. This Perl script parses mpi.h, then
+ * updates the constants in this file to reflect the constants
+ * provided by the MPI implementation. This process may also involve
+ * creating a small C shared library that helps map some additional
+ * constants into C# (see the generated cbridge.c and its library,
+ * libmpinet). Finally, the configure script generates a file
+ * MPI.dll.config, which is a Mono-specific file that remaps some of
+ * the DLL and MPI names to platform-specific versions. This file is
+ * also generated by the configure script.
+ */
+using System;
+using System.Runtime.InteropServices;
+
+namespace MPI
+{
+ // MPI data type definitions
+#if MPI_HANDLES_ARE_POINTERS
+ using MPI_Aint = IntPtr;
+ using MPI_Comm = IntPtr;
+ using MPI_Datatype = IntPtr;
+ using MPI_Errhandler = IntPtr;
+ using MPI_File = IntPtr;
+ using MPI_Group = IntPtr;
+ using MPI_Info = IntPtr;
+ using MPI_Op = IntPtr;
+ using MPI_Request = IntPtr;
+ using MPI_User_function = IntPtr;
+ using MPI_Win = IntPtr;
+#else
+ using MPI_Aint = IntPtr;
+ using MPI_Comm = Int32;
+ using MPI_Datatype = Int32;
+ using MPI_Errhandler = Int32;
+ using MPI_File = IntPtr;
+ using MPI_Group = Int32;
+ using MPI_Info = Int32;
+ using MPI_Op = Int32;
+ using MPI_Request = Int32;
+ using MPI_User_function = IntPtr;
+ using MPI_Win = Int32;
+#endif
+
+ /// <summary>
+ /// Direct, low-level interface to the system MPI library.
+ /// </summary>
+ ///
+ /// <remarks>
+ /// This low-level interface provides direct access to the unmanaged
+ /// MPI library provided by the system. It is by nature unsafe, and
+ /// should only be used by programmers experienced both in the use
+ /// of MPI from lower-level languages (e.g., C, Fortran) and with an
+ /// understanding of the interaction between managed and unmanaged
+ /// code, especially those issues that pertain to memory
+ /// pinning/unpinning.
+ ///
+ /// <para>A second use of the Unsafe class is purely for documentation reasons. Each member of the
+ /// Unsafe class corresponds to an entity in the standard, C MPI, and the documentation of each member
+ /// of Unsafe will point to the corresponding functionality within MPI.NET. Programmers already familiar
+ /// with the Message Passing Interface in C can use the Unsafe class as a reference to better understand
+ /// how to apply their knowledge of C MPI to MPI.NET programs.</para>
+ /// </remarks>
+ public unsafe class Unsafe
+ {
+
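+        // Illustrative usage sketch (not part of the MPI.NET wrappers themselves): querying the
+        // rank and size of the calling process directly through this class. It assumes MPI has
+        // already been initialized and that a return value of 0 denotes success.
+        //
+        //     int rank, size;
+        //     if (Unsafe.MPI_Comm_rank(Unsafe.MPI_COMM_WORLD, out rank) != 0 ||
+        //         Unsafe.MPI_Comm_size(Unsafe.MPI_COMM_WORLD, out size) != 0)
+        //     {
+        //         /* handle the error */
+        //     }
+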
+ // I did not manage to modify the Unsafe.pl script to cater for replacing OPAL_* constants. I resort to the following to get by:
+ // taken from /usr/lib/openmpi/include/mpi.h in the Debian openmpi-dev package
+ public const int OPAL_MAX_PROCESSOR_NAME = 256;
+ public const int OPAL_MAX_ERROR_STRING = 256;
+
+#if MPICH2
+ private const string MPI_DLL = "mpich2.dll";
+#else
+ private const string MPI_DLL = "msmpi.dll";
+#endif
+
+ /// <summary>
+ /// Low-level representation of the status of an MPI communication operation.
+ /// </summary>
+ /// <remarks>
+ /// This structure is used internally by MPI.NET, and most users will instead see the MPI.NET version
+ /// of the message-status class, <see cref="Status"/>. Unless you are interacting directly with the
+ /// low-level MPI interface (which is not typically recommended), use <see cref="Status"/> instead.
+ /// </remarks>
+ [StructLayout(LayoutKind.Sequential)]
+ public struct MPI_Status
+ {
+ public int MPI_SOURCE;
+ public int MPI_TAG;
+ public int MPI_ERROR;
+ internal int _cancelled;
+ internal UIntPtr _ucount;
+ }
+
+ #region Predefined communicators
+ /// <summary>
+ /// Predefined communicator containing all of the MPI processes. See <see cref="Communicator.world"/>.
+ /// </summary>
+ public static readonly MPI_Comm MPI_COMM_WORLD = mpinet_MPI_COMM_WORLD();
+
+ /// <summary>
+ /// Predefined communicator containing only the calling process. See <see cref="Communicator.self"/>.
+ /// </summary>
+ public static readonly MPI_Comm MPI_COMM_SELF = mpinet_MPI_COMM_SELF();
+
+ /// <summary>
+ /// Predefined communicator representing "no communicator". In the higher-level interface, this
+ /// is represented by a <c>null</c> <see cref="Communicator"/> object.
+ /// </summary>
+ public static readonly MPI_Comm MPI_COMM_NULL = mpinet_MPI_COMM_NULL();
+ #endregion
+
+ #region Built-in data types
+ /// <summary>
+ /// A single character. There is no equivalent to this type in C# or .NET.
+ /// </summary>
+ public static readonly MPI_Datatype MPI_CHAR = mpinet_MPI_CHAR();
+
+ /// <summary>
+ /// A single, signed character. This is equivalent to the <c>sbyte</c> type
+ /// in C# and the <c>System.SByte</c> type in .NET.
+ /// </summary>
+ public static readonly MPI_Datatype MPI_SIGNED_CHAR = mpinet_MPI_SIGNED_CHAR();
+
+ /// <summary>
+ /// A single, unsigned character. There is no equivalent to this type in C# or .NET.
+ /// </summary>
+ public static readonly MPI_Datatype MPI_UNSIGNED_CHAR = mpinet_MPI_UNSIGNED_CHAR();
+
+ /// <summary>
+ /// A single byte. This is equivalent to the <c>byte</c> type
+ /// in C# and the <c>System.Byte</c> type in .NET.
+ /// </summary>
+ public static readonly MPI_Datatype MPI_BYTE = mpinet_MPI_BYTE();
+
+ /// <summary>
+ /// A single, wide character. The equivalent is <c>char</c> in C# and <c>System.Char</c> in .NET.
+ /// </summary>
+ public static readonly MPI_Datatype MPI_WCHAR = mpinet_MPI_WCHAR();
+
+ /// <summary>
+ /// A signed short integer. This is equivalent to the <c>short</c> type in C# and
+ /// <c>System.Int16</c> in .NET.
+ /// </summary>
+ public static readonly MPI_Datatype MPI_SHORT = mpinet_MPI_SHORT();
+
+ /// <summary>
+ /// An unsigned short integer. This is equivalent to the <c>ushort</c> type in C# and
+ /// <c>System.UInt16</c> in .NET.
+ /// </summary>
+ public static readonly MPI_Datatype MPI_UNSIGNED_SHORT = mpinet_MPI_UNSIGNED_SHORT();
+
+ /// <summary>
+ /// A signed integer. This is equivalent to the <c>int</c> type in C# and
+ /// <c>System.Int32</c> in .NET.
+ /// </summary>
+ public static readonly MPI_Datatype MPI_INT = mpinet_MPI_INT();
+
+ /// <summary>
+ /// An unsigned integer. This is equivalent to the <c>uint</c> type in C# and
+ /// <c>System.UInt32</c> in .NET.
+ /// </summary>
+ public static readonly MPI_Datatype MPI_UNSIGNED = mpinet_MPI_UNSIGNED();
+
+ /// <summary>
+ /// A long signed integer. There is no equivalent in C# or .NET, because the 64-bit
+ /// integer in C# and .NET is mapped to <see cref="MPI_LONG_LONG_INT"/>.
+ /// </summary>
+ public static readonly MPI_Datatype MPI_LONG = mpinet_MPI_LONG();
+
+ /// <summary>
+ /// A long unsigned integer. There is no equivalent in C# or .NET, because the 64-bit
+ /// unsigned integer in C# and .NET is mapped to <see cref="MPI_UNSIGNED_LONG_LONG"/>.
+ /// </summary>
+ public static readonly MPI_Datatype MPI_UNSIGNED_LONG = mpinet_MPI_UNSIGNED_LONG();
+
+ /// <summary>
+ /// A single-precision floating-point value. The equivalent is <c>float</c> in C#
+ /// and <c>System.Single</c> in .NET.
+ /// </summary>
+ public static readonly MPI_Datatype MPI_FLOAT = mpinet_MPI_FLOAT();
+
+ /// <summary>
+ /// A double-precision floating-point value. The equivalent is <c>double</c> in C#
+ /// and <c>System.Double</c> in .NET.
+ /// </summary>
+ public static readonly MPI_Datatype MPI_DOUBLE = mpinet_MPI_DOUBLE();
+
+ /// <summary>
+ /// An extended-precision floating-point value. There is no equivalent in C# or .NET.
+ /// </summary>
+ public static readonly MPI_Datatype MPI_LONG_DOUBLE = mpinet_MPI_LONG_DOUBLE();
+
+ /// <summary>
+ /// A long long signed integer. The equivalent is <c>long</c> in C# and
+ /// <c>System.Int64</c> in .NET. This is a synonym for <see cref="MPI_LONG_LONG"/>.
+ /// </summary>
+ public static readonly MPI_Datatype MPI_LONG_LONG_INT = mpinet_MPI_LONG_LONG_INT();
+
+ /// <summary>
+ /// A long long unsigned integer. The equivalent is <c>ulong</c> in C# and
+ /// <c>System.UInt64</c> in .NET.
+ /// </summary>
+ public static readonly MPI_Datatype MPI_UNSIGNED_LONG_LONG = mpinet_MPI_UNSIGNED_LONG_LONG();
+
+ /// <summary>
+ /// A long long signed integer. The equivalent is <c>long</c> in C# and
+ /// <c>System.Int64</c> in .NET. This is a synonym for <see cref="MPI_LONG_LONG_INT"/>.
+ /// </summary>
+ public static readonly MPI_Datatype MPI_LONG_LONG = mpinet_MPI_LONG_LONG();
+
+ /// <summary>
+ /// A special data type used to indicate data that has been packed with <see cref="MPI_Pack"/>.
+ /// This type is only used by the lowest-level MPI operations. The .NET equivalent is the
+ /// <see cref="DatatypeCache.Packed"/> type.
+ /// </summary>
+ public static readonly MPI_Datatype MPI_PACKED = mpinet_MPI_PACKED();
+
+ /// <summary>
+ /// A special datatype value that indicates "no datatype".
+ /// </summary>
+ public static readonly MPI_Datatype MPI_DATATYPE_NULL = mpinet_MPI_DATATYPE_NULL();
+ #endregion
+
+ #region Comparison constants
+ /// <summary>
+ /// Constant used in comparisons of MPI objects to denote that two objects are identical.
+ /// See <see cref="Comparison.Identical"/>.
+ /// </summary>
+ public const int MPI_IDENT = 0;
+
+ /// <summary>
+ /// Constant used in comparisons of MPI objects to denote that two objects are congruent,
+ /// meaning that the objects act the same way but are not identical.
+ /// See <see cref="Comparison.Congruent"/>.
+ /// </summary>
+ public const int MPI_CONGRUENT = 1;
+
+ /// <summary>
+ /// Constant used in comparisons of MPI objects to denote that two objects are similar,
+ /// but assign different ranks to each of the processes.
+ /// See <see cref="Comparison.Similar"/>.
+ /// </summary>
+ public const int MPI_SIMILAR = 2;
+
+ /// <summary>
+ /// Constant used in comparisons of MPI objects to denote that two objects are completely
+ /// different.
+ /// See <see cref="Comparison.Unequal"/>.
+ /// </summary>
+ public const int MPI_UNEQUAL = 3;
+ #endregion
+
+ /// <summary>
+ /// A special marker used for the "buf" parameter to point-to-point operations
+ /// and some collectives that indicates that the derived datatype contains absolute
+ /// (rather than relative) addresses. The use of <c>MPI_BOTTOM</c> is not recommended.
+ /// This facility is unused in C# and .NET.
+ /// </summary>
+ public static unsafe IntPtr MPI_BOTTOM = new IntPtr(0);
+
+ /// <summary>
+ /// Special value for the source or dest argument to any communication operation,
+ /// which indicates that the communication is a no-op. Not supported in MPI.NET.
+ /// </summary>
+ public const int MPI_PROC_NULL = -2 ;
+
+ /// <summary>
+ /// Special value used for Intercommunicator collective operations which indicates the root
+ /// process for a collective operation.
+ /// </summary>
+ public const int MPI_ROOT = -4 ;
+
+ /// <summary>
+ /// Constant used to indicate that the <c>MPI_Status</c> argument of an MPI
+ /// operation will be ignored.
+ /// </summary>
+ public static readonly MPI_Status* MPI_STATUS_IGNORE = ((MPI_Status *) 0);
+
+ /// <summary>
+ /// Constant used to indicate that the array of <c>MPI_Status</c> arguments
+ /// to an MPI operation will be ignored.
+ /// </summary>
+ public static readonly MPI_Status* MPI_STATUSES_IGNORE = ((MPI_Status *) 0);
+
+ /// <summary>
+ /// An empty group containing no processes. See <see cref="Group.empty"/>.
+ /// </summary>
+ public static readonly MPI_Group MPI_GROUP_EMPTY = mpinet_MPI_GROUP_EMPTY();
+
+ /// <summary>
+ /// A constant used to indicate the "null" group of processes. Corresponds to a null <see cref="Group"/>.
+ /// </summary>
+ public static readonly MPI_Group MPI_GROUP_NULL = mpinet_MPI_GROUP_NULL();
+
+ /// <summary>
+ /// A special info key used to indicate that no extra information is being
+ /// passed into a routine.
+ /// </summary>
+ public static readonly MPI_Info MPI_INFO_NULL = mpinet_MPI_INFO_NULL();
+
+ /// <summary>
+ /// "Undefined" value used to identify when a rank is not a part of a group.
+ /// See <see cref="Group.NoProcess"/>.
+ /// </summary>
+ public const int MPI_UNDEFINED = -32766 ;
+
+ /// <summary>
+ /// A constant used to indicate whether a communicator has a Cartesian topology.
+ /// </summary>
+ public const int MPI_CART = 1 ;
+
+ /// <summary>
+ /// A constant used to indicate whether a communicator has a Graph topology.
+ /// </summary>
+ public const int MPI_GRAPH = 2 ;
+
+ #region Datatype constructors
+ /// <summary>
+ /// Creates a new datatype from a contiguous block of values
+ /// of the same type. Not used by MPI.NET.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Type_contiguous(int count, MPI_Datatype oldtype, out MPI_Datatype newtype);
+
+ /// <summary>
+ /// Creates a new datatype from a strided block of values of
+ /// the same type. Not used by MPI.NET.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Type_vector(int count, int blocklength, int stride, MPI_Datatype oldtype, out MPI_Datatype newtype);
+
+ /// <summary>
+ /// Creates a new datatype from a strided block of values of
+ /// the same type. Not used by MPI.NET.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Type_hvector(int count, int blocklength, MPI_Aint stride, MPI_Datatype oldtype, out MPI_Datatype newtype);
+
+ /// <summary>
+ /// Creates a new datatype from discontiguous blocks of values
+ /// of the same type. Not used by MPI.NET.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Type_indexed(int count, int[] array_of_blocklengths, int[] array_of_displacements, MPI_Datatype oldtype, out MPI_Datatype newtype);
+
+ /// <summary>
+ /// Creates a new datatype from discontiguous blocks of values
+ /// of the same type. Not used by MPI.NET.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Type_hindexed(int count, int[] array_of_blocklengths, MPI_Aint[] array_of_displacements, MPI_Datatype oldtype, out MPI_Datatype newtype);
+
+ /// <summary>
+ /// Creates a new datatype from a structure containing
+ /// discontiguous blocks of different types.
+ /// This is the most general type constructor, and is used by
+ /// the <see cref="DatatypeCache"/> to
+ /// create MPI datatypes from .NET value types.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Type_struct(int count, int[] array_of_blocklengths, MPI_Aint[] array_of_displacements, MPI_Datatype[] array_of_types, out MPI_Datatype newtype);
+ #endregion
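+
+        // A hedged sketch of how MPI_Type_struct and MPI_Type_commit fit together: building a
+        // two-member datatype (an int at displacement 0, a double at an assumed displacement of 8).
+        // Real code should derive the displacements from Marshal.OffsetOf or MPI_Address rather
+        // than hard-coding them.
+        //
+        //     int[] blockLengths = { 1, 1 };
+        //     MPI_Aint[] displacements = { new IntPtr(0), new IntPtr(8) };
+        //     MPI_Datatype[] types = { Unsafe.MPI_INT, Unsafe.MPI_DOUBLE };
+        //     MPI_Datatype pairType;
+        //     Unsafe.MPI_Type_struct(2, blockLengths, displacements, types, out pairType);
+        //     Unsafe.MPI_Type_commit(ref pairType);
+        //     // ... use pairType in communication calls ...
+        //     Unsafe.MPI_Type_free(ref pairType);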
+
+ #region Pack and unpack
+ /// <summary>
+ /// Packs (serializes) data into a byte buffer. This serialized representation can be transmitted via MPI
+ /// with the datatype <see cref="MPI_PACKED"/> and unpacked with <see cref="MPI_Unpack"/>. Serialization
+ /// in MPI.NET is automatic, so this routine is very rarely used.
+ /// </summary>
+ /// <param name="inbuf">
+ /// Pointer to the input buffer, containing <paramref name="incount"/> values with the MPI datatype
+ /// <paramref name="datatype"/>.
+ /// </param>
+ /// <param name="incount">The number of values in <paramref name="inbuf"/>.</param>
+ /// <param name="datatype">The MPI datatype of the values in <paramref name="inbuf"/>.</param>
+ /// <param name="outbuf">
+ /// A pointer to the buffer of bytes into which we will be packing the serialized representation
+ /// of the data in <paramref name="inbuf"/>.
+ /// </param>
+ /// <param name="outsize">The size (in bytes) of <paramref name="outbuf"/>.</param>
+ /// <param name="position">
+ /// A pointer to the position (in bytes) into <paramref name="outbuf"/> where the packed data
+ /// will be written. This position will be updated to the next available position in the buffer
+ /// once the serialized data has been written.
+ /// </param>
+ /// <param name="comm">The communicator over which the packed data will be sent.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Pack(IntPtr inbuf, int incount, MPI_Datatype datatype, IntPtr outbuf, int outsize, ref int position, MPI_Comm comm);
+
+ /// <summary>
+ /// Unpacks (deserializes) data from a byte buffer. The serialized representation will have been
+ /// packed by <see cref="MPI_Pack"/> and (possibly) transmitted via MPI using the datatype
+ /// <see cref="MPI_PACKED"/>. Serialization in MPI.NET is automatic, so this routine is very
+ /// rarely used.
+ /// </summary>
+ /// <param name="inbuf">A pointer to the buffer of bytes that will be unpacked.</param>
+ /// <param name="insize">The number of bytes in <paramref name="inbuf"/>.</param>
+ /// <param name="position">
+ /// A pointer to the position (in bytes) inside the buffer from which data will be unpacked.
+ /// This position will be updated to reflect the position of the next value in the buffer
+ /// after the data has been unpacked.
+ /// </param>
+ /// <param name="outbuf">
+ /// A pointer to the buffer where the unpacked data will be written. This buffer contains
+ /// values whose MPI datatype is <paramref name="datatype"/>.
+ /// </param>
+ /// <param name="outcount">Number of values that will be stored into <paramref name="outbuf"/>.</param>
+ /// <param name="datatype">The type of data in <paramref name="outbuf"/>.</param>
+ /// <param name="comm">
+ /// The communicator for which the packing was performed (and which may have been used to transmit the
+ /// serialized data).
+ /// </param>
+ /// <returns></returns>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Unpack(IntPtr inbuf, int insize, ref int position, IntPtr outbuf, int outcount, MPI_Datatype datatype, MPI_Comm comm);
+
+ /// <summary>
+ /// Determine the maximum amount of space that packing <paramref name="incount"/> values with the
+ /// MPI datatype <paramref name="datatype"/> will require. This routine is useful for allocating
+ /// buffer space when packing data with <see cref="MPI_Pack"/>.
+ /// </summary>
+ /// <param name="incount">
+ /// The number of elements of type <paramref name="datatype"/> to be packed.
+ /// </param>
+ /// <param name="datatype">The type of data to be packed.</param>
+ /// <param name="comm">The communicator over which the packed data would be transmitted.</param>
+ /// <param name="size">
+ /// A pointer to an integer. This integer will receive the maximum number of bytes required to
+ /// pack the data. However, it is possible that when calling <see cref="MPI_Pack"/>, fewer bytes
+ /// will be required to pack the actual data.
+ /// </param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Pack_size(int incount, MPI_Datatype datatype, MPI_Comm comm, out int size);
+ #endregion
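+
+        // A minimal sketch of the size/pack round trip described above, assuming an initialized
+        // environment; GCHandle pinning keeps the managed arrays fixed while the unmanaged MPI
+        // library reads and writes them.
+        //
+        //     int[] values = { 1, 2, 3 };
+        //     int maxBytes;
+        //     Unsafe.MPI_Pack_size(values.Length, Unsafe.MPI_INT, Unsafe.MPI_COMM_WORLD, out maxBytes);
+        //     byte[] packed = new byte[maxBytes];
+        //     GCHandle inHandle = GCHandle.Alloc(values, GCHandleType.Pinned);
+        //     GCHandle outHandle = GCHandle.Alloc(packed, GCHandleType.Pinned);
+        //     int position = 0;
+        //     Unsafe.MPI_Pack(inHandle.AddrOfPinnedObject(), values.Length, Unsafe.MPI_INT,
+        //                     outHandle.AddrOfPinnedObject(), maxBytes, ref position,
+        //                     Unsafe.MPI_COMM_WORLD);
+        //     outHandle.Free();
+        //     inHandle.Free();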
+
+ #region Address and extent functions
+ /// <summary>
+ /// Converts a pointer into an address for use with MPI. In many cases, this operation is simply a
+ /// cast from the pointer's value to an integer.
+ /// </summary>
+ /// <param name="location">A pointer to the memory whose address will be returned.</param>
+ /// <param name="address">
+ /// A pointer to the integer address value that will be replaced with the address pointed to
+ /// by <paramref name="location"/>.
+ /// </param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Address(IntPtr location, out MPI_Aint address);
+
+ /// <summary>
+ /// Determines the extent of the datatype.
+ /// </summary>
+ /// <param name="datatype">The datatype to query.</param>
+ /// <param name="extent">Receives the extent of <paramref name="datatype"/>.</param>
+ /// <returns></returns>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Type_extent(MPI_Datatype datatype, out MPI_Aint extent);
+
+ /// <summary>
+ /// Computes the size of a datatype.
+ /// </summary>
+ /// <param name="datatype">The MPI datatype.</param>
+ /// <param name="size">
+ /// Pointer to an integer, which will be assigned the size of the data type (in bytes).
+ /// </param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Type_size(MPI_Datatype datatype, out int size);
+ #endregion
+
+ #region Commit and free
+ /// <summary>
+ /// Completes creation of an MPI datatype. This routine will be called
+ /// automatically when the MPI datatype is being generated via reflection
+ /// in <see cref="DatatypeCache.GetDatatype"/>.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Type_commit(ref MPI_Datatype datatype);
+
+ /// <summary>
+ /// Frees an MPI datatype.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Type_free(ref MPI_Datatype datatype);
+ #endregion
+
+ #region Group accessors
+ /// <summary>
+ /// Determine the number of processes in a group. See <see cref="Group.Size"/>.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Group_size(MPI_Group group, out int size);
+
+ /// <summary>
+ /// Determine the rank of the calling process in a group. See <see cref="Group.Rank"/>.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Group_rank(MPI_Group group, out int rank);
+
+ /// <summary>
+ /// Translate the ranks of processes in one group into those processes' corresponding
+ /// ranks in another group. See <see cref="Group.TranslateRanks"/>.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Group_translate_ranks (MPI_Group group1, int n, int[] ranks1, MPI_Group group2, int[] ranks2);
+
+ /// <summary>
+ /// Compare two groups. See <see cref="Group.Compare"/>.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Group_compare(MPI_Group group1, MPI_Group group2, out int result);
+ #endregion
+
+ #region Group constructors
+ /// <summary>
+ /// Retrieve the group associated with a communicator. See <see cref="Communicator.Group"/>.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Comm_group(MPI_Comm comm, out MPI_Group group);
+
+ /// <summary>
+ /// Create a group from the union of two groups. See <see cref="Group.operator|"/>.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Group_union(MPI_Group group1, MPI_Group group2, out MPI_Group newgroup);
+
+ /// <summary>
+ /// Create a group from the intersection of two groups. See <see cref="Group.operator&amp;"/>.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Group_intersection(MPI_Group group1, MPI_Group group2, out MPI_Group newgroup);
+
+ /// <summary>
+ /// Create a group from the difference of two groups. See <see cref="Group.operator-"/>.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Group_difference(MPI_Group group1, MPI_Group group2, out MPI_Group newgroup);
+
+ /// <summary>
+ /// Create a subgroup containing the processes with specific ranks in an existing group.
+ /// See <see cref="Group.IncludeOnly"/>.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Group_incl(MPI_Group group, int n, int[] ranks, out MPI_Group newgroup);
+
+ /// <summary>
+ /// Create a subgroup containing all processes in an existing group except those specified.
+ /// See <see cref="Group.Exclude"/>.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Group_excl(MPI_Group group, int n, int[] ranks, out MPI_Group newgroup);
+
+ /// <summary>
+ /// Create a subgroup of processes in a group, based on a set of (first, last, stride) rank triples.
+ /// Note: this precise functionality is not exposed directly in the normal MPI layer; however, the
+ /// same semantics can be attained with <see cref="Group.IncludeOnly"/>.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Group_range_incl(MPI_Group group, int n, int[] ranges, out MPI_Group newgroup);
+
+ /// <summary>
+ /// Create a subgroup of processes containing all of the processes in the source group except those described
+ /// by one of the provided (first, last, stride) rank triples.
+ /// Note: this precise functionality is not exposed directly in the normal MPI layer; however, the
+ /// same semantics can be attained with <see cref="Group.Exclude"/>.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Group_range_excl(MPI_Group group, int n, int[] ranges, out MPI_Group newgroup);
+ #endregion
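+
+        // Hedged sketch: carving the first two ranks of the world group into a subgroup with
+        // MPI_Group_incl, the low-level analogue of Group.IncludeOnly. Error handling is omitted.
+        //
+        //     MPI_Group worldGroup, pairGroup;
+        //     Unsafe.MPI_Comm_group(Unsafe.MPI_COMM_WORLD, out worldGroup);
+        //     Unsafe.MPI_Group_incl(worldGroup, 2, new int[] { 0, 1 }, out pairGroup);
+        //     // ... use pairGroup, e.g. with MPI_Comm_create ...
+        //     Unsafe.MPI_Group_free(ref pairGroup);
+        //     Unsafe.MPI_Group_free(ref worldGroup);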
+
+ #region Group destructors
+ /// <summary>
+ /// Frees a group. This routine will be invoked automatically by <see cref="Group.Dispose"/>
+ /// or the finalizer for <see cref="Group"/>.
+ /// </summary>
+ /// <param name="group"></param>
+ /// <returns></returns>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Group_free(ref MPI_Group group);
+ #endregion
+
+ #region Communicator accessors
+ /// <summary>
+ /// Determines the number of processes in the communicator. See <see cref="Communicator.Size"/>.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Comm_size(MPI_Comm comm, out int size);
+
+ /// <summary>
+ /// Determines the rank of the calling process in the communicator. See <see cref="Communicator.Rank"/>.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Comm_rank(MPI_Comm comm, out int rank);
+
+ /// <summary>
+ /// Compare two communicators. See <see cref="Communicator.Compare"/>.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Comm_compare(MPI_Comm comm1, MPI_Comm comm2, out int result);
+ #endregion
+
+ #region Communicator constructors
+ /// <summary>
+ /// Duplicates a communicator, creating a new communicator with the same processes and ranks
+ /// as the original. See <see cref="Communicator.Clone"/>.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Comm_dup(MPI_Comm comm, out MPI_Comm comm_out);
+
+ /// <summary>
+ /// Creates a new communicator from a subgroup of the processes in an existing communicator.
+ /// See <see cref="Communicator.Create"/>.
+ /// </summary>
+ /// <param name="comm">
+ /// The existing communicator, from which the new communicator will be created.
+ /// </param>
+ /// <param name="group">
+ /// A group specifying zero or more processes in the communicator <paramref name="comm"/>.
+ /// </param>
+ /// <param name="newcomm">
+ /// A pointer that points to the newly-created communicator, which will contain all of
+ /// the processes in the group. The order of the processes in this new communicator will
+ /// be the same as the order of those processes in the original communicator.
+ /// </param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Comm_create(MPI_Comm comm, MPI_Group group, out MPI_Comm newcomm);
+
+ /// <summary>
+ /// Splits a communicator into several new communicators, based on the colors provided.
+ /// See <see cref="Communicator.Split"/>.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Comm_split(MPI_Comm comm, int color, int key, out MPI_Comm newcomm);
+
+ /// <summary>
+ /// Creates a new Cartesian communicator from another
+ /// communicator. See <see
+ /// cref="CartesianCommunicator(MPI.Intracommunicator, int, int[], bool[], bool)"/>.
+ /// </summary>
+ /// <param name="comm">
+ /// Existing communicator from which the new communicator will be created.
+ /// </param>
+ /// <param name="ndims">
+ /// The number of dimensions for the Cartesian communicator's grid.
+ /// </param>
+ /// <param name="dims">
+ /// Array to specify sizes in each dimension.
+ /// </param>
+ /// <param name="periods">
+ /// Array of logical values (0s and 1s) indicating whether the grid should be periodic in each dimension
+ /// (i.e., whether the last and first processes in each dimension are directly connected).
+ /// </param>
+ /// <param name="reorder">
+ /// Logical value indicating whether ranks may be reordered or not.
+ /// </param>
+ /// <param name="newcomm">
+ /// Output parameter for new communicator.
+ /// </param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Cart_create(MPI_Comm comm, int ndims, int* dims, int* periods, int reorder, MPI_Comm* newcomm);
+
+ /// <summary>
+ /// Create a lesser dimensional grid from an existing
+ /// Cartesian communicator. See <see
+ /// cref="CartesianCommunicator.Subgrid"/>.
+ /// </summary>
+ /// <param name="comm">
+ /// The existing communicator.
+ /// </param>
+ /// <param name="remain_dims">
+ /// A logical array indicating whether a dimension in the existing communicator should be kept (1) or dropped (0).
+ /// </param>
+ /// <param name="newcomm">
+ /// The new communicator.
+ /// </param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Cart_sub(MPI_Comm comm, int* remain_dims, MPI_Comm* newcomm);
+
+ /// <summary>
+ /// Create a topological communicator with a graph topology,
+ /// where any rank can be connected to any other rank. See
+ /// <see
+ /// cref="GraphCommunicator(MPI.Intracommunicator, int[][], bool)"/>.
+ /// </summary>
+ /// <param name="comm">
+ /// An existing Intracommunicator to use to create the new communicator.
+ /// </param>
+ /// <param name="nnodes">
+ /// The number of nodes the graph will have.
+ /// </param>
+ /// <param name="index">
+ /// An array indicating the starting index in <paramref name="edges"/> of the edges for each vertex.
+ /// </param>
+ /// <param name="edges">
+ /// An array of edge targets, indexed by <paramref name="index"/>.
+ /// </param>
+ /// <param name="reorder">
+ /// Logical indicating whether ranks can be reordered.
+ /// </param>
+ /// <param name="newComm">
+ /// The new communicator.
+ /// </param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Graph_create(MPI_Comm comm, int nnodes, int* index, int* edges, int reorder, MPI_Comm *newComm);
+
+ #endregion
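+
+        // Hedged sketch: creating a 2x2 Cartesian communicator from MPI_COMM_WORLD (assumes at
+        // least four processes). The fixed statement pins the managed arrays so their addresses
+        // can be handed to the unmanaged MPI_Cart_create.
+        //
+        //     int[] dims = { 2, 2 };
+        //     int[] periods = { 1, 0 };   // periodic in the first dimension only
+        //     MPI_Comm cartComm;
+        //     fixed (int* dimsPtr = dims, periodsPtr = periods)
+        //     {
+        //         Unsafe.MPI_Cart_create(Unsafe.MPI_COMM_WORLD, 2, dimsPtr, periodsPtr,
+        //                                1 /* reorder */, &cartComm);
+        //     }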
+
+ #region Communicator destructors
+ /// <summary>
+ /// Frees a communicator. This routine will be invoked automatically by <see cref="Communicator.Dispose"/>
+ /// or the finalizer for <see cref="Communicator"/>.
+ /// </summary>
+ /// <param name="comm">The communicator to free.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Comm_free(ref MPI_Comm comm);
+ #endregion
+
+ #region Communicator attributes
+ /// <summary>
+ /// Delegate describing a low-level MPI function used to copy attribute values from a communicator
+ /// when the communicator is being duplicated.
+ /// </summary>
+ /// <remarks>
+ /// This function will
+ /// be defined automatically by MPI.NET when a new <see cref="Attribute"/> is created, and will vary
+ /// depending on the type of the attribute and on the requested <see cref="AttributeDuplication"/>.
+ /// </remarks>
+ /// <param name="comm">The communicator being duplicated.</param>
+ /// <param name="keyval">The attribute's key value.</param>
+ /// <param name="extra_state">
+ /// The extra state associated with the attribute, provided by the user
+ /// in <see cref="MPI_Keyval_create"/>.
+ /// </param>
+ /// <param name="attribute_val_in">
+ /// The attribute value in the communicator to be duplicated.
+ /// </param>
+ /// <param name="attribute_val_out">
+ /// A pointer to the attribute value that will be copied into the
+ /// new communicator. The attribute value will be no larger than an IntPtr.
+ /// The user only needs to set this value if the attribute will be copied,
+ /// as determined by flag.
+ /// </param>
+ /// <param name="flag">
+ /// Set this to a non-zero value to indicate that the attribute should
+ /// be copied.
+ /// </param>
+ public delegate int MPI_Copy_function(MPI_Comm comm, int keyval, IntPtr extra_state, IntPtr attribute_val_in,
+ IntPtr attribute_val_out, out int flag);
+
+ /// <summary>
+ /// Delegate describing a low-level MPI function that takes care of de-allocating
+ /// an attribute when it is deleted from a communicator (or the communicator itself
+ /// is freed).
+ /// </summary>
+ ///
+ /// <remarks>
+ /// Often used when the attribute's value is a pointer to some per-communicator
+ /// data, and the pointer needs to be freed. This function will be defined automatically
+ /// by MPI.NET when a new <see cref="Attribute"/> is created, and will vary
+ /// depending on the type of the attribute and on the requested <see cref="AttributeDuplication"/>.
+ /// </remarks>
+ /// <param name="comm">The communicator.</param>
+ /// <param name="keyval">The attribute being removed from the communicator.</param>
+ /// <param name="attribute_val">The value of this attribute in the communicator.</param>
+ /// <param name="extra_state">
+ /// The extra state provided by the user in <see cref="MPI_Keyval_create"/>.
+ /// </param>
+ public delegate int MPI_Delete_function(MPI_Comm comm, int keyval, IntPtr attribute_val, IntPtr extra_state);
+
+ /// <summary>
+ /// Special key value that indicates an invalid key.
+ /// </summary>
+ public const int MPI_KEYVAL_INVALID = -1 ;
+
+ /// <summary>
+ /// Special "null" copy function that indicates that an attribute should not be copied.
+ /// </summary>
+ public static readonly MPI_Copy_function MPI_NULL_COPY_FN = mpinet_MPI_NULL_COPY_FN();
+
+ /// <summary>
+ /// Special "null" deletion function that indicates that no delete function should
+ /// be called when an attribute is removed from a communicator.
+ /// </summary>
+ public static readonly MPI_Delete_function MPI_NULL_DELETE_FN = mpinet_MPI_NULL_DELETE_FN();
+
+ /// <summary>
+ /// Creates a new MPI attribute that can be attached to communicators. This routine is
+ /// automatically invoked by MPI.NET when a new <see cref="Attribute"/> is created.
+ /// See <see cref="MPI.Attribute.Create"/>
+ /// </summary>
+ /// <param name="copy_fn">
+ /// The function used to copy the attribute when a communicator is
+ /// duplicated.
+ /// </param>
+ /// <param name="delete_fn">
+ /// The function used to delete the attribute when it is removed
+ /// from a communicator.
+ /// </param>
+ /// <param name="keyval">
+ /// Will receive a new integer that identifies this attribute.
+ /// </param>
+ /// <param name="extra_state">
+ /// A user-defined pointer that includes extra information to be
+ /// passed to the copy and delete functions. This pointer can
+ /// be used to store more information about the attribute itself.
+ /// </param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static extern unsafe int MPI_Keyval_create(MPI_Copy_function copy_fn, MPI_Delete_function delete_fn, out int keyval, IntPtr extra_state);
+
+ /// <summary>
+ /// Frees an attribute with the given key value. The user must ensure that
+ /// this attribute has been deleted from all communicators before calling
+ /// this routine. This operation is performed by <see cref="Attribute.Dispose"/>.
+ /// </summary>
+ /// <param name="keyval">The attribute's key value.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static extern unsafe int MPI_Keyval_free(ref int keyval);
+
+ /// <summary>
+ /// Sets the value of an attribute on a communicator. Attributes in MPI.NET are
+ /// handled through the <see cref="AttributeSet"/> class, an instance of which is associated
+ /// with each communicator.
+ /// </summary>
+ /// <param name="comm">The communicator.</param>
+ /// <param name="keyval">The attribute to set.</param>
+ /// <param name="attribute_val">The new value to place into the communicator.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static extern unsafe int MPI_Attr_put(MPI_Comm comm, int keyval, IntPtr attribute_val);
+
+ /// <summary>
+ /// Retrieves the value of an attribute on a communicator. Attributes in MPI.NET are
+ /// handled through the <see cref="AttributeSet"/> class, an instance of which is associated
+ /// with each communicator.
+ /// </summary>
+ /// <param name="comm">The communicator.</param>
+ /// <param name="keyval">The attribute to get.</param>
+ /// <param name="attribute_val">
+ /// Will receive the value stored for this attribute on this
+ /// communicator, if one exists. The result is only valid
+ /// if <paramref name="flag"/> is non-zero.
+ /// </param>
+ /// <param name="flag">
+ /// Will receive a boolean value stating whether the attribute is stored
+ /// with this communicator.
+ /// </param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static extern unsafe int MPI_Attr_get(MPI_Comm comm, int keyval, IntPtr attribute_val, out int flag);
+
+ /// <summary>
+ /// Deletes an attribute stored on the communicator. Attributes in MPI.NET are
+ /// handled through the <see cref="AttributeSet"/> class, an instance of which is associated
+ /// with each communicator.
+ /// </summary>
+ /// <param name="comm">The communicator.</param>
+ /// <param name="keyval">The attribute to delete.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static extern unsafe int MPI_Attr_delete(MPI_Comm comm, int keyval);
+ #endregion
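+
+        // Hedged sketch of the attribute calls above: create a key with the predefined null
+        // copy/delete callbacks, cache a pointer-sized value on a communicator, then tear
+        // everything down again. MPI.NET's AttributeSet wraps this pattern.
+        //
+        //     int keyval;
+        //     Unsafe.MPI_Keyval_create(Unsafe.MPI_NULL_COPY_FN, Unsafe.MPI_NULL_DELETE_FN,
+        //                              out keyval, IntPtr.Zero);
+        //     Unsafe.MPI_Attr_put(Unsafe.MPI_COMM_WORLD, keyval, new IntPtr(42));
+        //     Unsafe.MPI_Attr_delete(Unsafe.MPI_COMM_WORLD, keyval);
+        //     Unsafe.MPI_Keyval_free(ref keyval);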
+
+ #region Intercommunicator accessors
+ /// <summary>
+ /// Determine whether a communicator is an intercommunicator. In MPI.NET, intercommunicators
+ /// will have type <see cref="MPI.Intercommunicator"/>.
+ /// </summary>
+ /// <param name="comm">The communicator.</param>
+ /// <param name="flag">
+ /// Pointer to a flag, which will be set to a non-zero value if <paramref name="comm"/>
+ /// is an intercommunicator.
+ /// </param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Comm_test_inter(MPI_Comm comm, out int flag);
+
+ /// <summary>
+ /// Determines the number of processes in the remote group of an intercommunicator.
+ /// See <see cref="Intercommunicator.RemoteSize"/>.
+ /// </summary>
+ /// <param name="comm">The intercommunicator.</param>
+ /// <param name="size">
+ /// Will receive the number of processes in the remote group of <paramref name="comm"/>.
+ /// </param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Comm_remote_size(MPI_Comm comm, out int size);
+
+ /// <summary>
+ /// Retrieves the remote group from an intercommunicator.
+ /// See <see cref="Intercommunicator.RemoteGroup"/>.
+ /// </summary>
+ /// <param name="comm">The intercommunicator.</param>
+ /// <param name="group">
+ /// Will receive the group containing all of the processes in the remote group
+ /// of <paramref name="comm"/>.
+ /// </param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Comm_remote_group(MPI_Comm comm, out MPI_Group group);
+
+ /// <summary>
+ /// Create a new intercommunicator from two disjoint intracommunicators.
+ /// See <see cref="Intercommunicator(Intracommunicator, int, Intracommunicator, int, int)"/>.
+ /// </summary>
+ /// <param name="local_comm">The local communicator.</param>
+ /// <param name="local_leader">The leader of the local communicator.</param>
+ /// <param name="bridge_comm">
+ /// Communicator that bridges the intercommunicators, allowing the leaders to communicate.
+ /// </param>
+ /// <param name="remote_leader">
+ /// The rank of the remote group's leader within <paramref name="bridge_comm"/>.
+ /// </param>
+ /// <param name="tag">Tag used for communication to create the intercommunicator.</param>
+ /// <param name="newintercomm">Will receive the new intercommunicator.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader, MPI_Comm bridge_comm, int remote_leader, int tag, out MPI_Comm newintercomm);
+
+ /// <summary>
+ /// Merge the two groups in an intercommunicator into a single intracommunicator.
+ /// See <see cref="MPI.Intercommunicator.Merge"/>
+ /// </summary>
+ /// <param name="intercomm">The intercommunicator.</param>
+ /// <param name="high">
+ /// Whether this group of processes has the higher ranks in the resulting intracommunicator.
+ /// </param>
+ /// <param name="newintracomm">The resulting intracommunicator.</param>
+ /// <returns></returns>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Intercomm_merge(MPI_Comm intercomm, int high, out MPI_Comm newintracomm);
+ #endregion
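+
+        // Hedged sketch: splitting MPI_COMM_WORLD by rank parity and bridging the two halves
+        // into an intercommunicator. World ranks 0 and 1 are assumed to exist and act as the
+        // group leaders; the tag value 99 is arbitrary.
+        //
+        //     int worldRank;
+        //     Unsafe.MPI_Comm_rank(Unsafe.MPI_COMM_WORLD, out worldRank);
+        //     MPI_Comm localComm, interComm;
+        //     Unsafe.MPI_Comm_split(Unsafe.MPI_COMM_WORLD, worldRank % 2, worldRank, out localComm);
+        //     Unsafe.MPI_Intercomm_create(localComm, 0, Unsafe.MPI_COMM_WORLD,
+        //                                 (worldRank % 2 == 0) ? 1 : 0, 99, out interComm);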
+
+ #region Cartesian communicator methods
+ /// <summary>
+ /// Gets the number of dimensions in the Cartesian communicator.
+ /// See <see cref="CartesianCommunicator.Dimensions"/>.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Cartdim_get(MPI_Comm comm, int* result);
+
+ /// <summary>
+ /// Retrieves the primary topological information on a Cartesian communicator: the number of dimensions,
+ /// the size in each dimension, the periodicity in each dimension. Also gives the coordinates of the
+ /// calling process. See <see cref="CartesianCommunicator.Dimensions"/>,
+ /// <see cref="CartesianCommunicator.Periodic"/>, and <see cref="CartesianCommunicator.Coordinates"/>.
+ /// </summary>
+ /// <param name="comm">
+ /// The communicator.
+ /// </param>
+ /// <param name="ndims">
+ /// The number of dimensions.
+ /// </param>
+ /// <param name="dims">
+ /// Output parameter for size of each dimension. Should be as long as <paramref name="ndims"/>.
+ /// </param>
+ /// <param name="periods">
+ /// Output parameter for periodicity in each dimension. 0 for false, 1 for true. Should be as long as <paramref name="ndims"/>.
+ /// </param>
+ /// <param name="coords">
+ /// Output parameter for coordinates of calling process. Should be as long as <paramref name="ndims"/>.
+ /// </param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Cart_get(MPI_Comm comm, int ndims, int* dims, int* periods, int* coords);
+
+ /// <summary>
+ /// Determines the rank of a process in the Cartesian communicator given its coordinates.
+ /// See <see cref="CartesianCommunicator.GetCartesianRank"/>.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Cart_rank(MPI_Comm comm, int* coords, int* rank);
+
+ /// <summary>
+ /// Determines the coordinates of a process given its rank in the Cartesian communicator.
+ /// See <see cref="CartesianCommunicator.GetCartesianCoordinates"/>.
+ /// </summary>
+ /// <param name="comm">
+ /// The communicator.
+ /// </param>
+ /// <param name="rank">
+ /// The rank of the process in the Cartesian communicator.
+ /// </param>
+ /// <param name="maxdims">
+ /// Length of <paramref name="coords"/>
+ /// </param>
+ /// <param name="coords">
+ /// Output parameter.
+ /// </param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Cart_coords(MPI_Comm comm, int rank, int maxdims, int* coords);
+
+ /// <summary>
+ /// Calculates the necessary source and destination ranks for shifting data over the
+ /// Cartesian communicator. See <see cref="CartesianCommunicator.Shift"/>.
+ /// </summary>
+ /// <param name="comm">
+ /// The communicator.
+ /// </param>
+ /// <param name="direction">
+ /// An integer specifying which of the dimensions along which to shift.
+ /// </param>
+ /// <param name="disp">
+ /// How far to shift (negative values mean "downward," positive values "upward").
+ /// </param>
+ /// <param name="rank_source">
+ /// Output parameter for rank to receive from.
+ /// </param>
+ /// <param name="rank_dest">
+ /// Output parameter for rank to send to.
+ /// </param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Cart_shift(MPI_Comm comm, int direction, int disp, int* rank_source, int* rank_dest);
+
+ /// <summary>
+ /// Returns a recommended configuration for a new Cartesian grid.
+ /// See <see cref="CartesianCommunicator.Map"/>.
+ /// </summary>
+ /// <param name="comm">
+ /// The existing communicator.
+ /// </param>
+ /// <param name="ndims">
+ /// The number of dimensions for the Cartesian grid.
+ /// </param>
+ /// <param name="dims">
+ /// An array of length <paramref name="ndims"/> indicating the size of the grid in each dimension.
+ /// </param>
+ /// <param name="periods">
+ /// A logical array of length <paramref name="ndims"/> indicating whether the grid is periodic in any given dimension.
+ /// </param>
+ /// <param name="newrank">
+ /// The new rank of the calling process.
+ /// </param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Cart_map(MPI_Comm comm, int ndims, int* dims, int* periods, out int newrank);
+
+ /// <summary>
+ /// Find out the communicator topology. In MPI.NET, one queries the communicator's topology by
+ /// looking at the type, e.g., <see cref="Intracommunicator"/>, <see cref="GraphCommunicator"/>, or
+ /// <see cref="CartesianCommunicator"/>.
+ /// </summary>
+ /// <param name="comm">
+ /// The communicator.
+ /// </param>
+ /// <param name="status">
+ /// Value indicating communicator topology; one of MPI_CART, MPI_GRAPH, or MPI_UNDEFINED (if the communicator
+ /// has no topology).
+ /// </param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Topo_test(MPI_Comm comm, out int status);
+
+ #endregion
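+
+        // Hedged sketch: computing shift partners along dimension 0 of a Cartesian communicator
+        // (cartComm is assumed to come from an earlier MPI_Cart_create call). Either rank may
+        // come back as MPI_PROC_NULL on a non-periodic boundary.
+        //
+        //     int source, dest;
+        //     Unsafe.MPI_Cart_shift(cartComm, 0 /* dimension */, 1 /* displacement */,
+        //                           &source, &dest);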
+
+ #region Graph communicator methods
+ /// <summary>
+ /// Retrieve the dimensions of a Graph communicator. See <see cref="GraphCommunicator.Edges"/>
+ /// and <see cref="GraphCommunicator.NumEdges"/>.
+ /// </summary>
+ /// <param name="comm">
+ /// The communicator.
+ /// </param>
+ /// <param name="nnodes">
+ /// Output parameter for the number of nodes in the graph.
+ /// </param>
+ /// <param name="nedges">
+ /// Output parameter for the number of edges in the graph.
+ /// </param>
+ /// <returns></returns>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Graphdims_get(MPI_Comm comm, out int nnodes, out int nedges);
+
+ /// <summary>
+ /// Retrieve the index and edges arrays used to create the graph communicator. See
+ /// <see cref="GraphCommunicator.Edges"/> and <see cref="GraphCommunicator.NumEdges"/>.
+ /// </summary>
+ /// <param name="comm">
+ /// The communicator.
+ /// </param>
+ /// <param name="maxindex">
+ /// The size of <paramref name="index"/>.
+ /// </param>
+ /// <param name="maxedges">
+ /// The size of <paramref name="edges"/>
+ /// </param>
+ /// <param name="index">
+ /// Output array in which to store the index array.
+ /// </param>
+ /// <param name="edges">
+ /// Output array in which to store the edges array.
+ /// </param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Graph_get(MPI_Comm comm, int maxindex, int maxedges, int* index, int* edges);
+
+ /// <summary>
+ /// Retrieve the number of neighbors of a node. See <see cref="GraphCommunicator.Neighbors"/>
+ /// and <see cref="GraphCommunicator.NeighborsOf"/>.
+ /// </summary>
+ /// <param name="comm">
+ /// The communicator.
+ /// </param>
+ /// <param name="rank">
+ /// The rank of the node of interest.
+ /// </param>
+ /// <param name="nneighbors">
+ /// Output parameter to store the number of neighbors.
+ /// </param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Graph_neighbors_count(MPI_Comm comm, int rank, out int nneighbors);
+
+
+ /// <summary>
+ /// Retrieve a list of the neighbors of a node. See <see cref="GraphCommunicator.Neighbors"/>
+ /// and <see cref="GraphCommunicator.NeighborsOf"/>.
+ /// </summary>
+ /// <param name="comm">
+ /// The communicator.
+ /// </param>
+ /// <param name="rank">
+ /// The rank of the node of interest.
+ /// </param>
+ /// <param name="maxneighbors">
+ /// The size of <paramref name="neighbors"/>.
+ /// </param>
+ /// <param name="neighbors">
+ /// Output array to store the list of neighbors.
+ /// </param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Graph_neighbors(MPI_Comm comm, int rank, int maxneighbors, int* neighbors);
+
+ /// <summary>
+ /// Returns a recommended configuration for a new Graph communicator.
+ /// See <see cref="GraphCommunicator.Map"/>.
+ /// </summary>
+ /// <param name="comm">
+ /// The existing communicator.
+ /// </param>
+ /// <param name="nnodes">
+ /// The number of nodes to assume.
+ /// </param>
+ /// <param name="index">
+ /// An index array to use (where the semantics are the same as for <see cref="MPI_Graph_create"/>).
+ /// </param>
+ /// <param name="edges">
+ /// An array of edges as for the constructor <see cref="MPI_Graph_create"/>.
+ /// </param>
+ /// <param name="newrank">
+ /// The new rank of the calling process.
+ /// </param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Graph_map(MPI_Comm comm, int nnodes, int* index, int* edges, out int newrank);
+ #endregion
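+
+        // Hedged sketch: listing the neighbors of the calling process in a graph communicator
+        // (graphComm and myRank are assumed to be defined elsewhere).
+        //
+        //     int neighborCount;
+        //     Unsafe.MPI_Graph_neighbors_count(graphComm, myRank, out neighborCount);
+        //     int[] neighbors = new int[neighborCount];
+        //     fixed (int* neighborsPtr = neighbors)
+        //         Unsafe.MPI_Graph_neighbors(graphComm, myRank, neighborCount, neighborsPtr);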
+
+
+ /// <summary>
+ /// Suggest a shape for a new Cartesian communicator, given the number of dimensions.
+ /// See <see cref="CartesianCommunicator.ComputeDimensions"/>.
+ /// </summary>
+ /// <param name="nnodes">
+ /// The number of nodes the grid will contain.
+ /// </param>
+ /// <param name="ndims">
+ /// The number of dimensions the grid will have.
+ /// </param>
+ /// <param name="dims">
+ /// An array indicating the size in each dimension. Any nonzero entries will be left
+ /// alone; only 0 values will be changed to shape the grid.
+ /// </param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Dims_create(int nnodes, int ndims, int *dims);
+
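+        // Hedged sketch: letting MPI suggest a balanced 2-D grid shape for a given process
+        // count; zero entries in dims are filled in (e.g. 12 processes typically yield 4x3).
+        //
+        //     int[] dims = { 0, 0 };
+        //     fixed (int* dimsPtr = dims)
+        //         Unsafe.MPI_Dims_create(12, 2, dimsPtr);
+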
+ #region Startup
+ /// <summary>
+ /// Initializes MPI. This routine must be called before any other MPI routine.
+ /// It will be invoked by the <see cref="Environment"/> constructor.
+ /// </summary>
+ /// <param name="argc">The number of arguments in <paramref name="argv"/>.</param>
+ /// <param name="argv">
+ /// A pointer to an array of C-style strings containing all of
+ /// the command-line arguments.
+ /// </param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Init(ref int argc, ref byte **argv);
+
+ /// <summary>
+ /// Finalizes (shuts down) MPI. This routine must be called before exiting the
+ /// program. It will be invoked by <see cref="Environment.Dispose"/>.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Finalize();
+
+ /// <summary>
+ /// Determine whether MPI has already been initialized. See <see cref="Environment.Initialized"/>.
+ /// </summary>
+ /// <param name="flag">Will be set to a non-zero value if the MPI environment has been initialized.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Initialized(out int flag);
+
+ /// <summary>
+ /// Determine whether MPI has already been finalized. See <see cref="Environment.Finalized"/>.
+ /// </summary>
+ /// <param name="flag">Will be set to a non-zero value if the MPI environment has been finalized.</param>
+ /// <returns></returns>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Finalized(out int flag);
+
+ /// <summary>
+ /// Aborts the current MPI program. See <see cref="Environment.Abort"/>
+ /// and <see cref="Communicator.Abort"/>.
+ /// </summary>
+ /// <param name="comm">The communicator that will be used to abort the program.</param>
+ /// <param name="errcode">The error code to be returned from the MPI process.</param>
+ /// <returns>This routine does not return.</returns>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Abort(MPI_Comm comm, int errcode);
+ #endregion
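+
+        // Hedged sketch of a bare start-up/shut-down cycle without the managed Environment
+        // wrapper; an empty argument vector is assumed to be acceptable to the implementation.
+        //
+        //     int flag;
+        //     Unsafe.MPI_Initialized(out flag);
+        //     if (flag == 0)
+        //     {
+        //         int argc = 0;
+        //         byte** argv = null;
+        //         Unsafe.MPI_Init(ref argc, ref argv);
+        //     }
+        //     // ... MPI work ...
+        //     Unsafe.MPI_Finalize();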
+
+ #region Threading
+ /// <summary>
+ /// Indicates that the MPI program is single-threaded. See <see cref="Threading.Single"/>.
+ /// </summary>
+ public const int MPI_THREAD_SINGLE = 0;
+ /// <summary>
+ /// Indicates that the MPI program is multi-threaded, but all MPI operations will be called
+ /// from the main thread. See <see cref="Threading.Funneled"/>.
+ /// </summary>
+ public const int MPI_THREAD_FUNNELED = 1;
+ /// <summary>
+ /// Indicates that the MPI program is multi-threaded, but only one thread will call into MPI
+ /// at any given time. See <see cref="Threading.Serialized"/>.
+ /// </summary>
+ public const int MPI_THREAD_SERIALIZED = 2;
+ /// <summary>
+ /// Indicates that the MPI program is multi-threaded, and any thread can call into MPI
+ /// at any time. See <see cref="Threading.Multiple"/>.
+ /// </summary>
+ public const int MPI_THREAD_MULTIPLE = 3;
+
+ /// <summary>
+ /// Initializes the MPI library with thread support. This operation subsumes <see cref="MPI_Init"/>.
+ /// See <see cref="MPI.Environment(ref string[], MPI.Threading)"/>.
+ /// </summary>
+ /// <param name="argc">Pointer to the number of arguments passed on the command line.</param>
+ /// <param name="argv">Pointer to the command-line arguments (array of C-style strings).</param>
+ /// <param name="required">
+ /// The threading level required by the caller, which must be one of the <c>MPI_THREAD_*</c>
+ /// constants.
+ /// </param>
+ /// <param name="provided">
+ /// Returns the actual threading level that the MPI implementation is providing, which will be
+ /// one of the <c>MPI_THREAD_*</c> constants.
+ /// </param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Init_thread(ref int argc, ref byte **argv, int required, out int provided);
+
+ /// <summary>
+ /// Determine whether the calling thread is the main MPI thread (the thread that called
+ /// <see cref="MPI_Init"/> or <see cref="MPI_Init_thread"/>). See <see cref="MPI.Environment.IsMainThread"/>.
+ /// </summary>
+ /// <param name="flag">Returns whether the calling thread is the main thread.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Is_thread_main(out int flag);
+
+ /// <summary>
+ /// Determine the level of threading support provided by the MPI library.
+ /// See <see cref="MPI.Environment.Threading"/>.
+ /// </summary>
+ /// <param name="provided">Returns one of the <c>MPI_THREAD_*</c> constants.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Query_thread(out int provided);
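+
+ // Sketch of thread-aware startup: request MPI_THREAD_SERIALIZED and check what
+ // the library actually provides. The method name is illustrative only, and the
+ // sketch assumes no command-line arguments need to be forwarded to MPI.
+ public static unsafe void ExampleInitializeWithThreads()
+ {
+     int argc = 0;
+     byte** argv = null;
+     int provided;
+     MPI_Init_thread(ref argc, ref argv, MPI_THREAD_SERIALIZED, out provided);
+
+     if (provided < MPI_THREAD_SERIALIZED)
+     {
+         // The library offers less than requested; the caller must restrict
+         // its own use of threads accordingly.
+     }
+
+     int isMain;
+     MPI_Is_thread_main(out isMain);    // non-zero only on the thread that initialized MPI
+ }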
+ #endregion
+
+ #region Predefined keys
+ /// <summary>
+ /// Predefined attribute key that can be used to determine the maximum
+ /// tag value that users are allowed to provide to a communication
+ /// request.
+ /// See <see cref="Environment.MaxTag"/>.
+ /// </summary>
+ public const int MPI_TAG_UB = 0;
+
+ /// <summary>
+ /// Predefined attribute key that can be used to determine the rank of
+ /// the host process associated with <see cref="MPI_COMM_WORLD"/>.
+ /// If there is no host, the result will be <see cref="MPI_PROC_NULL"/>.
+ /// See <see cref="Environment.HostRank"/>.
+ /// </summary>
+ public const int MPI_HOST = 1;
+
+ /// <summary>
+ /// Predefined attribute key that can be used to determine the rank of
+ /// the process that can perform I/O via the language-standard I/O
+ /// mechanism. If every process can provide language-standard I/O, the
+ /// resulting value will be <see cref="MPI_ANY_SOURCE"/>; if no process
+ /// can support language-standard I/O, the result will be
+ /// <see cref="MPI_PROC_NULL"/>.
+ /// See <see cref="Environment.IORank"/>.
+ /// </summary>
+ public const int MPI_IO = 2;
+
+ /// <summary>
+ /// Predefined attribute key that can be used to determine whether the
+ /// clocks (accessed via <see cref="MPI_Wtime"/>) are synchronized
+ /// across all processes.
+ /// See <see cref="Environment.IsTimeGlobal"/>.
+ /// </summary>
+ public const int MPI_WTIME_IS_GLOBAL = 3;
+ #endregion
+
+ /// <summary>
+ /// The maximum length of the string returned by <see cref="MPI_Get_processor_name"/>.
+ /// </summary>
+ public const int MPI_MAX_PROCESSOR_NAME = OPAL_MAX_PROCESSOR_NAME;
+
+ /// <summary>
+ /// Retrieve the name of the processor or compute node that is currently executing.
+ /// See <see cref="Environment.ProcessorName"/>.
+ /// </summary>
+ /// <param name="name">
+ /// Pointer to an array of bytes that will, on return, contain the name of
+ /// the currently executing processor. If the processor name requires more
+ /// than *<paramref name="len"/> ASCII characters, only the first
+ /// *<paramref name="len"/> characters will be written. To be sure that you
+ /// will always get the full processor name, <paramref name="name"/> should
+ /// refer to at least <see cref="MPI_MAX_PROCESSOR_NAME"/> characters.
+ /// </param>
+ /// <param name="len">
+ /// A pointer to the number of bytes in the processor name. On input, this is
+ /// the number of bytes that <paramref name="name"/> refers to. When this
+ /// function returns, this will point to the number of bytes in the actual name.
+ /// </param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Get_processor_name(byte[] name, ref int len);
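+
+ // Sketch of retrieving the processor name as a managed string; the buffer is
+ // sized with MPI_MAX_PROCESSOR_NAME so the full name always fits. The method
+ // name is illustrative only.
+ public static string ExampleProcessorName()
+ {
+     byte[] name = new byte[MPI_MAX_PROCESSOR_NAME];
+     int len = name.Length;
+     MPI_Get_processor_name(name, ref len);
+     return System.Text.Encoding.ASCII.GetString(name, 0, len);
+ }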
+
+ #region Error handling
+ /// <summary>
+ /// The maximum number of characters that can occur in an error string returned from
+ /// <see cref="MPI_Error_string"/>.
+ /// </summary>
+ public const int MPI_MAX_ERROR_STRING = OPAL_MAX_ERROR_STRING;
+
+ /// <summary>
+ /// Error value indicating no error.
+ /// </summary>
+ public const int MPI_SUCCESS = 0;
+
+ /// <summary>
+ /// Error class indicating an invalid buffer pointer.
+ /// </summary>
+ public const int MPI_ERR_BUFFER = 1;
+
+ /// <summary>
+ /// Error class indicating an invalid count argument.
+ /// </summary>
+ public const int MPI_ERR_COUNT = 2;
+
+ /// <summary>
+ /// Error class indicating an invalid data type argument.
+ /// </summary>
+ public const int MPI_ERR_TYPE = 3;
+
+ /// <summary>
+ /// Error class indicating an invalid tag argument.
+ /// </summary>
+ public const int MPI_ERR_TAG = 4;
+
+ /// <summary>
+ /// Error class indicating an invalid communicator.
+ /// </summary>
+ public const int MPI_ERR_COMM = 5;
+
+ /// <summary>
+ /// Error class indicating an invalid rank.
+ /// </summary>
+ public const int MPI_ERR_RANK = 6;
+
+ /// <summary>
+ /// Error class indicating an invalid root.
+ /// </summary>
+ public const int MPI_ERR_ROOT = 8;
+
+ /// <summary>
+ /// Error class indicating that a message was truncated on receive.
+ /// </summary>
+ public const int MPI_ERR_TRUNCATE = 15;
+
+ /// <summary>
+ /// Error class indicating an invalid group argument.
+ /// </summary>
+ public const int MPI_ERR_GROUP = 9;
+
+ /// <summary>
+ /// Error class indicating an invalid operation argument.
+ /// </summary>
+ public const int MPI_ERR_OP = 10;
+
+ /// <summary>
+ /// Error class indicating an invalid request argument.
+ /// </summary>
+ public const int MPI_ERR_REQUEST = 7;
+
+ /// <summary>
+ /// Error class indicating an invalid topology for a communicator argument.
+ /// </summary>
+ public const int MPI_ERR_TOPOLOGY = 11;
+
+ /// <summary>
+ /// Error class indicating an invalid dimension argument (for cartesian communicators).
+ /// </summary>
+ public const int MPI_ERR_DIMS = 12;
+
+ /// <summary>
+ /// Error class indicating an invalid argument.
+ /// </summary>
+ public const int MPI_ERR_ARG = 13;
+
+ /// <summary>
+ /// Error class indicating an error that is known, but not described by other MPI
+ /// error classes.
+ /// </summary>
+ public const int MPI_ERR_OTHER = 16;
+
+ /// <summary>
+ /// Error class indicating that an unknown error occurred.
+ /// </summary>
+ public const int MPI_ERR_UNKNOWN = 14;
+
+ /// <summary>
+ /// Error class indicating that an internal error occurred in the MPI implementation.
+ /// </summary>
+ public const int MPI_ERR_INTERN = 17;
+
+ /// <summary>
+ /// Error class indicating that the actual error code is in the status argument.
+ /// </summary>
+ public const int MPI_ERR_IN_STATUS = 18;
+
+ /// <summary>
+ /// Error class indicating that a request is still pending.
+ /// </summary>
+ public const int MPI_ERR_PENDING = 19;
+
+ /// <summary>
+ /// Error class indicating an invalid file handle argument.
+ /// </summary>
+ public const int MPI_ERR_FILE = 30;
+
+ /// <summary>
+ /// Error class indicating that permission was denied when accessing a file.
+ /// </summary>
+ public const int MPI_ERR_ACCESS = 20;
+
+ /// <summary>
+ /// Error class indicating that the amode argument passed to MPI_File_open is invalid.
+ /// </summary>
+ public const int MPI_ERR_AMODE = 21;
+
+ /// <summary>
+ /// Error class indicating an invalid file name.
+ /// </summary>
+ public const int MPI_ERR_BAD_FILE = 23;
+
+ /// <summary>
+ /// Error class indicating that the file already exists.
+ /// </summary>
+ public const int MPI_ERR_FILE_EXISTS = 28;
+
+ /// <summary>
+ /// Error class indicating that the file is already in use.
+ /// </summary>
+ public const int MPI_ERR_FILE_IN_USE = 29;
+
+ /// <summary>
+ /// Error class indicating that there is not enough space for the file.
+ /// </summary>
+ public const int MPI_ERR_NO_SPACE = 41;
+
+ /// <summary>
+ /// Error class indicating that no such file exists.
+ /// </summary>
+ public const int MPI_ERR_NO_SUCH_FILE = 42;
+
+ /// <summary>
+ /// Error class indicating an I/O error.
+ /// </summary>
+ public const int MPI_ERR_IO = 35;
+
+ /// <summary>
+ /// Error class indicating that the file is read-only.
+ /// </summary>
+ public const int MPI_ERR_READ_ONLY = 45;
+
+ /// <summary>
+ /// Error class indicating that an error occurred in a user-supplied data conversion function.
+ /// </summary>
+ public const int MPI_ERR_CONVERSION = 25;
+
+ /// <summary>
+ /// Error class indicating that conversion functions could not be registered because a conversion
+ /// function has already been registered for this data representation identifier.
+ /// </summary>
+ public const int MPI_ERR_DUP_DATAREP = 27;
+
+ /// <summary>
+ /// Error class indicating that an unsupported data representation was passed to <c>MPI_FILE_SET_VIEW</c>.
+ /// </summary>
+ public const int MPI_ERR_UNSUPPORTED_DATAREP = 51;
+
+ /// <summary>
+ /// Error class indicating an invalid info argument.
+ /// </summary>
+ public const int MPI_ERR_INFO = 34;
+
+ /// <summary>
+ /// Error class indicating an invalid info key.
+ /// </summary>
+ public const int MPI_ERR_INFO_KEY = 31;
+
+ /// <summary>
+ /// Error class indicating an invalid info value.
+ /// </summary>
+ public const int MPI_ERR_INFO_VALUE = 33;
+
+ /// <summary>
+ /// Error class indicating that the requested info key is not defined.
+ /// </summary>
+ public const int MPI_ERR_INFO_NOKEY = 32;
+
+ /// <summary>
+ /// Error class indicating that an attempt has been made to look up a service
+ /// name that has not been published.
+ /// </summary>
+ public const int MPI_ERR_NAME = 38;
+
+ /// <summary>
+ /// Error class indicating that no memory is available when trying to allocate
+ /// memory with <c>MPI_Alloc_mem</c>.
+ /// </summary>
+ public const int MPI_ERR_NO_MEM = 39;
+
+ /// <summary>
+ /// Error class indicating that a collective argument is not the same on all processes,
+ /// or collective routines were called in a different order.
+ /// </summary>
+ public const int MPI_ERR_NOT_SAME = 40;
+
+ /// <summary>
+ /// Error class indicating that a named port does not exist or has been closed.
+ /// </summary>
+ public const int MPI_ERR_PORT = 43;
+
+ /// <summary>
+ /// Error class indicating that the user's quota has been exceeded.
+ /// </summary>
+ public const int MPI_ERR_QUOTA = 44;
+
+ /// <summary>
+ /// Error class indicating that an attempt has been made to unpublish a service
+ /// name that has already been unpublished or was never published.
+ /// </summary>
+ public const int MPI_ERR_SERVICE = 48;
+
+ /// <summary>
+ /// Error class indicating that an attempt to spawn a process has failed.
+ /// </summary>
+ public const int MPI_ERR_SPAWN = 50;
+
+ /// <summary>
+ /// Error class indicating that an operation is unsupported.
+ /// </summary>
+ public const int MPI_ERR_UNSUPPORTED_OPERATION = 52;
+
+ /// <summary>
+ /// Error class indicating an invalid window argument.
+ /// </summary>
+ public const int MPI_ERR_WIN = 53;
+
+ /// <summary>
+ /// Error class indicating an invalid base argument.
+ /// </summary>
+ public const int MPI_ERR_BASE = 24;
+
+ /// <summary>
+ /// Error class indicating an invalid locktype argument.
+ /// </summary>
+ public const int MPI_ERR_LOCKTYPE = 37;
+
+ /// <summary>
+ /// Error class indicating an invalid attribute key.
+ /// </summary>
+ public const int MPI_ERR_KEYVAL = 36;
+
+ /// <summary>
+ /// Error class indicating that there were conflicting accesses within a window.
+ /// </summary>
+ public const int MPI_ERR_RMA_CONFLICT = 46;
+
+ /// <summary>
+ /// Error class indicating that RMA calls were incorrectly synchronized.
+ /// </summary>
+ public const int MPI_ERR_RMA_SYNC = 47;
+
+ /// <summary>
+ /// Error class indicating an invalid size argument.
+ /// </summary>
+ public const int MPI_ERR_SIZE = 49;
+
+ /// <summary>
+ /// Error class indicating an invalid displacement argument.
+ /// </summary>
+ public const int MPI_ERR_DISP = 26;
+
+ /// <summary>
+ /// Error class indicating an invalid assert argument.
+ /// </summary>
+ public const int MPI_ERR_ASSERT = 22;
+
+ /// <summary>
+ /// The last valid error code for a predefined error class.
+ /// </summary>
+ public const int MPI_ERR_LASTCODE = 54;
+
+ /// <summary>
+ /// Predefined error handler that indicates that the MPI program should be terminated
+ /// if an error occurs. This is the default error handler in the low-level MPI, which
+ /// is overridden by MPI.NET.
+ /// </summary>
+ public static readonly MPI_Errhandler MPI_ERRORS_ARE_FATAL = mpinet_MPI_ERRORS_ARE_FATAL();
+
+ /// <summary>
+ /// Predefined error handler that indicates that the MPI routine that detected an error
+ /// should return an error code. MPI.NET uses this error handler to translate MPI
+ /// errors into program exceptions.
+ /// </summary>
+ public static readonly MPI_Errhandler MPI_ERRORS_RETURN = mpinet_MPI_ERRORS_RETURN();
+
+ /// <summary>
+ /// Predefined error handler that represents "no" error handler.
+ /// </summary>
+ public static readonly MPI_Errhandler MPI_ERRHANDLER_NULL = mpinet_MPI_ERRHANDLER_NULL();
+
+ /// <summary>
+ /// Creates a new MPI error handler from a user function. Attaching this error handler
+ /// to a communicator will invoke the user error handler when an error occurs.
+ /// This feature is not supported in MPI.NET; instead, MPI.NET installs its own error
+ /// handler that translates MPI errors into .NET exceptions.
+ /// </summary>
+ /// <param name="function">The user's function.</param>
+ /// <param name="errhandler">The newly-created error handler.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Errhandler_create(IntPtr function, out MPI_Errhandler errhandler);
+
+ /// <summary>
+ /// Set the error handler for a given communicator. This feature is not supported in MPI.NET;
+ /// instead, MPI.NET installs its own error handler that translates MPI errors into .NET exceptions.
+ /// </summary>
+ /// <param name="comm">The communicator.</param>
+ /// <param name="errhandler">The error handler.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Errhandler_set(MPI_Comm comm, MPI_Errhandler errhandler);
+
+ /// <summary>
+ /// Retrieve the error handler for a given communicator. This feature is not supported in MPI.NET;
+ /// instead, MPI.NET installs its own error handler that translates MPI errors into .NET exceptions.
+ /// </summary>
+ /// <param name="comm">The communicator.</param>
+ /// <param name="errhandler">The error handler attached to the communicator.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Errhandler_get(MPI_Comm comm, out MPI_Errhandler errhandler);
+
+ /// <summary>
+ /// Free a user-defined error handler that was created with <see cref="MPI_Errhandler_create"/>.
+ /// This feature is not supported in MPI.NET; instead, MPI.NET installs its own error handler
+ /// that translates MPI errors into .NET exceptions.
+ /// </summary>
+ /// <param name="errhandler"></param>
+ /// <returns></returns>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Errhandler_free(ref MPI_Errhandler errhandler);
+
+ /// <summary>
+ /// Retrieves an error string corresponding to the given MPI error code. Used internally by
+ /// MPI.NET to provide an error string in MPI.NET exceptions.
+ /// </summary>
+ /// <param name="errorcode">The error code.</param>
+ /// <param name="text">Byte array that will be filled with a string describing the error.</param>
+ /// <param name="resultlen">Returns the number of elements placed into the buffer <paramref name="text"/>.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Error_string(int errorcode, byte[] text, ref int resultlen);
+
+ /// <summary>
+ /// Maps an MPI error code into an error class. Error classes describe (in general) what kind of
+ /// error occurred, and can be used to provide better information to the user. The MPI_ERR_* constants
+ /// give the various error classes present in MPI. Used internally by MPI.NET.
+ /// </summary>
+ /// <param name="errorcode">The error code returned from MPI.</param>
+ /// <param name="errorclass">Set to the error class</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Error_class(int errorcode, out int errorclass);
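+
+ // Sketch of turning an MPI error code into readable text, roughly the way the
+ // high-level wrappers build exception messages. The method name is illustrative only.
+ public static string ExampleErrorText(int errorcode)
+ {
+     byte[] buffer = new byte[MPI_MAX_ERROR_STRING];
+     int length = buffer.Length;
+     MPI_Error_string(errorcode, buffer, ref length);
+     return System.Text.Encoding.ASCII.GetString(buffer, 0, length);
+ }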
+ #endregion
+
+ #region Point-to-point communication
+ /// <summary>
+ /// Predefined value for the "source" parameter to MPI receive or probe operations,
+ /// which indicates that a message from any process may be matched.
+ /// See <see cref="Communicator.anySource"/>.
+ /// </summary>
+ public const int MPI_ANY_SOURCE = -1;
+
+ /// <summary>
+ /// Predefined value for the "tag" parameter to MPI receive or probe operations,
+ /// which indicates that a message with any tag may be matched.
+ /// See <see cref="Communicator.anyTag"/>.
+ /// </summary>
+ public const int MPI_ANY_TAG = -1;
+
+ /// <summary>
+ /// Send a message to another process within the communicator.
+ /// See <see cref="Communicator.Send&lt;T&gt;(T, int, int)"/>.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Send(IntPtr buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm);
+
+ /// <summary>
+ /// Receive a message from another process within the communicator.
+ /// See <see cref="Communicator.Receive&lt;T&gt;(int, int)"/>.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Recv(IntPtr buf, int count, MPI_Datatype datatype, int source, int tag, MPI_Comm comm, out MPI_Status status);
+
+ /// <summary>
+ /// Simultaneously send and receive a message from another process within the communicator.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Sendrecv(IntPtr sendbuf, int sendcount, MPI_Datatype senddatatype, int dest, int sendtag,
+ IntPtr recvbuf, int recvcount, MPI_Datatype recvdatatype, int source, int recvtag, MPI_Comm comm, out MPI_Status status);
+
+ /// <summary>
+ /// Determine whether a particular communication operation was cancelled.
+ /// See <see cref="Status.Cancelled"/>.
+ /// </summary>
+ /// <param name="status">Status object</param>
+ /// <param name="flag">Will be set to a non-zero value if the communnication was cancelled.</param>
+ /// <returns>Error code.</returns>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Test_cancelled(ref MPI_Status status, out int flag);
+
+ /// <summary>
+ /// Determine the number of elements transmitted by a communication operation.
+ /// See <see cref="Status.Count"/>.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Get_count(ref MPI_Status status, MPI_Datatype datatype, out int count);
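+
+ // Sketch of a blocking exchange of a single integer between ranks 0 and 1. It
+ // assumes this file also defines the MPI_COMM_WORLD and MPI_INT handles (as the
+ // stock MPI.NET Unsafe class does); the method name is illustrative only.
+ public static unsafe void ExampleSendReceive(int myRank)
+ {
+     int value = 42;
+     if (myRank == 0)
+     {
+         MPI_Send(new IntPtr(&value), 1, MPI_INT, /*dest*/ 1, /*tag*/ 0, MPI_COMM_WORLD);
+     }
+     else if (myRank == 1)
+     {
+         MPI_Status status;
+         MPI_Recv(new IntPtr(&value), 1, MPI_INT, /*source*/ 0, /*tag*/ 0, MPI_COMM_WORLD, out status);
+     }
+ }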
+ #endregion
+
+ #region Predefined reduce operations
+ /// <summary>
+ /// Compute the maximum value via an MPI reduction operation.
+ /// See <see cref="Operation&lt;T&gt;.Max"/>.
+ /// </summary>
+ public static readonly MPI_Op MPI_MAX = mpinet_MPI_MAX();
+
+ /// <summary>
+ /// Compute the minimum value via an MPI reduction operation.
+ /// See <see cref="Operation&lt;T&gt;.Min"/>.
+ /// </summary>
+ public static readonly MPI_Op MPI_MIN = mpinet_MPI_MIN();
+
+ /// <summary>
+ /// Compute the sum via an MPI reduction operation.
+ /// See <see cref="Operation&lt;T&gt;.Add"/>
+ /// </summary>
+ public static readonly MPI_Op MPI_SUM = mpinet_MPI_SUM();
+
+ /// <summary>
+ /// Compute the product via an MPI reduction operation.
+ /// See <see cref="Operation&lt;T&gt;.Multiply"/>
+ /// </summary>
+ public static readonly MPI_Op MPI_PROD = mpinet_MPI_PROD();
+
+ /// <summary>
+ /// Compute the logical AND via an MPI reduction operation.
+ /// See <see cref="Operation&lt;T&gt;.LogicalAnd"/>
+ /// </summary>
+ public static readonly MPI_Op MPI_LAND = mpinet_MPI_LAND();
+
+ /// <summary>
+ /// Compute the bitwise AND via an MPI reduction operation.
+ /// See <see cref="Operation&lt;T&gt;.BitwiseAnd"/>
+ /// </summary>
+ public static readonly MPI_Op MPI_BAND = mpinet_MPI_BAND();
+
+ /// <summary>
+ /// Compute the logical OR via an MPI reduction operation.
+ /// See <see cref="Operation&lt;T&gt;.LogicalOr"/>
+ /// </summary>
+ public static readonly MPI_Op MPI_LOR = mpinet_MPI_LOR();
+
+ /// <summary>
+ /// Compute the bitwise OR via an MPI reduction operation.
+ /// See <see cref="Operation&lt;T&gt;.BitwiseOr"/>
+ /// </summary>
+ public static readonly MPI_Op MPI_BOR = mpinet_MPI_BOR();
+
+ /// <summary>
+ /// Compute the logical exclusive OR via an MPI reduction operation.
+ /// There is no high-level operation corresponding to this predefined
+ /// MPI reduction.
+ /// </summary>
+ public static readonly MPI_Op MPI_LXOR = mpinet_MPI_LXOR();
+
+ /// <summary>
+ /// Compute the bitwise exclusive OR via an MPI reduction operation.
+ /// See <see cref="Operation&lt;T&gt;.ExclusiveOr"/>
+ /// </summary>
+ public static readonly MPI_Op MPI_BXOR = mpinet_MPI_BXOR();
+
+ /// <summary>
+ /// Compute the minimum value and location of that value via
+ /// an MPI reduction operation. There is no high-level operation
+ /// corresponding to this predefined MPI reduction.
+ /// </summary>
+ public static readonly MPI_Op MPI_MINLOC = mpinet_MPI_MINLOC();
+
+ /// <summary>
+ /// Compute the maximum value and location of that value via
+ /// an MPI reduction operation. There is no high-level operation
+ /// corresponding to this predefined MPI reduction.
+ /// </summary>
+ public static readonly MPI_Op MPI_MAXLOC = mpinet_MPI_MAXLOC();
+
+ /// <summary>
+ /// Placeholder operation that indicates "no operation".
+ /// </summary>
+ public static readonly MPI_Op MPI_OP_NULL = mpinet_MPI_OP_NULL();
+ #endregion
+
+ #region Non-blocking point-to-point communication
+ /// <summary>
+ /// Constant that indicates a "null" MPI request, meaning that there is no such request.
+ /// </summary>
+ public static readonly MPI_Request MPI_REQUEST_NULL = mpinet_MPI_REQUEST_NULL();
+
+ /// <summary>
+ /// An immediate (non-blocking) point-to-point send.
+ /// See <see cref="MPI.Communicator.ImmediateSend&lt;T&gt;(T, int, int)"/>.
+ /// </summary>
+ /// <param name="buf">Buffer of data to send.</param>
+ /// <param name="count">The number of elements in <paramref name="buf"/>.</param>
+ /// <param name="datatype">The type of data in <paramref name="buf"/>.</param>
+ /// <param name="dest">Rank of the destination process.</param>
+ /// <param name="tag">Tag used to transmit this data.</param>
+ /// <param name="comm">Communicator through which this data will be sent.</param>
+ /// <param name="request">Receives a request object that can be used to query this communication.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Isend(IntPtr buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, out MPI_Request request);
+
+ /// <summary>
+ /// A non-blocking receive that posts the intent to receive a value. The actual receive will be
+ /// completed when the corresponding request is completed.
+ /// See <see cref="MPI.Communicator.ImmediateReceive&lt;T&gt;(int, int)"/>.
+ /// </summary>
+ /// <param name="buf">Buffer that will receive message data.</param>
+ /// <param name="count">Number of elements in <paramref name="buf"/>.</param>
+ /// <param name="datatype">Type of data stored in <paramref name="buf"/>.</param>
+ /// <param name="source">Rank of the processor that will initiate this message, or <see cref="MPI_ANY_SOURCE"/>.</param>
+ /// <param name="tag">Message tag used to identify the message, or <see cref="MPI_ANY_TAG"/>.</param>
+ /// <param name="comm">Communicator through which the message will be sent.</param>
+ /// <param name="request">Receives a request object that can be used to query this communication.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Irecv(IntPtr buf, int count, MPI_Datatype datatype, int source, int tag, MPI_Comm comm, out MPI_Request request);
+
+ /// <summary>
+ /// Wait until the given request has completed. See <see cref="Request.Wait"/>.
+ /// </summary>
+ /// <param name="request">Request object.</param>
+ /// <param name="status">Will receive the status of the completed operation.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Wait(ref MPI_Request request, out MPI_Status status);
+
+ /// <summary>
+ /// Test whether the given request has completed. See <see cref="Request.Test"/>.
+ /// </summary>
+ /// <param name="request">Request object.</param>
+ /// <param name="flag">Will be set to a non-zero value if the request has completed.</param>
+ /// <param name="status">Will receive the status of the completed operation.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Test(ref MPI_Request request, out int flag, out MPI_Status status);
+
+ /// <summary>
+ /// Free the resources associated with a request.
+ /// </summary>
+ /// <param name="request">The request that will be freed.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Request_free(ref MPI_Request request);
+
+ /// <summary>
+ /// Waits until any of the given MPI requests completes before returning.
+ /// See <see cref="MPI.RequestList.WaitAny"/>.
+ /// </summary>
+ /// <param name="count">The number of requests in <paramref name="array_of_requests"/>.</param>
+ /// <param name="array_of_requests">An array of MPI request objects.</param>
+ /// <param name="index">Receives the index of the request that completed.</param>
+ /// <param name="status">Receives the status of the completed request.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Waitany(int count, MPI_Request[] array_of_requests, out int index, out MPI_Status status);
+
+ /// <summary>
+ /// Test whether any of the MPI requests has completed.
+ /// See <see cref="MPI.RequestList.TestAny"/>.
+ /// </summary>
+ /// <param name="count">The number of requests in <paramref name="array_of_requests"/>.</param>
+ /// <param name="array_of_requests">An array of MPI request objects.</param>
+ /// <param name="index">Receives the index of the request that completed (if <paramref name="flag"/> is non-zero).</param>
+ /// <param name="flag">Will be set to a non-zero value if a request has completed.</param>
+ /// <param name="status">Receives the status of the completed request (if <paramref name="flag"/> is non-zero).</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Testany(int count, MPI_Request[] array_of_requests, out int index, out int flag, out MPI_Status status);
+
+ /// <summary>
+ /// Wait until all of the given MPI requests have completed before returning.
+ /// See <see cref="MPI.RequestList.WaitAll"/>.
+ /// </summary>
+ /// <param name="count">The number of requests (and statuses).</param>
+ /// <param name="array_of_requests">An array of MPI request objects to be completed.</param>
+ /// <param name="array_of_statuses">An array of MPI status objects, to be filled in by the completed requests.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Waitall(int count, MPI_Request* array_of_requests, MPI_Status[] array_of_statuses);
+
+ /// <summary>
+ /// Test whether all of the given MPI requests have been completed.
+ /// See <see cref="MPI.RequestList.TestAll"/>.
+ /// </summary>
+ /// <param name="count">The number of requests (and statuses).</param>
+ /// <param name="array_of_requests">An array of MPI request objects to be completed.</param>
+ /// <param name="flag">Will be set to a non-zero value if all requests have completed.</param>
+ /// <param name="array_of_statuses">An array of MPI status objects, to be filled in by the completed requests (if <paramref name="flag"/> is non-zero).</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Testall(int count, MPI_Request* array_of_requests, out int flag, MPI_Status[] array_of_statuses);
+
+ /// <summary>
+ /// Wait until some MPI requests have completed, then provide a list of all of the requests that have completed.
+ /// See <see cref="MPI.RequestList.WaitSome"/>.
+ /// </summary>
+ /// <param name="incount">The number of requests in <paramref name="array_of_requests"/>.</param>
+ /// <param name="array_of_requests">The array of requests to be completed.</param>
+ /// <param name="outcount">Receives the number of requests that have been completed.</param>
+ /// <param name="array_of_indices">
+ /// An array that will receive the indices into <paramref name="array_of_requests"/> of the
+ /// completed requests.
+ /// </param>
+ /// <param name="array_of_statuses">
+ /// Array containing the completed status information that corresponds to the completed
+ /// requests whose indices are in <paramref name="array_of_indices"/>.
+ /// </param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Waitsome(int incount, MPI_Request[] array_of_requests,
+ out int outcount, int[] array_of_indices, MPI_Status[] array_of_statuses);
+
+ /// <summary>
+ /// Provides a list of all of the requests that have completed, without waiting for any
+ /// requests to complete. See <see cref="MPI.RequestList.TestSome"/>.
+ /// </summary>
+ /// <param name="incount">The number of requests in <paramref name="array_of_requests"/>.</param>
+ /// <param name="array_of_requests">The array of requests to be completed.</param>
+ /// <param name="outcount">Receives the number of requests that have been completed.</param>
+ /// <param name="array_of_indices">
+ /// An array that will receive the indices into <paramref name="array_of_requests"/> of the
+ /// completed requests.
+ /// </param>
+ /// <param name="array_of_statuses">
+ /// Array containing the completed status information that corresponds to the completed
+ /// requests whose indices are in <paramref name="array_of_indices"/>.
+ /// </param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Testsome(int incount, MPI_Request[] array_of_requests,
+ out int outcount, int[] array_of_indices, MPI_Status[] array_of_statuses);
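+
+ // Sketch of a non-blocking exchange: post the receive first, then the send, then
+ // wait for both to complete. Assumes the MPI_COMM_WORLD and MPI_INT handles
+ // defined elsewhere in this file; the method name is illustrative only.
+ public static unsafe void ExampleNonBlockingExchange(int partner)
+ {
+     int outgoing = 7;
+     int incoming = 0;
+     MPI_Request recvRequest, sendRequest;
+     MPI_Status status;
+
+     MPI_Irecv(new IntPtr(&incoming), 1, MPI_INT, partner, 0, MPI_COMM_WORLD, out recvRequest);
+     MPI_Isend(new IntPtr(&outgoing), 1, MPI_INT, partner, 0, MPI_COMM_WORLD, out sendRequest);
+
+     MPI_Wait(ref sendRequest, out status);     // both buffers stay valid until the waits return
+     MPI_Wait(ref recvRequest, out status);
+ }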
+ #endregion
+
+ #region Probe and cancel
+ /// <summary>
+ /// Test whether a message is available. See <see cref="Communicator.ImmediateProbe"/>.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Iprobe(int source, int tag, MPI_Comm comm, out int flag, out MPI_Status status);
+
+ /// <summary>
+ /// Wait until a message is available. See <see cref="Communicator.Probe"/>.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Probe(int source, int tag, MPI_Comm comm, out MPI_Status status);
+
+ /// <summary>
+ /// Cancel an outstanding MPI communication request. See <see cref="Request.Cancel"/>.
+ /// </summary>
+ /// <param name="request">The request to be cancelled.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Cancel(ref MPI_Request request);
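+
+ // Sketch of probing for a message of unknown length, sizing the buffer from the
+ // probed status, and then receiving it. Assumes the MPI_COMM_WORLD and MPI_INT
+ // handles defined elsewhere in this file; the method name is illustrative only.
+ public static unsafe int[] ExampleProbeThenReceive(int source, int tag)
+ {
+     MPI_Status status;
+     MPI_Probe(source, tag, MPI_COMM_WORLD, out status);    // block until a matching message arrives
+
+     int count;
+     MPI_Get_count(ref status, MPI_INT, out count);         // number of ints in that message
+
+     int[] buffer = new int[count];
+     fixed (int* p = buffer)                                // pin the managed array during the receive
+     {
+         MPI_Recv(new IntPtr(p), count, MPI_INT, source, tag, MPI_COMM_WORLD, out status);
+     }
+     return buffer;
+ }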
+ #endregion
+
+ #region Environment Inquiry
+ /// <summary>
+ /// Returns a floating-point number of seconds elapsed since some arbitrary time in the past.
+ /// See <see cref="Environment.Time"/>.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern double MPI_Wtime();
+
+ /// <summary>
+ /// Returns the resolution of <see cref="MPI_Wtime"/>, in seconds.
+ /// See <see cref="Environment.TimeResolution"/>.
+ /// </summary>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern double MPI_Wtick();
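+
+ // Sketch of timing an operation with the MPI clock; the difference of two
+ // MPI_Wtime calls is elapsed wall-clock time in seconds, with granularity
+ // MPI_Wtick. The method name is illustrative only.
+ public static double ExampleTimedBarrier(MPI_Comm comm)
+ {
+     double start = MPI_Wtime();
+     MPI_Barrier(comm);                 // the operation being timed
+     return MPI_Wtime() - start;        // elapsed seconds
+ }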
+ #endregion
+
+ #region Collective communication
+ /// <summary>
+ /// Perform a parallel reduction operation that summarizes the results from the input provided
+ /// by all of the processes in the communicator. Semantically, this is equivalent to an
+ /// <see cref="MPI_Reduce"/> to an arbitrary root followed by an <see cref="MPI_Bcast"/> from
+ /// that process.
+ /// See <see cref="MPI.Intracommunicator.Allreduce&lt;T&gt;(T, MPI.ReductionOperation&lt;T&gt;)"/>
+ /// </summary>
+ /// <param name="sendbuf">
+ /// Buffer containing the "outgoing" values contributed by the calling process to the reduction operation.
+ /// </param>
+ /// <param name="recvbuf">
+ /// Buffer that will receive the results of the parallel reduction.
+ /// </param>
+ /// <param name="count">
+ /// The number of elements in <paramref name="sendbuf"/> and <paramref name="recvbuf"/>.
+ /// </param>
+ /// <param name="datatype">
+ /// The type of data in <paramref name="sendbuf"/> and <paramref name="recvbuf"/>.
+ /// </param>
+ /// <param name="op">
+ /// The MPI reduction operation to use, which may be one of the predefined reduction operations
+ /// or a user-defined operation created with <see cref="MPI_Op_create"/>.
+ /// </param>
+ /// <param name="comm">
+ /// The communicator over which the reduction will occur.
+ /// </param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Allreduce(IntPtr sendbuf, IntPtr recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm);
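+
+ // Sketch of a global sum: every rank contributes one integer and every rank
+ // receives the total. Uses the predefined MPI_SUM operation above and assumes the
+ // MPI_COMM_WORLD and MPI_INT handles defined elsewhere in this file; the method
+ // name is illustrative only.
+ public static unsafe int ExampleGlobalSum(int localValue)
+ {
+     int total = 0;
+     MPI_Allreduce(new IntPtr(&localValue), new IntPtr(&total), 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
+     return total;
+ }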
+
+ /// <summary>
+ /// Gather the values provided by each process into an array containing the contributions of all
+ /// of the processes. This operation is equivalent to an <see cref="MPI_Gather"/> to an arbitrary
+ /// root followed by an <see cref="MPI_Bcast"/> from that root.
+ /// See <see cref="MPI.Intracommunicator.Allgather&lt;T&gt;(T)"/>
+ /// </summary>
+ /// <param name="sendbuf">Buffer containing the values that will be sent from this process.</param>
+ /// <param name="sendcount">The number of elements to send.</param>
+ /// <param name="sendtype">The datatype describing the send buffer.</param>
+ /// <param name="recvbuf">Buffer that will contain all of the values contributed by every process.</param>
+ /// <param name="recvcount">The number of elements to receive from each process.</param>
+ /// <param name="recvtype">The type of data that will be stored in the receive buffer.</param>
+ /// <param name="comm">The communicator over which data will be gathered.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Allgather(IntPtr sendbuf, int sendcount, MPI_Datatype sendtype,
+ IntPtr recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm);
+
+ /// <summary>
+ /// Gather the values provided by each process into an array containing the contributions of all
+ /// of the processes. This operation differs from <see cref="MPI_Allgather"/> in that it
+ /// permits different processes to provide a different number of elements to be gathered.
+ /// See <see cref="MPI.Intracommunicator.Allgather&lt;T&gt;(T)"/>
+ /// </summary>
+ /// <param name="sendbuf">Buffer containing the values that will be sent from this process.</param>
+ /// <param name="sendcount">The number of elements to send.</param>
+ /// <param name="sendtype">The datatype describing the send buffer.</param>
+ /// <param name="recvbuf">Buffer that will contain all of the values contributed by every process.</param>
+ /// <param name="recvcounts">
+ /// An array whose ith element is the number of elements to be received from the process
+ /// with rank i.
+ /// </param>
+ /// <param name="displs">
+ /// An array whose ith element is the offset (in <paramref name="recbuf"/>) at which the
+ /// data from process i should be placed.
+ /// </param>
+ /// <param name="recvtype">The type of data that will be stored in the receive buffer.</param>
+ /// <param name="comm">The communicator over which data will be gathered.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Allgatherv(IntPtr sendbuf, int sendcount, MPI_Datatype sendtype,
+ IntPtr recvbuf, int[] recvcounts, int[] displs, MPI_Datatype recvtype, MPI_Comm comm);
+
+ /// <summary>
+ /// Transmits data from every process in a communicator to every other process in the communicator.
+ /// Similar to <see cref="MPI_Allgather"/>, except that each process can send different data to
+ /// every other process. To send a different amount of data to each process, use
+ /// <see cref="MPI_Alltoallv"/> or <see cref="MPI_Alltoallw"/>.
+ /// See <see cref="MPI.Intracommunicator.Alltoall&lt;T&gt;(T[])"/>.
+ /// </summary>
+ /// <param name="sendbuf">
+ /// Buffer containing the data to send from this process. The ith position in this buffer
+ /// contains the data that will be sent to the process with rank i.
+ /// </param>
+ /// <param name="sendcount">The number of elements to send to each process.</param>
+ /// <param name="sendtype">The type of data stored in <paramref name="sendbuf"/>.</param>
+ /// <param name="recvbuf">
+ /// Buffer that will receive data sent from other processes to this process.
+ /// </param>
+ /// <param name="recvcount">
+ /// The number of elements that will be received from each process.
+ /// </param>
+ /// <param name="recvtype">The type of data stored in <paramref name="recvbuf"/>.</param>
+ /// <param name="comm">The communicator used for collective communication.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Alltoall(IntPtr sendbuf, int sendcount, MPI_Datatype sendtype,
+ IntPtr recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm);
+
+ /// <summary>
+ /// Transmits data from every process in a communicator to every other process in the communicator.
+ /// Similar to <see cref="MPI_Allgatherv"/>, except that each process can send different data to
+ /// every other process. If all of your processes send the same amount of data to each other, use
+ /// the simpler <see cref="MPI_Alltoall"/>; if you need the data sent to different processes to
+ /// have different datatypes, use <see cref="MPI_Alltoallw"/>.
+ /// See <see cref="MPI.Intracommunicator.Alltoall&lt;T&gt;(T[])"/>.
+ /// </summary>
+ /// <param name="sendbuf">
+ /// Buffer containing the data to send from this process. The ith position in this buffer
+ /// contains the data that will be sent to the process with rank i.
+ /// </param>
+ /// <param name="sendcounts">
+ /// An array whose ith element contains the number of elements to be sent to the process with
+ /// rank i.
+ /// </param>
+ /// <param name="sdispls">
+ /// An array whose ith element contains the offsets into <paramref name="sendbuf"/> where the
+ /// data destined for the process with rank i begins.
+ /// </param>
+ /// <param name="sendtype">The type of data in <paramref name="sendbuf"/>.</param>
+ /// <param name="recvbuf">
+ /// Buffer that will receive data sent from other processes to this process.
+ /// </param>
+ /// <param name="recvcounts">
+ /// An array whose jth element contains the number of elements that will be received from the
+ /// process with rank j.
+ /// </param>
+ /// <param name="rdispls">
+ /// An array whose jth element contains the offset into <paramref name="recvbuf"/> where the
+ /// data received from the process with rank j begins.
+ /// </param>
+ /// <param name="recvtype">The type of data in <paramref name="recvbuf"/>.</param>
+ /// <param name="comm">The communicator used for collective communication.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Alltoallv(IntPtr sendbuf, int[] sendcounts, int[] sdispls, MPI_Datatype sendtype,
+ IntPtr recvbuf, int[] recvcounts, int[] rdispls, MPI_Datatype recvtype, MPI_Comm comm);
+
+ /// <summary>
+ /// Transmits data from every process in a communicator to every other process in the communicator.
+ /// Similar to <see cref="MPI_Allgatherv"/>, except that each process can send different data to
+ /// every other process. If all of your processes send the same amount of data to each other, use
+ /// the simpler <see cref="MPI_Alltoall"/>; if the volume of data sent to each process can be different
+ /// but all of the data has the same type, use <see cref="MPI_Alltoallv"/>.
+ /// See <see cref="MPI.Intracommunicator.Alltoall&lt;T&gt;(T[])"/>.
+ /// </summary>
+ /// <param name="sendbuf">
+ /// Buffer containing the data to send from this process. The ith position in this buffer
+ /// contains the data that will be sent to the process with rank i.
+ /// </param>
+ /// <param name="sendcnts">
+ /// An array whose ith element contains the number of elements to be sent to the process with
+ /// rank i.
+ /// </param>
+ /// <param name="sdispls">
+ /// An array whose ith element contains the offsets into <paramref name="sendbuf"/> where the
+ /// data destined for the process with rank i begins.
+ /// </param>
+ /// <param name="sendtypes">
+ /// An array whose ith element contains the type of data that will be sent to rank i.
+ /// </param>
+ /// <param name="recvbuf">
+ /// Buffer that will receive data sent from other processes to this process.
+ /// </param>
+ /// <param name="recvcnts">
+ /// An array whose jth element contains the number of elements that will be received from the
+ /// process with rank j.
+ /// </param>
+ /// <param name="rdispls">
+ /// An array whose jth element contains the offset into <paramref name="recvbuf"/> where the
+ /// data received from the process with rank j begins.
+ /// </param>
+ /// <param name="recvtypes">
+ /// An array whose jth element contains the type of data that will be received from the process
+ /// with rank j.
+ /// </param>
+ /// <param name="comm">The communicator used for collective communication.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Alltoallw(IntPtr sendbuf, int[] sendcnts, int[] sdispls, MPI_Datatype[] sendtypes,
+ IntPtr recvbuf, int[] recvcnts, int[] rdispls, MPI_Datatype[] recvtypes, MPI_Comm comm);
+
+ /// <summary>
+ /// A synchronization barrier where no processor leaves the barrier until all processors have entered the barrier.
+ /// See <see cref="MPI.Communicator.Barrier"/>.
+ /// </summary>
+ /// <param name="comm">The communicator whose processes will be synchronized.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static extern int MPI_Barrier(MPI_Comm comm);
+
+ /// <summary>
+ /// Broadcast a value from the root process to every process within the communication.
+ ///
+ /// See <see cref="MPI.Intracommunicator.Broadcast&lt;T&gt;(ref T, int)"/>.
+ /// </summary>
+ /// <param name="buffer">
+ /// Pointer to the data that will be broadcast. At the root, this buffer will be
+ /// read; in all other processes, the buffer will be written.
+ /// </param>
+ /// <param name="count">The number of elements that <paramref name="buffer"/> points to.</param>
+ /// <param name="datatype">The type of data stored in the <paramref name="buffer"/>.</param>
+ /// <param name="root">The rank of the root processor, from which the data will be broadcast.</param>
+ /// <param name="comm">The communicator over which the data will be transmitted.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Bcast(IntPtr buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm);
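+
+ // Sketch of broadcasting a single integer from rank 0 to every process. Assumes
+ // the MPI_COMM_WORLD and MPI_INT handles defined elsewhere in this file; the
+ // method name is illustrative only.
+ public static unsafe int ExampleBroadcast(int valueAtRoot)
+ {
+     int value = valueAtRoot;           // read at the root, overwritten everywhere else
+     MPI_Bcast(new IntPtr(&value), 1, MPI_INT, /*root*/ 0, MPI_COMM_WORLD);
+     return value;
+ }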
+
+ /// <summary>
+ /// Performs a partial exclusive reduction on the data, returning the result from combining the data provided
+ /// by the first P-1 processes to the process with rank P.
+ /// See <see cref="MPI.Intracommunicator.ExclusiveScan&lt;T&gt;(T, MPI.ReductionOperation&lt;T&gt;)"/>
+ /// </summary>
+ /// <param name="sendbuf">Buffer containing the data to contribute to the reduction.</param>
+ /// <param name="recvbuf">Buffer that will receive the result of combining the first Rank values.</param>
+ /// <param name="count">Number of values in <paramref name="sendbuf"/> and <paramref name="recvbuf"/>.</param>
+ /// <param name="datatype">The type of data in <paramref name="sendbuf"/> and <paramref name="recvbuf"/>.</param>
+ /// <param name="op">The reduction operation used to combine values.</param>
+ /// <param name="comm">The communicator over which the communication will occur.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Exscan(IntPtr sendbuf, IntPtr recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm);
+
+ /// <summary>
+ /// Gather the values provided by each process into an array containing the contributions of all
+ /// of the processes. This routine differs from <see cref="MPI_Allgather"/> in that the results
+ /// are gathered to only the "root" process, which is identified by its <paramref name="rank"/>
+ /// in the communicator.
+ /// See <see cref="MPI.Intracommunicator.Gather&lt;T&gt;(T, int)"/>
+ /// </summary>
+ /// <param name="sendbuf">Buffer containing the values that will be sent from this process.</param>
+ /// <param name="sendcount">The number of elements to send.</param>
+ /// <param name="sendtype">The datatype describing the send buffer.</param>
+ /// <param name="recvbuf">
+ /// Buffer that will contain all of the values contributed by every process.
+ /// This argument is only significant at the root.
+ /// </param>
+ /// <param name="recvcount">
+ /// The number of elements to receive from each process.
+ /// This argument is only significant at the root.
+ /// </param>
+ /// <param name="recvtype">
+ /// The type of data that will be stored in the receive buffer.
+ /// This argument is only significant at the root.
+ /// </param>
+ /// <param name="root">The rank of the "root" process.</param>
+ /// <param name="comm">The communicator over which data will be gathered.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Gather(IntPtr sendbuf, int sendcount, MPI_Datatype sendtype,
+ IntPtr recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm);
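+
+ // Sketch of gathering one integer from every rank to rank 0. The caller supplies
+ // its own rank and the communicator size (obtained elsewhere); assumes the
+ // MPI_COMM_WORLD and MPI_INT handles defined elsewhere in this file. The method
+ // name is illustrative only.
+ public static unsafe int[] ExampleGatherToRoot(int localValue, int myRank, int size)
+ {
+     int[] gathered = new int[size];    // contents are only meaningful at the root
+     fixed (int* recv = gathered)       // pin the managed array for the duration of the call
+     {
+         MPI_Gather(new IntPtr(&localValue), 1, MPI_INT,
+                    new IntPtr(recv), 1, MPI_INT, /*root*/ 0, MPI_COMM_WORLD);
+     }
+     return gathered;
+ }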
+
+ /// <summary>
+ /// Gather the values provided by each process into an array containing the contributions of all
+ /// of the processes. This routine differs from <see cref="MPI_Allgather"/> in that the results
+ /// are gathered to only the "root" process, which is identified by its <paramref name="rank"/>
+ /// in the communicator.
+ /// See <see cref="MPI.Intracommunicator.Gather&lt;T&gt;(T, int)"/>
+ /// </summary>
+ /// <param name="sendbuf">Buffer containing the values that will be sent from this process.</param>
+ /// <param name="sendcount">The number of elements to send.</param>
+ /// <param name="sendtype">The datatype describing the send buffer.</param>
+ /// <param name="recvbuf">
+ /// Buffer that will contain all of the values contributed by every process.
+ /// This argument is only significant at the root.
+ /// </param>
+ /// <param name="recvcounts">
+ /// An array whose ith element is the number of elements to receive from process i.
+ /// This argument is only significant at the root.
+ /// </param>
+ /// <param name="displs">
+ /// An array whose ith element contains the displacement (into <paramref name="recvbuf"/>) at
+ /// which the data from process i will be placed. This argument is only significant at the root.
+ /// </param>
+ /// <param name="recvtype">
+ /// The type of data that will be stored in the receive buffer.
+ /// This argument is only significant at the root.
+ /// </param>
+ /// <param name="root">The rank of the "root" process.</param>
+ /// <param name="comm">The communicator over which data will be gathered.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Gatherv(IntPtr sendbuf, int sendcount, MPI_Datatype sendtype,
+ IntPtr recvbuf, int[] recvcounts, int[] displs, MPI_Datatype recvtype, int root, MPI_Comm comm);
+
+ /// <summary>
+ /// Perform a parallel reduction operation that summarizes the results from the data contributed
+ /// by all of the processes in a communicator. Unlike <see cref="MPI_Allreduce"/>, the results
+ /// of this operation are returned only to the process whose rank is equivalent to
+ /// <paramref name="root"/>, i.e., the "root" process.
+ /// See <see cref="MPI.Intracommunicator.Reduce&lt;T&gt;(T, MPI.ReductionOperation&lt;T&gt;, int)"/>
+ /// </summary>
+ /// <param name="sendbuf">
+ /// Buffer containing the "outgoing" values contributed by the calling process to the reduction operation.
+ /// </param>
+ /// <param name="recvbuf">
+ /// Buffer that will receive the results of the parallel reduction. This argument is
+ /// only significant at the root.
+ /// </param>
+ /// <param name="count">
+ /// The number of elements in <paramref name="sendbuf"/> and <paramref name="recvbuf"/>.
+ /// </param>
+ /// <param name="datatype">
+ /// The type of data in <paramref name="sendbuf"/> and <paramref name="recvbuf"/>.
+ /// </param>
+ /// <param name="op">
+ /// The MPI reduction operation to use, which may be one of the predefined reduction operations
+ /// or a user-defined operation created with <see cref="MPI_Op_create"/>.
+ /// </param>
+ /// <param name="root">
+ /// Identifies the root process (which will receive the result of the reduction) by
+ /// its rank in the communicator <paramref name="comm"/>.
+ /// </param>
+ /// <param name="comm">
+ /// The communicator over which the reduction will occur.
+ /// </param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Reduce(IntPtr sendbuf, IntPtr recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm);
+
+ /// <summary>
+ /// The equivalent of a <see cref="MPI_Reduce"/> followed by a <see cref="MPI_Scatterv"/>, performing a
+ /// reduction on the data provided in <paramref name="sendbuf"/> and then scattering those results
+ /// to all of the processes. See <see cref="MPI.Intracommunicator.ReduceScatter&lt;T&gt;(T[], MPI.ReductionOperation&lt;T&gt;, int[])"/>.
+ /// </summary>
+ /// <param name="sendbuf">Buffer containing the data to be reduced.</param>
+ /// <param name="recvbuf">Buffer that will receive this process's results.</param>
+ /// <param name="recvcounts">
+ /// An array whose ith element gives the number of results that will go to the process with rank i.
+ /// </param>
+ /// <param name="datatype">The type of data in <paramref name="sendbuf"/> and <paramref name="recvbuf"/>.</param>
+ /// <param name="op">The operation used to combine each element in <paramref name="sendbuf"/>.</param>
+ /// <param name="comm">The communicator over which this collective will operate.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Reduce_scatter(IntPtr sendbuf, IntPtr recvbuf, int[] recvcounts, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm);
+
+ /// <summary>
+ /// Performs a partial reduction on the data, returning the result from combining the data provided
+ /// by the first P processes to the process with rank P.
+ /// See <see cref="MPI.Intracommunicator.Scan&lt;T&gt;(T, MPI.ReductionOperation&lt;T&gt;)"/>
+ /// </summary>
+ /// <param name="sendbuf">Buffer containing the data to contribute to the reduction.</param>
+ /// <param name="recvbuf">Buffer that will receive the result of combining the first Rank values.</param>
+ /// <param name="count">Number of values in <paramref name="sendbuf"/> and <paramref name="recvbuf"/>.</param>
+ /// <param name="datatype">The type of data in <paramref name="sendbuf"/> and <paramref name="recvbuf"/>.</param>
+ /// <param name="op">The reduction operation used to combine values.</param>
+ /// <param name="comm">The communicator over which the communication will occur.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Scan(IntPtr sendbuf, IntPtr recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm);
+
+ /// <summary>
+ /// Scatters data from one process (the "root" process) to all of the processes in a communicator,
+ /// with different parts of the data going to different processes.
+ /// See <see cref="MPI.Intracommunicator.Scatter&lt;T&gt;(T[], int)"/>.
+ /// </summary>
+ /// <param name="sendbuf">
+ /// Buffer containing the data to be sent. Only significant at the root process.
+ /// </param>
+ /// <param name="sendcount">
+ /// The number of elements to send to each process. Only significant at the root process.
+ /// </param>
+ /// <param name="sendtype">
+ /// The type of data in <paramref name="sendbuf"/>. Only significant at the root process.
+ /// </param>
+ /// <param name="recvbuf">A buffer that will receive the calling process's part of the data.</param>
+ /// <param name="recvcount">The number of elements to receive.</param>
+ /// <param name="rectype">The type of data to receive.</param>
+ /// <param name="root">The rank of the "root" process, which supplies the data.</param>
+ /// <param name="comm">The communicator over which the data will be scattered.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Scatter(IntPtr sendbuf, int sendcount, MPI_Datatype sendtype,
+ IntPtr recvbuf, int recvcount, MPI_Datatype rectype, int root, MPI_Comm comm);
+
+ /// <summary>
+ /// Scatters data from one process (the "root" process) to all of the processes in a communicator,
+ /// with different parts of the data going to different processes. Unlike <see cref="MPI_Scatter"/>,
+ /// different processes may receive different amounts of data.
+ /// See <see cref="MPI.Intracommunicator.Scatter&lt;T&gt;(T[], int)"/>.
+ /// </summary>
+ /// <param name="sendbuf">
+ /// Buffer containing the data to be sent. Only significant at the root process.
+ /// </param>
+ /// <param name="sendcounts">
+ /// An array whose ith element contains the number of elements to send to process i.
+ /// Only significant at the root process.
+ /// </param>
+ /// <param name="displs">
+ /// An array whose ith element contains the offset (into <paramref name="sendbuf"/>)
+ /// of the data to be sent to process i. Only significant at the root process.
+ /// </param>
+ /// <param name="sendtype">
+ /// The type of data in <paramref name="sendbuf"/>. Only significant at the root process.
+ /// </param>
+ /// <param name="recvbuf">A buffer that will receive the calling process's part of the data.</param>
+ /// <param name="recvcount">The number of elements to receive.</param>
+ /// <param name="recvtype">The type of data to receive.</param>
+ /// <param name="root">The rank of the "root" process, which supplies the data.</param>
+ /// <param name="comm">The communicator over which the data will be scattered.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Scatterv(IntPtr sendbuf, int[] sendcounts, int[] displs, MPI_Datatype sendtype,
+ IntPtr recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm);
+
+ /// <summary>
+ /// Creates an MPI operation that invokes a user-provided function. The MPI operation
+ /// can be used with various reduction operations. MPI.NET provides support for user-defined
+ /// operations via the <see cref="MPI.Operation&lt;T&gt;"/> class.
+ /// </summary>
+ /// <param name="function">A pointer to the user-defined function.</param>
+ /// <param name="commute">Whether this function is commutative.</param>
+ /// <param name="op">Receives the newly-created MPI operation.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Op_create(MPI_User_function function, int commute, out MPI_Op op);
+
+ /// <summary>
+ /// Frees an MPI operation created via <see cref="MPI_Op_create"/>. MPI.NET will automatically
+ /// manage any operations it creates via <see cref="MPI.Operation&lt;T&gt;"/> when the corresponding
+ /// object is disposed of or finalized.
+ /// </summary>
+ /// <param name="op">The operation to be freed.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static unsafe extern int MPI_Op_free(ref MPI_Op op);
+ #endregion
+
+ #region Memory allocator
+ /// <summary>
+ /// Attempts to allocate (unmanaged) memory from MPI. This memory must be
+ /// manually freed with a call to <see cref="MPI_Free_mem"/>.
+ /// This routine is used automatically by the <see cref="UnmanagedMemoryStream"/> to
+ /// allocate memory when serializing or de-serializing objects within MPI.NET.
+ /// </summary>
+ /// <param name="size">The number of bytes to allocate.</param>
+ /// <param name="info">
+ /// Provides extra information to the MPI implementation that may
+ /// help it allocate memory for a specific reason or from a specific
+ /// memory pool.
+ /// </param>
+ /// <param name="baseptr">
+ /// Will receive a pointer to the newly-allocated memory.
+ /// </param>
+ /// <returns>
+ /// <see cref="MPI_SUCCESS"/> if memory is successfully allocated,
+ /// <see cref="MPI_ERR_NO_MEM"/> if no memory is available.
+ /// </returns>
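+ /// <example>
+ /// A minimal sketch added for illustration (not part of the original API documentation):
+ /// <code>
+ /// IntPtr buffer;
+ /// int errorCode = MPI_Alloc_mem(new IntPtr(1024), mpinet_MPI_INFO_NULL(), out buffer);
+ /// if (errorCode == MPI_SUCCESS)
+ /// {
+ ///     // ... use the 1024 bytes of unmanaged memory at 'buffer' ...
+ ///     MPI_Free_mem(buffer);
+ /// }
+ /// </code>
+ /// </example>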
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static extern unsafe int MPI_Alloc_mem(MPI_Aint size, MPI_Info info, out IntPtr baseptr);
+
+ /// <summary>
+ /// Frees memory allocated with <see cref="MPI_Alloc_mem"/>.
+ /// This routine is used automatically by the <see cref="UnmanagedMemoryStream"/> to
+ /// release memory allocated when serializing or de-serializing objects within MPI.NET.
+ /// </summary>
+ /// <param name="ptr">The pointer provided by <see cref="MPI_Alloc_mem"/>.</param>
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static extern unsafe int MPI_Free_mem(IntPtr ptr);
+ #endregion
+
+ #region Helper operations and classes
+ /// <summary>
+ /// Converts the integer result of an MPI_*_compare operation into a <see cref="Comparison"/> enum value.
+ /// </summary>
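+ /// <param name="result">The integer result returned by the native comparison routine.</param>
+ /// <returns>The corresponding <see cref="Comparison"/> value.</returns>
+ /// <example>
+ /// A minimal sketch added for illustration (not part of the original API documentation);
+ /// it assumes an MPI_Comm_compare binding declared elsewhere in this class:
+ /// <code>
+ /// // MPI_Comm_compare is assumed to be declared elsewhere in this class.
+ /// int result;
+ /// MPI_Comm_compare(mpinet_MPI_COMM_WORLD(), mpinet_MPI_COMM_SELF(), out result);
+ /// Comparison comparison = ComparisonFromInt(result);
+ /// </code>
+ /// </example>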
+ public static Comparison ComparisonFromInt(int result)
+ {
+ switch (result)
+ {
+ case MPI_IDENT:
+ return Comparison.Identical;
+
+ case MPI_CONGRUENT:
+ return Comparison.Congruent;
+
+ case MPI_SIMILAR:
+ return Comparison.Similar;
+
+ case MPI_UNEQUAL:
+ return Comparison.Unequal;
+
+ default:
+ throw new InvalidOperationException("MPI internal error: Invalid comparison result");
+ }
+ }
+ #endregion
+
+ #region Process Management
+#if PROCESS_CREATION_PRESENT
+
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static extern unsafe int MPI_Comm_spawn(byte* command, byte** argv, int maxprocs, MPI_Info info, int root, MPI_Comm comm, out MPI_Comm intercomm, out int[] array_of_errorcodes);
+
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static extern unsafe int MPI_Comm_spawn(byte* command, byte** argv, int maxprocs, MPI_Info info, int root, MPI_Comm comm, out MPI_Comm intercomm, int* array_of_errorcodes);
+
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static extern unsafe int MPI_Comm_get_parent(out MPI_Comm comm);
+
+ [DllImport(MPI_DLL, CallingConvention = CallingConvention.StdCall)]
+ public static extern unsafe int MPI_Comm_spawn_multiple(int count, ref byte* array_of_commands, ref byte** array_of_argv, ref int array_of_maxprocs, ref MPI_Info array_of_info, int root, MPI_Comm comm, out MPI_Comm intercomm, out int array_of_errorcodes);
+
+
+ //public byte** MPI_ARGV_NULL = (byte**)0;
+ //public byte** MPI_ARGVS_NULL = (byte**)0;
+ //public int* MPI_ERRCODES_IGNORE = (int*)0;
+#endif
+ #endregion
+
+ // CBridge Customizations Follow
+ [DllImport("mpinet")] public static unsafe extern MPI_Comm mpinet_MPI_COMM_WORLD();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Comm mpinet_MPI_COMM_SELF();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Comm mpinet_MPI_COMM_NULL();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Datatype mpinet_MPI_CHAR();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Datatype mpinet_MPI_SIGNED_CHAR();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Datatype mpinet_MPI_UNSIGNED_CHAR();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Datatype mpinet_MPI_BYTE();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Datatype mpinet_MPI_WCHAR();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Datatype mpinet_MPI_SHORT();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Datatype mpinet_MPI_UNSIGNED_SHORT();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Datatype mpinet_MPI_INT();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Datatype mpinet_MPI_UNSIGNED();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Datatype mpinet_MPI_LONG();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Datatype mpinet_MPI_UNSIGNED_LONG();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Datatype mpinet_MPI_FLOAT();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Datatype mpinet_MPI_DOUBLE();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Datatype mpinet_MPI_LONG_DOUBLE();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Datatype mpinet_MPI_LONG_LONG_INT();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Datatype mpinet_MPI_UNSIGNED_LONG_LONG();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Datatype mpinet_MPI_LONG_LONG();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Datatype mpinet_MPI_PACKED();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Datatype mpinet_MPI_DATATYPE_NULL();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Group mpinet_MPI_GROUP_EMPTY();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Group mpinet_MPI_GROUP_NULL();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Info mpinet_MPI_INFO_NULL();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Copy_function mpinet_MPI_NULL_COPY_FN();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Delete_function mpinet_MPI_NULL_DELETE_FN();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Errhandler mpinet_MPI_ERRORS_ARE_FATAL();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Errhandler mpinet_MPI_ERRORS_RETURN();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Errhandler mpinet_MPI_ERRHANDLER_NULL();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Op mpinet_MPI_MAX();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Op mpinet_MPI_MIN();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Op mpinet_MPI_SUM();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Op mpinet_MPI_PROD();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Op mpinet_MPI_LAND();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Op mpinet_MPI_BAND();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Op mpinet_MPI_LOR();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Op mpinet_MPI_BOR();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Op mpinet_MPI_LXOR();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Op mpinet_MPI_BXOR();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Op mpinet_MPI_MINLOC();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Op mpinet_MPI_MAXLOC();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Op mpinet_MPI_OP_NULL();
+
+ [DllImport("mpinet")] public static unsafe extern MPI_Request mpinet_MPI_REQUEST_NULL();
+
+ }
+}
diff --git a/MPI/cbridge.c b/MPI/cbridge.c
new file mode 100644
index 0000000..abe1b6c
--- /dev/null
+++ b/MPI/cbridge.c
@@ -0,0 +1,58 @@
+/* Copyright (C) 2007, 2008 The Trustees of Indiana University
+ *
+ * Use, modification and distribution is subject to the Boost Software
+ * License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * This file was automatically generated from Unsafe.cs by Unsafe.pl.
+ * Edit at your own risk.
+ *
+ * Authors: Douglas Gregor
+ * Andrew Lumsdaine
+ */
+#include <mpi.h>
+
+MPI_Comm mpinet_MPI_COMM_WORLD() { return MPI_COMM_WORLD; }
+MPI_Comm mpinet_MPI_COMM_SELF() { return MPI_COMM_SELF; }
+MPI_Comm mpinet_MPI_COMM_NULL() { return MPI_COMM_NULL; }
+MPI_Datatype mpinet_MPI_CHAR() { return MPI_CHAR; }
+MPI_Datatype mpinet_MPI_SIGNED_CHAR() { return MPI_SIGNED_CHAR; }
+MPI_Datatype mpinet_MPI_UNSIGNED_CHAR() { return MPI_UNSIGNED_CHAR; }
+MPI_Datatype mpinet_MPI_BYTE() { return MPI_BYTE; }
+MPI_Datatype mpinet_MPI_WCHAR() { return MPI_WCHAR; }
+MPI_Datatype mpinet_MPI_SHORT() { return MPI_SHORT; }
+MPI_Datatype mpinet_MPI_UNSIGNED_SHORT() { return MPI_UNSIGNED_SHORT; }
+MPI_Datatype mpinet_MPI_INT() { return MPI_INT; }
+MPI_Datatype mpinet_MPI_UNSIGNED() { return MPI_UNSIGNED; }
+MPI_Datatype mpinet_MPI_LONG() { return MPI_LONG; }
+MPI_Datatype mpinet_MPI_UNSIGNED_LONG() { return MPI_UNSIGNED_LONG; }
+MPI_Datatype mpinet_MPI_FLOAT() { return MPI_FLOAT; }
+MPI_Datatype mpinet_MPI_DOUBLE() { return MPI_DOUBLE; }
+MPI_Datatype mpinet_MPI_LONG_DOUBLE() { return MPI_LONG_DOUBLE; }
+MPI_Datatype mpinet_MPI_LONG_LONG_INT() { return MPI_LONG_LONG_INT; }
+MPI_Datatype mpinet_MPI_UNSIGNED_LONG_LONG() { return MPI_UNSIGNED_LONG_LONG; }
+MPI_Datatype mpinet_MPI_LONG_LONG() { return MPI_LONG_LONG; }
+MPI_Datatype mpinet_MPI_PACKED() { return MPI_PACKED; }
+MPI_Datatype mpinet_MPI_DATATYPE_NULL() { return MPI_DATATYPE_NULL; }
+MPI_Group mpinet_MPI_GROUP_EMPTY() { return MPI_GROUP_EMPTY; }
+MPI_Group mpinet_MPI_GROUP_NULL() { return MPI_GROUP_NULL; }
+MPI_Info mpinet_MPI_INFO_NULL() { return MPI_INFO_NULL; }
+MPI_Copy_function * mpinet_MPI_NULL_COPY_FN() { return MPI_NULL_COPY_FN; }
+MPI_Delete_function * mpinet_MPI_NULL_DELETE_FN() { return MPI_NULL_DELETE_FN; }
+MPI_Errhandler mpinet_MPI_ERRORS_ARE_FATAL() { return MPI_ERRORS_ARE_FATAL; }
+MPI_Errhandler mpinet_MPI_ERRORS_RETURN() { return MPI_ERRORS_RETURN; }
+MPI_Errhandler mpinet_MPI_ERRHANDLER_NULL() { return MPI_ERRHANDLER_NULL; }
+MPI_Op mpinet_MPI_MAX() { return MPI_MAX; }
+MPI_Op mpinet_MPI_MIN() { return MPI_MIN; }
+MPI_Op mpinet_MPI_SUM() { return MPI_SUM; }
+MPI_Op mpinet_MPI_PROD() { return MPI_PROD; }
+MPI_Op mpinet_MPI_LAND() { return MPI_LAND; }
+MPI_Op mpinet_MPI_BAND() { return MPI_BAND; }
+MPI_Op mpinet_MPI_LOR() { return MPI_LOR; }
+MPI_Op mpinet_MPI_BOR() { return MPI_BOR; }
+MPI_Op mpinet_MPI_LXOR() { return MPI_LXOR; }
+MPI_Op mpinet_MPI_BXOR() { return MPI_BXOR; }
+MPI_Op mpinet_MPI_MINLOC() { return MPI_MINLOC; }
+MPI_Op mpinet_MPI_MAXLOC() { return MPI_MAXLOC; }
+MPI_Op mpinet_MPI_OP_NULL() { return MPI_OP_NULL; }
+MPI_Request mpinet_MPI_REQUEST_NULL() { return MPI_REQUEST_NULL; }