#include <source/toolbox/parallel/MPI.h>
Public Types | |
typedef int | comm |
typedef int | group |
typedef int | request |
typedef int | status |
Static Public Member Functions | |
static void | setCallAbortInSerialInsteadOfExit (bool flag=true) |
static void | abort () |
static void | init (int *argc, char **argv[]) |
static void | finalize () |
static void | initialize () |
static void | setCommunicator (MPI::comm communicator) |
static MPI::comm | getCommunicator () |
static int | getRank () |
static int | getNodes () |
static void | updateOutgoingStatistics (const int messages, const int bytes) |
static void | updateIncomingStatistics (const int messages, const int bytes) |
static int | getOutgoingMessages () |
static int | getOutgoingBytes () |
static int | getIncomingMessages () |
static int | getIncomingBytes () |
static int | getTreeDepth () |
static void | barrier () |
static double | sumReduction (const double x) |
static void | sumReduction (double *x, const int n=1) |
static float | sumReduction (const float x) |
static void | sumReduction (float *x, const int n=1) |
static dcomplex | sumReduction (const dcomplex x) |
static void | sumReduction (dcomplex *x, const int n=1) |
static int | sumReduction (const int x) |
static void | sumReduction (int *x, const int n=1) |
static double | minReduction (const double x, int *rank_of_min=NULL) |
static void | minReduction (double *x, const int n=1, int *rank_of_min=NULL) |
static float | minReduction (const float x, int *rank_of_min=NULL) |
static void | minReduction (float *x, const int n=1, int *rank_of_min=NULL) |
static int | minReduction (const int x, int *rank_of_min=NULL) |
static void | minReduction (int *x, const int n=1, int *rank_of_min=NULL) |
static double | maxReduction (const double x, int *rank_of_max=NULL) |
static void | maxReduction (double *x, const int n=1, int *rank_of_max=NULL) |
static float | maxReduction (const float x, int *rank_of_max=NULL) |
static void | maxReduction (float *x, const int n=1, int *rank_of_max=NULL) |
static int | maxReduction (const int x, int *rank_of_max=NULL) |
static void | maxReduction (int *x, const int n=1, int *rank_of_max=NULL) |
static void | allToOneSumReduction (int *x, const int n, const int root=0) |
static int | bcast (const int x, const int root) |
static void | bcast (int *x, int &length, const int root) |
static void | bcast (char *x, int &length, const int root) |
static void | send (const int *buf, const int length, const int receiving_proc_number, const bool send_length=true, int tag=-1) |
This function sends an MPI message with an integer array to another processor. | |
static void | sendBytes (const void *buf, const int number_bytes, const int receiving_proc_number) |
This function sends an MPI message with an array of bytes (MPI_BYTE) to receiving_proc_number. | |
static int | recvBytes (void *buf, int number_bytes) |
This function receives an MPI message with an array of max size number_bytes (MPI_BYTE) from any processor. | |
static void | recv (int *buf, int &length, const int sending_proc_number, const bool get_length=true, int tag=-1) |
This function receives an MPI message with an integer array from another processor. | |
static void | allGather (const int *x_in, int size_in, int *x_out, int size_out) |
static void | allGather (const double *x_in, int size_in, double *x_out, int size_out) |
static void | allGather (int x_in, int *x_out) |
static void | allGather (double x_in, double *x_out) |
Static Public Attributes | |
static comm | commWorld = 0 |
static comm | commNull = -1 |
Note that this class is a utility class to group function calls in one name space (all calls are to static functions). Thus, you should never attempt to instantiate a class of type MPI; simply call the functions as static functions using the MPI::function(...) syntax.
|
MPI Types |
|
|
|
|
|
|
|
Set boolean flag indicating whether exit or abort is called when running with one processor. Calling this function influences the behavior of calls to MPI::abort(). By default, the flag is true meaning that abort() will be called. Passing false means exit(-1) will be called. |
|
Call MPI_Abort or exit depending on whether running with one or more processes and value set by function above, if called. The default is to call exit(-1) if running with one processor and to call MPI_Abort() otherwise. This function avoids having to guard abort calls in application code. |
|
Call MPI_Init. Use of this function avoids guarding MPI init calls in application code. |
|
Call MPI_Finalize. Use of this function avoids guarding MPI finalize calls in application code. |
|
Initialize the MPI utility class. The MPI utility class must be initialized after the call to MPI_Init or MPI::init. |
|
Set the communicator that is used for the MPI communication routines. The default communicator is MPI_COMM_WORLD. |
|
Get the current MPI communicator. The default communicator is MPI_COMM_WORLD. |
|
Return the processor rank (identifier) from 0 through the number of processors minus one. |
|
Return the number of processors (nodes). |
|
Update the statistics for outgoing messages. Statistics are automatically updated for the reduction calls in MPI. |
|
Update the statistics for incoming messages. Statistics are automatically updated for the reduction calls in MPI. |
|
Return the number of outgoing messages. |
|
Return the number of outgoing message bytes. |
|
Return the number of incoming messages. |
|
Return the number of incoming message bytes. |
|
Get the depth of the reduction trees given the current number of MPI processors. |
|
Perform a global barrier across all processors. |
|
Perform a scalar sum reduction on a double across all nodes. Each processor contributes a value x of type double, and the sum is returned from the function. |
|
Perform an array sum reduction on doubles across all nodes. Each processor contributes an array of values of type double, and the element-wise sum is returned in the same array. |
|
Perform a scalar sum reduction on a float across all nodes. Each processor contributes a value x of type float, and the sum is returned from the function. |
|
Perform an array sum reduction on floats across all nodes. Each processor contributes an array of values of type float, and the element-wise sum is returned in the same array. |
|
Perform a scalar sum reduction on a dcomplex across all nodes. Each processor contributes a value x of type dcomplex, and the sum is returned from the function. |
|
Perform an array sum reduction on dcomplexes across all nodes. Each processor contributes an array of values of type dcomplex, and the element-wise sum is returned in the same array. |
|
Perform a scalar sum reduction on an integer across all nodes. Each processor contributes a value x of type int, and the sum is returned from the function. |
|
Perform an array sum reduction on integers across all nodes. Each processor contributes an array of values of type int, and the element-wise sum is returned in the same array. |
|
Perform a scalar min reduction on a double across all nodes. Each processor contributes a value x of type double, and the minimum is returned from the function. If a 'rank_of_min' argument is provided, it will set it to the rank of process holding the minimum value. |
|
Perform an array min reduction on doubles across all nodes. Each processor contributes an array of values of type double, and the element-wise minimum is returned in the same array. If a 'rank_of_min' argument is provided, it will set the array to the rank of process holding the minimum value. Like the double argument, the size of the supplied 'rank_of_min' array should be n. |
|
Perform a scalar min reduction on a float across all nodes. Each processor contributes a value x of type float, and the minimum is returned from the function. If a 'rank_of_min' argument is provided, it will set it to the rank of process holding the minimum value. |
|
Perform an array min reduction on floats across all nodes. Each processor contributes an array of values of type float, and the element-wise minimum is returned in the same array. If a 'rank_of_min' argument is provided, it will set the array to the rank of process holding the minimum value. Like the double argument, the size of the supplied 'rank_of_min' array should be n. |
|
Perform a scalar min reduction on an integer across all nodes. Each processor contributes a value x of type int, and the minimum is returned from the function. If a 'rank_of_min' argument is provided, it will set it to the rank of process holding the minimum value. |
|
Perform an array min reduction on integers across all nodes. Each processor contributes an array of values of type int, and the element-wise minimum is returned in the same array. If a 'rank_of_min' argument is provided, it will set the array to the rank of process holding the minimum value. Like the double argument, the size of the supplied 'rank_of_min' array should be n. |
|
Perform a scalar max reduction on a double across all nodes. Each processor contributes a value x of type double, and the maximum is returned from the function. If a 'rank_of_max' argument is provided, it will set it to the rank of process holding the maximum value. |
|
Perform an array max reduction on doubles across all nodes. Each processor contributes an array of values of type double, and the element-wise maximum is returned in the same array. If a 'rank_of_max' argument is provided, it will set the array to the rank of process holding the maximum value. Like the double argument, the size of the supplied 'rank_of_max' array should be n. |
|
Perform a scalar max reduction on a float across all nodes. Each processor contributes a value x of type float, and the maximum is returned from the function. If a 'rank_of_max' argument is provided, it will set it to the rank of process holding the maximum value. |
|
Perform an array max reduction on floats across all nodes. Each processor contributes an array of values of type float, and the element-wise maximum is returned in the same array. If a 'rank_of_max' argument is provided, it will set the array to the rank of process holding the maximum value. Like the double argument, the size of the supplied 'rank_of_max' array should be n. |
|
Perform a scalar max reduction on an integer across all nodes. Each processor contributes a value x of type int, and the maximum is returned from the function. If a 'rank_of_max' argument is provided, it will set it to the rank of process holding the maximum value. |
|
Perform an array max reduction on integers across all nodes. Each processor contributes an array of values of type int, and the element-wise maximum is returned in the same array. If a 'rank_of_max' argument is provided, it will set the array to the rank of process holding the maximum value. Like the double argument, the size of the supplied 'rank_of_max' array should be n. |
|
Perform an all-to-one sum reduction on an integer array. The final result is only available on the root processor. |
|
Broadcast integer from specified root process to all other processes. All processes other than root, receive a copy of the integer value. |
|
Broadcast integer array from specified root processor to all other processors. For the root processor, "array" and "length" are treated as const. |
|
Broadcast char array from specified root processor to all other processors. For the root processor, "array" and "length" are treated as const. |
|
This function sends an MPI message with an integer array to another processor. If the receiving processor knows in advance the length of the array, use "send_length = false;" otherwise, this processor will first send the length of the array, then send the data. This call must be paired with a matching call to MPI::recv.
|
|
This function sends an MPI message with an array of bytes (MPI_BYTE) to receiving_proc_number. This call must be paired with a matching call to MPI::recvBytes.
|
|
This function receives an MPI message with an array of max size number_bytes (MPI_BYTE) from any processor. This call must be paired with a matching call to MPI::sendBytes. This function returns the processor number of the sender.
|
|
This function receives an MPI message with an integer array from another processor. If this processor knows in advance the length of the array, use "get_length = false;" otherwise, the sending processor will first send the length of the array, then send the data. This call must be paired with a matching call to MPI::send.
|
|
Each processor sends an array of integers or doubles to all other processors; each processor's array may differ in length. The x_out array must be pre-allocated to the correct length (this is a bit cumbersome, but is necessary to prevent the allGather function from allocating memory that is freed elsewhere). To properly preallocate memory, before calling this method, call size_out = MPI::sumReduction(size_in) then allocate the x_out array. |
|
|
|
Each processor sends every other processor an integer or double. The x_out array should be preallocated to a length equal to the number of processors. |
|
|
|
MPI constants |
|
|