namespace Utilities
{
/**
- * A namespace for utility functions that
- * abstract certain operations using the
- * Message Passing Interface (MPI) or
- * provide fallback operations in
- * case deal.II is configured not to use
- * MPI at all.
+ * A namespace for utility functions that abstract certain operations using
+ * the Message Passing Interface (MPI) or provide fallback operations in
+ * case deal.II is configured not to use MPI at all.
*
* @ingroup utilities
*/
namespace MPI
{
/**
- * Return the number of MPI processes
- * there exist in the given communicator
- * object. If this is a sequential job,
- * it returns 1.
+ * Return the number of MPI processes that exist in the given
+ * communicator object. If this is a sequential job, it returns 1.
*/
unsigned int n_mpi_processes (const MPI_Comm &mpi_communicator);
/**
- * Return the number of the present MPI
- * process in the space of processes
- * described by the given
- * communicator. This will be a unique
- * value for each process between zero
- * and (less than) the number of all
- * processes (given by
- * get_n_mpi_processes()).
+ * Return the number of the present MPI process in the space of processes
+ * described by the given communicator. This will be a unique value for
+ * each process between zero and (less than) the number of all processes
+ * (given by n_mpi_processes()).
*/
unsigned int this_mpi_process (const MPI_Comm &mpi_communicator);
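/* Usage sketch (illustration only, not part of the original header;
 * assumes MPI has already been initialized):
 *
 * @code
 *   const unsigned int n_procs = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
 *   const unsigned int my_rank = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
 *   if (my_rank == 0)
 *     std::cout << "Running with " << n_procs << " processes." << std::endl;
 * @endcode
 */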
/**
- * Consider an unstructured
- * communication pattern where
- * every process in an MPI
- * universe wants to send some
- * data to a subset of the other
- * processors. To do that, the
- * other processors need to know
- * who to expect messages
- * from. This function computes
- * this information.
+ * Consider an unstructured communication pattern where every process in
+ * an MPI universe wants to send some data to a subset of the other
+ * processors. To do that, the other processors need to know who to expect
+ * messages from. This function computes this information.
*
- * @param mpi_comm A communicator
- * that describes the processors
- * that are going to communicate
- * with each other.
+ * @param mpi_comm A communicator that describes the processors that are
+ * going to communicate with each other.
*
- * @param destinations The list
- * of processors the current
- * process wants to send
- * information to. This list need
- * not be sorted in any way. If
- * it contains duplicate entries
- * that means that multiple
- * messages are intended for a
- * given destination.
+ * @param destinations The list of processors the current process wants to
+ * send information to. This list need not be sorted in any way. If it
+ * contains duplicate entries, that means that multiple messages are
+ * intended for a given destination.
*
- * @return A list of processors
- * that have indicated that they
- * want to send something to the
- * current processor. The
- * resulting list is not
- * sorted. It may contain
- * duplicate entries if
- * processors enter the same
- * destination more than once in
- * their destinations list.
+ * @return A list of processors that have indicated that they want to send
+ * something to the current processor. The resulting list is not sorted.
+ * It may contain duplicate entries if processors enter the same
+ * destination more than once in their destinations list.
*/
std::vector<unsigned int>
compute_point_to_point_communication_pattern (const MPI_Comm &mpi_comm,
const std::vector<unsigned int> &destinations);
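/* A sketch of how the result might be used (illustration only; the
 * variable names and the surrounding send/receive logic are hypothetical):
 *
 * @code
 *   // each process fills 'destinations' with the ranks it will send to
 *   std::vector<unsigned int> destinations = ...;
 *   const std::vector<unsigned int> origins
 *     = Utilities::MPI::compute_point_to_point_communication_pattern (mpi_comm,
 *                                                                     destinations);
 *   // post one receive for each entry of 'origins', one send per destination
 * @endcode
 */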
/**
- * Given a communicator, generate a new
- * communicator that contains the same
- * set of processors but that has a
- * different, unique identifier.
+ * Given a communicator, generate a new communicator that contains the
+ * same set of processors but that has a different, unique identifier.
*
- * This functionality can be used to
- * ensure that different objects, such as
- * distributed matrices, each have unique
- * communicators over which they can
- * interact without interfering with each
- * other.
+ * This functionality can be used to ensure that different objects, such
+ * as distributed matrices, each have unique communicators over which they
+ * can interact without interfering with each other.
*
- * When no longer needed, the
- * communicator created here needs to
- * be destroyed using
- * <code>MPI_Comm_free</code>.
+ * When no longer needed, the communicator created here needs to be
+ * destroyed using <code>MPI_Comm_free</code>.
*/
MPI_Comm duplicate_communicator (const MPI_Comm &mpi_communicator);
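/* Typical life cycle of such a communicator (sketch; 'object_comm' is a
 * hypothetical name):
 *
 * @code
 *   MPI_Comm object_comm = Utilities::MPI::duplicate_communicator (mpi_communicator);
 *   // ... communicate over object_comm without interfering with others ...
 *   MPI_Comm_free (&object_comm);   // required once it is no longer needed
 * @endcode
 */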
/**
- * Return the sum over all processors of the value @p t. This function
- * is collective over all processors given in the communicator. If
- * deal.II is not configured for use of MPI, this function simply
- * returns the value of @p t. This function corresponds to the
- * <code>MPI_Allreduce</code> function, i.e. all processors receive
- * the result of this operation.
+ * Return the sum over all processors of the value @p t. This function is
+ * collective over all processors given in the communicator. If deal.II is
+ * not configured for use of MPI, this function simply returns the value
+ * of @p t. This function corresponds to the <code>MPI_Allreduce</code>
+ * function, i.e. all processors receive the result of this operation.
*
- * @note Sometimes, not all processors need a results and in that case
- * one would call the <code>MPI_Reduce</code> function instead of the
+ * @note Sometimes, not all processors need a result, and in that case one
+ * would call the <code>MPI_Reduce</code> function instead of the
* <code>MPI_Allreduce</code> function. The latter is at most twice as
* expensive, so if you are concerned about performance, it may be
- * worthwhile investigating whether your algorithm indeed needs the
- * result everywhere or whether you could get away with calling the
- * current function and getting the result everywhere.
+ * worthwhile investigating whether your algorithm indeed needs the result
+ * everywhere or whether you could get away with calling the current
+ * function and getting the result everywhere.
*
- * @note This function is only implemented for certain template
- * arguments <code>T</code>, namely <code>float, double, int,
- * unsigned int</code>.
+ * @note This function is only implemented for certain template arguments
+ * <code>T</code>, namely <code>float, double, int, unsigned int</code>.
*/
template <typename T>
T sum (const T &t,
const MPI_Comm &mpi_communicator);
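/* For example (sketch; 'local_value' is a hypothetical per-process
 * quantity):
 *
 * @code
 *   const double local_value = ...;
 *   const double global_sum  = Utilities::MPI::sum (local_value, mpi_communicator);
 * @endcode
 */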
/**
- * Like the previous function,
- * but take the sums over the
- * elements of an array
- * of length N. In other words,
- * the i-th element of the
- * results array is the sum over
- * the i-th entries of the input
- * arrays from each processor.
+ * Like the previous function, but take the sums over the elements of an
+ * array of length N. In other words, the i-th element of the results
+ * array is the sum over the i-th entries of the input arrays from each
+ * processor.
*/
template <typename T, unsigned int N>
inline
void sum (const T (&values)[N],
          const MPI_Comm &mpi_communicator,
          T (&sums)[N]);
/**
- * Like the previous function,
- * but take the sums over the
- * elements of a std::vector. In other words,
- * the i-th element of the
- * results array is the sum over
- * the i-th entries of the input
- * arrays from each processor.
+ * Like the previous function, but take the sums over the elements of a
+ * std::vector. In other words, the i-th element of the results array is
+ * the sum over the i-th entries of the input arrays from each processor.
*/
template <typename T>
inline
void sum (const std::vector<T> &values,
          const MPI_Comm &mpi_communicator,
          std::vector<T> &sums);
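/* Element-wise semantics (illustration only): on p processes, every entry
 * of the result below equals p:
 *
 * @code
 *   std::vector<double> local_values (10, 1.);
 *   std::vector<double> global_sums (10);
 *   Utilities::MPI::sum (local_values, mpi_communicator, global_sums);
 * @endcode
 */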
/**
* Return the maximum over all processors of the value @p t. This function
- * is collective over all processors given in the communicator. If
- * deal.II is not configured for use of MPI, this function simply
- * returns the value of @p t. This function corresponds to the
- * <code>MPI_Allreduce</code> function, i.e. all processors receive
- * the result of this operation.
+ * is collective over all processors given in the communicator. If deal.II
+ * is not configured for use of MPI, this function simply returns the
+ * value of @p t. This function corresponds to the
+ * <code>MPI_Allreduce</code> function, i.e. all processors receive the
+ * result of this operation.
*
- * @note Sometimes, not all processors need a results and in that case
- * one would call the <code>MPI_Reduce</code> function instead of the
+ * @note Sometimes, not all processors need a result, and in that case one
+ * would call the <code>MPI_Reduce</code> function instead of the
* <code>MPI_Allreduce</code> function. The latter is at most twice as
* expensive, so if you are concerned about performance, it may be
- * worthwhile investigating whether your algorithm indeed needs the
- * result everywhere or whether you could get away with calling the
- * current function and getting the result everywhere.
+ * worthwhile investigating whether your algorithm indeed needs the result
+ * everywhere or whether you could get away with calling the current
+ * function and getting the result everywhere.
*
- * @note This function is only implemented for certain template
- * arguments <code>T</code>, namely <code>float, double, int,
- * unsigned int</code>.
+ * @note This function is only implemented for certain template arguments
+ * <code>T</code>, namely <code>float, double, int, unsigned int</code>.
*/
template <typename T>
T max (const T &t,
const MPI_Comm &mpi_communicator);
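/* For example (sketch), finding the largest error indicator across all
 * processes ('local_error' is a hypothetical quantity):
 *
 * @code
 *   const double local_error  = ...;
 *   const double global_error = Utilities::MPI::max (local_error, mpi_communicator);
 * @endcode
 */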
/**
- * Like the previous function,
- * but take the maxima over the
- * elements of an array
- * of length N. In other words,
- * the i-th element of the
- * results array is the maximum of
- * the i-th entries of the input
- * arrays from each processor.
+ * Like the previous function, but take the maxima over the elements of an
+ * array of length N. In other words, the i-th element of the results
+ * array is the maximum of the i-th entries of the input arrays from each
+ * processor.
*/
template <typename T, unsigned int N>
inline
void max (const T (&values)[N],
          const MPI_Comm &mpi_communicator,
          T (&maxima)[N]);
/**
- * Like the previous function,
- * but take the maximum over the
- * elements of a std::vector. In other words,
- * the i-th element of the
- * results array is the maximum over
- * the i-th entries of the input
- * arrays from each processor.
+ * Like the previous function, but take the maximum over the elements of a
+ * std::vector. In other words, the i-th element of the results array is
+ * the maximum over the i-th entries of the input arrays from each
+ * processor.
*/
template <typename T>
inline
void max (const std::vector<T> &values,
          const MPI_Comm &mpi_communicator,
          std::vector<T> &maxima);
/**
- * Data structure to store the result of
- * min_max_avg().
+ * Data structure to store the result of min_max_avg().
*/
struct MinMaxAvg
{
  double       sum;         // sum over all entries
  double       min;         // minimum over all entries
  double       max;         // maximum over all entries
  unsigned int min_index;   // rank of the processor holding the minimum
  unsigned int max_index;   // rank of the processor holding the maximum
  double       avg;         // average over all entries
};
/**
- * Returns sum, average, minimum,
- * maximum, processor id of minimum and
- * maximum as a collective operation of
- * on the given MPI communicator @p
- * mpi_communicator . Each processor's
- * value is given in @p my_value and
- * the result will be returned.
- * The result is available on all
- * machines.
+ * Returns sum, average, minimum, maximum, and the processor ids of the
+ * minimum and maximum as a collective operation on the given MPI
+ * communicator @p mpi_communicator. Each processor's value is given in
+ * @p my_value and the result will be returned. The result is available
+ * on all machines.
*
- * @note Sometimes, not all processors need a results and in that case
- * one would call the <code>MPI_Reduce</code> function instead of the
+ * @note Sometimes, not all processors need a result, and in that case one
+ * would call the <code>MPI_Reduce</code> function instead of the
* <code>MPI_Allreduce</code> function. The latter is at most twice as
* expensive, so if you are concerned about performance, it may be
- * worthwhile investigating whether your algorithm indeed needs the
- * result everywhere or whether you could get away with calling the
- * current function and getting the result everywhere.
+ * worthwhile investigating whether your algorithm indeed needs the result
+ * everywhere or whether you could get away with calling the current
+ * function and getting the result everywhere.
*/
MinMaxAvg
min_max_avg (const double my_value,
             const MPI_Comm &mpi_communicator);
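/* Usage sketch (member names as in the MinMaxAvg structure above;
 * 'local_time' is a hypothetical per-process timing):
 *
 * @code
 *   const Utilities::MPI::MinMaxAvg stats
 *     = Utilities::MPI::min_max_avg (local_time, mpi_communicator);
 *   if (Utilities::MPI::this_mpi_process (mpi_communicator) == 0)
 *     std::cout << "min=" << stats.min << ", max=" << stats.max
 *               << ", avg=" << stats.avg << std::endl;
 * @endcode
 */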
/**
- * A class that is used to initialize the
- * MPI system at the beginning of a
- * program and to shut it down again at
- * the end. It also allows you to control
- * the number threads used in each MPI
- * task.
+ * A class that is used to initialize the MPI system at the beginning of a
+ * program and to shut it down again at the end. It also allows you to
+ * control the number of threads used in each MPI task.
*
- * If deal.II is configured with PETSc,
- * the library will also be initialized
- * in the beginning and destructed at the
- * end automatically (internally by calling
- * PetscInitialize() and PetscFinalize()).
+ * If deal.II is configured with PETSc, the library will also be
+ * initialized in the beginning and destructed at the end automatically
+ * (internally by calling PetscInitialize() and PetscFinalize()).
*
- * If a program uses MPI one would
- * typically just create an object of
- * this type at the beginning of
- * <code>main()</code>. The constructor
- * of this class then runs
- * <code>MPI_Init()</code> with the given
- * arguments. At the end of the program,
- * the compiler will invoke the
- * destructor of this object which in
- * turns calls <code>MPI_Finalize</code>
- * to shut down the MPI system.
+ * If a program uses MPI one would typically just create an object of this
+ * type at the beginning of <code>main()</code>. The constructor of this
+ * class then runs <code>MPI_Init()</code> with the given arguments. At
+ * the end of the program, the compiler will invoke the destructor of this
+ * object, which in turn calls <code>MPI_Finalize</code> to shut down the
+ * MPI system.
*
* This class is used in step-32, for example.
*/
class MPI_InitFinalize
{
public:
/**
- * Constructor. Takes the arguments
- * from the command line (in case of
- * MPI, the number of processes is
- * specified there), and sets up a
- * respective communicator by calling
- * <tt>MPI_Init()</tt>. This
- * constructor can only be called once
- * in a program, since MPI cannot be
+ * Constructor. Takes the arguments from the command line (in case of
+ * MPI, the number of processes is specified there), and sets up a
+ * respective communicator by calling <tt>MPI_Init()</tt>. This
+ * constructor can only be called once in a program, since MPI cannot be
* initialized twice.
*
- * This constructor sets max_num_threads
- * to 1 (see other constructor).
+ * This constructor sets max_num_threads to 1 (see other constructor).
*/
MPI_InitFinalize (int &argc,
char ** &argv) /*DEAL_II_DEPRECATED*/;
/**
- * Initialize MPI (and optionally PETSc)
- * and set the number of threads used by deal.II (and TBB) to the given
- * parameter. If set to numbers::invalid_unsigned_int, the number
- * of threads is determined by TBB. When in doubt, set this value
- * to 1.
+ * Initialize MPI (and optionally PETSc) and set the number of threads
+ * used by deal.II (and TBB) to the given parameter. If set to
+ * numbers::invalid_unsigned_int, the number of threads is determined by
+ * TBB. When in doubt, set this value to 1.
*
* This will call MultithreadInfo::set_thread_limit() with @p
* max_num_threads, so the environment variable DEAL_II_NUM_THREADS will
* be ignored.
*/
MPI_InitFinalize (int &argc,
                  char ** &argv,
unsigned int max_num_threads);
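/* Typical use in a program's main() (sketch, along the lines of what
 * step-32 does; limiting each MPI task to one thread):
 *
 * @code
 *   int main (int argc, char **argv)
 *   {
 *     Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);
 *     // ... parallel program ...
 *   }   // destructor calls MPI_Finalize() here
 * @endcode
 */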
/**
- * Destructor. Calls
- * <tt>MPI_Finalize()</tt> in
- * case this class owns the MPI
- * process.
+ * Destructor. Calls <tt>MPI_Finalize()</tt> in case this class owns the
+ * MPI process.
*/
~MPI_InitFinalize();
private:
/**
- * This flag tells the class
- * whether it owns the MPI
- * process (i.e., it has been
- * constructed using the
- * argc/argv input, or it has
- * been copied). In the former
- * case, the command
- * <tt>MPI_Finalize()</tt> will
+ * This flag tells the class whether it owns the MPI process (i.e.,
+ * whether it has been constructed using the argc/argv input, or whether
+ * it has been copied). In the former case, the command <tt>MPI_Finalize()</tt> will
* be called at destruction.
*/
const bool owns_mpi;
};

namespace internal
{
#ifdef DEAL_II_WITH_MPI
/**
- * Return the corresponding MPI data
- * type id for the argument given.
+ * Return the corresponding MPI data type id for the argument given.
*/
inline MPI_Datatype mpi_type_id (const int *)
{