From: heister Date: Mon, 3 Mar 2014 14:19:15 +0000 (+0000) Subject: rewrap comments X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=0c8ab26a5cc18127f6aa98d3348f19c6eebbaa1e;p=dealii-svn.git rewrap comments git-svn-id: https://svn.dealii.org/trunk@32602 0785d39b-7218-0410-832d-ea1e28bc413d --- diff --git a/deal.II/include/deal.II/base/mpi.h b/deal.II/include/deal.II/base/mpi.h index 4aa290c0b2..5a734f16c7 100644 --- a/deal.II/include/deal.II/base/mpi.h +++ b/deal.II/include/deal.II/base/mpi.h @@ -54,132 +54,91 @@ DEAL_II_NAMESPACE_OPEN namespace Utilities { /** - * A namespace for utility functions that - * abstract certain operations using the - * Message Passing Interface (MPI) or - * provide fallback operations in - * case deal.II is configured not to use - * MPI at all. + * A namespace for utility functions that abstract certain operations using + * the Message Passing Interface (MPI) or provide fallback operations in + * case deal.II is configured not to use MPI at all. * * @ingroup utilities */ namespace MPI { /** - * Return the number of MPI processes - * there exist in the given communicator - * object. If this is a sequential job, - * it returns 1. + * Return the number of MPI processes there exist in the given + * communicator object. If this is a sequential job, it returns 1. */ unsigned int n_mpi_processes (const MPI_Comm &mpi_communicator); /** - * Return the number of the present MPI - * process in the space of processes - * described by the given - * communicator. This will be a unique - * value for each process between zero - * and (less than) the number of all - * processes (given by - * get_n_mpi_processes()). + * Return the number of the present MPI process in the space of processes + * described by the given communicator. This will be a unique value for + * each process between zero and (less than) the number of all processes + * (given by get_n_mpi_processes()). */ unsigned int this_mpi_process (const MPI_Comm &mpi_communicator); /** - * Consider an unstructured - * communication pattern where - * every process in an MPI - * universe wants to send some - * data to a subset of the other - * processors. To do that, the - * other processors need to know - * who to expect messages - * from. This function computes - * this information. + * Consider an unstructured communication pattern where every process in + * an MPI universe wants to send some data to a subset of the other + * processors. To do that, the other processors need to know who to expect + * messages from. This function computes this information. * - * @param mpi_comm A communicator - * that describes the processors - * that are going to communicate - * with each other. + * @param mpi_comm A communicator that describes the processors that are + * going to communicate with each other. * - * @param destinations The list - * of processors the current - * process wants to send - * information to. This list need - * not be sorted in any way. If - * it contains duplicate entries - * that means that multiple - * messages are intended for a - * given destination. + * @param destinations The list of processors the current process wants to + * send information to. This list need not be sorted in any way. If it + * contains duplicate entries that means that multiple messages are + * intended for a given destination. * - * @return A list of processors - * that have indicated that they - * want to send something to the - * current processor. The - * resulting list is not - * sorted. 
It may contain - * duplicate entries if - * processors enter the same - * destination more than once in - * their destinations list. + * @return A list of processors that have indicated that they want to send + * something to the current processor. The resulting list is not sorted. + * It may contain duplicate entries if processors enter the same + * destination more than once in their destinations list. */ std::vector compute_point_to_point_communication_pattern (const MPI_Comm &mpi_comm, const std::vector &destinations); /** - * Given a communicator, generate a new - * communicator that contains the same - * set of processors but that has a - * different, unique identifier. + * Given a communicator, generate a new communicator that contains the + * same set of processors but that has a different, unique identifier. * - * This functionality can be used to - * ensure that different objects, such as - * distributed matrices, each have unique - * communicators over which they can - * interact without interfering with each - * other. + * This functionality can be used to ensure that different objects, such + * as distributed matrices, each have unique communicators over which they + * can interact without interfering with each other. * - * When no longer needed, the - * communicator created here needs to - * be destroyed using - * MPI_Comm_free. + * When no longer needed, the communicator created here needs to be + * destroyed using MPI_Comm_free. */ MPI_Comm duplicate_communicator (const MPI_Comm &mpi_communicator); /** - * Return the sum over all processors of the value @p t. This function - * is collective over all processors given in the communicator. If - * deal.II is not configured for use of MPI, this function simply - * returns the value of @p t. This function corresponds to the - * MPI_Allreduce function, i.e. all processors receive - * the result of this operation. + * Return the sum over all processors of the value @p t. This function is + * collective over all processors given in the communicator. If deal.II is + * not configured for use of MPI, this function simply returns the value + * of @p t. This function corresponds to the MPI_Allreduce + * function, i.e. all processors receive the result of this operation. * - * @note Sometimes, not all processors need a results and in that case - * one would call the MPI_Reduce function instead of the + * @note Sometimes, not all processors need a results and in that case one + * would call the MPI_Reduce function instead of the * MPI_Allreduce function. The latter is at most twice as * expensive, so if you are concerned about performance, it may be - * worthwhile investigating whether your algorithm indeed needs the - * result everywhere or whether you could get away with calling the - * current function and getting the result everywhere. + * worthwhile investigating whether your algorithm indeed needs the result + * everywhere or whether you could get away with calling the current + * function and getting the result everywhere. * - * @note This function is only implemented for certain template - * arguments T, namely float, double, int, - * unsigned int. + * @note This function is only implemented for certain template arguments + * T, namely float, double, int, unsigned int. */ template T sum (const T &t, const MPI_Comm &mpi_communicator); /** - * Like the previous function, - * but take the sums over the - * elements of an array - * of length N. 
In other words, - * the i-th element of the - * results array is the sum over - * the i-th entries of the input - * arrays from each processor. + * Like the previous function, but take the sums over the elements of an + * array of length N. In other words, the i-th element of the results + * array is the sum over the i-th entries of the input arrays from each + * processor. */ template inline @@ -188,13 +147,9 @@ namespace Utilities T (&sums)[N]); /** - * Like the previous function, - * but take the sums over the - * elements of a std::vector. In other words, - * the i-th element of the - * results array is the sum over - * the i-th entries of the input - * arrays from each processor. + * Like the previous function, but take the sums over the elements of a + * std::vector. In other words, the i-th element of the results array is + * the sum over the i-th entries of the input arrays from each processor. */ template inline @@ -204,37 +159,32 @@ namespace Utilities /** * Return the maximum over all processors of the value @p t. This function - * is collective over all processors given in the communicator. If - * deal.II is not configured for use of MPI, this function simply - * returns the value of @p t. This function corresponds to the - * MPI_Allreduce function, i.e. all processors receive - * the result of this operation. + * is collective over all processors given in the communicator. If deal.II + * is not configured for use of MPI, this function simply returns the + * value of @p t. This function corresponds to the + * MPI_Allreduce function, i.e. all processors receive the + * result of this operation. * - * @note Sometimes, not all processors need a results and in that case - * one would call the MPI_Reduce function instead of the + * @note Sometimes, not all processors need a results and in that case one + * would call the MPI_Reduce function instead of the * MPI_Allreduce function. The latter is at most twice as * expensive, so if you are concerned about performance, it may be - * worthwhile investigating whether your algorithm indeed needs the - * result everywhere or whether you could get away with calling the - * current function and getting the result everywhere. + * worthwhile investigating whether your algorithm indeed needs the result + * everywhere or whether you could get away with calling the current + * function and getting the result everywhere. * - * @note This function is only implemented for certain template - * arguments T, namely float, double, int, - * unsigned int. + * @note This function is only implemented for certain template arguments + * T, namely float, double, int, unsigned int. */ template T max (const T &t, const MPI_Comm &mpi_communicator); /** - * Like the previous function, - * but take the maxima over the - * elements of an array - * of length N. In other words, - * the i-th element of the - * results array is the maximum of - * the i-th entries of the input - * arrays from each processor. + * Like the previous function, but take the maxima over the elements of an + * array of length N. In other words, the i-th element of the results + * array is the maximum of the i-th entries of the input arrays from each + * processor. */ template inline @@ -243,13 +193,10 @@ namespace Utilities T (&maxima)[N]); /** - * Like the previous function, - * but take the maximum over the - * elements of a std::vector. In other words, - * the i-th element of the - * results array is the maximum over - * the i-th entries of the input - * arrays from each processor. 
+ * Like the previous function, but take the maximum over the elements of a + * std::vector. In other words, the i-th element of the results array is + * the maximum over the i-th entries of the input arrays from each + * processor. */ template inline @@ -258,8 +205,7 @@ namespace Utilities std::vector &maxima); /** - * Data structure to store the result of - * min_max_avg(). + * Data structure to store the result of min_max_avg(). */ struct MinMaxAvg { @@ -272,23 +218,18 @@ namespace Utilities }; /** - * Returns sum, average, minimum, - * maximum, processor id of minimum and - * maximum as a collective operation of - * on the given MPI communicator @p - * mpi_communicator . Each processor's - * value is given in @p my_value and - * the result will be returned. - * The result is available on all - * machines. + * Returns sum, average, minimum, maximum, processor id of minimum and + * maximum as a collective operation of on the given MPI communicator @p + * mpi_communicator . Each processor's value is given in @p my_value and + * the result will be returned. The result is available on all machines. * - * @note Sometimes, not all processors need a results and in that case - * one would call the MPI_Reduce function instead of the + * @note Sometimes, not all processors need a results and in that case one + * would call the MPI_Reduce function instead of the * MPI_Allreduce function. The latter is at most twice as * expensive, so if you are concerned about performance, it may be - * worthwhile investigating whether your algorithm indeed needs the - * result everywhere or whether you could get away with calling the - * current function and getting the result everywhere. + * worthwhile investigating whether your algorithm indeed needs the result + * everywhere or whether you could get away with calling the current + * function and getting the result everywhere. */ MinMaxAvg min_max_avg (const double my_value, @@ -297,30 +238,20 @@ namespace Utilities /** - * A class that is used to initialize the - * MPI system at the beginning of a - * program and to shut it down again at - * the end. It also allows you to control - * the number threads used in each MPI - * task. + * A class that is used to initialize the MPI system at the beginning of a + * program and to shut it down again at the end. It also allows you to + * control the number threads used in each MPI task. * - * If deal.II is configured with PETSc, - * the library will also be initialized - * in the beginning and destructed at the - * end automatically (internally by calling - * PetscInitialize() and PetscFinalize()). + * If deal.II is configured with PETSc, the library will also be + * initialized in the beginning and destructed at the end automatically + * (internally by calling PetscInitialize() and PetscFinalize()). * - * If a program uses MPI one would - * typically just create an object of - * this type at the beginning of - * main(). The constructor - * of this class then runs - * MPI_Init() with the given - * arguments. At the end of the program, - * the compiler will invoke the - * destructor of this object which in - * turns calls MPI_Finalize - * to shut down the MPI system. + * If a program uses MPI one would typically just create an object of this + * type at the beginning of main(). The constructor of this + * class then runs MPI_Init() with the given arguments. At + * the end of the program, the compiler will invoke the destructor of this + * object which in turns calls MPI_Finalize to shut down the + * MPI system. 
* * This class is used in step-32, for example. */ @@ -328,28 +259,22 @@ namespace Utilities { public: /** - * Constructor. Takes the arguments - * from the command line (in case of - * MPI, the number of processes is - * specified there), and sets up a - * respective communicator by calling - * MPI_Init(). This - * constructor can only be called once - * in a program, since MPI cannot be + * Constructor. Takes the arguments from the command line (in case of + * MPI, the number of processes is specified there), and sets up a + * respective communicator by calling MPI_Init(). This + * constructor can only be called once in a program, since MPI cannot be * initialized twice. * - * This constructor sets max_num_threads - * to 1 (see other constructor). + * This constructor sets max_num_threads to 1 (see other constructor). */ MPI_InitFinalize (int &argc, char ** &argv) /*DEAL_II_DEPRECATED*/; /** - * Initialize MPI (and optionally PETSc) - * and set the number of threads used by deal.II (and TBB) to the given - * parameter. If set to numbers::invalid_unsigned_int, the number - * of threads is determined by TBB. When in doubt, set this value - * to 1. + * Initialize MPI (and optionally PETSc) and set the number of threads + * used by deal.II (and TBB) to the given parameter. If set to + * numbers::invalid_unsigned_int, the number of threads is determined by + * TBB. When in doubt, set this value to 1. * * This will call MultithreadInfo::set_thread_limit() with @p * max_num_threads, so the environment variable DEAL_II_NUM_THREADS will @@ -359,23 +284,16 @@ namespace Utilities char ** &argv, unsigned int max_num_threads); /** - * Destructor. Calls - * MPI_Finalize() in - * case this class owns the MPI - * process. + * Destructor. Calls MPI_Finalize() in case this class owns the + * MPI process. */ ~MPI_InitFinalize(); private: /** - * This flag tells the class - * whether it owns the MPI - * process (i.e., it has been - * constructed using the - * argc/argv input, or it has - * been copied). In the former - * case, the command - * MPI_Finalize() will + * This flag tells the class whether it owns the MPI process (i.e., it + * has been constructed using the argc/argv input, or it has been + * copied). In the former case, the command MPI_Finalize() will * be called at destruction. */ const bool owns_mpi; @@ -393,8 +311,7 @@ namespace Utilities { #ifdef DEAL_II_WITH_MPI /** - * Return the corresponding MPI data - * type id for the argument given. + * Return the corresponding MPI data type id for the argument given. */ inline MPI_Datatype mpi_type_id (const int *) {
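
The comment blocks rewrapped in this diff document the public Utilities::MPI interface. As an illustrative sketch (not part of the commit itself), a minimal program built from the declarations quoted above might look like the following; the header path deal.II/base/mpi.h is taken from the diff, while the use of MPI_COMM_WORLD and the printed output are assumptions of the sketch:

    #include <deal.II/base/mpi.h>

    #include <iostream>

    int main (int argc, char **argv)
    {
      // The constructor runs MPI_Init() (and PetscInitialize() if PETSc is
      // configured); the destructor runs MPI_Finalize() when main() returns.
      // The third argument limits the number of threads used per MPI task.
      dealii::Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);

      const MPI_Comm communicator = MPI_COMM_WORLD;

      const unsigned int my_rank =
        dealii::Utilities::MPI::this_mpi_process (communicator);
      const unsigned int n_ranks =
        dealii::Utilities::MPI::n_mpi_processes (communicator);

      // sum() corresponds to MPI_Allreduce, so every process receives
      // the same result; in a sequential build it simply returns my_rank.
      const unsigned int rank_sum =
        dealii::Utilities::MPI::sum (my_rank, communicator);

      if (my_rank == 0)
        std::cout << "Sum of ranks over " << n_ranks
                  << " processes: " << rank_sum << std::endl;
    }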
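
The point-to-point pattern helper described above can likewise be used to find out which ranks will send to the current process before any receives are posted. In the sketch below, std::vector<unsigned int> as the element type is an assumption (the template arguments are not visible in the declaration above), as is the choice of a ring-shaped communication pattern:

    #include <deal.II/base/mpi.h>

    #include <vector>

    void example_pattern (const MPI_Comm communicator)
    {
      const unsigned int my_rank =
        dealii::Utilities::MPI::this_mpi_process (communicator);
      const unsigned int n_ranks =
        dealii::Utilities::MPI::n_mpi_processes (communicator);

      // Each process intends to send one message to its right neighbor
      // (with wrap-around). The list need not be sorted and may contain
      // duplicates if several messages go to the same destination.
      std::vector<unsigned int> destinations;
      destinations.push_back ((my_rank + 1) % n_ranks);

      // Collective call: returns the (unsorted) list of ranks that want to
      // send something to the current process -- here, the left neighbor.
      const std::vector<unsigned int> origins =
        dealii::Utilities::MPI::compute_point_to_point_communication_pattern
          (communicator, destinations);

      // 'origins' can now be used to post the matching MPI_Recv/MPI_Irecv calls.
      (void) origins;
    }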
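
Finally, min_max_avg() bundles several reductions into a single collective call whose result is available on every process. The MinMaxAvg member names used below (min, max, avg) are an assumption of this sketch, since the struct body lies outside the changed lines shown in the diff:

    #include <deal.II/base/mpi.h>

    #include <iostream>

    void report_timing (const double local_seconds,
                        const MPI_Comm communicator)
    {
      // Collective over the given communicator, like MPI_Allreduce.
      const dealii::Utilities::MPI::MinMaxAvg stats =
        dealii::Utilities::MPI::min_max_avg (local_seconds, communicator);

      // Member names are assumed; the struct body is elided in the diff above.
      if (dealii::Utilities::MPI::this_mpi_process (communicator) == 0)
        std::cout << "wall time: min=" << stats.min
                  << "  max=" << stats.max
                  << "  avg=" << stats.avg << std::endl;
    }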