From: Ralf Hartmann
Date: Tue, 20 Sep 2005 10:54:46 +0000 (+0000)
Subject: Use Utilities::System::get_*mpi_process* functions. Significant simplification of...
X-Git-Tag: v8.0.0~13123
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=91a207aa32b320dc72f2175738646bf490b34c00;p=dealii.git

Use Utilities::System::get_*mpi_process* functions. Significant
simplification of example.

git-svn-id: https://svn.dealii.org/trunk@11480 0785d39b-7218-0410-832d-ea1e28bc413d
---
diff --git a/deal.II/examples/step-17/step-17.cc b/deal.II/examples/step-17/step-17.cc
index ad72f5e53d..936fd5df2c 100644
--- a/deal.II/examples/step-17/step-17.cc
+++ b/deal.II/examples/step-17/step-17.cc
@@ -46,8 +46,14 @@
      // new stream ``pcout'' which is used
      // in parallel computations for
      // generating output only on one of
-     // the processes.
+     // the MPI processes.
 #include
+     // We are going to query the number
+     // of processes and the number of the
+     // present process by calling the
+     // respective functions in the
+     // Utilities::System namespace.
+#include
      // Then, we are
      // going to replace all linear algebra
      // components that involve the (global)
@@ -212,34 +218,6 @@ class ElasticProblem
      // and ``0'', respectively.
     const unsigned int n_mpi_processes;
     const unsigned int this_mpi_process;
-
-     // In order to obtain values for the
-     // above two variables, we need to query
-     // the MPI subsystem (in case there is no
-     // MPI running at all, these functions
-     // automatically query some wrappers that
-     // PETSc provides and that return default
-     // values for a single process). We could
-     // initialize above variables in the
-     // constructor of this class, but since
-     // they never change we chose to mark
-     // them as ``const'', and so they can
-     // only be initialized if we package all
-     // the querying functions into auxiliary,
-     // static functions that return the
-     // requested values as their return
-     // value. The argument they take denotes
-     // the MPI communicator object from which
-     // they shall query the total number of
-     // processes, and the rank within this
-     // communicator:
-    static
-    unsigned int
-    get_n_mpi_processes (const MPI_Comm &mpi_communicator);
-
-    static
-    unsigned int
-    get_this_mpi_process (const MPI_Comm &mpi_communicator);
 };


@@ -307,76 +285,6 @@ void RightHandSide<dim>::vector_value_list (const std::vector<Point<dim> > &poin
 }


-
-     // So here first come the two functions that
-     // query the number of processes associated
-     // with an MPI communicator object, as well
-     // as the rank of the present process within
-     // it. Note again that PETSc provides dummy
-     // implementations of these functions if no
-     // MPI support is requested. These dummy
-     // functions return ``1'' and ``0'' for the
-     // total number of processes and the rank of
-     // the present process within the
-     // communicator, respectively.
-     //
-     // Unfortunately, we have to work
-     // around an oddity in the way PETSc
-     // and some gcc versions interact. If
-     // we use PETSc's MPI dummy
-     // implementation, it expands the
-     // calls to the two MPI functions
-     // basically as ``(n_jobs=1, 0)'',
-     // i.e. it assigns the number one to
-     // the variable holding the number of
-     // jobs, and then uses the comma
-     // operator to let the entire
-     // expression have the value
-     // zero. The latter is important,
-     // since ``MPI_Comm_size'' returns an
-     // error code that we may want to
-     // check (we don't here, but one
-     // could in principle), and the trick
-     // with the comma operator makes sure
-     // that both the number of jobs is
-     // correctly assigned, and the return
-     // value is zero. Unfortunately, if
-     // some recent versions of gcc detect
-     // that the comma expression just
-     // stands by itself, i.e. the result
-     // is not assigned to another
-     // variable, then they warn
-     // ``right-hand operand of comma has
-     // no effect''. This unwanted side
-     // effect can be suppressed by
-     // casting the result of the entire
-     // expression to type ``void'' -- not
-     // beautiful, but helps calming down
-     // unwarranted compiler warnings...
-template <int dim>
-unsigned int
-ElasticProblem<dim>::get_n_mpi_processes (const MPI_Comm &mpi_communicator)
-{
-  int n_jobs;
-  (void)MPI_Comm_size (mpi_communicator, &n_jobs);
-
-  return n_jobs;
-}
-
-
-
-template <int dim>
-unsigned int
-ElasticProblem<dim>::get_this_mpi_process (const MPI_Comm &mpi_communicator)
-{
-  int rank;
-  (void)MPI_Comm_rank (mpi_communicator, &rank);
-
-  return rank;
-}
-
-
-
      // The first step in the actual
      // implementation of things is the
      // constructor of the main class. Apart from
@@ -388,23 +296,29 @@ ElasticProblem<dim>::get_this_mpi_process (const MPI_Comm &mpi_communicator)
      // together (in more complex applications,
      // one could here use a communicator object
      // that only links a subset of all
-     // processes), and call above helper
+     // processes), and call the Utilities helper
      // functions to determine the number of
      // processes and where the present one fits
      // into this picture. In addition, we make
      // sure that output is only generated by the
-     // (globally) first process:
+     // (globally) first process. As
+     // this_mpi_process is determined only after
+     // the creation of pcout, we cannot set the
+     // condition through the constructor, i.e. by
+     // pcout(std::cout, this_mpi_process==0), but
+     // have to set the condition separately.

 template <int dim>
 ElasticProblem<dim>::ElasticProblem ()
                 :
-                pcout (std::cout,
-                       get_this_mpi_process(MPI_COMM_WORLD) == 0),
+                pcout (std::cout),
                 dof_handler (triangulation),
                 fe (FE_Q<dim>(1), dim),
                 mpi_communicator (MPI_COMM_WORLD),
-                n_mpi_processes (get_n_mpi_processes(mpi_communicator)),
-                this_mpi_process (get_this_mpi_process(mpi_communicator))
-{}
+                n_mpi_processes (Utilities::System::get_n_mpi_processes(mpi_communicator)),
+                this_mpi_process (Utilities::System::get_this_mpi_process(mpi_communicator))
+{
+  pcout.set_condition(this_mpi_process == 0);
+}
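The step-17 change in a nutshell: C++ initializes class members in declaration
order, and ``pcout'' is declared before ``this_mpi_process'', so the output
condition can no longer be passed to the ``pcout'' constructor and must be set
in the constructor body instead. The following minimal sketch of that pattern
is not part of the patch; it assumes ``pcout'' is a deal.II ConditionalOStream
(constructible from a stream, with a set_condition() member, as used above),
and the class name ``Sketch'' is hypothetical:

    #include <iostream>
    // assumes the deal.II headers declaring ConditionalOStream and
    // Utilities::System, plus MPI support, are available

    class Sketch
    {
      public:
        Sketch ();

      private:
        ConditionalOStream pcout;             // declared (and constructed) first
        const MPI_Comm     mpi_communicator;
        const unsigned int n_mpi_processes;
        const unsigned int this_mpi_process;  // initialized only later
    };

    Sketch::Sketch ()
                    :
                    pcout (std::cout),        // condition still unknown here
                    mpi_communicator (MPI_COMM_WORLD),
                    n_mpi_processes (Utilities::System::get_n_mpi_processes(mpi_communicator)),
                    this_mpi_process (Utilities::System::get_this_mpi_process(mpi_communicator))
    {
      // members are initialized in declaration order, so only now does
      // this_mpi_process hold a valid value and the condition can be set:
      pcout.set_condition (this_mpi_process == 0);
    }

In step-18 below, ``pcout'' happens to be declared after ``this_mpi_process'',
so there the condition can still be passed directly to the constructor.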
diff --git a/deal.II/examples/step-18/step-18.cc b/deal.II/examples/step-18/step-18.cc
index 8c368015eb..b4e6d87f60 100644
--- a/deal.II/examples/step-18/step-18.cc
+++ b/deal.II/examples/step-18/step-18.cc
@@ -19,6 +19,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -755,19 +756,7 @@ namespace QuasiStaticElasticity
      // ``active_cell_iterator'').
     unsigned int n_local_cells;

-     // Finally, here are the same two
-     // helper functions that we already had
-     // in step-17 to extract some
-     // information from the MPI subsystem:
-    static
-    unsigned int
-    get_n_mpi_processes (const MPI_Comm &mpi_communicator);
-
-    static
-    unsigned int
-    get_this_mpi_process (const MPI_Comm &mpi_communicator);
-
-     // In addition, we have a
+     // Finally, we have a
      // static variable that denotes
      // the linear relationship
      // between the stress and
@@ -1015,33 +1004,7 @@ namespace QuasiStaticElasticity
      // @sect3{Implementation of the ``TopLevel'' class}

      // Now for the implementation of the main
-     // class. The first two functions are
-     // verbatim copies from step-17:
-    template <int dim>
-    unsigned int
-    TopLevel<dim>::get_n_mpi_processes (const MPI_Comm &mpi_communicator)
-    {
-      int n_jobs;
-      (void)MPI_Comm_size (mpi_communicator, &n_jobs);
-
-      return n_jobs;
-    }
-
-
-
-    template <int dim>
-    unsigned int
-    TopLevel<dim>::get_this_mpi_process (const MPI_Comm &mpi_communicator)
-    {
-      int rank;
-      (void)MPI_Comm_rank (mpi_communicator, &rank);
-
-      return rank;
-    }
-
-
-
-     // Then initialize the
+     // class. First, we initialize the
      // stress-strain tensor, which we
      // have declared as a static const
      // variable. We chose Lame
@@ -1073,10 +1036,9 @@ namespace QuasiStaticElasticity
     dof_handler (triangulation),
     quadrature_formula (2),
     mpi_communicator (MPI_COMM_WORLD),
-    n_mpi_processes (get_n_mpi_processes(mpi_communicator)),
-    this_mpi_process (get_this_mpi_process(mpi_communicator)),
-    pcout (std::cout,
-           get_this_mpi_process(mpi_communicator) == 0)
+    n_mpi_processes (Utilities::System::get_n_mpi_processes(mpi_communicator)),
+    this_mpi_process (Utilities::System::get_this_mpi_process(mpi_communicator)),
+    pcout (std::cout, this_mpi_process == 0)
   {}
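For completeness, the workaround used by the now-removed helper functions,
reduced to a standalone example: PETSc's serial MPI dummy expands
``MPI_Comm_size'' to a comma expression like ``(n_jobs=1, 0)'', and some gcc
versions warn about the discarded value unless the expression is cast to
``void''. The macro below is a hypothetical stand-in for PETSc's dummy, used
only to make the sketch self-contained:

    #include <cstdio>

    // hypothetical stand-in for PETSc's serial MPI dummy: assigns 1 to
    // the process count and lets the whole comma expression evaluate to
    // 0, i.e. the MPI error code
    #define DUMMY_MPI_Comm_size(comm, n) ((*(n) = 1), 0)

    unsigned int get_n_processes ()
    {
      int n_jobs;
      // without the cast to ``void'', some gcc versions warn
      // "right-hand operand of comma has no effect", since the error
      // code to which the comma expression evaluates is discarded
      (void)DUMMY_MPI_Comm_size (0, &n_jobs);
      return n_jobs;
    }

    int main ()
    {
      std::printf ("%u\n", get_n_processes ());   // prints ``1''
    }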