From 624e689f6191a4d7e8c58c33421dd1c3a5c911f5 Mon Sep 17 00:00:00 2001
From: Wolfgang Bangerth
Date: Fri, 8 Jan 2016 17:50:07 -0600
Subject: [PATCH] Reorder the declaration of member variables.

This allows a significant simplification of the code flow in the
constructor.
---
 examples/step-17/step-17.cc | 78 ++++++++++++++++++-------------------
 1 file changed, 37 insertions(+), 41 deletions(-)

diff --git a/examples/step-17/step-17.cc b/examples/step-17/step-17.cc
index 70f148db49..f953b39f51 100644
--- a/examples/step-17/step-17.cc
+++ b/examples/step-17/step-17.cc
@@ -116,8 +116,27 @@ namespace Step17
     void refine_grid ();
     void output_results (const unsigned int cycle) const;
 
-    // The first difference to step-8 is the introduction of a
-    // stream-like variable pcout. It is, in essence,
+    // The first change is that we have to declare a variable that
+    // indicates the @ref GlossMPICommunicator "MPI communicator" over
+    // which we are supposed to distribute our computations.
+    MPI_Comm mpi_communicator;
+
+    // Then we have two variables that tell us where in the parallel
+    // world we are. The first of the following variables,
+    // n_mpi_processes tells us how many MPI processes
+    // there exist in total, while the second one,
+    // this_mpi_process, indicates which is the number of
+    // the present process within this space of processes (in MPI
+    // language, this corresponds to the @ref GlossMPIRank "rank" of
+    // the process). The latter will have a unique value for each
+    // process between zero and (less than)
+    // n_mpi_processes. If this program is run on a
+    // single machine without MPI support, then their values are
+    // 1 and 0, respectively.
+    const unsigned int n_mpi_processes;
+    const unsigned int this_mpi_process;
+
+    // Next up is a stream-like variable pcout. It is, in essence,
     // just something we use for convenience: in a parallel program,
     // if each process outputs status information, then there quickly
     // is a lot of clutter. Rather, we would want to only have one
@@ -140,7 +159,11 @@ namespace Step17
     // operator<<.
     ConditionalOStream pcout;
 
-    // The next few variables are taken verbatim from step-8:
+    // The remainder of the list of member variables is fundamentally the
+    // same as in step-8. However, we change the declarations of matrix
+    // and vector types to use parallel PETSc objects instead. Note that
+    // we do not use a separate sparsity pattern, since PETSc manages this
+    // internally as part of its matrix data structures.
     Triangulation<dim>   triangulation;
     DoFHandler<dim>      dof_handler;
 
@@ -148,36 +171,10 @@ namespace Step17
     FESystem<dim>        fe;
 
     ConstraintMatrix     hanging_node_constraints;
 
-    // In step-8, this would have been the place where we declared the
-    // member variables for the sparsity pattern, the system matrix,
-    // right hand, and solution vector. We change these declarations
-    // to use parallel PETSc objects instead. Note that we do not use
-    // a separate sparsity pattern, since PETSc manages that as part
-    // of its matrix data structures.
     PETScWrappers::MPI::SparseMatrix system_matrix;
 
     PETScWrappers::MPI::Vector       solution;
     PETScWrappers::MPI::Vector       system_rhs;
-
-    // The next change is that we have to declare a variable that
-    // indicates the @ref GlossMPICommunicator "MPI communicator" over
-    // which we are supposed to distribute our computations.
-    MPI_Comm mpi_communicator;
-
-    // Then we have two variables that tell us where in the parallel
-    // world we are. The first of the following variables,
-    // n_mpi_processes tells us how many MPI processes
-    // there exist in total, while the second one,
-    // this_mpi_process, indicates which is the number of
-    // the present process within this space of processes (in MPI
-    // language, this corresponds to the @ref GlossMPIRank "rank" of
-    // the process). The latter will have a unique value for each
-    // process between zero and (less than)
-    // n_mpi_processes. If this program is run on a
-    // single machine without MPI support, then their values are
-    // 1 and 0, respectively.
-    const unsigned int n_mpi_processes;
-    const unsigned int this_mpi_process;
   };
 
@@ -260,23 +257,22 @@ namespace Step17
   // links a subset of all processes), and call the Utilities::MPI
   // helper functions to determine the number of processes and where
   // the present one fits into this picture. In addition, we make sure
-  // that output is only generated by the (globally) first process. As
-  // this_mpi_process is determined after creation of
-  // pcout, we cannot set the condition through the constructor,
-  // i.e. by pcout(std::cout, this_mpi_process==0), but
-  // set the condition separately.
+  // that output is only generated by the (globally) first process.
+  // We do so by passing the stream we want to output to
+  // (std::cout) and a true/false flag as arguments where
+  // the latter is determined by testing whether the process currently
+  // executing the constructor call is the first in the MPI universe.
   template <int dim>
   ElasticProblem<dim>::ElasticProblem ()
     :
-    pcout (std::cout),
-    dof_handler (triangulation),
-    fe (FE_Q<dim>(1), dim),
     mpi_communicator (MPI_COMM_WORLD),
     n_mpi_processes (Utilities::MPI::n_mpi_processes(mpi_communicator)),
-    this_mpi_process (Utilities::MPI::this_mpi_process(mpi_communicator))
-  {
-    pcout.set_condition(this_mpi_process == 0);
-  }
+    this_mpi_process (Utilities::MPI::this_mpi_process(mpi_communicator)),
+    pcout (std::cout,
+           (this_mpi_process == 0)),
+    dof_handler (triangulation),
+    fe (FE_Q<dim>(1), dim)
+  {}
-- 
2.39.5
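
A minimal standalone sketch (not from the patch or from deal.II) of the C++ rule the reordering relies on: non-static data members are initialized in the order they are declared in the class, not in the order they appear in the constructor's member-initializer list. Because mpi_communicator, n_mpi_processes, and this_mpi_process are now declared before pcout, this_mpi_process already holds its value when pcout is constructed, so the ConditionalOStream can receive its condition directly and the separate pcout.set_condition() call disappears. The names ConditionalPrinter, Example, and my_rank below are invented for illustration only.

#include <iostream>
#include <ostream>

// Stand-in for ConditionalOStream: forwards output only when 'active' is true.
struct ConditionalPrinter
{
  ConditionalPrinter (std::ostream &s, const bool is_active)
    : stream (s), active (is_active)
  {}

  template <typename T>
  const ConditionalPrinter &operator<< (const T &t) const
  {
    if (active)
      stream << t;
    return *this;
  }

  std::ostream &stream;
  const bool    active;
};

struct Example
{
  // 'rank' is declared before 'printer', so it is guaranteed to be
  // initialized first; this mirrors what the patch establishes for
  // this_mpi_process and pcout.
  const int          rank;
  ConditionalPrinter printer;

  Example (const int my_rank)
    : rank (my_rank),
      printer (std::cout, rank == 0)   // safe: 'rank' is already initialized
  {}
};

int main ()
{
  Example e (0);
  e.printer << "only rank 0 prints this\n";
  return 0;
}

With the opposite declaration order, printer (std::cout, rank == 0) would read rank before it has been initialized, which is exactly the trap the old code worked around by calling set_condition() in the constructor body.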