void refine_grid ();
void output_results (const unsigned int cycle) const;
- // The first difference to step-8 is the introduction of a
- // stream-like variable <code>pcout</code>. It is, in essence,
+ // The first change is that we have to declare a variable that
+ // indicates the @ref GlossMPICommunicator "MPI communicator" over
+ // which we are supposed to distribute our computations.
+ MPI_Comm mpi_communicator;
+
+ // Then we have two variables that tell us where in the parallel
+ // world we are. The first of the following variables,
+ // <code>n_mpi_processes</code>, tells us how many MPI processes
+ // there are in total, while the second one,
+ // <code>this_mpi_process</code>, indicates the number of
+ // the present process within this space of processes (in MPI
+ // language, this corresponds to the @ref GlossMPIRank "rank" of
+ // the process). The latter will have a unique value for each
+ // process, between zero (inclusive) and
+ // <code>n_mpi_processes</code> (exclusive). If this program is run on a
+ // single machine without MPI support, then their values are
+ // <code>1</code> and <code>0</code>, respectively.
+ const unsigned int n_mpi_processes;
+ const unsigned int this_mpi_process;
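+
+ // As an aside for readers who already know plain MPI: these two
+ // numbers are simply what <code>MPI_Comm_size</code> and
+ // <code>MPI_Comm_rank</code> report, and the Utilities::MPI functions
+ // used in the constructor below are essentially thin wrappers around
+ // those calls. A minimal sketch (not part of this program):
+ // @code
+ //   int n_processes, rank;
+ //   MPI_Comm_size (MPI_COMM_WORLD, &n_processes); // == n_mpi_processes
+ //   MPI_Comm_rank (MPI_COMM_WORLD, &rank);        // == this_mpi_process
+ // @endcode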
+
+ // Next up is a stream-like variable <code>pcout</code>. It is, in essence,
// just something we use for convenience: in a parallel program,
// if each process outputs status information, then there quickly
// is a lot of clutter. Rather, we would want to only have one
// process (say, the one with rank zero) produce output, without
// having to guard every use of <code>operator<<</code> with a
// check of the process number. The ConditionalOStream class does
// exactly this: it forwards whatever is written to it to a given
// stream, but only if the condition it was given evaluates to true.
ConditionalOStream pcout;
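+
+ // With this object in place, producing output that shows up only once
+ // on the screen becomes as simple as something like the following
+ // (a usage sketch, not a line taken from this program):
+ // @code
+ //   pcout << "Number of active cells: "
+ //         << triangulation.n_active_cells()
+ //         << std::endl;
+ // @endcode
+ // This prints on the process with rank zero and is silently discarded
+ // on all other processes.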
- // The next few variables are taken verbatim from step-8:
+ // The remainder of the list of member variables is fundamentally the
+ // same as in step-8. However, we change the declarations of matrix
+ // and vector types to use parallel PETSc objects instead. Note that
+ // we do not use a separate sparsity pattern, since PETSc manages this
+ // internally as part of its matrix data structures.
Triangulation<dim> triangulation;
DoFHandler<dim> dof_handler;
FESystem<dim> fe;
ConstraintMatrix hanging_node_constraints;
- // In step-8, this would have been the place where we declared the
- // member variables for the sparsity pattern, the system matrix,
- // right hand side, and solution vector. We change these declarations
- // to use parallel PETSc objects instead. Note that we do not use
- // a separate sparsity pattern, since PETSc manages that as part
- // of its matrix data structures.
PETScWrappers::MPI::SparseMatrix system_matrix;
PETScWrappers::MPI::Vector solution;
PETScWrappers::MPI::Vector system_rhs;
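+
+ // To make the point about the sparsity pattern concrete: in
+ // <code>setup_system()</code>, these objects will be initialized
+ // directly from size information, roughly along the lines of the
+ // following sketch (here, <code>n_local_dofs</code> stands for the
+ // number of degrees of freedom handled by the current process, and
+ // the exact <code>reinit</code> overloads depend on the deal.II
+ // version in use):
+ // @code
+ //   system_matrix.reinit (mpi_communicator,
+ //                         dof_handler.n_dofs(), dof_handler.n_dofs(),
+ //                         n_local_dofs, n_local_dofs,
+ //                         dof_handler.max_couplings_between_dofs());
+ //   solution.reinit (mpi_communicator, dof_handler.n_dofs(), n_local_dofs);
+ //   system_rhs.reinit (mpi_communicator, dof_handler.n_dofs(), n_local_dofs);
+ // @endcode
+ // There is no intermediate SparsityPattern object as there was in step-8.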
-
- // The next change is that we have to declare a variable that
- // indicates the @ref GlossMPICommunicator "MPI communicator" over
- // which we are supposed to distribute our computations.
- MPI_Comm mpi_communicator;
-
- // Then we have two variables that tell us where in the parallel
- // world we are. The first of the following variables,
- // <code>n_mpi_processes</code> tells us how many MPI processes
- // there exist in total, while the second one,
- // <code>this_mpi_process</code>, indicates which is the number of
- // the present process within this space of processes (in MPI
- // language, this corresponds to the @ref GlossMPIRank "rank" of
- // the process). The latter will have a unique value for each
- // process between zero and (less than)
- // <code>n_mpi_processes</code>. If this program is run on a
- // single machine without MPI support, then their values are
- // <code>1</code> and <code>0</code>, respectively.
- const unsigned int n_mpi_processes;
- const unsigned int this_mpi_process;
};
// In the constructor, we first set the MPI communicator variable to the
// global communicator (<code>MPI_COMM_WORLD</code>; one could also use
// a communicator that only links a subset of all processes), and call
// the Utilities::MPI
// helper functions to determine the number of processes and where
// the present one fits into this picture. In addition, we make sure
- // that output is only generated by the (globally) first process. As
- // <code>this_mpi_process</code> is determined after creation of
- // pcout, we cannot set the condition through the constructor,
- // i.e. by <code>pcout(std::cout, this_mpi_process==0)</code>, but
- // set the condition separately.
+ // that output is only generated by the (globally) first process.
+ // We do so by passing the stream we want to output to
+ // (<code>std::cout</code>) and a true/false flag as arguments, where
+ // the latter is determined by testing whether the process currently
+ // executing the constructor call is the first in the MPI universe.
template <int dim>
ElasticProblem<dim>::ElasticProblem ()
:
- pcout (std::cout),
- dof_handler (triangulation),
- fe (FE_Q<dim>(1), dim),
mpi_communicator (MPI_COMM_WORLD),
n_mpi_processes (Utilities::MPI::n_mpi_processes(mpi_communicator)),
- this_mpi_process (Utilities::MPI::this_mpi_process(mpi_communicator))
- {
- pcout.set_condition(this_mpi_process == 0);
- }
+ this_mpi_process (Utilities::MPI::this_mpi_process(mpi_communicator)),
+ pcout (std::cout,
+ (this_mpi_process == 0)),
+ dof_handler (triangulation),
+ fe (FE_Q<dim>(1), dim)
+ {}
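+
+ // As a quick illustration of what these numbers mean in practice:
+ // if the compiled executable (assumed here to be called
+ // <code>step-17</code>) is started as
+ // @code
+ //   mpirun -np 4 ./step-17
+ // @endcode
+ // then <code>n_mpi_processes</code> equals four on every process,
+ // <code>this_mpi_process</code> takes the values zero through three
+ // (one value per process), and only the process with rank zero
+ // actually writes anything through <code>pcout</code>.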