IndexSet locally_owned_dofs;
IndexSet locally_relevant_dofs;
- ConstraintMatrix constraints;
+ AffineConstraints<double> constraints;
LA::MPI::SparseMatrix system_matrix;
LA::MPI::Vector locally_relevant_solution;
//
// As with all other things in %parallel, the mantra must be that no
// processor can store all information about the entire universe. As a
- // consequence, we need to tell the constraints object for which degrees
- // of freedom it can store constraints and for which it may not expect any
- // information to store. In our case, as explained in the @ref distributed
- // module, the degrees of freedom we need to care about on each processor
- // are the locally relevant ones, so we pass this to the
- // ConstraintMatrix::reinit function. As a side note, if you forget to
- // pass this argument, the ConstraintMatrix class will allocate an array
+ // consequence, we need to tell the AffineConstraints object for which
+ // degrees of freedom it can store constraints and for which it may not
+ // expect any information to store. In our case, as explained in the @ref
+ // distributed module, the degrees of freedom we need to care about on
+ // each processor are the locally relevant ones, so we pass this to the
+ // AffineConstraints::reinit function. As a side note, if you forget to
+ // pass this argument, the AffineConstraints class will allocate an array
// with length equal to the largest DoF index it has seen so far. For
// processors with high MPI process number, this may be very large --
// maybe on the order of billions. The program would then allocate more
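//
// A minimal sketch of how the relevant part of <code>setup_system()</code>
// might look, assuming zero Dirichlet values on boundary id 0 (exact
// function signatures can differ between deal.II releases):
locally_owned_dofs = dof_handler.locally_owned_dofs();
DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs);

constraints.clear();
constraints.reinit(locally_relevant_dofs);
DoFTools::make_hanging_node_constraints(dof_handler, constraints);
VectorTools::interpolate_boundary_values(dof_handler,
                                         0,
                                         Functions::ZeroFunction<dim>(),
                                         constraints);
constraints.close();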
std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
- typename DoFHandler<dim>::active_cell_iterator cell =
- dof_handler.begin_active(),
- endc = dof_handler.end();
- for (; cell != endc; ++cell)
+ for (const auto &cell : dof_handler.active_cell_iterators())
if (cell->is_locally_owned())
{
- cell_matrix = 0;
- cell_rhs = 0;
+ cell_matrix = 0.;
+ cell_rhs = 0.;
fe_values.reinit(cell);
0.5 +
0.25 * std::sin(4.0 * numbers::PI *
fe_values.quadrature_point(q_point)[0]) ?
- 1 :
- -1);
+ 1. :
+ -1.);
for (unsigned int i = 0; i < dofs_per_cell; ++i)
{
for (unsigned int j = 0; j < dofs_per_cell; ++j)
- cell_matrix(i, j) += (fe_values.shape_grad(i, q_point) *
- fe_values.shape_grad(j, q_point) *
- fe_values.JxW(q_point));
+ cell_matrix(i, j) += fe_values.shape_grad(i, q_point) *
+ fe_values.shape_grad(j, q_point) *
+ fe_values.JxW(q_point);
- cell_rhs(i) +=
- (rhs_value * fe_values.shape_value(i, q_point) *
- fe_values.JxW(q_point));
+ cell_rhs(i) += rhs_value * //
+ fe_values.shape_value(i, q_point) * //
+ fe_values.JxW(q_point);
}
}
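//
// Inside the <code>is_locally_owned()</code> branch, the local contributions
// are then typically folded into the global objects through the constraints
// object, and the loop over cells is followed by compress() calls that
// exchange contributions destined for other processors. A sketch, assuming a
// member <code>system_rhs</code> vector next to <code>system_matrix</code>:
cell->get_dof_indices(local_dof_indices);
constraints.distribute_local_to_global(cell_matrix,
                                       cell_rhs,
                                       local_dof_indices,
                                       system_matrix,
                                       system_rhs);
// ...and once the loop over all cells is done:
system_matrix.compress(VectorOperation::add);
system_rhs.compress(VectorOperation::add);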
// sensible approach, namely creating individual files for each MPI process
// and leaving it to the visualization program to make sense of that.
//
- // To start, the top of the function looks like always. In addition to
- // attaching the solution vector (the one that has entries for all locally
+ // To start, the top of the function looks like it usually does. In addition
+ // to attaching the solution vector (the one that has entries for all locally
// relevant, not only the locally owned, elements), we attach a data vector
// that stores, for each cell, the subdomain the cell belongs to. This is
// slightly tricky, because of course not every processor knows about every
// processor number (enough for up to 10,000 processors, though we hope
// that nobody ever tries to generate this much data -- you would likely
// overflow all file system quotas), and <code>.vtu</code> indicates the
- // XML-based Visualization Toolkit (VTK) file format.
+ // XML-based Visualization Toolkit for Unstructured grids (VTU) file
+ // format.
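//
// A sketch of what attaching these two vectors can look like, assuming the
// solution field is simply named "u":
DataOut<dim> data_out;
data_out.attach_dof_handler(dof_handler);
data_out.add_data_vector(locally_relevant_solution, "u");

Vector<float> subdomain(triangulation.n_active_cells());
for (unsigned int i = 0; i < subdomain.size(); ++i)
  subdomain(i) = triangulation.locally_owned_subdomain();
data_out.add_data_vector(subdomain, "subdomain");

data_out.build_patches();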
const std::string filename =
("solution-" + Utilities::int_to_string(cycle, 2) + "." +
Utilities::int_to_string(triangulation.locally_owned_subdomain(), 4));
- std::ofstream output((filename + ".vtu"));
+ std::ofstream output(filename + ".vtu");
data_out.write_vtu(output);
// The last step is to write a "master record" that lists for the
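//
// A sketch of how such a record (a <code>.pvtu</code> file) is commonly
// written by a single process, assuming a member
// <code>mpi_communicator</code>; newer releases also offer
// DataOut::write_vtu_with_pvtu_record(), which combines both steps:
if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
  {
    std::vector<std::string> filenames;
    for (unsigned int i = 0;
         i < Utilities::MPI::n_mpi_processes(mpi_communicator);
         ++i)
      filenames.push_back("solution-" + Utilities::int_to_string(cycle, 2) +
                          "." + Utilities::int_to_string(i, 4) + ".vtu");

    std::ofstream master_output("solution-" +
                                Utilities::int_to_string(cycle, 2) + ".pvtu");
    data_out.write_pvtu_record(master_output, filenames);
  }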
// @sect4{main()}
// The final function, <code>main()</code>, again has the same structure as in
-// all other programs, in particular step-6. Like in the other programs that
-// use PETSc, we have to initialize and finalize PETSc, which is done using the
-// helper object MPI_InitFinalize.
+// all other programs, in particular step-6. Like the other programs that use
+// PETSc, we have to initialize and finalize PETSc, which is done using the
+// helper object Utilities::MPI::MPI_InitFinalize. The constructor of that
+// class initializes MPI and other libraries that depend on it, such as p4est,
+// SLEPc, and Zoltan (though the last two are not used in this tutorial). The
+// order here is important: we cannot use any of these libraries until they
+// are initialized, so it does not make sense to do anything before creating
+// an instance of Utilities::MPI::MPI_InitFinalize.
//
-// Note how we enclose the use of the LaplaceProblem class in a pair
-// of braces. This makes sure that all member variables of the object are
-// destroyed by the time we destroy the mpi_initialization object. Not doing
-// this will lead to strange and hard to debug errors when
-// <code>PetscFinalize</code> first deletes all PETSc vectors that are still
-// around, and the destructor of the LaplaceProblem class then tries to delete
-// them again.
+// After the solver finishes, the LaplaceProblem destructor will run followed
+// by Utilities::MPI::MPI_InitFinalize::~MPI_InitFinalize(). This order is
+// also important: Utilities::MPI::MPI_InitFinalize::~MPI_InitFinalize() calls
+// <code>PetscFinalize</code> (and finalization functions for other
+// libraries), which will delete any in-use PETSc objects. This must be done
+// after we destruct the Laplace solver to avoid double deletion
+// errors. Fortunately, due to C++'s rules on the order of destructor calls,
+// we do not need to worry about any of this: everything happens in the
+// correct order (i.e., the reverse of the order of construction). The last
+// function called by Utilities::MPI::MPI_InitFinalize::~MPI_InitFinalize() is
+// <code>MPI_Finalize</code>: i.e., once this object is destructed the program
+// should exit since MPI will no longer be available.
int main(int argc, char *argv[])
{
try
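    {
      // A sketch of how the body typically continues, assuming the solver
      // class lives in a namespace Step40 and is run in 2d. The
      // MPI_InitFinalize object is constructed first -- and therefore
      // destroyed last -- so every PETSc object owned by the solver is gone
      // before PetscFinalize runs.
      using namespace dealii;
      using namespace Step40;

      Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

      LaplaceProblem<2> laplace_problem_2d;
      laplace_problem_2d.run();
    }
  catch (std::exception &exc)
    {
      std::cerr << "Exception on processing: " << exc.what() << std::endl;
      return 1;
    }

  return 0;
}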