From: David Wells
Date: Mon, 25 Jun 2018 14:16:48 +0000 (-0400)
Subject: Update step-40.
X-Git-Tag: v9.1.0-rc1~969^2~2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=fd899264c7934360bf3ec5996bad61c4d0d68192;p=dealii.git

Update step-40.
---

diff --git a/examples/step-40/doc/intro.dox b/examples/step-40/doc/intro.dox
index 6ed0e30e0e..7b49b47174 100644
--- a/examples/step-40/doc/intro.dox
+++ b/examples/step-40/doc/intro.dox
@@ -40,8 +40,8 @@
 step-18, where we show how a program can use MPI to parallelize
 assembling the linear system, storing it, solving it, and computing
 error estimators. All of these operations scale relatively trivially
-(for a definition of what it means for an operation to "scale", see
-@ref GlossParallelScaling "this glossary entry),
+(for a definition of what it means for an operation to "scale", see
+@ref GlossParallelScaling "this glossary entry"),
 but there was one significant drawback: for this to be moderately
 simple to implement, each MPI processor had to keep its own copy of
 the entire Triangulation and DoFHandler objects. Consequently, while
diff --git a/examples/step-40/step-40.cc b/examples/step-40/step-40.cc
index e3fb36345f..411eede749 100644
--- a/examples/step-40/step-40.cc
+++ b/examples/step-40/step-40.cc
@@ -176,7 +176,7 @@
     IndexSet locally_owned_dofs;
     IndexSet locally_relevant_dofs;
 
-    ConstraintMatrix constraints;
+    AffineConstraints<double> constraints;
 
     LA::MPI::SparseMatrix system_matrix;
     LA::MPI::Vector locally_relevant_solution;
@@ -280,13 +280,13 @@
     //
     // As with all other things in %parallel, the mantra must be that no
     // processor can store all information about the entire universe. As a
-    // consequence, we need to tell the constraints object for which degrees
-    // of freedom it can store constraints and for which it may not expect any
-    // information to store. In our case, as explained in the @ref distributed
-    // module, the degrees of freedom we need to care about on each processor
-    // are the locally relevant ones, so we pass this to the
-    // ConstraintMatrix::reinit function. As a side note, if you forget to
-    // pass this argument, the ConstraintMatrix class will allocate an array
+    // consequence, we need to tell the AffineConstraints object for which
+    // degrees of freedom it can store constraints and for which it may not
+    // expect any information to store. In our case, as explained in the @ref
+    // distributed module, the degrees of freedom we need to care about on
+    // each processor are the locally relevant ones, so we pass this to the
+    // AffineConstraints::reinit function. As a side note, if you forget to
+    // pass this argument, the AffineConstraints class will allocate an array
     // with length equal to the largest DoF index it has seen so far. For
     // processors with high MPI process number, this may be very large --
     // maybe on the order of billions. The program would then allocate more
@@ -385,14 +385,11 @@
     std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
 
-    typename DoFHandler<dim>::active_cell_iterator cell =
-      dof_handler.begin_active(),
-      endc = dof_handler.end();
-    for (; cell != endc; ++cell)
+    for (const auto &cell : dof_handler.active_cell_iterators())
       if (cell->is_locally_owned())
         {
-          cell_matrix = 0;
-          cell_rhs = 0;
+          cell_matrix = 0.;
+          cell_rhs = 0.;
 
           fe_values.reinit(cell);
@@ -403,19 +400,19 @@
                    0.5 +
                      0.25 * std::sin(4.0 * numbers::PI *
                                      fe_values.quadrature_point(q_point)[0]) ?
-                 1 :
-                 -1);
+                 1. :
+                 -1.);
 
               for (unsigned int i = 0; i < dofs_per_cell; ++i)
                 {
                   for (unsigned int j = 0; j < dofs_per_cell; ++j)
-                    cell_matrix(i, j) += (fe_values.shape_grad(i, q_point) *
-                                          fe_values.shape_grad(j, q_point) *
-                                          fe_values.JxW(q_point));
+                    cell_matrix(i, j) += fe_values.shape_grad(i, q_point) *
+                                         fe_values.shape_grad(j, q_point) *
+                                         fe_values.JxW(q_point);
 
-                  cell_rhs(i) +=
-                    (rhs_value * fe_values.shape_value(i, q_point) *
-                     fe_values.JxW(q_point));
+                  cell_rhs(i) += rhs_value *                         //
+                                 fe_values.shape_value(i, q_point) * //
+                                 fe_values.JxW(q_point);
                 }
             }
@@ -557,8 +554,8 @@
   // sensible approach, namely creating individual files for each MPI process
   // and leaving it to the visualization program to make sense of that.
   //
-  // To start, the top of the function looks like always. In addition to
-  // attaching the solution vector (the one that has entries for all locally
+  // To start, the top of the function looks like it usually does. In addition
+  // to attaching the solution vector (the one that has entries for all locally
   // relevant, not only the locally owned, elements), we attach a data vector
   // that stores, for each cell, the subdomain the cell belongs to. This is
   // slightly tricky, because of course not every processor knows about every
@@ -592,11 +589,12 @@
     // processor number (enough for up to 10,000 processors, though we hope
    // that nobody ever tries to generate this much data -- you would likely
     // overflow all file system quotas), and .vtu indicates the
-    // XML-based Visualization Toolkit (VTK) file format.
+    // XML-based Visualization Toolkit for Unstructured grids (VTU) file
+    // format.
     const std::string filename =
       ("solution-" + Utilities::int_to_string(cycle, 2) + "." +
        Utilities::int_to_string(triangulation.locally_owned_subdomain(), 4));
-    std::ofstream output((filename + ".vtu"));
+    std::ofstream output(filename + ".vtu");
     data_out.write_vtu(output);
 
     // The last step is to write a "master record" that lists for the
@@ -693,17 +691,27 @@
 // @sect4{main()}
 
 // The final function, main(), again has the same structure as in
-// all other programs, in particular step-6. Like in the other programs that
-// use PETSc, we have to initialize and finalize PETSc, which is done using the
-// helper object MPI_InitFinalize.
+// all other programs, in particular step-6. Like the other programs that use
+// PETSc, we have to initialize and finalize PETSc, which is done using the
+// helper object Utilities::MPI::MPI_InitFinalize. The constructor of that
+// class initializes MPI and other libraries that depend on it, such as p4est,
+// SLEPc, and Zoltan (though the last two are not used in this tutorial). The
+// order here is important: we cannot use any of these libraries until they
+// are initialized, so it does not make sense to do anything before creating
+// an instance of Utilities::MPI::MPI_InitFinalize.
 //
-// Note how we enclose the use the use of the LaplaceProblem class in a pair
-// of braces. This makes sure that all member variables of the object are
-// destroyed by the time we destroy the mpi_initialization object. Not doing
-// this will lead to strange and hard to debug errors when
-// PetscFinalize first deletes all PETSc vectors that are still
-// around, and the destructor of the LaplaceProblem class then tries to delete
-// them again.
+// After the solver finishes, the LaplaceProblem destructor will run followed
+// by Utilities::MPI::MPI_InitFinalize::~MPI_InitFinalize(). This order is
+// also important: Utilities::MPI::MPI_InitFinalize::~MPI_InitFinalize() calls
+// PetscFinalize (and finalization functions for other
+// libraries), which will delete any in-use PETSc objects. This must be done
+// after we destruct the Laplace solver to avoid double deletion
+// errors. Fortunately, due to the order of destructor call rules of C++, we
+// do not need to worry about any of this: everything happens in the correct
+// order (i.e., the reverse of the order of construction). The last function
+// called by Utilities::MPI::MPI_InitFinalize::~MPI_InitFinalize() is
+// MPI_Finalize: i.e., once this object is destructed the program
+// should exit since MPI will no longer be available.
 int main(int argc, char *argv[])
 {
   try