From 64edb7f447263259c5c7c4aa109d9f46c76d753d Mon Sep 17 00:00:00 2001 From: Timo Heister Date: Sun, 3 May 2020 16:49:44 -0400 Subject: [PATCH] comment code --- examples/step-50/CMakeLists.txt | 13 +- examples/step-50/doc/intro.dox | 10 +- examples/step-50/doc/results.dox | 14 +- examples/step-50/step-50.cc | 392 +++++++++++++++++++++---------- 4 files changed, 290 insertions(+), 139 deletions(-) diff --git a/examples/step-50/CMakeLists.txt b/examples/step-50/CMakeLists.txt index 3f09d00c4e..4ff4ebd76f 100644 --- a/examples/step-50/CMakeLists.txt +++ b/examples/step-50/CMakeLists.txt @@ -40,17 +40,22 @@ ENDIF() # # Are all dependencies fulfilled? # -IF(NOT DEAL_II_WITH_MPI OR NOT DEAL_II_WITH_P4EST OR NOT DEAL_II_WITH_TRILINOS) # keep in one line +IF(NOT ((DEAL_II_WITH_PETSC AND NOT DEAL_II_PETSC_WITH_COMPLEX) OR DEAL_II_WITH_TRILINOS) OR NOT DEAL_II_WITH_P4EST) # keep in one line MESSAGE(FATAL_ERROR " Error! This tutorial requires a deal.II library that was configured with the following options: - DEAL_II_WITH_MPI = ON + DEAL_II_WITH_PETSC = ON + DEAL_II_PETSC_WITH_COMPLEX = OFF DEAL_II_WITH_P4EST = ON +or DEAL_II_WITH_TRILINOS = ON + DEAL_II_WITH_P4EST = ON However, the deal.II library found at ${DEAL_II_PATH} was configured with these options - DEAL_II_WITH_MPI = ${DEAL_II_WITH_MPI} + DEAL_II_WITH_PETSC = ${DEAL_II_WITH_PETSC} + DEAL_II_PETSC_WITH_COMPLEX = ${DEAL_II_PETSC_WITH_COMPLEX} DEAL_II_WITH_P4EST = ${DEAL_II_WITH_P4EST} DEAL_II_WITH_TRILINOS = ${DEAL_II_WITH_TRILINOS} -which conflict with the requirements." +which conflict with the requirements. +One or both of the aforementioned combinations of prerequisites are not met by your installation, but at least one is required for this tutorial step." ) ENDIF() diff --git a/examples/step-50/doc/intro.dox b/examples/step-50/doc/intro.dox index a383912a9b..1263229b40 100644 --- a/examples/step-50/doc/intro.dox +++ b/examples/step-50/doc/intro.dox @@ -63,19 +63,19 @@ For the active mesh, we use the parallel::distributed::Triangulation class as do in step-40 which uses functionality in the external library p4est for the distribution of the active cells among processors. For the non-active cells in the multilevel hierarchy, deal.II -implements what we will refer to as the ``first-child rule'' where, for each cell +implements what we will refer to as the "first-child rule" where, for each cell in the hierarchy, we recursively assign the parent of a cell to the owner of the first child cell. The following figures give an example of such a distribution. Here the left image represents the active cells for a sample 2D mesh partitioned using a space-filling curve (similar to p4est), the center image gives the tree representation of the active mesh, and the right image gives the multilevel hierarchy of cells. The colors and numbers represent the different processors. The circular nodes in the tree -are the non-active cells which are distributed using the ``first-child rule''. +are the non-active cells which are distributed using the "first-child rule". -Included among the output to screen in this example is a value ``Partition efficiency'' -given by 1/MGTools::workload_imbalance(). This value, which will be denoted +Included among the output to screen in this example is a value "Partition efficiency" +given by one over MGTools::workload_imbalance(). 
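(To illustrate where this number comes from, and only as a sketch rather than an
excerpt of the program: given the parallel::distributed::Triangulation object
<code>triangulation</code> and the ConditionalOStream <code>pcout</code> that this
program uses, the quantity could be printed along the lines of
@code
  const double workload_imbalance = MGTools::workload_imbalance(triangulation);
  pcout << "Partition efficiency: " << 1.0 / workload_imbalance << std::endl;
@endcode
which is, in essence, how the value reported on screen is obtained.)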
This value, which will be denoted by $\mathbb{E}$, quantifies the overhead produced by not having a perfect work balance on each level of the multigrid hierarchy (as is evident from the example above). @@ -130,7 +130,7 @@ a V-cycle. It should be noted that there is potential for some asynchronous work between multigrid levels, specifically with purely nearest neighbor MPI communication, and an adaptive mesh could be constructed such that the efficiency model would far overestimate the V-cycle slowdown -due to the asynchronous work ``covering up'' the imbalance (which assumes synchronization over levels). +due to the asynchronous work "covering up" the imbalance (which assumes synchronization over levels). However, for most realistic adaptive meshes the expectation is that this asynchronous work will only cover up a very small portion of the imbalance and the efficiency model will describe the slowdown very well. diff --git a/examples/step-50/doc/results.dox b/examples/step-50/doc/results.dox index 06caf6bbec..2f0a22d88f 100644 --- a/examples/step-50/doc/results.dox +++ b/examples/step-50/doc/results.dox @@ -21,7 +21,7 @@ Cycle 0: | Setup | 1 | 0.00264s | 5.8% | | Setup multigrid | 1 | 0.00261s | 5.7% | | Solve | 1 | 0.00355s | 7.8% | -| Solve: 1 multigrid vcycle | 1 | 0.000315s | 0.69% | +| Solve: 1 multigrid V-cycle | 1 | 0.000315s | 0.69% | | Solve: CG | 1 | 0.00186s | 4.1% | | Solve: Preconditioner setup | 1 | 0.000968s | 2.1% | +---------------------------------+-----------+------------+------------+ @@ -46,7 +46,7 @@ Cycle 1: | Setup | 1 | 0.0023s | 5.3% | | Setup multigrid | 1 | 0.00262s | 6% | | Solve | 1 | 0.00549s | 13% | -| Solve: 1 multigrid vcycle | 1 | 0.000343s | 0.79% | +| Solve: 1 multigrid V-cycle | 1 | 0.000343s | 0.79% | | Solve: CG | 1 | 0.00293s | 6.8% | | Solve: Preconditioner setup | 1 | 0.00174s | 4% | +---------------------------------+-----------+------------+------------+ @@ -57,10 +57,10 @@ Cycle 2: . @endcode Here, the timing of the `solve()` function is split up in 3 parts: setting -up the multigrid preconditioner, execution of a single multigrid vcycle, and -the CG solver. The vcycle that is timed is unnecessary for the overall solve +up the multigrid preconditioner, execution of a single multigrid V-cycle, and +the CG solver. The V-cycle that is timed is unnecessary for the overall solve and only meant to give an insight at the different costs for AMG and GMG. -Also it should be noted that when using the AMG solver, ``Workload imbalance'' +Also it should be noted that when using the AMG solver, "Workload imbalance" is not included in the output since the hierarchy of coarse meshes is not required. @@ -72,9 +72,9 @@ computations). The code is compiled using gcc 7.1.0 with intel-mpi 17.0.3. Trili The following table gives weak scale timings for this program on up to 256M DoFs and 7168 processors. Here, $\mathbb{E}$ is the partition efficiency from the - introduction (also equal to 1.0/workload imbalance), ``Setup'' is a combination + introduction (also equal to 1.0/workload imbalance), "Setup" is a combination of setup, setup multigrid, assemble, and assemble multigrid from the timing blocks, -and ``Prec'' is the preconditioner setup. Ideally all times would stay constant +and "Prec" is the preconditioner setup. 
Ideally all times would stay constant over each problem size for the individual solvers, but since the partition efficiency decreases from 0.371 to 0.161 from largest to smallest problem size, we expect to see an approximately $0.371/0.161=2.3$ times increase in timings diff --git a/examples/step-50/step-50.cc b/examples/step-50/step-50.cc index 66f71cba52..ea3c7f75d5 100644 --- a/examples/step-50/step-50.cc +++ b/examples/step-50/step-50.cc @@ -20,6 +20,11 @@ * Martin Kronbichler, Technical University of Munich */ + +// @sect3{Include files} + +// The include files are a combination of step-40, step-16, and step-37: + #include #include #include @@ -41,31 +46,14 @@ #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +// We use the same strategy as in step-40 to switch between PETSc and +// Trilinos: +#include -// Comment the following \#define if you have PETSc and Trilinos installed -// and you prefer using PETSc in this example: +// Comment the following \#define if you have PETSc and Trilinos installed and +// you prefer using PETSc in this example: #define FORCE_USE_OF_TRILINOS namespace LA @@ -81,14 +69,35 @@ namespace LA #endif } // namespace LA +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// The following files are used to assemble the error estimator like in step-12: +#include +#include using namespace dealii; -/** - * Matrix-free operators must use deal.II defined vectors, rest of the code is - * based on Trilinos vectors. - */ +// @sect3{Coefficients and helper classes} + +// MatrixFree operators must use the +// dealii::LinearAlgebra::distributed::Vector vector type. Here we define +// operations which copy to and from Trilinos vectors for compatibility with +// the matrix-based code. Note that this functionality does not currently +// exist for PETSc vector types, so Trilinos must be installed to use the +// MatrixFree solver in this tutorial. namespace ChangeVectorTypes { template @@ -125,7 +134,8 @@ namespace ChangeVectorTypes } // namespace ChangeVectorTypes - +// We set the right-hand side to 1.0. The @p value function returning a +// VectorizedArray is used by the matrix-free code path. template class RightHandSide : public Function { @@ -145,8 +155,11 @@ public: } }; - - +// This class represents the diffusion coefficient. We use a variable +// coefficient which is 100.0 at any point where at least one coordinate is +// less than -0.5, and 1.0 at all other points. As above, a separate value() +// returning a VectorizedArray is used for the matrix-free code. An @p +// average() function computes the arithmetic average for a set of points. template class Coefficient : public Function { @@ -161,6 +174,8 @@ public: template number average_value(const std::vector> &points) const; + // This function creates a Table of coefficient values per cell of the + // MatrixFree operator provided. 
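  // (The following is a usage sketch with hypothetical object names, not code
  // taken from this program: the returned table is meant to be handed to the
  // matrix-free Laplace operator, for example as
  //   mf_operator.set_coefficient(
  //     coefficient.create_coefficient_table(mf_storage));
  // so that the operator can look up the averaged coefficient of each cell
  // batch when it is applied.)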
template std::shared_ptr>> create_coefficient_table( const MatrixFree> &mf_storage) const; @@ -185,20 +200,15 @@ VectorizedArray Coefficient::value(const Point> &p, const unsigned int) const { - VectorizedArray return_value = VectorizedArray(); + VectorizedArray return_value = VectorizedArray(1.0); for (unsigned int i = 0; i < VectorizedArray::size(); ++i) { - bool found = false; for (int d = 0; d < dim; ++d) if (p[d][i] < -0.5) { return_value[i] = 100.0; - found = true; break; } - - if (!found) - return_value[i] = 1.0; } return return_value; @@ -226,8 +236,8 @@ std::shared_ptr>> Coefficient::create_coefficient_table( const MatrixFree> &mf_storage) const { - std::shared_ptr>> coefficient_table; - coefficient_table = std::make_shared>>(); + auto coefficient_table = + std::make_shared>>(); FEEvaluation fe_eval(mf_storage); @@ -235,23 +245,27 @@ Coefficient::create_coefficient_table( const unsigned int n_q_points = fe_eval.n_q_points; coefficient_table->reinit(n_cells, 1); + for (unsigned int cell = 0; cell < n_cells; ++cell) { fe_eval.reinit(cell); - std::vector>> points(n_q_points); + VectorizedArray average_value = 0.; for (unsigned int q = 0; q < n_q_points; ++q) - points[q] = fe_eval.quadrature_point(q); - VectorizedArray averaged_value = average_value(points); + average_value += value(fe_eval.quadrature_point(q)); + average_value /= n_q_points; - (*coefficient_table)(cell, 0) = averaged_value; + (*coefficient_table)(cell, 0) = average_value; } return coefficient_table; } +// @sect3{Problem settings} - +// We will use ParameterHandler to pass in parameters at runtime. The +// structure @p Settings parses and stores these parameters to be queried +// throughout the program. struct Settings { bool try_parse(const std::string &prm_filename); @@ -261,7 +275,9 @@ struct Settings gmg_mb, gmg_mf, amg - } solver; + }; + + SolverType solver; int dimension; double smoother_dampen; @@ -270,9 +286,89 @@ struct Settings bool output; }; + +bool Settings::try_parse(const std::string &prm_filename) +{ + ParameterHandler prm; + prm.declare_entry("dim", "2", Patterns::Integer(), "The problem dimension."); + prm.declare_entry("n_steps", + "10", + Patterns::Integer(0), + "Number of adaptive refinement steps."); + prm.declare_entry("smoother dampen", + "1.0", + Patterns::Double(0.0), + "Dampen factor for the smoother."); + prm.declare_entry("smoother steps", + "1", + Patterns::Integer(1), + "Number of smoother steps."); + prm.declare_entry( + "solver", + "MF", + Patterns::Selection("MF|MB|AMG"), + "Switch between matrix-free GMG, matrix-based GMG, and AMG."); + prm.declare_entry("output", + "false", + Patterns::Bool(), + "Output graphical results."); + + if (prm_filename.size() == 0) + { + // No .prm file provided? Print the default values and exit. 
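      // (As an aside, and only as a hypothetical example: the printed defaults
      // use the ParameterHandler text format and can double as a template for
      // an input file. A minimal .prm file built from the entries declared
      // above could read
      //   set dim     = 2
      //   set solver  = MB
      //   set output  = true
      // where every omitted entry keeps its default value.)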
+ if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0) + prm.print_parameters(std::cout, ParameterHandler::Text); + return false; + } + + try + { + prm.parse_input(prm_filename); + } + catch (std::exception &e) + { + if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0) + std::cerr << e.what() << std::endl; + return false; + } + + if (prm.get("solver") == "MF") + this->solver = gmg_mf; + else if (prm.get("solver") == "MB") + this->solver = gmg_mb; + else if (prm.get("solver") == "AMG") + this->solver = amg; + else + AssertThrow(false, ExcNotImplemented()); + + this->dimension = prm.get_integer("dim"); + this->n_steps = prm.get_integer("n_steps"); + this->smoother_dampen = prm.get_double("smoother dampen"); + this->smoother_steps = prm.get_integer("smoother steps"); + this->output = prm.get_bool("output"); + + return true; +} + +// @sect3{LaplaceProblem class} + +// This is the main class of the program. It should look very similar to +// step-16, step-37, and step-40. For the MatrixFree setup, we use the +// MatrixFreeOperators::LaplaceOperator class which defines local_apply(), +// compute_diagonal(), and set_coefficient() functions internally. Note that +// the polynomial degree is a template parameter of this class. This is +// necessary for the matrix-free code. template class LaplaceProblem { +public: + LaplaceProblem(const Settings &settings); + void run(); + +private: + // We will use the following types throughout the program. First the + // matrix-based types, after that the matrix-free classes. For the + // matrix-free implementation, we use @p float for the level operators. using MatrixType = LA::MPI::SparseMatrix; using VectorType = LA::MPI::Vector; using PreconditionAMG = LA::MPI::PreconditionAMG; @@ -294,11 +390,6 @@ class LaplaceProblem using MatrixFreeLevelVector = LinearAlgebra::distributed::Vector; using MatrixFreeActiveVector = LinearAlgebra::distributed::Vector; -public: - LaplaceProblem(const Settings &settings); - void run(); - -private: void setup_system(); void setup_multigrid(); void assemble_system(); @@ -340,6 +431,9 @@ private: }; +// The only interesting part about the constructor is that we construct the +// multigrid hierarchy unless we use AMG. For that, we need to parse the +// parameters before this constructor completes. template LaplaceProblem::LaplaceProblem(const Settings &settings) : settings(settings) @@ -360,72 +454,13 @@ LaplaceProblem::LaplaceProblem(const Settings &settings) triangulation.refine_global(1); } +// @sect4{LaplaceProblem::setup_system()} -bool Settings::try_parse(const std::string &prm_filename) -{ - ParameterHandler prm; - prm.declare_entry("dim", "2", Patterns::Integer(), "The problem dimension."); - prm.declare_entry("n_steps", - "10", - Patterns::Integer(0), - "Number of adaptive refinement steps."); - prm.declare_entry("smoother dampen", - "1.0", - Patterns::Double(0.0), - "Dampen factor for the smoother."); - prm.declare_entry("smoother steps", - "1", - Patterns::Integer(1), - "Number of smoother steps."); - prm.declare_entry( - "solver", - "MF", - Patterns::Selection("MF|MB|AMG"), - "Switch between matrix-free GMG, matrix-based GMG, and AMG."); - prm.declare_entry("output", - "false", - Patterns::Bool(), - "Output graphical results."); - - if (prm_filename.size() == 0) - { - // No .prm file provided? Print the default values and exit.
- if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0) - prm.print_parameters(std::cout, ParameterHandler::Text); - return false; - } - - try - { - prm.parse_input(prm_filename); - } - catch (std::exception &e) - { - if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0) - std::cerr << e.what() << std::endl; - return false; - } - - if (prm.get("solver") == "MF") - this->solver = gmg_mf; - else if (prm.get("solver") == "MB") - this->solver = gmg_mb; - else if (prm.get("solver") == "AMG") - this->solver = amg; - else - AssertThrow(false, ExcNotImplemented()); - - this->dimension = prm.get_integer("dim"); - this->n_steps = prm.get_integer("n_steps"); - this->smoother_dampen = prm.get_double("smoother dampen"); - this->smoother_steps = prm.get_integer("smoother steps"); - - this->output = prm.get_bool("output"); - - return true; -} - - +// Unlike step-16 and step-37, we split the setup into two parts, +// setup_system() and setup_multigrid(). Here is the typical setup_system() +// function for the active mesh found in most tutorials. For matrix-free, the +// active mesh setup similar to step-37, and for matrix-based (GMG and AMG +// solvers), the setup is similar to step-40. template void LaplaceProblem::setup_system() { @@ -492,8 +527,12 @@ void LaplaceProblem::setup_system() } } +// @sect4{LaplaceProblem::setup_multigrid()} - +// This function does the multilevel setup for both matrix-free and +// matrix-based GMG. The matrix-free setup is similar to that of step-37, and +// the matrix-based is similar to step-16, except we must use appropriate +// distributed sparsity patterns. template void LaplaceProblem::setup_multigrid() { @@ -629,6 +668,16 @@ void LaplaceProblem::setup_multigrid() } +// @sect4{LaplaceProblem::assemble_system()} + +// The assembly is split into three parts: assemble_system(), +// assemble_multigrid(), and assemble_rhs(). The assemble_system() function +// here assembles and stores the system matrix and the right-hand side for the +// matrix-based methods. It is similar to the assembly in step-40. +// +// Note that the matrix-free method does not execute this function as it does +// not need to assemble a matrix, and it will instead assemble the right-hand +// side in assemble_rhs(). template void LaplaceProblem::assemble_system() { @@ -692,6 +741,13 @@ void LaplaceProblem::assemble_system() } +// @sect4{LaplaceProblem::assemble_multigrid()} + +// This function assembles and stores the multilevel matrices for the +// matrix-based GMG method. This function is similar to the one found in +// step-16, only here it works for distributed meshes. This difference amounts +// to adding a condition that we only assemble on locally owned level cells and +// a call to compress() for each matrix that is built. template void LaplaceProblem::assemble_multigrid() { @@ -767,6 +823,38 @@ void LaplaceProblem::assemble_multigrid() } +// @sect4{LaplaceProblem::assemble_rhs()} + +// This function assembles the right-hand side vector for the matrix-free +// method. The function is similar to the one found in the ``Use +// FEEvaluation::read_dof_values_plain() to avoid resolving constraints'' +// subsection in the ``Possibilities for extensions'' section of step-37. +// +// The reason for this function is that the MatrixFree operators do not take +// into account non-homogeneous Dirichlet constraints, instead treating all +// Dirichlet constraints as homogeneous. 
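// (Concretely: if a boundary degree of freedom is constrained to an
// inhomogeneous value $g_i$, a plain application of the matrix-free operator
// sees a zero there instead of $g_i$, so the inhomogeneity has to be
// reintroduced separately; that is what the residual trick described next
// accomplishes.)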
To account for this, the right-hand +// side here is assembled as the residual $r_0 = f-Au_0$, where $u_0$ is a +// zero vector except in the Dirichlet values. Then when solving, we have that +// the solution is $u = u_0 + A^{-1}r_0$. This can be seen as a Newton +// iteration on a linear system with initial guess $u_0$. The CG solve in the +// solve() function below computes $A^{-1}r_0$ and the call to +// `constraints.distribute()` (which directly follows) adds the $u_0$. +// +// Obviously, since we are considering a problem with zero Dirichlet boundary, +// we could have taken a similar approach to step-37 assemble_rhs(), but this +// additional work allows us to change the problem declaration if we so +// choose. +// +// This function has two parts in the integration loop: applying the negative +// of matrix $A$ to $u_0$ by submitting the negative of the gradient, and adding +// the right-hand side contribution by submitting the value $f$. We must be sure +// to use read_dof_values_plain() for evaluating $u_0$ as read_dof_values() would +// set all Dirichlet values to zero. +// +// Finally, the right_hand_side vector is of type LA::MPI::Vector, but the MatrixFree +// class only works for dealii::LinearAlgebra::distributed::Vector. +// Therefore we must compute the right-hand side using MatrixFree functionality +// and then use the helper functions in the ChangeVectorTypes namespace to copy it to the correct type. template void LaplaceProblem::assemble_rhs() { @@ -786,7 +874,8 @@ void LaplaceProblem::assemble_rhs() RightHandSide right_hand_side_function; - FEEvaluation phi(*mf_system_matrix.get_matrix_free()); + FEEvaluation phi( + *mf_system_matrix.get_matrix_free()); for (unsigned int cell = 0; cell < mf_system_matrix.get_matrix_free()->n_macro_cells(); @@ -798,12 +887,9 @@ for (unsigned int q = 0; q < phi.n_q_points; ++q) { - // Submit gradient phi.submit_gradient(-1.0 * (coefficient(cell, 0) * phi.get_gradient(q)), q); - - // Submit RHS value phi.submit_value( right_hand_side_function.value(phi.quadrature_point(q)), q); } @@ -812,10 +898,17 @@ } right_hand_side_copy.compress(VectorOperation::add); + + // Copy the computed right-hand side to an LA::MPI::Vector ChangeVectorTypes::copy(right_hand_side, right_hand_side_copy); } +// @sect4{LaplaceProblem::solve()} + +// Here we set up the multigrid preconditioner, test the timing of a single +// V-cycle, and solve the linear system. Unsurprisingly, this is one of the +// places where the three methods differ the most. template void LaplaceProblem::solve() { @@ -826,6 +919,8 @@ void LaplaceProblem::solve() solution = 0.; + // The solver for the matrix-free GMG method is similar to step-37, apart + // from adding some interface matrices in complete analogy to step-16. if (settings.solver == Settings::gmg_mf) { computing_timer.enter_subsection("Solve: Preconditioner setup"); @@ -872,6 +967,8 @@ MGTransferMatrixFree> preconditioner(dof_handler, mg, mg_transfer); + // Copy the solution vector and right-hand side from LA::MPI::Vector to + // dealii::LinearAlgebra::distributed::Vector so that we can solve. MatrixFreeActiveVector solution_copy; MatrixFreeActiveVector right_hand_side_copy; mf_system_matrix.initialize_dof_vector(solution_copy); mf_system_matrix.initialize_dof_vector(right_hand_side_copy); ChangeVectorTypes::copy(solution_copy, solution); ChangeVectorTypes::copy(right_hand_side_copy, right_hand_side); computing_timer.leave_subsection("Solve: Preconditioner setup"); - // Timing 1 vcycle + // Timing for 1 V-cycle.
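      // (One call to preconditioner.vmult() corresponds to exactly one
      // V-cycle, and the CG iteration further down applies the same V-cycle
      // once per iteration. This extra application is not needed for the
      // solve itself; it is timed only to make the cost comparison between
      // GMG and AMG in the results section easier to read.)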
{ - TimerOutput::Scope timing(computing_timer, "Solve: 1 multigrid vcycle"); + TimerOutput::Scope timing(computing_timer, + "Solve: 1 multigrid V-cycle"); preconditioner.vmult(solution_copy, right_hand_side_copy); } solution_copy = 0.; + // Solve the linear system, update the ghost values of the solution, + // copy back to LA::MPI::Vector and distribute constraints. { SolverCG solver(solver_control); @@ -902,6 +1002,9 @@ void LaplaceProblem::solve() ChangeVectorTypes::copy(solution, solution_copy); constraints.distribute(solution); } + // Solver for the matrix-based GMG method, similar to step-16, only using a + // Jacobi smoother instead of a SOR smoother (which is not implemented in + // parallel). else if (settings.solver == Settings::gmg_mb) { computing_timer.enter_subsection("Solve: Preconditioner setup"); @@ -947,12 +1050,15 @@ void LaplaceProblem::solve() computing_timer.leave_subsection("Solve: Preconditioner setup"); + // Timing for 1 V-cycle. { - TimerOutput::Scope timing(computing_timer, "Solve: 1 multigrid vcycle"); + TimerOutput::Scope timing(computing_timer, + "Solve: 1 multigrid V-cycle"); preconditioner.vmult(solution, right_hand_side); } solution = 0.; + // Solve the linear system and distribute constraints. { SolverCG solver(solver_control); @@ -962,6 +1068,7 @@ void LaplaceProblem::solve() constraints.distribute(solution); } + // Solver for the AMG method, similar to step-40. else /*amg*/ { computing_timer.enter_subsection("Solve: Preconditioner setup"); @@ -984,12 +1091,15 @@ void LaplaceProblem::solve() preconditioner.initialize(system_matrix, Amg_data); computing_timer.leave_subsection("Solve: Preconditioner setup"); + // Timing for 1 V-cycle. { - TimerOutput::Scope timing(computing_timer, "Solve: 1 multigrid vcycle"); + TimerOutput::Scope timing(computing_timer, + "Solve: 1 multigrid V-cycle"); preconditioner.vmult(solution, right_hand_side); } solution = 0.; + // Solve the linear system and distribute constraints. { SolverCG solver(solver_control); @@ -1004,7 +1114,12 @@ void LaplaceProblem::solve() } +// @sect3{The error estimator} +// We use the FEInterfaceValues class to assemble an error estimator to decide +// which cells to refine. See the exact definition of the cell and face +// integrals in the introduction. To use the method, we define Scratch and +// Copy objects for the MeshWorker::mesh_loop(). template struct ScratchData { @@ -1170,8 +1285,13 @@ void LaplaceProblem::estimate() update_JxW_values, update_values | update_gradients | update_JxW_values | update_normal_vectors); + CopyData copy_data; - CopyData copy_data; + // We need to assemble each interior face once but we need to make sure that + // both processes assemble the face term between a locally owned and a ghost + // cell. This is achieved by setting the + // MeshWorker::assemble_ghost_faces_both flag. We need to do this, because + // we do not communicate the error estimator contributions here. MeshWorker::mesh_loop(dof_handler.begin_active(), dof_handler.end(), cell_worker, @@ -1186,7 +1306,11 @@ void LaplaceProblem::estimate() } +// @sect4{LaplaceProblem::refine_grid()} +// We used the cell-wise estimator stored in the vector @p estimate_vector and +// refine a fixed number of cells (chosen here to roughly double the number of +// DoFs in each step). 
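// (The body of this function is not part of the hunk below. As a sketch only,
// and not necessarily the exact call used by the program, marking cells from
// such an estimate is typically done with something like
//   parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number(
//     triangulation, estimate_vector, refine_fraction, 0.0);
// followed by triangulation.execute_coarsening_and_refinement(), where
// refine_fraction stands for the fraction of cells to be refined.)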
template void LaplaceProblem::refine_grid() { TimerOutput::Scope timing(computing_timer, "Refine grid"); @@ -1200,7 +1324,10 @@ void LaplaceProblem::refine_grid() } +// @sect4{LaplaceProblem::output_results()} +// The output_results() function is similar to the ones found in many of the +// tutorials (see step-40 for example). template void LaplaceProblem::output_results(const unsigned int cycle) { @@ -1237,6 +1364,10 @@ void LaplaceProblem::output_results(const unsigned int cycle) } +// @sect4{LaplaceProblem::run()} + +// As in most tutorials, this function calls the various functions defined +// above to set up, assemble, solve, and output the results. template void LaplaceProblem::run() { @@ -1248,6 +1379,10 @@ void LaplaceProblem::run() pcout << " Number of active cells: " << triangulation.n_global_active_cells(); + // We only output level cell data for the GMG methods (same with DoF + // data below). Note that the partition efficiency is irrelevant for AMG + // since the level hierarchy is not distributed or used during the + // computation. if (settings.solver == Settings::gmg_mf || settings.solver == Settings::gmg_mb) pcout << " (" << triangulation.n_global_levels() << " global levels)" @@ -1257,6 +1392,8 @@ pcout << std::endl; setup_system(); + + // Only set up the multilevel hierarchy for GMG. if (settings.solver == Settings::gmg_mf || settings.solver == Settings::gmg_mb) setup_multigrid(); @@ -1274,6 +1411,10 @@ } pcout << std::endl; + // For the matrix-free method, we only assemble the right-hand side. + // For both matrix-based methods, we assemble both active matrix and + // right-hand side, and only assemble the multigrid matrices for + // matrix-based GMG. if (settings.solver == Settings::gmg_mf) assemble_rhs(); else /*gmg_mb or amg*/ @@ -1295,6 +1436,11 @@ } +// @sect3{The main() function} + +// This is a similar main function to step-40, with the exception that we +// require the user to pass a .prm file as the sole command line argument (see +// step-29 for a complete discussion of parameter files). int main(int argc, char *argv[]) { using namespace dealii; -- 2.39.5