From 286a2d867a44c85e83dc60b5dafff424f35ebcbe Mon Sep 17 00:00:00 2001 From: Timo Heister Date: Mon, 11 Feb 2019 16:11:08 -0700 Subject: [PATCH] reserve step-63 --- examples/step-63/CMakeLists.txt | 39 ++ examples/step-63/doc/builds-on | 1 + examples/step-63/doc/intro.dox | 9 + examples/step-63/doc/kind | 1 + examples/step-63/doc/results.dox | 2 + examples/step-63/doc/tooltip | 1 + examples/step-63/step-63.cc | 715 +++++++++++++++++++++++++++++++ 7 files changed, 768 insertions(+) create mode 100644 examples/step-63/CMakeLists.txt create mode 100644 examples/step-63/doc/builds-on create mode 100644 examples/step-63/doc/intro.dox create mode 100644 examples/step-63/doc/kind create mode 100644 examples/step-63/doc/results.dox create mode 100644 examples/step-63/doc/tooltip create mode 100644 examples/step-63/step-63.cc diff --git a/examples/step-63/CMakeLists.txt b/examples/step-63/CMakeLists.txt new file mode 100644 index 0000000000..476f55c11e --- /dev/null +++ b/examples/step-63/CMakeLists.txt @@ -0,0 +1,39 @@ +## +# CMake script for the step-63 tutorial program: +## + +# Set the name of the project and target: +SET(TARGET "step-63") + +# Declare all source files the target consists of. Here, this is only +# the one step-X.cc file, but as you expand your project you may wish +# to add other source files as well. If your project becomes much larger, +# you may want to either replace the following statement by something like +# FILE(GLOB_RECURSE TARGET_SRC "source/*.cc") +# FILE(GLOB_RECURSE TARGET_INC "include/*.h") +# SET(TARGET_SRC ${TARGET_SRC} ${TARGET_INC}) +# or switch altogether to the large project CMakeLists.txt file discussed +# in the "CMake in user projects" page accessible from the "User info" +# page of the documentation. +SET(TARGET_SRC + ${TARGET}.cc + ) + +# Usually, you will not need to modify anything beyond this point... + +CMAKE_MINIMUM_REQUIRED(VERSION 2.8.12) + +FIND_PACKAGE(deal.II 9.1.0 QUIET + HINTS ${deal.II_DIR} ${DEAL_II_DIR} ../ ../../ $ENV{DEAL_II_DIR} + ) +IF(NOT ${deal.II_FOUND}) + MESSAGE(FATAL_ERROR "\n" + "*** Could not locate a (sufficiently recent) version of deal.II. ***\n\n" + "You may want to either pass a flag -DDEAL_II_DIR=/path/to/deal.II to cmake\n" + "or set an environment variable \"DEAL_II_DIR\" that contains this path." + ) +ENDIF() + +DEAL_II_INITIALIZE_CACHED_VARIABLES() +PROJECT(${TARGET}) +DEAL_II_INVOKE_AUTOPILOT() diff --git a/examples/step-63/doc/builds-on b/examples/step-63/doc/builds-on new file mode 100644 index 0000000000..42c2846921 --- /dev/null +++ b/examples/step-63/doc/builds-on @@ -0,0 +1 @@ +step-16 diff --git a/examples/step-63/doc/intro.dox b/examples/step-63/doc/intro.dox new file mode 100644 index 0000000000..b8535c110b --- /dev/null +++ b/examples/step-63/doc/intro.dox @@ -0,0 +1,9 @@ +
+<i>
+This program was contributed by Thomas Clevenger and Timo Heister.
+</i>
+
+<h1>Introduction</h1>
+
+Please note: This is work in progress and will be an example for block
+smoothers in geometric multigrid. For now, this is just step-16.
diff --git a/examples/step-63/doc/kind b/examples/step-63/doc/kind
new file mode 100644
index 0000000000..6816e9090f
--- /dev/null
+++ b/examples/step-63/doc/kind
@@ -0,0 +1 @@
+unfinished
diff --git a/examples/step-63/doc/results.dox b/examples/step-63/doc/results.dox
new file mode 100644
index 0000000000..b5eaba9377
--- /dev/null
+++ b/examples/step-63/doc/results.dox
@@ -0,0 +1,2 @@
+<h1>Results</h1>
+ diff --git a/examples/step-63/doc/tooltip b/examples/step-63/doc/tooltip new file mode 100644 index 0000000000..9aad4b39bb --- /dev/null +++ b/examples/step-63/doc/tooltip @@ -0,0 +1 @@ +Block smoothers for Geometric Multigrid. diff --git a/examples/step-63/step-63.cc b/examples/step-63/step-63.cc new file mode 100644 index 0000000000..86234e2ac7 --- /dev/null +++ b/examples/step-63/step-63.cc @@ -0,0 +1,715 @@ +/* --------------------------------------------------------------------- + * + * Copyright (C) 2003 - 2018 by the deal.II authors + * + * This file is part of the deal.II library. + * + * The deal.II library is free software; you can use it, redistribute + * it, and/or modify it under the terms of the GNU Lesser General + * Public License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * The full text of the license can be found in the file LICENSE.md at + * the top level directory of deal.II. + * + * --------------------------------------------------------------------- + * + * Authors: Thomas Clevenger, Clemson University + * Timo Heister, University of Utah + */ + +// @note: This is work in progress and will be an example for block smoothers +// in geometric multigrid. For now, this is just step-16. + +// @sect3{Include files} + +// Again, the first few include files are already known, so we won't comment +// on them: +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include +#include +#include + +// These, now, are the include necessary for the multilevel methods. The first +// one declares how to handle Dirichlet boundary conditions on each of the +// levels of the multigrid method. For the actual description of the degrees +// of freedom, we do not need any new include file because DoFHandler already +// has all necessary methods implemented. We will only need to distribute the +// DoFs for the levels further down. +// +// The rest of the include files deals with the mechanics of multigrid as a +// linear operator (solver or preconditioner). +#include +#include +#include +#include +#include +#include +#include + +// We will be using MeshWorker::mesh_loop to loop over the cells, so include it +// here: +#include + + +// This is C++: +#include +#include + +using namespace dealii; + +namespace Step16 +{ + // @sect3{The Scratch and Copy objects} + // + // We use MeshWorker::mesh_loop() to assemble our matrices. For this, we + // need a ScratchData object to store temporary data on each cell (this is + // just the FEValues object) and a CopyData object that will contain the + // output of each cell assembly. For more details about the usage of scratch + // and copy objects, see the WorkStream namespace. 
+ template + struct ScratchData + { + ScratchData(const Mapping & mapping, + const FiniteElement &fe, + const unsigned int quadrature_degree, + const UpdateFlags update_flags) + : fe_values(mapping, fe, QGauss(quadrature_degree), update_flags) + {} + + ScratchData(const ScratchData &scratch_data) + : fe_values(scratch_data.fe_values.get_mapping(), + scratch_data.fe_values.get_fe(), + scratch_data.fe_values.get_quadrature(), + scratch_data.fe_values.get_update_flags()) + {} + + FEValues fe_values; + }; + + struct CopyData + { + unsigned int level; + FullMatrix cell_matrix; + Vector cell_rhs; + std::vector local_dof_indices; + + template + void reinit(const Iterator &cell, unsigned int dofs_per_cell) + { + cell_matrix.reinit(dofs_per_cell, dofs_per_cell); + cell_rhs.reinit(dofs_per_cell); + + local_dof_indices.resize(dofs_per_cell); + cell->get_active_or_mg_dof_indices(local_dof_indices); + level = cell->level(); + } + }; + + // @sect3{The LaplaceProblem class template} + + // This main class is similar to the same class in step-6. As far as + // member functions is concerned, the only additions are: + // - The assemble_multigrid function that assembles the matrices + // that correspond to the discrete operators on intermediate levels. + // - The cell_worker function that assembles our PDE on a single + // cell. + template + class LaplaceProblem + { + public: + LaplaceProblem(const unsigned int degree); + void run(); + + private: + template + void cell_worker(const Iterator & cell, + ScratchData &scratch_data, + CopyData & copy_data); + + void setup_system(); + void assemble_system(); + void assemble_multigrid(); + void solve(); + void refine_grid(); + void output_results(const unsigned int cycle) const; + + Triangulation triangulation; + FE_Q fe; + DoFHandler dof_handler; + + SparsityPattern sparsity_pattern; + SparseMatrix system_matrix; + + AffineConstraints constraints; + + Vector solution; + Vector system_rhs; + + const unsigned int degree; + + // The following members are the essential data structures for the multigrid + // method. The first four represent the sparsity patterns and the matrices + // on individual levels of the multilevel hierarchy, very much like the + // objects for the global mesh above. + // + // Then we have two new matrices only needed for multigrid methods with + // local smoothing on adaptive meshes. They convey data between the interior + // part of the refined region and the refinement edge, as outlined in detail + // in the @ref mg_paper "multigrid paper". + // + // The last object stores information about the boundary indices on each + // level and information about indices lying on a refinement edge between + // two different refinement levels. It thus serves a similar purpose as + // AffineConstraints, but on each level. + MGLevelObject mg_sparsity_patterns; + MGLevelObject mg_interface_sparsity_patterns; + + MGLevelObject> mg_matrices; + MGLevelObject> mg_interface_matrices; + MGConstrainedDoFs mg_constrained_dofs; + }; + + + // @sect3{The LaplaceProblem class implementation} + + // Just one short remark about the constructor of the Triangulation: + // by convention, all adaptively refined triangulations in deal.II never + // change by more than one level across a face between cells. For our + // multigrid algorithms, however, we need a slightly stricter guarantee, + // namely that the mesh also does not change by more than refinement level + // across vertices that might connect two cells. 
In other words, we must + // prevent the following situation: + // + // @image html limit_level_difference_at_vertices.png "" + // + // This is achieved by passing the + // Triangulation::limit_level_difference_at_vertices flag to the constructor + // of the triangulation class. + template + LaplaceProblem::LaplaceProblem(const unsigned int degree) + : triangulation(Triangulation::limit_level_difference_at_vertices) + , fe(degree) + , dof_handler(triangulation) + , degree(degree) + {} + + + + // @sect4{LaplaceProblem::setup_system} + + // In addition to just distributing the degrees of freedom in + // the DoFHandler, we do the same on each level. Then, we follow the + // same procedure as before to set up the system on the leaf mesh. + template + void LaplaceProblem::setup_system() + { + dof_handler.distribute_dofs(fe); + dof_handler.distribute_mg_dofs(); + + std::cout << " Number of degrees of freedom: " << dof_handler.n_dofs() + << " (by level: "; + for (unsigned int level = 0; level < triangulation.n_levels(); ++level) + std::cout << dof_handler.n_dofs(level) + << (level == triangulation.n_levels() - 1 ? ")" : ", "); + std::cout << std::endl; + + + solution.reinit(dof_handler.n_dofs()); + system_rhs.reinit(dof_handler.n_dofs()); + + constraints.clear(); + DoFTools::make_hanging_node_constraints(dof_handler, constraints); + + std::set dirichlet_boundary_ids = {0}; + Functions::ZeroFunction homogeneous_dirichlet_bc; + const std::map *> + dirichlet_boundary_functions = { + {types::boundary_id(0), &homogeneous_dirichlet_bc}}; + VectorTools::interpolate_boundary_values(dof_handler, + dirichlet_boundary_functions, + constraints); + constraints.close(); + + { + DynamicSparsityPattern dsp(dof_handler.n_dofs(), dof_handler.n_dofs()); + DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints); + sparsity_pattern.copy_from(dsp); + } + system_matrix.reinit(sparsity_pattern); + + // The multigrid constraints have to be initialized. They need to know + // where Dirichlet boundary conditions are prescribed. + mg_constrained_dofs.clear(); + mg_constrained_dofs.initialize(dof_handler); + mg_constrained_dofs.make_zero_boundary_constraints(dof_handler, + dirichlet_boundary_ids); + + + // Now for the things that concern the multigrid data structures. First, we + // resize the multilevel objects to hold matrices and sparsity patterns for + // every level. The coarse level is zero (this is mandatory right now but + // may change in a future revision). Note that these functions take a + // complete, inclusive range here (not a starting index and size), so the + // finest level is n_levels-1. We first have to resize the + // container holding the SparseMatrix classes, since they have to release + // their SparsityPattern before the can be destroyed upon resizing. + const unsigned int n_levels = triangulation.n_levels(); + + mg_interface_matrices.resize(0, n_levels - 1); + mg_matrices.resize(0, n_levels - 1); + mg_sparsity_patterns.resize(0, n_levels - 1); + mg_interface_sparsity_patterns.resize(0, n_levels - 1); + + // Now, we have to provide a matrix on each level. To this end, we first use + // the MGTools::make_sparsity_pattern function to generate a preliminary + // compressed sparsity pattern on each level (see the @ref Sparsity module + // for more information on this topic) and then copy it over to the one we + // really want. The next step is to initialize the interface matrices with + // the fitting sparsity pattern. 
+ // + // It may be worth pointing out that the interface matrices only have + // entries for degrees of freedom that sit at or next to the interface + // between coarser and finer levels of the mesh. They are therefore even + // sparser than the matrices on the individual levels of our multigrid + // hierarchy. Therefore, we use a function specifically build for this + // purpose to generate it. + for (unsigned int level = 0; level < n_levels; ++level) + { + { + DynamicSparsityPattern dsp(dof_handler.n_dofs(level), + dof_handler.n_dofs(level)); + MGTools::make_sparsity_pattern(dof_handler, dsp, level); + + mg_sparsity_patterns[level].copy_from(dsp); + mg_matrices[level].reinit(mg_sparsity_patterns[level]); + } + { + DynamicSparsityPattern dsp(dof_handler.n_dofs(level), + dof_handler.n_dofs(level)); + MGTools::make_interface_sparsity_pattern(dof_handler, + mg_constrained_dofs, + dsp, + level); + mg_interface_sparsity_patterns[level].copy_from(dsp); + mg_interface_matrices[level].reinit( + mg_interface_sparsity_patterns[level]); + } + } + } + + + // @sect4{LaplaceProblem::cell_worker} + + // The cell_worker function is used to assemble the matrix and right-hand side + // on the given cell. This function is used for the active cells to generate + // the system_matrix and on each level to build the level matrices. + // + // Note that we also assemble a right-hand side when called from + // assemble_multigrid() even though it is not used. + template + template + void LaplaceProblem::cell_worker(const Iterator & cell, + ScratchData &scratch_data, + CopyData & copy_data) + { + FEValues &fe_values = scratch_data.fe_values; + fe_values.reinit(cell); + + const unsigned int dofs_per_cell = fe_values.get_fe().dofs_per_cell; + const unsigned int n_q_points = fe_values.get_quadrature().size(); + + copy_data.reinit(cell, dofs_per_cell); + + const std::vector &JxW = fe_values.get_JxW_values(); + + for (unsigned int q = 0; q < n_q_points; ++q) + { + const double coefficient = + (fe_values.get_quadrature_points()[q][0] < 0.0) ? 1.0 : 0.1; + //(cell->center().square() < 0.5 * 0.5) ? 10.0:1.0; + + for (unsigned int i = 0; i < dofs_per_cell; ++i) + { + for (unsigned int j = 0; j < dofs_per_cell; ++j) + { + copy_data.cell_matrix(i, j) += + coefficient * + (fe_values.shape_grad(i, q) * fe_values.shape_grad(j, q)) * + JxW[q]; + } + copy_data.cell_rhs(i) += 1.0 * fe_values.shape_value(i, q) * JxW[q]; + } + } + } + + + + // @sect4{LaplaceProblem::assemble_system} + + // The following function assembles the linear system on the active cells of + // the mesh. For this, we pass two lambda functions to the mesh_loop() + // function. The cell_worker function redirects to the class member function + // of the same name, while the copier is specific to this function and copies + // local matrix and vector to the corresponding global ones using the + // constraints. 
+ template + void LaplaceProblem::assemble_system() + { + MappingQ1 mapping; + + auto cell_worker = + [&](const typename DoFHandler::active_cell_iterator &cell, + ScratchData & scratch_data, + CopyData & copy_data) { + this->cell_worker(cell, scratch_data, copy_data); + }; + + auto copier = [&](const CopyData &cd) { + this->constraints.distribute_local_to_global(cd.cell_matrix, + cd.cell_rhs, + cd.local_dof_indices, + system_matrix, + system_rhs); + }; + + const unsigned int n_gauss_points = degree + 1; + + ScratchData scratch_data(mapping, + fe, + n_gauss_points, + update_values | update_gradients | + update_JxW_values | + update_quadrature_points); + + MeshWorker::mesh_loop(dof_handler.begin_active(), + dof_handler.end(), + cell_worker, + copier, + scratch_data, + CopyData(), + MeshWorker::assemble_own_cells); + } + + + // @sect4{LaplaceProblem::assemble_multigrid} + + // The next function is the one that builds the matrices + // that define the multigrid method on each level of the mesh. The integration + // core is the same as above, but the loop below will go over all existing + // cells instead of just the active ones, and the results must be entered into + // the correct level matrices. Fortunately, MeshWorker hides most of that from + // us, and thus the difference between this function and the previous lies + // only in the setup of the assembler and the different iterators in the loop. + // + // We generate an AffineConstraints<> object for each level containing the + // boundary and interface dofs as constrained entries. The corresponding + // object is then used to generate the level matrices. + template + void LaplaceProblem::assemble_multigrid() + { + MappingQ1 mapping; + const unsigned int n_levels = triangulation.n_levels(); + + std::vector> boundary_constraints(n_levels); + for (unsigned int level = 0; level < n_levels; ++level) + { + IndexSet dofset; + DoFTools::extract_locally_relevant_level_dofs(dof_handler, + level, + dofset); + boundary_constraints[level].reinit(dofset); + boundary_constraints[level].add_lines( + mg_constrained_dofs.get_refinement_edge_indices(level)); + boundary_constraints[level].add_lines( + mg_constrained_dofs.get_boundary_indices(level)); + boundary_constraints[level].close(); + } + + auto cell_worker = + [&](const typename DoFHandler::level_cell_iterator &cell, + ScratchData & scratch_data, + CopyData & copy_data) { + this->cell_worker(cell, scratch_data, copy_data); + }; + + auto copier = [&](const CopyData &cd) { + boundary_constraints[cd.level].distribute_local_to_global( + cd.cell_matrix, cd.local_dof_indices, mg_matrices[cd.level]); + + const unsigned int dofs_per_cell = cd.local_dof_indices.size(); + + // TODO EXPLAIN: + + for (unsigned int i = 0; i < dofs_per_cell; ++i) + for (unsigned int j = 0; j < dofs_per_cell; ++j) + if (mg_constrained_dofs.is_interface_matrix_entry( + cd.level, cd.local_dof_indices[i], cd.local_dof_indices[j])) + { + mg_interface_matrices[cd.level].add(cd.local_dof_indices[i], + cd.local_dof_indices[j], + cd.cell_matrix(i, j)); + } + }; + + const unsigned int n_gauss_points = degree + 1; + + ScratchData scratch_data(mapping, + fe, + n_gauss_points, + update_values | update_gradients | + update_JxW_values | + update_quadrature_points); + + MeshWorker::mesh_loop(dof_handler.begin_mg(), + dof_handler.end_mg(), + cell_worker, + copier, + scratch_data, + CopyData(), + MeshWorker::assemble_own_cells); + } + + + + // @sect4{LaplaceProblem::solve} + + // This is the other function that is significantly different in support 
of + // the multigrid solver (or, in fact, the preconditioner for which we use + // the multigrid method). + // + // Let us start out by setting up two of the components of multilevel + // methods: transfer operators between levels, and a solver on the coarsest + // level. In finite element methods, the transfer operators are derived from + // the finite element function spaces involved and can often be computed in + // a generic way independent of the problem under consideration. In that + // case, we can use the MGTransferPrebuilt class that, given the constraints + // of the final linear system and the MGConstrainedDoFs object that knows + // about the boundary conditions on the each level and the degrees of + // freedom on interfaces between different refinement level can build the + // matrices for those transfer operations from a DoFHandler object with + // level degrees of freedom. + // + // The second part of the following lines deals with the coarse grid + // solver. Since our coarse grid is very coarse indeed, we decide for a + // direct solver (a Householder decomposition of the coarsest level matrix), + // even if its implementation is not particularly sophisticated. If our + // coarse mesh had many more cells than the five we have here, something + // better suited would obviously be necessary here. + template + void LaplaceProblem::solve() + { + MGTransferPrebuilt> mg_transfer(mg_constrained_dofs); + mg_transfer.build_matrices(dof_handler); + + FullMatrix coarse_matrix; + coarse_matrix.copy_from(mg_matrices[0]); + MGCoarseGridHouseholder<> coarse_grid_solver; + coarse_grid_solver.initialize(coarse_matrix); + + // The next component of a multilevel solver or preconditioner is that we + // need a smoother on each level. A common choice for this is to use the + // application of a relaxation method (such as the SOR, Jacobi or Richardson + // method) or a small number of iterations of a solver method (such as CG or + // GMRES). The mg::SmootherRelaxation and MGSmootherPrecondition classes + // provide support for these two kinds of smoothers. Here, we opt for the + // application of a single SOR iteration. To this end, we define an + // appropriate alias and then setup a smoother object. + // + // The last step is to initialize the smoother object with our level + // matrices and to set some smoothing parameters. The + // initialize() function can optionally take additional + // arguments that will be passed to the smoother object on each level. In + // the current case for the SOR smoother, this could, for example, include + // a relaxation parameter. However, we here leave these at their default + // values. The call to set_steps() indicates that we will use + // two pre- and two post-smoothing steps on each level; to use a variable + // number of smoother steps on different levels, more options can be set + // in the constructor call to the mg_smoother object. 
+ // + // The last step results from the fact that we use the SOR method as a + // smoother - which is not symmetric - but we use the conjugate gradient + // iteration (which requires a symmetric preconditioner) below, we need to + // let the multilevel preconditioner make sure that we get a symmetric + // operator even for nonsymmetric smoothers: + using Smoother = PreconditionSOR>; + mg::SmootherRelaxation> mg_smoother; + mg_smoother.initialize(mg_matrices); + mg_smoother.set_steps(2); + mg_smoother.set_symmetric(true); + + // The next preparatory step is that we must wrap our level and interface + // matrices in an object having the required multiplication functions. We + // will create two objects for the interface objects going from coarse to + // fine and the other way around; the multigrid algorithm will later use + // the transpose operator for the latter operation, allowing us to + // initialize both up and down versions of the operator with the matrices + // we already built: + mg::Matrix> mg_matrix(mg_matrices); + mg::Matrix> mg_interface_up(mg_interface_matrices); + mg::Matrix> mg_interface_down(mg_interface_matrices); + + // Now, we are ready to set up the V-cycle operator and the multilevel + // preconditioner. + Multigrid> mg( + mg_matrix, coarse_grid_solver, mg_transfer, mg_smoother, mg_smoother); + mg.set_edge_matrices(mg_interface_down, mg_interface_up); + + PreconditionMG, MGTransferPrebuilt>> + preconditioner(dof_handler, mg, mg_transfer); + + // With all this together, we can finally get about solving the linear + // system in the usual way: + SolverControl solver_control(1000, 1e-12); + SolverCG<> solver(solver_control); + + solution = 0; + + solver.solve(system_matrix, solution, system_rhs, preconditioner); + std::cout << " Number of CG iterations: " << solver_control.last_step() + << "\n" + << std::endl; + constraints.distribute(solution); + } + + + + // @sect4{Postprocessing} + + // The following two functions postprocess a solution once it is + // computed. In particular, the first one refines the mesh at the beginning + // of each cycle while the second one outputs results at the end of each + // such cycle. The functions are almost unchanged from those in step-6. + template + void LaplaceProblem::refine_grid() + { + Vector estimated_error_per_cell(triangulation.n_active_cells()); + + KellyErrorEstimator::estimate( + dof_handler, + QGauss(degree + 2), + std::map *>(), + solution, + estimated_error_per_cell); + GridRefinement::refine_and_coarsen_fixed_number(triangulation, + estimated_error_per_cell, + 0.3, + 0.03); + triangulation.execute_coarsening_and_refinement(); + } + + + + template + void LaplaceProblem::output_results(const unsigned int cycle) const + { + DataOut data_out; + + data_out.attach_dof_handler(dof_handler); + data_out.add_data_vector(solution, "solution"); + data_out.build_patches(); + + std::ofstream output("solution-" + std::to_string(cycle) + ".vtk"); + data_out.write_vtk(output); + } + + + // @sect4{LaplaceProblem::run} + + // Like several of the functions above, this is almost exactly a copy of + // the corresponding function in step-6. The only difference is the call to + // assemble_multigrid that takes care of forming the matrices + // on every level that we need in the multigrid method. 
+  template <int dim>
+  void LaplaceProblem<dim>::run()
+  {
+    for (unsigned int cycle = 0; cycle < 8; ++cycle)
+      {
+        std::cout << "Cycle " << cycle << std::endl;
+
+        if (cycle == 0)
+          {
+            GridGenerator::hyper_ball(triangulation);
+            triangulation.refine_global(2);
+          }
+        else
+          refine_grid();
+
+        std::cout << " Number of active cells: "
+                  << triangulation.n_active_cells() << std::endl;
+
+        setup_system();
+
+        assemble_system();
+        assemble_multigrid();
+
+        solve();
+        output_results(cycle);
+      }
+  }
+} // namespace Step16
+
+
+// @sect3{The main() function}
+//
+// This is again the same function as in step-6:
+int main()
+{
+  try
+    {
+      using namespace Step16;
+
+      LaplaceProblem<2> laplace_problem(1);
+      laplace_problem.run();
+    }
+  catch (std::exception &exc)
+    {
+      std::cerr << std::endl
+                << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+      std::cerr << "Exception on processing: " << std::endl
+                << exc.what() << std::endl
+                << "Aborting!" << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+
+      return 1;
+    }
+  catch (...)
+    {
+      std::cerr << std::endl
+                << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+      std::cerr << "Unknown exception!" << std::endl
+                << "Aborting!" << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+      return 1;
+    }
+
+  return 0;
+}
-- 
2.39.5
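The patch above only reserves step-63: the smoother it inherits from step-16 is a point-wise SOR. As a rough sketch of where the program is headed (block smoothers in geometric multigrid), the PreconditionSOR smoother in solve() could later be replaced by a cell-patch block Jacobi smoother along the following lines. This is an assumption-laden sketch, not code from this patch: it presumes deal.II's RelaxationBlockJacobi and MGSmootherPrecondition classes and the function DoFTools::make_cell_patches(), and it reuses the mg_matrices, dof_handler, and triangulation members defined above. The finished tutorial may well organize this differently.

// Hedged sketch only (not part of this patch): a cell-wise block Jacobi
// smoother for the multigrid hierarchy, replacing the PreconditionSOR
// smoother used in LaplaceProblem::solve() above.
// Assumed additional include: #include <deal.II/lac/relaxation_block.h>
using BlockSmoother =
  RelaxationBlockJacobi<SparseMatrix<double>, double, Vector<double>>;

MGSmootherPrecondition<SparseMatrix<double>, BlockSmoother, Vector<double>>
  mg_block_smoother;

// One AdditionalData object per level; its block_list has one row per
// patch, and here every cell of the level becomes its own patch.
MGLevelObject<typename BlockSmoother::AdditionalData> smoother_data(
  0, triangulation.n_levels() - 1);

for (unsigned int level = 0; level < triangulation.n_levels(); ++level)
  {
    DoFTools::make_cell_patches(smoother_data[level].block_list,
                                dof_handler,
                                level);
    // Depending on the deal.II version, the block list may still need to
    // be compressed before it can be used:
    smoother_data[level].block_list.compress();

    smoother_data[level].relaxation = 1.0; // damping of each block solve
  }

mg_block_smoother.initialize(mg_matrices, smoother_data);
mg_block_smoother.set_steps(2);

Since block Jacobi yields a symmetric preconditioner for our symmetric level matrices, it can be used inside the conjugate gradient iteration as is; a block SOR variant would, like the point smoother above, additionally need a call to set_symmetric(true).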