From: Lóránt Hadnagy Date: Mon, 29 Jul 2024 22:11:28 +0000 (+0300) Subject: Revert changes to step-49 and step-50 X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=refs%2Fpull%2F17399%2Fhead;p=dealii.git Revert changes to step-49 and step-50 --- diff --git a/doc/news/changes/minor/20240728AE7TB99 b/doc/news/changes/minor/20240728AE7TB99 deleted file mode 100644 index 5d14093f44..0000000000 --- a/doc/news/changes/minor/20240728AE7TB99 +++ /dev/null @@ -1,4 +0,0 @@ -Uniformly apply the practice of importing the 'dealii' namespace -only within the StepXX namespace across all tutorial programs. -
-(Lóránt Hadnagy, 2024/07/28) diff --git a/doc/news/changes/minor/20240730AE7TB99 b/doc/news/changes/minor/20240730AE7TB99 new file mode 100644 index 0000000000..e661d4f197 --- /dev/null +++ b/doc/news/changes/minor/20240730AE7TB99 @@ -0,0 +1,6 @@ +Uniformly apply the practice of importing the 'dealii' namespace +only within the StepXX namespace across all tutorial programs, +except for step-49 and step-50. These steps will be handled separately +to ensure proper integration and testing. +
+(Lóránt Hadnagy, 2024/07/30)
diff --git a/examples/step-49/step-49.cc b/examples/step-49/step-49.cc
index 25be3d81fa..ca73c28968 100644
--- a/examples/step-49/step-49.cc
+++ b/examples/step-49/step-49.cc
@@ -40,327 +40,323 @@ #include

-namespace Step49
+using namespace dealii;
+
+// @sect3{Generating output for a given mesh}
+
+// The following function generates some output for any of the meshes we will
+// be generating in the remainder of this program. In particular, it generates
+// the following information:
+//
+// - Some general information about the number of space dimensions in which
+//   this mesh lives and its number of cells.
+// - The number of boundary faces that use each boundary indicator, so that
+//   it can be compared with what we expect.
+//
+// Finally, the function outputs the mesh in VTU format that can easily be
+// visualized in Paraview or VisIt.
+template <int dim>
+void print_mesh_info(const Triangulation<dim> &triangulation,
+                     const std::string        &filename)
 {
-  using namespace dealii;
-
-  // @sect3{Generating output for a given mesh}
-
-  // The following function generates some output for any of the meshes we will
-  // be generating in the remainder of this program. In particular, it generates
-  // the following information:
-  //
-  // - Some general information about the number of space dimensions in which
-  //   this mesh lives and its number of cells.
-  // - The number of boundary faces that use each boundary indicator, so that
-  //   it can be compared with what we expect.
-  //
-  // Finally, the function outputs the mesh in VTU format that can easily be
-  // visualized in Paraview or VisIt.
-  template <int dim>
-  void print_mesh_info(const Triangulation<dim> &triangulation,
-                       const std::string        &filename)
+  std::cout << "Mesh info:" << std::endl
+            << " dimension: " << dim << std::endl
+            << " no. of cells: " << triangulation.n_active_cells() << std::endl;
+
+  // Next loop over all faces of all cells and find how often each
+  // boundary indicator is used (recall that if you access an element
+  // of a std::map object that doesn't exist, it is implicitly created
+  // and default initialized -- to zero, in the current case -- before
+  // we then increment it):
   {
-    std::cout << "Mesh info:" << std::endl
-              << " dimension: " << dim << std::endl
-              << " no. 
of cells: " << triangulation.n_active_cells()
-              << std::endl;
-
-    // Next loop over all faces of all cells and find how often each
-    // boundary indicator is used (recall that if you access an element
-    // of a std::map object that doesn't exist, it is implicitly created
-    // and default initialized -- to zero, in the current case -- before
-    // we then increment it):
-    {
-      std::map<types::boundary_id, unsigned int> boundary_count;
-      for (const auto &face : triangulation.active_face_iterators())
-        if (face->at_boundary())
-          boundary_count[face->boundary_id()]++;
-
-      std::cout << " boundary indicators: ";
-      for (const std::pair<const types::boundary_id, unsigned int> &pair :
-           boundary_count)
-        {
-          std::cout << pair.first << '(' << pair.second << " times) ";
-        }
-      std::cout << std::endl;
-    }
-
-    // Finally, produce a graphical representation of the mesh to an output
-    // file:
-    std::ofstream out(filename);
-    GridOut       grid_out;
-    grid_out.write_vtu(triangulation, out);
-    std::cout << " written to " << filename << std::endl << std::endl;
+  std::map<types::boundary_id, unsigned int> boundary_count;
+  for (const auto &face : triangulation.active_face_iterators())
+    if (face->at_boundary())
+      boundary_count[face->boundary_id()]++;
+
+  std::cout << " boundary indicators: ";
+  for (const std::pair<const types::boundary_id, unsigned int> &pair :
+       boundary_count)
+    {
+      std::cout << pair.first << '(' << pair.second << " times) ";
+    }
+  std::cout << std::endl;
 }

-  // @sect3{Main routines}
-
-  // @sect4{grid_1: Loading a mesh generated by gmsh}
-
-  // In this first example, we show how to load the mesh for which we have
-  // discussed in the introduction how to generate it. This follows the same
-  // pattern as used in step-5 to load a mesh, although there it was written in
-  // a different file format (UCD instead of MSH).
-  //
-  // It's worth noting that it is possible to save manifold ids when using
-  // the gmsh api. If we specify
-  //
-  // @code
-  // GMSH_INCLUDE_DIR
-  // GMSH_LIBRARY
-  // @endcode
-  //
-  // when building deal.II, then DEAL_II_GMSH_WITH_API gets defined
-  // and we can use GridIn::read_msh(). More details on the
-  // function can be found in its deal.II documentation.
-  //
-  // We will be utilizing the SphericalManifold class for the holes. We need to
-  // assign manifold IDs for this purpose. As physical IDs from Gmsh are
-  // assigned to boundary IDs in deal.II, we will assign manifold IDs based on
-  // the boundary IDs loaded from the file.
-  void grid_1()
-  {
-    const Point<2> Top_right_hole_origin(0.42, 2.0);
-    const Point<2> Bottom_left_hole_origin(-2.1, -1.54);
+  // Finally, produce a graphical representation of the mesh to an output
+  // file:
+  std::ofstream out(filename);
+  GridOut       grid_out;
+  grid_out.write_vtu(triangulation, out);
+  std::cout << " written to " << filename << std::endl << std::endl;
+}

-    const SphericalManifold<2> Top_right_manifold(Top_right_hole_origin);
-    const SphericalManifold<2> Bottom_left_manifold(Bottom_left_hole_origin);
+// @sect3{Main routines}
+
+// @sect4{grid_1: Loading a mesh generated by gmsh}
+
+// In this first example, we show how to load the mesh for which we have
+// discussed in the introduction how to generate it. This follows the same
+// pattern as used in step-5 to load a mesh, although there it was written in
+// a different file format (UCD instead of MSH).
+//
+// It's worth noting that it is possible to save manifold ids when using
+// the gmsh api. If we specify
+//
+// @code
+// GMSH_INCLUDE_DIR
+// GMSH_LIBRARY
+// @endcode
+//
+// when building deal.II, then DEAL_II_GMSH_WITH_API gets defined
+// and we can use GridIn::read_msh(). 
More details on the
+// function can be found in its deal.II documentation.
+//
+// We will be utilizing the SphericalManifold class for the holes. We need to
+// assign manifold IDs for this purpose. As physical IDs from Gmsh are assigned
+// to boundary IDs in deal.II, we will assign manifold IDs based on the boundary
+// IDs loaded from the file.
+void grid_1()
+{
+  const Point<2> Top_right_hole_origin(0.42, 2.0);
+  const Point<2> Bottom_left_hole_origin(-2.1, -1.54);

-    Triangulation<2> triangulation;
+  const SphericalManifold<2> Top_right_manifold(Top_right_hole_origin);
+  const SphericalManifold<2> Bottom_left_manifold(Bottom_left_hole_origin);

-    GridIn<2> gridin;
-    gridin.attach_triangulation(triangulation);
-    std::ifstream f("example.msh");
-    gridin.read_msh(f);
+  Triangulation<2> triangulation;

-    // Here is where we get the boundary IDs made in gmsh, which are in the
-    // first coordinate position, and assign them to manifold ids. With our
-    // example, we have boundary ID 1 on the top right hole and 2 and 3 for the
-    // bottom left hole. We assign both of these boundary IDs 2 because together
-    // they make a circle to match the manifold we will assign to it later.
-    triangulation.set_all_manifold_ids_on_boundary(1, 1); // top right hole
-    triangulation.set_all_manifold_ids_on_boundary(
-      2,
-      2); // top of bottom left hole
-    triangulation.set_all_manifold_ids_on_boundary(
-      3, 2); // bottom of bottom left hole
+  GridIn<2> gridin;
+  gridin.attach_triangulation(triangulation);
+  std::ifstream f("example.msh");
+  gridin.read_msh(f);

-    triangulation.set_manifold(1, Top_right_manifold);
-    triangulation.set_manifold(2, Bottom_left_manifold);
+  // Here is where we get the boundary IDs made in gmsh, which are in the first
+  // coordinate position, and assign them to manifold ids. With our example, we
+  // have boundary ID 1 on the top right hole and 2 and 3 for the bottom left
+  // hole. We assign both of these boundary IDs 2 because together they make a
+  // circle to match the manifold we will assign to it later.
+  triangulation.set_all_manifold_ids_on_boundary(1, 1); // top right hole
+  triangulation.set_all_manifold_ids_on_boundary(2,
+                                                 2); // top of bottom left hole
+  triangulation.set_all_manifold_ids_on_boundary(
+    3, 2); // bottom of bottom left hole

-    triangulation.refine_global(2);
+  triangulation.set_manifold(1, Top_right_manifold);
+  triangulation.set_manifold(2, Bottom_left_manifold);

-    print_mesh_info(triangulation, "grid-1.vtu");
-  }
+  triangulation.refine_global(2);

+  print_mesh_info(triangulation, "grid-1.vtu");
+}

-  // @sect4{grid_2: Merging triangulations}
-  // Here, we first create two triangulations and then merge them into one. 
As
-  // discussed in the introduction, it is important to ensure that the vertices
-  // at the common interface are located at the same coordinates.
-  void grid_2()
-  {
-    Triangulation<2> tria1;
-    GridGenerator::hyper_cube_with_cylindrical_hole(tria1, 0.25, 1.0);
-
-    Triangulation<2> tria2;
-    std::vector<unsigned int> repetitions(2);
-    repetitions[0] = 3;
-    repetitions[1] = 2;
-    GridGenerator::subdivided_hyper_rectangle(tria2,
-                                              repetitions,
-                                              Point<2>(1.0, -1.0),
-                                              Point<2>(4.0, 1.0));
-
-    Triangulation<2> triangulation;
-    GridGenerator::merge_triangulations(tria1, tria2, triangulation);
-
-    print_mesh_info(triangulation, "grid-2.vtu");
-  }
+// @sect4{grid_2: Merging triangulations}
+// Here, we first create two triangulations and then merge them into one. As
+// discussed in the introduction, it is important to ensure that the vertices
+// at the common interface are located at the same coordinates.
+void grid_2()
+{
+  Triangulation<2> tria1;
+  GridGenerator::hyper_cube_with_cylindrical_hole(tria1, 0.25, 1.0);
+
+  Triangulation<2> tria2;
+  std::vector<unsigned int> repetitions(2);
+  repetitions[0] = 3;
+  repetitions[1] = 2;
+  GridGenerator::subdivided_hyper_rectangle(tria2,
+                                            repetitions,
+                                            Point<2>(1.0, -1.0),
+                                            Point<2>(4.0, 1.0));
+
+  Triangulation<2> triangulation;
+  GridGenerator::merge_triangulations(tria1, tria2, triangulation);
+
+  print_mesh_info(triangulation, "grid-2.vtu");
+}

-  // @sect4{grid_3: Moving vertices}
-
-  // In this function, we move vertices of a mesh. This is simpler than one
-  // usually expects: if you ask a cell using cell-@>vertex(i) for
-  // the coordinates of its ith vertex, it doesn't just provide the
-  // location of this vertex but in fact a reference to the location where these
-  // coordinates are stored. We can then modify the value stored there.
-  //
-  // So this is what we do in the first part of this function: We create a
-  // square of geometry $[-1,1]^2$ with a circular hole with radius 0.25 located
-  // at the origin. We then loop over all cells and all vertices and if a vertex
-  // has a $y$ coordinate equal to one, we move it upward by 0.5.
-  //
-  // Note that this sort of procedure does not usually work this way because one
-  // will typically encounter the same vertices multiple times and may move them
-  // more than once. It works here because we select the vertices we want to use
-  // based on their geometric location, and a vertex moved once will fail this
-  // test in the future. 
A more general approach to this problem would have been
-  // to keep a std::set of those vertex indices that we have already moved
-  // (which we can obtain using cell-@>vertex_index(i)) and only
-  // move those vertices whose index isn't in the set yet.
-  void grid_3()
-  {
-    Triangulation<2> triangulation;
-    GridGenerator::hyper_cube_with_cylindrical_hole(triangulation, 0.25, 1.0);
-    for (const auto &cell : triangulation.active_cell_iterators())
-      {
-        for (const auto i : cell->vertex_indices())
-          {
-            Point<2> &v = cell->vertex(i);
-            if (std::abs(v[1] - 1.0) < 1e-5)
-              v[1] += 0.5;
-          }
-      }
+// @sect4{grid_3: Moving vertices}
+
+// In this function, we move vertices of a mesh. This is simpler than one
+// usually expects: if you ask a cell using cell-@>vertex(i) for
+// the coordinates of its ith vertex, it doesn't just provide the
+// location of this vertex but in fact a reference to the location where these
+// coordinates are stored. We can then modify the value stored there.
+//
+// So this is what we do in the first part of this function: We create a
+// square of geometry $[-1,1]^2$ with a circular hole with radius 0.25 located
+// at the origin. We then loop over all cells and all vertices and if a vertex
+// has a $y$ coordinate equal to one, we move it upward by 0.5.
+//
+// Note that this sort of procedure does not usually work this way because one
+// will typically encounter the same vertices multiple times and may move them
+// more than once. It works here because we select the vertices we want to use
+// based on their geometric location, and a vertex moved once will fail this
+// test in the future. A more general approach to this problem would have been
+// to keep a std::set of those vertex indices that we have already moved
+// (which we can obtain using cell-@>vertex_index(i)) and only
+// move those vertices whose index isn't in the set yet.
+void grid_3()
+{
+  Triangulation<2> triangulation;
+  GridGenerator::hyper_cube_with_cylindrical_hole(triangulation, 0.25, 1.0);

-    // In the second step we will refine the mesh twice. To do this correctly,
-    // we should place new points on the interior boundary along the surface of
-    // a circle centered at the origin. Fortunately,
-    // GridGenerator::hyper_cube_with_cylindrical_hole already attaches a
-    // Manifold object to the interior boundary, so we do not need to do
-    // anything but refine the mesh (see the results
-    // section for a fully worked example where we do attach a
-    // Manifold object).
-    triangulation.refine_global(2);
-    print_mesh_info(triangulation, "grid-3.vtu");
-  }
+  for (const auto &cell : triangulation.active_cell_iterators())
+    {
+      for (const auto i : cell->vertex_indices())
+        {
+          Point<2> &v = cell->vertex(i);
+          if (std::abs(v[1] - 1.0) < 1e-5)
+            v[1] += 0.5;
+        }
+    }

-  // There is one snag to doing things as shown above: If one moves the nodes on
-  // the boundary as shown here, one often ends up with cells in the interior
-  // that are badly distorted since the interior nodes were not moved around.
-  // This is not that much of a problem in the current case since the mesh did
-  // not contain any internal nodes when the nodes were moved -- it was the
-  // coarse mesh and it so happened that all vertices are at the boundary. It's
-  // also the case that the movement we had here was, compared to the average
-  // cell size, not overly dramatic. Nevertheless, sometimes one does want to
-  // move vertices by a significant distance, and in that case one needs to move
-  // internal nodes as well. 
One way to do that automatically is to call the
-  // function GridTools::laplace_transform that takes a set of transformed
-  // vertex coordinates and moves all of the other vertices in such a way that
-  // the resulting mesh has, in some sense, a small distortion.
+  // In the second step we will refine the mesh twice. To do this correctly,
+  // we should place new points on the interior boundary along the surface of
+  // a circle centered at the origin. Fortunately,
+  // GridGenerator::hyper_cube_with_cylindrical_hole already attaches a
+  // Manifold object to the interior boundary, so we do not need to do
+  // anything but refine the mesh (see the results
+  // section for a fully worked example where we do attach a
+  // Manifold object).
+  triangulation.refine_global(2);
+  print_mesh_info(triangulation, "grid-3.vtu");
+}

+// There is one snag to doing things as shown above: If one moves the nodes on
+// the boundary as shown here, one often ends up with cells in the interior
+// that are badly distorted since the interior nodes were not moved around. This
+// is not that much of a problem in the current case since the mesh did not
+// contain any internal nodes when the nodes were moved -- it was the coarse
+// mesh and it so happened that all vertices are at the boundary. It's also
+// the case that the movement we had here was, compared to the average cell
+// size, not overly dramatic. Nevertheless, sometimes one does want to move
+// vertices by a significant distance, and in that case one needs to move
+// internal nodes as well. One way to do that automatically is to call the
+// function GridTools::laplace_transform that takes a set of transformed
+// vertex coordinates and moves all of the other vertices in such a way that the
+// resulting mesh has, in some sense, a small distortion.

-  // @sect4{grid_4: Demonstrating extrude_triangulation}
-  // This example takes the initial grid from the previous function and simply
-  // extrudes it into the third space dimension:
-  void grid_4()
-  {
-    Triangulation<2> triangulation;
-    Triangulation<3> out;
-    GridGenerator::hyper_cube_with_cylindrical_hole(triangulation, 0.25, 1.0);
+// @sect4{grid_4: Demonstrating extrude_triangulation}

-    GridGenerator::extrude_triangulation(triangulation, 3, 2.0, out);
-    print_mesh_info(out, "grid-4.vtu");
-  }
+// This example takes the initial grid from the previous function and simply
+// extrudes it into the third space dimension:
+void grid_4()
+{
+  Triangulation<2> triangulation;
+  Triangulation<3> out;
+  GridGenerator::hyper_cube_with_cylindrical_hole(triangulation, 0.25, 1.0);
+  GridGenerator::extrude_triangulation(triangulation, 3, 2.0, out);
+  print_mesh_info(out, "grid-4.vtu");
+}

-  // @sect4{grid_5: Demonstrating GridTools::transform, part 1}
-
-  // This and the next example first create a mesh and then transform it by
-  // moving every node of the mesh according to a function that takes a point
-  // and returns a mapped point. In this case, we transform $(x,y) \mapsto
-  // (x,y+\sin(\pi x/5))$.
-  //
-  // GridTools::transform() takes a triangulation and an argument that
-  // can be called like a function taking a Point and returning a
-  // Point. There are different ways of providing such an argument: It
-  // could be a pointer to a function; it could be an object of a class
-  // that has an `operator()`; it could be a lambda function; or it
-  // could be anything that is described via a
-  // std::function@<Point@<2@>(const Point@<2@>)@> object.
-  //
-  // Decidedly the more modern way is to use a lambda function that
-  // takes a Point and returns a Point, and that is what we do in the
-  // following:
-  void grid_5()
-  {
-    Triangulation<2> triangulation;
-    std::vector<unsigned int> repetitions(2);
-    repetitions[0] = 14;
-    repetitions[1] = 2;
-    GridGenerator::subdivided_hyper_rectangle(triangulation,
-                                              repetitions,
-                                              Point<2>(0.0, 0.0),
-                                              Point<2>(10.0, 1.0));
-
-    GridTools::transform(
-      [](const Point<2> &in) {
-        return Point<2>(in[0], in[1] + std::sin(numbers::PI * in[0] / 5.0));
-      },
-      triangulation);
-    print_mesh_info(triangulation, "grid-5.vtu");
-  }
+// @sect4{grid_5: Demonstrating GridTools::transform, part 1}
+
+// This and the next example first create a mesh and then transform it by
+// moving every node of the mesh according to a function that takes a point
+// and returns a mapped point. In this case, we transform $(x,y) \mapsto
+// (x,y+\sin(\pi x/5))$.
+//
+// GridTools::transform() takes a triangulation and an argument that
+// can be called like a function taking a Point and returning a
+// Point. There are different ways of providing such an argument: It
+// could be a pointer to a function; it could be an object of a class
+// that has an `operator()`; it could be a lambda function; or it
+// could be anything that is described via a
+// std::function@<Point@<2@>(const Point@<2@>)@> object. 
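As an aside to that list of options: a plain free function works just as well as the lambda used below. A minimal sketch, with a hypothetical helper name and the same mapping as in grid_5:

    // A free function with the right signature can be handed directly
    // to GridTools::transform() in place of a lambda:
    Point<2> shift_up_by_sine(const Point<2> &in)
    {
      return Point<2>(in[0], in[1] + std::sin(numbers::PI * in[0] / 5.0));
    }

    // ... and elsewhere:
    //   GridTools::transform(&shift_up_by_sine, triangulation);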
+//
+// Decidedly the more modern way is to use a lambda function that
+// takes a Point and returns a Point, and that is what we do in the
+// following:
+void grid_5()
+{
+  Triangulation<2> triangulation;
+  std::vector<unsigned int> repetitions(2);
+  repetitions[0] = 14;
+  repetitions[1] = 2;
+  GridGenerator::subdivided_hyper_rectangle(triangulation,
+                                            repetitions,
+                                            Point<2>(0.0, 0.0),
+                                            Point<2>(10.0, 1.0));
+
+  GridTools::transform(
+    [](const Point<2> &in) {
+      return Point<2>(in[0], in[1] + std::sin(numbers::PI * in[0] / 5.0));
+    },
+    triangulation);
+  print_mesh_info(triangulation, "grid-5.vtu");
+}

-  // @sect4{grid_6: Demonstrating GridTools::transform, part 2}
-  // In this second example of transforming points from an original to a new
-  // mesh, we will use the mapping $(x,y) \mapsto (x,\tanh(2y)/\tanh(2))$. To
-  // make things more interesting, rather than doing so in a single function as
-  // in the previous example, we here create an object with an
-  // operator() that will be called by GridTools::transform. Of
-  // course, this object may in reality be much more complex: the object may
-  // have member variables that play a role in computing the new locations of
-  // vertices.
-  struct Grid6Func
-  {
-    double trans(const double y) const
-    {
-      return std::tanh(2 * y) / std::tanh(2);
-    }

-    Point<2> operator()(const Point<2> &in) const
-    {
-      return {in[0], trans(in[1])};
-    }
-  };
+// @sect4{grid_6: Demonstrating GridTools::transform, part 2}

-  void grid_6()
+// In this second example of transforming points from an original to a new
+// mesh, we will use the mapping $(x,y) \mapsto (x,\tanh(2y)/\tanh(2))$. To
+// make things more interesting, rather than doing so in a single function as
+// in the previous example, we here create an object with an
+// operator() that will be called by GridTools::transform. Of
+// course, this object may in reality be much more complex: the object may
+// have member variables that play a role in computing the new locations of
+// vertices.
+struct Grid6Func
+{
+  double trans(const double y) const
+  {
+    return std::tanh(2 * y) / std::tanh(2);
+  }

+  Point<2> operator()(const Point<2> &in) const
   {
-    Triangulation<2> triangulation;
-    std::vector<unsigned int> repetitions(2);
-    repetitions[0] = repetitions[1] = 40;
-    GridGenerator::subdivided_hyper_rectangle(triangulation,
-                                              repetitions,
-                                              Point<2>(0.0, 0.0),
-                                              Point<2>(1.0, 1.0));
-
-    GridTools::transform(Grid6Func(), triangulation);
-    print_mesh_info(triangulation, "grid-6.vtu");
+    return {in[0], trans(in[1])};
   }
+};
+
+
+void grid_6()
+{
+  Triangulation<2> triangulation;
+  std::vector<unsigned int> repetitions(2);
+  repetitions[0] = repetitions[1] = 40;
+  GridGenerator::subdivided_hyper_rectangle(triangulation,
+                                            repetitions,
+                                            Point<2>(0.0, 0.0),
+                                            Point<2>(1.0, 1.0));
+
+  GridTools::transform(Grid6Func(), triangulation);
+  print_mesh_info(triangulation, "grid-6.vtu");
+}

-  // @sect4{grid_7: Demonstrating distort_random}
+// @sect4{grid_7: Demonstrating distort_random}

-  // In this last example, we create a mesh and then distort its (interior)
-  // vertices by a random perturbation. This is not something you want to do for
-  // production computations (because results are generally better on meshes
-  // with "nicely shaped" cells than on the deformed cells produced by
-  // GridTools::distort_random()), but it is a useful tool for testing
-  // discretizations and codes to make sure they don't work just by accident
-  // because the mesh happens to be uniformly structured and supporting
-  // superconvergence properties. 
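For reference, the third argument in the GridTools::distort_random() call below is the keep_boundary flag. A self-contained sketch of the effect (on a simple cube mesh, not part of the tutorial itself):

    Triangulation<2> tria;
    GridGenerator::hyper_cube(tria);
    tria.refine_global(3);
    // Move each vertex by up to a fraction (here 0.3) of the shortest
    // edge adjacent to it; with keep_boundary set to true, vertices on
    // the boundary stay in place and only interior ones are perturbed.
    GridTools::distort_random(0.3, tria, /*keep_boundary=*/true);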
-  void grid_7()
-  {
-    Triangulation<2> triangulation;
-    std::vector<unsigned int> repetitions(2);
-    repetitions[0] = repetitions[1] = 16;
-    GridGenerator::subdivided_hyper_rectangle(triangulation,
-                                              repetitions,
-                                              Point<2>(0.0, 0.0),
-                                              Point<2>(1.0, 1.0));
-
-    GridTools::distort_random(0.3, triangulation, true);
-    print_mesh_info(triangulation, "grid-7.vtu");
-  }
-} // namespace Step49
+// In this last example, we create a mesh and then distort its (interior)
+// vertices by a random perturbation. This is not something you want to do for
+// production computations (because results are generally better on meshes
+// with "nicely shaped" cells than on the deformed cells produced by
+// GridTools::distort_random()), but it is a useful tool for testing
+// discretizations and codes to make sure they don't work just by accident
+// because the mesh happens to be uniformly structured and supporting
+// superconvergence properties.
+void grid_7()
+{
+  Triangulation<2> triangulation;
+  std::vector<unsigned int> repetitions(2);
+  repetitions[0] = repetitions[1] = 16;
+  GridGenerator::subdivided_hyper_rectangle(triangulation,
+                                            repetitions,
+                                            Point<2>(0.0, 0.0),
+                                            Point<2>(1.0, 1.0));
+
+  GridTools::distort_random(0.3, triangulation, true);
+  print_mesh_info(triangulation, "grid-7.vtu");
+}

 // @sect3{The main function}

@@ -371,7 +367,6 @@ int main()
 {
   try
     {
-      using namespace Step49;
       grid_1();
       grid_2();
       grid_3();
diff --git a/examples/step-50/step-50.cc b/examples/step-50/step-50.cc
index d535bf8297..7b138cf951 100644
--- a/examples/step-50/step-50.cc
+++ b/examples/step-50/step-50.cc
@@ -84,1466 +84,1450 @@ namespace LA
 #include
 #include

-namespace Step50
-{
-  using namespace dealii;
+using namespace dealii;
+

-  // @sect3{Coefficients and helper classes}
+// @sect3{Coefficients and helper classes}

-  // MatrixFree operators must use the
-  // LinearAlgebra::distributed::Vector vector type. Here we define
-  // operations which copy to and from Trilinos vectors for compatibility with
-  // the matrix-based code. Note that this functionality does not currently
-  // exist for PETSc vector types, so Trilinos must be installed to use the
-  // MatrixFree solver in this tutorial.
-  namespace ChangeVectorTypes
+// MatrixFree operators must use the
+// LinearAlgebra::distributed::Vector vector type. Here we define
+// operations which copy to and from Trilinos vectors for compatibility with
+// the matrix-based code. Note that this functionality does not currently
+// exist for PETSc vector types, so Trilinos must be installed to use the
+// MatrixFree solver in this tutorial. 
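To make the intended use of these helpers concrete, here is a hedged sketch of a round trip between the two vector types (assuming both vectors have already been reinit()ed over the same locally owned DoFs; the variable names mirror the class members introduced further down):

    LinearAlgebra::distributed::Vector<double> mf_vector(locally_owned_dofs,
                                                         mpi_communicator);
    // Trilinos vector -> matrix-free vector ...
    ChangeVectorTypes::copy(mf_vector, solution);
    // ... and back again after applying a matrix-free operator:
    ChangeVectorTypes::copy(solution, mf_vector);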
+namespace ChangeVectorTypes
+{
+  template <typename number>
+  void copy(LA::MPI::Vector                                       &out,
+            const LinearAlgebra::distributed::Vector<number>      &in)
   {
-    template <typename number>
-    void copy(LA::MPI::Vector                                  &out,
-              const LinearAlgebra::distributed::Vector<number> &in)
-    {
-      LinearAlgebra::ReadWriteVector<double> rwv(out.locally_owned_elements());
-      rwv.import_elements(in, VectorOperation::insert);
+    LinearAlgebra::ReadWriteVector<double> rwv(out.locally_owned_elements());
+    rwv.import_elements(in, VectorOperation::insert);
 #ifdef USE_PETSC_LA
-      AssertThrow(false,
-                  ExcMessage("ChangeVectorTypes::copy() not implemented for "
-                             "PETSc vector types."));
+    AssertThrow(false,
+                ExcMessage("ChangeVectorTypes::copy() not implemented for "
+                           "PETSc vector types."));
 #else
-      out.import_elements(rwv, VectorOperation::insert);
+    out.import_elements(rwv, VectorOperation::insert);
 #endif
-    }
+  }

-    template <typename number>
-    void copy(LinearAlgebra::distributed::Vector<number> &out,
-              const LA::MPI::Vector                      &in)
-    {
-      LinearAlgebra::ReadWriteVector<double> rwv;
+  template <typename number>
+  void copy(LinearAlgebra::distributed::Vector<number> &out,
+            const LA::MPI::Vector                      &in)
+  {
+    LinearAlgebra::ReadWriteVector<double> rwv;
 #ifdef USE_PETSC_LA
-      (void)in;
-      AssertThrow(false,
-                  ExcMessage("ChangeVectorTypes::copy() not implemented for "
-                             "PETSc vector types."));
+    (void)in;
+    AssertThrow(false,
+                ExcMessage("ChangeVectorTypes::copy() not implemented for "
+                           "PETSc vector types."));
 #else
-      rwv.reinit(in);
+    rwv.reinit(in);
 #endif
-      out.import_elements(rwv, VectorOperation::insert);
-    }
-  } // namespace ChangeVectorTypes
+    out.import_elements(rwv, VectorOperation::insert);
+  }
+} // namespace ChangeVectorTypes

-  // Let's move on to the description of the problem we want to solve.
-  // We set the right-hand side function to 1.0. The @p value function returning a
-  // VectorizedArray is used by the matrix-free code path.
-  template <int dim>
-  class RightHandSide : public Function<dim>
+// Let's move on to the description of the problem we want to solve.
+// We set the right-hand side function to 1.0. The @p value function returning a
+// VectorizedArray is used by the matrix-free code path.
+template <int dim>
+class RightHandSide : public Function<dim>
+{
+public:
+  virtual double value(const Point<dim> & /*p*/,
+                       const unsigned int /*component*/ = 0) const override
   {
-  public:
-    virtual double value(const Point<dim> & /*p*/,
-                         const unsigned int /*component*/ = 0) const override
-    {
-      return 1.0;
-    }
+    return 1.0;
+  }

-    template <typename number>
-    VectorizedArray<number>
-    value(const Point<dim, VectorizedArray<number>> & /*p*/,
-          const unsigned int /*component*/ = 0) const
-    {
-      return VectorizedArray<number>(1.0);
-    }
-  };
+  template <typename number>
+  VectorizedArray<number>
+  value(const Point<dim, VectorizedArray<number>> & /*p*/,
+        const unsigned int /*component*/ = 0) const
+  {
+    return VectorizedArray<number>(1.0);
+  }
+};

-  // This next class represents the diffusion coefficient. We use a variable
-  // coefficient which is 100.0 at any point where at least one coordinate is
-  // less than -0.5, and 1.0 at all other points. As above, a separate value()
-  // returning a VectorizedArray is used for the matrix-free code. An @p
-  // average() function computes the arithmetic average for a set of points. 
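As a quick sanity check of that definition (a hypothetical snippet, not in the tutorial):

    const Coefficient<2> coefficient;
    // One coordinate below -0.5 -> 100.0; otherwise -> 1.0:
    Assert(coefficient.value(Point<2>(-0.6, 0.2)) == 100.0, ExcInternalError());
    Assert(coefficient.value(Point<2>(0.4, 0.4)) == 1.0, ExcInternalError());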
-  template <int dim>
-  class Coefficient : public Function<dim>
-  {
-  public:
-    virtual double value(const Point<dim> &p,
-                         const unsigned int /*component*/ = 0) const override;
-
-    template <typename number>
-    VectorizedArray<number>
-    value(const Point<dim, VectorizedArray<number>> &p,
-          const unsigned int /*component*/ = 0) const;
-
-    template <typename number>
-    number average_value(
-      const std::vector<Point<dim, VectorizedArray<number>>> &points) const;
-
-    // When using a coefficient in the MatrixFree framework, we also
-    // need a function that creates a Table of coefficient values for a
-    // set of cells provided by the MatrixFree operator argument here.
-    template <typename number>
-    std::shared_ptr<Table<2, VectorizedArray<number>>> make_coefficient_table(
-      const MatrixFree<dim, number> &mf_storage) const;
-  };
+// This next class represents the diffusion coefficient. We use a variable
+// coefficient which is 100.0 at any point where at least one coordinate is
+// less than -0.5, and 1.0 at all other points. As above, a separate value()
+// returning a VectorizedArray is used for the matrix-free code. An @p
+// average() function computes the arithmetic average for a set of points.
+template <int dim>
+class Coefficient : public Function<dim>
+{
+public:
+  virtual double value(const Point<dim> &p,
+                       const unsigned int /*component*/ = 0) const override;

+  template <typename number>
+  VectorizedArray<number>
+  value(const Point<dim, VectorizedArray<number>> &p,
+        const unsigned int /*component*/ = 0) const;

+  template <typename number>
+  number average_value(
+    const std::vector<Point<dim, VectorizedArray<number>>> &points) const;

-  template <int dim>
-  double Coefficient<dim>::value(const Point<dim> &p, const unsigned int) const
-  {
-    for (int d = 0; d < dim; ++d)
-      {
-        if (p[d] < -0.5)
-          return 100.0;
-      }
-    return 1.0;
-  }
+  // When using a coefficient in the MatrixFree framework, we also
+  // need a function that creates a Table of coefficient values for a
+  // set of cells provided by the MatrixFree operator argument here.
+  template <typename number>
+  std::shared_ptr<Table<2, VectorizedArray<number>>> make_coefficient_table(
+    const MatrixFree<dim, number> &mf_storage) const;
+};

-  template <int dim>
-  template <typename number>
-  VectorizedArray<number>
-  Coefficient<dim>::value(const Point<dim, VectorizedArray<number>> &p,
-                          const unsigned int) const
-  {
-    VectorizedArray<number> return_value = VectorizedArray<number>(1.0);
-    for (unsigned int i = 0; i < VectorizedArray<number>::size(); ++i)
-      {
-        for (int d = 0; d < dim; ++d)
-          if (p[d][i] < -0.5)
-            {
-              return_value[i] = 100.0;
-              break;
-            }
-      }
+template <int dim>
+double Coefficient<dim>::value(const Point<dim> &p, const unsigned int) const
+{
+  for (int d = 0; d < dim; ++d)
+    {
+      if (p[d] < -0.5)
+        return 100.0;
+    }
+  return 1.0;
+}

-    return return_value;
-  }
+template <int dim>
+template <typename number>
+VectorizedArray<number>
+Coefficient<dim>::value(const Point<dim, VectorizedArray<number>> &p,
+                        const unsigned int) const
+{
+  VectorizedArray<number> return_value = VectorizedArray<number>(1.0);
+  for (unsigned int i = 0; i < VectorizedArray<number>::size(); ++i)
+    {
+      for (int d = 0; d < dim; ++d)
+        if (p[d][i] < -0.5)
+          {
+            return_value[i] = 100.0;
+            break;
+          }
+    }

-  template <int dim>
-  template <typename number>
-  number Coefficient<dim>::average_value(
-    const std::vector<Point<dim, VectorizedArray<number>>> &points) const
-  {
-    number average(0);
-    for (unsigned int i = 0; i < points.size(); ++i)
-      average += value(points[i]);
-    average /= points.size();
+  return return_value;
+}

-    return average;
-  }
+template <int dim>
+template <typename number>
+number Coefficient<dim>::average_value(
+  const std::vector<Point<dim, VectorizedArray<number>>> &points) const
+{
+  number average(0);
+  for (unsigned int i = 0; i < points.size(); ++i)
+    average += value(points[i]);
+  average /= points.size();

-  template <int dim>
-  template <typename number>
-  std::shared_ptr<Table<2, VectorizedArray<number>>>
-  Coefficient<dim>::make_coefficient_table(
-    const MatrixFree<dim, number> &mf_storage) const
-  {
-    auto coefficient_table =
-      std::make_shared<Table<2, VectorizedArray<number>>>();

-    FEEvaluation<dim, -1, 0, 1, number> fe_eval(mf_storage);
+  return average;
+}

-    const unsigned int n_cells = mf_storage.n_cell_batches();

-    coefficient_table->reinit(n_cells, 1);

-    for (unsigned int cell = 0; cell < n_cells; ++cell)
-      {
-        fe_eval.reinit(cell);

-        VectorizedArray<number> average_value = 0.;
-        for (const unsigned int q : fe_eval.quadrature_point_indices())
-          average_value += value(fe_eval.quadrature_point(q));
-        average_value /= fe_eval.n_q_points;

-        (*coefficient_table)(cell, 0) = average_value;
-      }

-    return coefficient_table;
-  }

+template <int dim>
+template <typename number>
+std::shared_ptr<Table<2, VectorizedArray<number>>>
+Coefficient<dim>::make_coefficient_table(
+  const MatrixFree<dim, number> &mf_storage) const
+{
+  auto coefficient_table =
+    std::make_shared<Table<2, VectorizedArray<number>>>();

+  FEEvaluation<dim, -1, 0, 1, number> fe_eval(mf_storage);

+  const unsigned int n_cells = mf_storage.n_cell_batches();

+  coefficient_table->reinit(n_cells, 1);

+  for (unsigned int cell = 0; cell < n_cells; ++cell)
+    {
+      fe_eval.reinit(cell);
+      VectorizedArray<number> average_value = 0.;
+      for (const unsigned int q : fe_eval.quadrature_point_indices())
+        average_value += value(fe_eval.quadrature_point(q));
+      average_value /= fe_eval.n_q_points;
+      (*coefficient_table)(cell, 0) = average_value;
+    }

+  return coefficient_table;
+}

-  // @sect3{Run time parameters}

-  // We will use ParameterHandler to pass in parameters at runtime. The
-  // structure @p Settings parses and stores these parameters to be queried
-  // throughout the program.
-  struct Settings
-  {
-    bool try_parse(const std::string &prm_filename);

-    enum SolverType
-    {
-      gmg_mb,
-      gmg_mf,
-      amg
-    };
-
-    SolverType solver;
-
-    int          dimension;
-    double       smoother_dampen;
-    unsigned int smoother_steps;
-    unsigned int n_steps;
-    bool         output;
-  };

+// @sect3{Run time parameters}

+// We will use ParameterHandler to pass in parameters at runtime. The
+// structure @p Settings parses and stores these parameters to be queried
+// throughout the program.
+struct Settings
+{
+  bool try_parse(const std::string &prm_filename);

-  bool Settings::try_parse(const std::string &prm_filename)
+  enum SolverType
   {
-    ParameterHandler prm;
-    prm.declare_entry("dim",
-                      "2",
-                      Patterns::Integer(),
-                      "The problem dimension.");
-    prm.declare_entry("n_steps",
-                      "10",
-                      Patterns::Integer(0),
-                      "Number of adaptive refinement steps.");
-    prm.declare_entry("smoother dampen",
-                      "1.0",
-                      Patterns::Double(0.0),
-                      "Dampen factor for the smoother.");
-    prm.declare_entry("smoother steps",
-                      "1",
-                      Patterns::Integer(1),
-                      "Number of smoother steps.");
-    prm.declare_entry("solver",
-                      "MF",
-                      Patterns::Selection("MF|MB|AMG"),
-                      "Switch between matrix-free GMG, "
-                      "matrix-based GMG, and AMG.");
-    prm.declare_entry("output",
-                      "false",
-                      Patterns::Bool(),
-                      "Output graphical results.");
-
-    if (prm_filename.empty())
-      {
-        std::cout
-          << "****  Error: No input file provided!\n"
-          << "****  Error: Call this program as './step-50 input.prm'\n"
-          << '\n'
-          << "****  You may want to use one of the input files in this\n"
-          << "****  directory, or use the following default values\n"
-          << "****  to create an input file:\n";
-        if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
-          prm.print_parameters(std::cout, ParameterHandler::Text);
-        return false;
-      }
+    gmg_mb,
+    gmg_mf,
+    amg
+  };

-    try
-      {
-        prm.parse_input(prm_filename);
-      }
-    catch (std::exception &e)
-      {
-        if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
-          std::cerr << e.what() << std::endl;
-        return false;
-      }
+  SolverType solver;

-    if (prm.get("solver") == "MF")
-      this->solver = gmg_mf;
-    else if (prm.get("solver") == "MB")
-      this->solver = gmg_mb;
-    else if (prm.get("solver") == "AMG")
-      this->solver = amg;
-    else
-      AssertThrow(false, ExcNotImplemented());
-
-    this->dimension       = prm.get_integer("dim");
-    this->n_steps         = 
prm.get_integer("n_steps");
-    this->smoother_dampen = prm.get_double("smoother dampen");
-    this->smoother_steps  = prm.get_integer("smoother steps");
-    this->output          = prm.get_bool("output");
-
-    return true;
-  }
+  int          dimension;
+  double       smoother_dampen;
+  unsigned int smoother_steps;
+  unsigned int n_steps;
+  bool         output;
+};

+bool Settings::try_parse(const std::string &prm_filename)
+{
+  ParameterHandler prm;
+  prm.declare_entry("dim", "2", Patterns::Integer(), "The problem dimension.");
+  prm.declare_entry("n_steps",
+                    "10",
+                    Patterns::Integer(0),
+                    "Number of adaptive refinement steps.");
+  prm.declare_entry("smoother dampen",
+                    "1.0",
+                    Patterns::Double(0.0),
+                    "Dampen factor for the smoother.");
+  prm.declare_entry("smoother steps",
+                    "1",
+                    Patterns::Integer(1),
+                    "Number of smoother steps.");
+  prm.declare_entry("solver",
+                    "MF",
+                    Patterns::Selection("MF|MB|AMG"),
+                    "Switch between matrix-free GMG, "
+                    "matrix-based GMG, and AMG.");
+  prm.declare_entry("output",
+                    "false",
+                    Patterns::Bool(),
+                    "Output graphical results.");
+
+  if (prm_filename.empty())
+    {
+      std::cout << "****  Error: No input file provided!\n"
+                << "****  Error: Call this program as './step-50 input.prm'\n"
+                << '\n'
+                << "****  You may want to use one of the input files in this\n"
+                << "****  directory, or use the following default values\n"
+                << "****  to create an input file:\n";
+      if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
+        prm.print_parameters(std::cout, ParameterHandler::Text);
+      return false;
+    }

+  try
+    {
+      prm.parse_input(prm_filename);
+    }
+  catch (std::exception &e)
+    {
+      if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
+        std::cerr << e.what() << std::endl;
+      return false;
+    }

+  if (prm.get("solver") == "MF")
+    this->solver = gmg_mf;
+  else if (prm.get("solver") == "MB")
+    this->solver = gmg_mb;
+  else if (prm.get("solver") == "AMG")
+    this->solver = amg;
+  else
+    AssertThrow(false, ExcNotImplemented());
+
+  this->dimension       = prm.get_integer("dim");
+  this->n_steps         = prm.get_integer("n_steps");
+  this->smoother_dampen = prm.get_double("smoother dampen");
+  this->smoother_steps  = prm.get_integer("smoother steps");
+  this->output          = prm.get_bool("output");
+
+  return true;
+}

-  // @sect3{LaplaceProblem class}

-  // This is the main class of the program. It looks very similar to
-  // step-16, step-37, and step-40. For the MatrixFree setup, we use the
-  // MatrixFreeOperators::LaplaceOperator class which defines `local_apply()`,
-  // `compute_diagonal()`, and `set_coefficient()` functions internally. Note
-  // that the polynomial degree is a template parameter of this class. This is
-  // necessary for the matrix-free code.
-  template <int dim, int degree>
-  class LaplaceProblem
-  {
-  public:
-    LaplaceProblem(const Settings &settings);
-    void run();
-
-  private:
-    // We will use the following types throughout the program. First the
-    // matrix-based types, after that the matrix-free classes. For the
-    // matrix-free implementation, we use @p float for the level operators. 
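The @p float here is a deliberate mixed-precision choice: the level operators only act inside the preconditioner, so single precision suffices and roughly halves their memory traffic, while the outer solve stays in double precision. Schematically, the only difference between the two operator aliases declared below is the scalar type of the vector they act on (alias names as in the class that follows):

    // Level (multigrid) operators: single precision, preconditioner only.
    //   ... LinearAlgebra::distributed::Vector<float> ...
    // Operator on the active mesh: double precision, used in the outer solve.
    //   ... LinearAlgebra::distributed::Vector<double> ...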
-    using MatrixType      = LA::MPI::SparseMatrix;
-    using VectorType      = LA::MPI::Vector;
-    using PreconditionAMG = LA::MPI::PreconditionAMG;
-
-    using MatrixFreeLevelMatrix = MatrixFreeOperators::LaplaceOperator<
-      dim,
-      degree,
-      degree + 1,
-      1,
-      LinearAlgebra::distributed::Vector<float>>;
-    using MatrixFreeActiveMatrix = MatrixFreeOperators::LaplaceOperator<
-      dim,
-      degree,
-      degree + 1,
-      1,
-      LinearAlgebra::distributed::Vector<double>>;
-
-    using MatrixFreeLevelVector  = LinearAlgebra::distributed::Vector<float>;
-    using MatrixFreeActiveVector = LinearAlgebra::distributed::Vector<double>;
-
-    void setup_system();
-    void setup_multigrid();
-    void assemble_system();
-    void assemble_multigrid();
-    void assemble_rhs();
-    void solve();
-    void estimate();
-    void refine_grid();
-    void output_results(const unsigned int cycle);
-
-    Settings settings;
-
-    MPI_Comm           mpi_communicator;
-    ConditionalOStream pcout;
-
-    parallel::distributed::Triangulation<dim> triangulation;
-    const MappingQ1<dim>                      mapping;
-    const FE_Q<dim>                           fe;
-
-    DoFHandler<dim> dof_handler;
-
-    IndexSet                  locally_owned_dofs;
-    IndexSet                  locally_relevant_dofs;
-    AffineConstraints<double> constraints;
-
-    MatrixType             system_matrix;
-    MatrixFreeActiveMatrix mf_system_matrix;
-    VectorType             solution;
-    VectorType             right_hand_side;
-    Vector<double>         estimated_error_square_per_cell;
-
-    MGLevelObject<MatrixType> mg_matrix;
-    MGLevelObject<MatrixType> mg_interface_in;
-    MGConstrainedDoFs         mg_constrained_dofs;
-
-    MGLevelObject<MatrixFreeLevelMatrix> mf_mg_matrix;
-
-    TimerOutput computing_timer;
-  };

-  // The only interesting part about the constructor is that we construct the
-  // multigrid hierarchy unless we use AMG. For that, we need to parse the
-  // run time parameters before this constructor completes.
-  template <int dim, int degree>
-  LaplaceProblem<dim, degree>::LaplaceProblem(const Settings &settings)
-    : settings(settings)
-    , mpi_communicator(MPI_COMM_WORLD)
-    , pcout(std::cout,
-            (Utilities::MPI::this_mpi_process(mpi_communicator) == 0))
-    , triangulation(
-        mpi_communicator,
-        Triangulation<dim>::limit_level_difference_at_vertices,
-        (settings.solver == Settings::amg) ?
-          parallel::distributed::Triangulation<dim>::default_setting :
-          parallel::distributed::Triangulation<
-            dim>::construct_multigrid_hierarchy)
-    , mapping()
-    , fe(degree)
-    , dof_handler(triangulation)
-    , computing_timer(pcout, TimerOutput::never, TimerOutput::wall_times)
-  {
-    GridGenerator::hyper_L(triangulation, -1., 1., /*colorize*/ false);
-    triangulation.refine_global(1);
-  }

+// @sect3{LaplaceProblem class}

+// This is the main class of the program. It looks very similar to
+// step-16, step-37, and step-40. For the MatrixFree setup, we use the
+// MatrixFreeOperators::LaplaceOperator class which defines `local_apply()`,
+// `compute_diagonal()`, and `set_coefficient()` functions internally. Note that
+// the polynomial degree is a template parameter of this class. 
This is
+// necessary for the matrix-free code.
+template <int dim, int degree>
+class LaplaceProblem
+{
+public:
+  LaplaceProblem(const Settings &settings);
+  void run();
+
+private:
+  // We will use the following types throughout the program. First the
+  // matrix-based types, after that the matrix-free classes. For the
+  // matrix-free implementation, we use @p float for the level operators.
+  using MatrixType      = LA::MPI::SparseMatrix;
+  using VectorType      = LA::MPI::Vector;
+  using PreconditionAMG = LA::MPI::PreconditionAMG;
+
+  using MatrixFreeLevelMatrix = MatrixFreeOperators::LaplaceOperator<
+    dim,
+    degree,
+    degree + 1,
+    1,
+    LinearAlgebra::distributed::Vector<float>>;
+  using MatrixFreeActiveMatrix = MatrixFreeOperators::LaplaceOperator<
+    dim,
+    degree,
+    degree + 1,
+    1,
+    LinearAlgebra::distributed::Vector<double>>;
+
+  using MatrixFreeLevelVector  = LinearAlgebra::distributed::Vector<float>;
+  using MatrixFreeActiveVector = LinearAlgebra::distributed::Vector<double>;
+
+  void setup_system();
+  void setup_multigrid();
+  void assemble_system();
+  void assemble_multigrid();
+  void assemble_rhs();
+  void solve();
+  void estimate();
+  void refine_grid();
+  void output_results(const unsigned int cycle);

+  Settings settings;

+  MPI_Comm           mpi_communicator;
+  ConditionalOStream pcout;
+
+  parallel::distributed::Triangulation<dim> triangulation;
+  const MappingQ1<dim>                      mapping;
+  const FE_Q<dim>                           fe;
+
+  DoFHandler<dim> dof_handler;
+
+  IndexSet                  locally_owned_dofs;
+  IndexSet                  locally_relevant_dofs;
+  AffineConstraints<double> constraints;
+
+  MatrixType             system_matrix;
+  MatrixFreeActiveMatrix mf_system_matrix;
+  VectorType             solution;
+  VectorType             right_hand_side;
+  Vector<double>         estimated_error_square_per_cell;
+
+  MGLevelObject<MatrixType> mg_matrix;
+  MGLevelObject<MatrixType> mg_interface_in;
+  MGConstrainedDoFs         mg_constrained_dofs;
+
+  MGLevelObject<MatrixFreeLevelMatrix> mf_mg_matrix;
+
+  TimerOutput computing_timer;
+};


+// The only interesting part about the constructor is that we construct the
+// multigrid hierarchy unless we use AMG. For that, we need to parse the
+// run time parameters before this constructor completes.
+template <int dim, int degree>
+LaplaceProblem<dim, degree>::LaplaceProblem(const Settings &settings)
+  : settings(settings)
+  , mpi_communicator(MPI_COMM_WORLD)
+  , pcout(std::cout, (Utilities::MPI::this_mpi_process(mpi_communicator) == 0))
+  , triangulation(mpi_communicator,
+                  Triangulation<dim>::limit_level_difference_at_vertices,
+                  (settings.solver == Settings::amg) ? 
+                    parallel::distributed::Triangulation<dim>::default_setting :
+                    parallel::distributed::Triangulation<
+                      dim>::construct_multigrid_hierarchy)
+  , mapping()
+  , fe(degree)
+  , dof_handler(triangulation)
+  , computing_timer(pcout, TimerOutput::never, TimerOutput::wall_times)
+{
+  GridGenerator::hyper_L(triangulation, -1., 1., /*colorize*/ false);
+  triangulation.refine_global(1);
+}

-  // @sect4{LaplaceProblem::setup_system()}

-  // Unlike step-16 and step-37, we split the set up into two parts,
-  // setup_system() and setup_multigrid(). Here is the typical setup_system()
-  // function for the active mesh found in most tutorials. For matrix-free, the
-  // active mesh set up is similar to step-37; for matrix-based (GMG and AMG
-  // solvers), the setup is similar to step-40.
-  template <int dim, int degree>
-  void LaplaceProblem<dim, degree>::setup_system()
-  {
-    TimerOutput::Scope timing(computing_timer, "Setup");

-    dof_handler.distribute_dofs(fe);

-    locally_relevant_dofs =
-      DoFTools::extract_locally_relevant_dofs(dof_handler);
-    locally_owned_dofs = dof_handler.locally_owned_dofs();

-    solution.reinit(locally_owned_dofs, mpi_communicator);
-    right_hand_side.reinit(locally_owned_dofs, mpi_communicator);
-    constraints.reinit(locally_owned_dofs, locally_relevant_dofs);
-    DoFTools::make_hanging_node_constraints(dof_handler, constraints);

-    VectorTools::interpolate_boundary_values(
-      mapping, dof_handler, 0, Functions::ZeroFunction<dim>(), constraints);
-    constraints.close();

-    switch (settings.solver)
-      {
-        case Settings::gmg_mf:
-          {
-            typename MatrixFree<dim, double>::AdditionalData additional_data;
-            additional_data.tasks_parallel_scheme =
-              MatrixFree<dim, double>::AdditionalData::none;
-            additional_data.mapping_update_flags =
-              (update_gradients | update_JxW_values | update_quadrature_points);
-            std::shared_ptr<MatrixFree<dim, double>> mf_storage =
-              std::make_shared<MatrixFree<dim, double>>();
-            mf_storage->reinit(mapping,
-                               dof_handler,
-                               constraints,
-                               QGauss<1>(degree + 1),
-                               additional_data);
-
-            mf_system_matrix.initialize(mf_storage);
-
-            const Coefficient<dim> coefficient;
-            mf_system_matrix.set_coefficient(
-              coefficient.make_coefficient_table(*mf_storage));

-            break;
-          }

-        case Settings::gmg_mb:
-        case Settings::amg:
-          {
 #ifdef USE_PETSC_LA
-            DynamicSparsityPattern dsp(locally_relevant_dofs);
-            DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints);
-
-            SparsityTools::distribute_sparsity_pattern(dsp,
-                                                       locally_owned_dofs,
-                                                       mpi_communicator,
-                                                       locally_relevant_dofs);
-
-            system_matrix.reinit(locally_owned_dofs,
-                                 locally_owned_dofs,
-                                 dsp,
-                                 mpi_communicator);
 #else
-            TrilinosWrappers::SparsityPattern dsp(locally_owned_dofs,
-                                                  locally_owned_dofs,
-                                                  locally_relevant_dofs,
-                                                  mpi_communicator);
-            DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints);
-            dsp.compress();
-            system_matrix.reinit(dsp);
 #endif
-            break;
-          }

-        default:
-          DEAL_II_NOT_IMPLEMENTED();
-      }
-  }

+// @sect4{LaplaceProblem::setup_system()}

+// Unlike step-16 and step-37, we split the set up into two parts,
+// setup_system() and setup_multigrid(). Here is the typical setup_system()
+// function for the active mesh found in most tutorials. For matrix-free, the
+// active mesh set up is similar to step-37; for matrix-based (GMG and AMG
+// solvers), the setup is similar to step-40. 
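How the two setup functions divide the work is easiest to see from the call site. A condensed sketch of what the run() loop (not shown in this excerpt) does per refinement cycle, assuming only the GMG variants need level data:

    setup_system();          // active mesh: DoFs, constraints, system objects
    if (settings.solver == Settings::gmg_mf ||
        settings.solver == Settings::gmg_mb)
      setup_multigrid();     // additionally: level DoFs and level matrices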
+template <int dim, int degree>
+void LaplaceProblem<dim, degree>::setup_system()
+{
+  TimerOutput::Scope timing(computing_timer, "Setup");

+  dof_handler.distribute_dofs(fe);

+  locally_relevant_dofs = DoFTools::extract_locally_relevant_dofs(dof_handler);
+  locally_owned_dofs = dof_handler.locally_owned_dofs();
+
+  solution.reinit(locally_owned_dofs, mpi_communicator);
+  right_hand_side.reinit(locally_owned_dofs, mpi_communicator);
+  constraints.reinit(locally_owned_dofs, locally_relevant_dofs);
+  DoFTools::make_hanging_node_constraints(dof_handler, constraints);
+
+  VectorTools::interpolate_boundary_values(
+    mapping, dof_handler, 0, Functions::ZeroFunction<dim>(), constraints);
+  constraints.close();
+
+  switch (settings.solver)
+    {
+      case Settings::gmg_mf:
+        {
+          typename MatrixFree<dim, double>::AdditionalData additional_data;
+          additional_data.tasks_parallel_scheme =
+            MatrixFree<dim, double>::AdditionalData::none;
+          additional_data.mapping_update_flags =
+            (update_gradients | update_JxW_values | update_quadrature_points);
+          std::shared_ptr<MatrixFree<dim, double>> mf_storage =
+            std::make_shared<MatrixFree<dim, double>>();
+          mf_storage->reinit(mapping,
+                             dof_handler,
+                             constraints,
+                             QGauss<1>(degree + 1),
+                             additional_data);
+
+          mf_system_matrix.initialize(mf_storage);
+
+          const Coefficient<dim> coefficient;
+          mf_system_matrix.set_coefficient(
+            coefficient.make_coefficient_table(*mf_storage));
+
+          break;
+        }
+
+      case Settings::gmg_mb:
+      case Settings::amg:
+        {
 #ifdef USE_PETSC_LA
+          DynamicSparsityPattern dsp(locally_relevant_dofs);
+          DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints);
+
+          SparsityTools::distribute_sparsity_pattern(dsp,
+                                                     locally_owned_dofs,
+                                                     mpi_communicator,
+                                                     locally_relevant_dofs);
+
+          system_matrix.reinit(locally_owned_dofs,
+                               locally_owned_dofs,
+                               dsp,
+                               mpi_communicator);
 #else
+          TrilinosWrappers::SparsityPattern dsp(locally_owned_dofs,
+                                                locally_owned_dofs,
+                                                locally_relevant_dofs,
+                                                mpi_communicator);
+          DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints);
+          dsp.compress();
+          system_matrix.reinit(dsp);
 #endif
+          break;
+        }

+      default:
+        DEAL_II_NOT_IMPLEMENTED();
+    }
+}

-  // @sect4{LaplaceProblem::setup_multigrid()}
-
-  // This function does the multilevel setup for both matrix-free and
-  // matrix-based GMG. The matrix-free setup is similar to that of step-37, and
-  // the matrix-based is similar to step-16, except we must use appropriate
-  // distributed sparsity patterns.
-  //
-  // The function is not called for the AMG approach, but to err on the
-  // safe side, the main `switch` statement of this function
-  // nevertheless makes sure that the function only operates on known
-  // multigrid settings by throwing an assertion if the function were
-  // called for anything other than the two geometric multigrid methods. 
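That assertion is nothing more than a default: branch that aborts, the same defensive pattern used in setup_system() above; schematically:

    switch (settings.solver)
      {
        case Settings::gmg_mf: /* matrix-free level setup */  break;
        case Settings::gmg_mb: /* matrix-based level setup */ break;
        default:
          DEAL_II_NOT_IMPLEMENTED(); // reached e.g. for the AMG settings
      }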
-  template <int dim, int degree>
-  void LaplaceProblem<dim, degree>::setup_multigrid()
-  {
-    TimerOutput::Scope timing(computing_timer, "Setup multigrid");

-    dof_handler.distribute_mg_dofs();

-    mg_constrained_dofs.clear();
-    mg_constrained_dofs.initialize(dof_handler);

-    const std::set<types::boundary_id> boundary_ids = {types::boundary_id(0)};
-    mg_constrained_dofs.make_zero_boundary_constraints(dof_handler,
-                                                       boundary_ids);

-    const unsigned int n_levels = triangulation.n_global_levels();

-    switch (settings.solver)
-      {
-        case Settings::gmg_mf:
-          {
-            mf_mg_matrix.resize(0, n_levels - 1);

-            for (unsigned int level = 0; level < n_levels; ++level)
-              {
-                AffineConstraints<double> level_constraints(
-                  dof_handler.locally_owned_mg_dofs(level),
-                  DoFTools::extract_locally_relevant_level_dofs(dof_handler,
-                                                                level));
-                for (const types::global_dof_index dof_index :
-                     mg_constrained_dofs.get_boundary_indices(level))
-                  level_constraints.constrain_dof_to_zero(dof_index);
-                level_constraints.close();
-
-                typename MatrixFree<dim, float>::AdditionalData additional_data;
-                additional_data.tasks_parallel_scheme =
-                  MatrixFree<dim, float>::AdditionalData::none;
-                additional_data.mapping_update_flags =
-                  (update_gradients | update_JxW_values |
-                   update_quadrature_points);
-                additional_data.mg_level = level;
-                std::shared_ptr<MatrixFree<dim, float>> mf_storage_level(
-                  new MatrixFree<dim, float>());
-                mf_storage_level->reinit(mapping,
-                                         dof_handler,
-                                         level_constraints,
-                                         QGauss<1>(degree + 1),
-                                         additional_data);
-
-                mf_mg_matrix[level].initialize(mf_storage_level,
-                                               mg_constrained_dofs,
-                                               level);
-
-                const Coefficient<dim> coefficient;
-                mf_mg_matrix[level].set_coefficient(
-                  coefficient.make_coefficient_table(*mf_storage_level));
-
-                mf_mg_matrix[level].compute_diagonal();
-              }

-            break;
-          }

-        case Settings::gmg_mb:
-          {
-            mg_matrix.resize(0, n_levels - 1);
-            mg_matrix.clear_elements();
-            mg_interface_in.resize(0, n_levels - 1);
-            mg_interface_in.clear_elements();

-            for (unsigned int level = 0; level < n_levels; ++level)
-              {
-                const IndexSet dof_set =
-                  DoFTools::extract_locally_relevant_level_dofs(dof_handler,
-                                                                level);

-                {
 #ifdef USE_PETSC_LA
-                  DynamicSparsityPattern dsp(dof_set);
-                  MGTools::make_sparsity_pattern(dof_handler, dsp, level);
-                  dsp.compress();
-                  SparsityTools::distribute_sparsity_pattern(
-                    dsp,
-                    dof_handler.locally_owned_mg_dofs(level),
-                    mpi_communicator,
-                    dof_set);
-
-                  mg_matrix[level].reinit(
-                    dof_handler.locally_owned_mg_dofs(level),
-                    dof_handler.locally_owned_mg_dofs(level),
-                    dsp,
-                    mpi_communicator);
 #else
-                  TrilinosWrappers::SparsityPattern dsp(
-                    dof_handler.locally_owned_mg_dofs(level),
-                    dof_handler.locally_owned_mg_dofs(level),
-                    dof_set,
-                    mpi_communicator);
-                  MGTools::make_sparsity_pattern(dof_handler, dsp, level);
-
-                  dsp.compress();
-                  mg_matrix[level].reinit(dsp);
 #endif
-                }

-                {
 #ifdef USE_PETSC_LA
-                  DynamicSparsityPattern dsp(dof_set);
-                  MGTools::make_interface_sparsity_pattern(dof_handler,
-                                                           mg_constrained_dofs,
-                                                           dsp,
-                                                           level);
-                  dsp.compress();
-                  SparsityTools::distribute_sparsity_pattern(
-                    dsp,
-                    dof_handler.locally_owned_mg_dofs(level),
-                    mpi_communicator,
-                    dof_set);
-
-                  mg_interface_in[level].reinit(
-                    dof_handler.locally_owned_mg_dofs(level),
-                    dof_handler.locally_owned_mg_dofs(level),
-                    dsp,
-                    mpi_communicator);
 #else
-                  TrilinosWrappers::SparsityPattern dsp(
-                    dof_handler.locally_owned_mg_dofs(level),
-                    dof_handler.locally_owned_mg_dofs(level),
-                    dof_set,
-                    mpi_communicator);
-
-                  MGTools::make_interface_sparsity_pattern(dof_handler,
-                                                           mg_constrained_dofs,
-                                                           dsp,
-                                                           level);
-                  dsp.compress();
-                  mg_interface_in[level].reinit(dsp);
 #endif
-                }
-              }
-            break;
-          }

-        default:
-          DEAL_II_NOT_IMPLEMENTED();
-      }
-  }

+template <int dim, int degree>
+void LaplaceProblem<dim, degree>::setup_multigrid()
+{
+  TimerOutput::Scope timing(computing_timer, "Setup multigrid");

+  dof_handler.distribute_mg_dofs();

+  mg_constrained_dofs.clear();
+  mg_constrained_dofs.initialize(dof_handler);

+  const std::set<types::boundary_id> boundary_ids = {types::boundary_id(0)};
+  mg_constrained_dofs.make_zero_boundary_constraints(dof_handler, boundary_ids);

+  const unsigned int n_levels = triangulation.n_global_levels();

+  switch (settings.solver)
+    {
+      case Settings::gmg_mf:
+        {
+          mf_mg_matrix.resize(0, n_levels - 1);

+          for (unsigned int level = 0; level < n_levels; ++level)
+            {
+              AffineConstraints<double> level_constraints(
+                dof_handler.locally_owned_mg_dofs(level),
+                DoFTools::extract_locally_relevant_level_dofs(dof_handler,
+                                                              level));
+              for (const types::global_dof_index dof_index :
+                   mg_constrained_dofs.get_boundary_indices(level))
+                level_constraints.constrain_dof_to_zero(dof_index);
+              level_constraints.close();
+
+              typename MatrixFree<dim, float>::AdditionalData additional_data;
+              additional_data.tasks_parallel_scheme =
+                MatrixFree<dim, float>::AdditionalData::none;
+              additional_data.mapping_update_flags =
+                (update_gradients | update_JxW_values |
+                 update_quadrature_points);
+              additional_data.mg_level = level;
+              std::shared_ptr<MatrixFree<dim, float>> mf_storage_level(
+                new MatrixFree<dim, float>());
+              mf_storage_level->reinit(mapping,
+                                       dof_handler,
+                                       level_constraints,
+                                       QGauss<1>(degree + 1),
+                                       additional_data);
+
+              mf_mg_matrix[level].initialize(mf_storage_level,
+                                             mg_constrained_dofs,
+                                             level);
+
+              const Coefficient<dim> coefficient;
+              mf_mg_matrix[level].set_coefficient(
+                coefficient.make_coefficient_table(*mf_storage_level));
+
+              mf_mg_matrix[level].compute_diagonal();
+            }

+          break;
+        }

+      case Settings::gmg_mb:
+        {
+          mg_matrix.resize(0, n_levels - 1);
+          mg_matrix.clear_elements();
+          mg_interface_in.resize(0, n_levels - 1);
+          mg_interface_in.clear_elements();

+          for (unsigned int level = 0; level < n_levels; ++level)
+            {
+              const IndexSet dof_set =
+                DoFTools::extract_locally_relevant_level_dofs(dof_handler,
+                                                              level);

+              {
 #ifdef USE_PETSC_LA
+                DynamicSparsityPattern dsp(dof_set);
+                MGTools::make_sparsity_pattern(dof_handler, dsp, level);
+                dsp.compress();
+                SparsityTools::distribute_sparsity_pattern(
+                  dsp,
+                  dof_handler.locally_owned_mg_dofs(level),
+                  mpi_communicator,
+                  dof_set);
+
+                mg_matrix[level].reinit(
+                  dof_handler.locally_owned_mg_dofs(level),
+                  dof_handler.locally_owned_mg_dofs(level),
+                  dsp,
+                  mpi_communicator);
 #else
+                TrilinosWrappers::SparsityPattern dsp(
+                  dof_handler.locally_owned_mg_dofs(level),
+                  dof_handler.locally_owned_mg_dofs(level),
+                  dof_set,
+                  mpi_communicator);
+                MGTools::make_sparsity_pattern(dof_handler, dsp, level);
+
+                dsp.compress();
+                mg_matrix[level].reinit(dsp);
 #endif
+              }

+              {
 #ifdef USE_PETSC_LA
+                DynamicSparsityPattern dsp(dof_set);
+                MGTools::make_interface_sparsity_pattern(dof_handler,
+                                                         mg_constrained_dofs,
+                                                         dsp,
+                                                         level);
+                dsp.compress();
+                SparsityTools::distribute_sparsity_pattern(
+                  dsp,
+                  dof_handler.locally_owned_mg_dofs(level),
+                  mpi_communicator,
+                  dof_set);
+
+                mg_interface_in[level].reinit(
+                  dof_handler.locally_owned_mg_dofs(level),
+                  dof_handler.locally_owned_mg_dofs(level),
+                  dsp,
+                  mpi_communicator);
 #else
+                TrilinosWrappers::SparsityPattern dsp(
+                  dof_handler.locally_owned_mg_dofs(level),
+                  dof_handler.locally_owned_mg_dofs(level),
+                  dof_set,
+                  mpi_communicator);
+
- MGTools::make_interface_sparsity_pattern(dof_handler, - mg_constrained_dofs, - dsp, - level); - dsp.compress(); - mg_interface_in[level].reinit(dsp); + TrilinosWrappers::SparsityPattern dsp( + dof_handler.locally_owned_mg_dofs(level), + dof_handler.locally_owned_mg_dofs(level), + dof_set, + mpi_communicator); + + MGTools::make_interface_sparsity_pattern(dof_handler, + mg_constrained_dofs, + dsp, + level); + dsp.compress(); + mg_interface_in[level].reinit(dsp); #endif - } } - break; - } - - default: - DEAL_II_NOT_IMPLEMENTED(); - } - } - - - // @sect4{LaplaceProblem::assemble_system()} + } + break; + } - // The assembly is split into three parts: `assemble_system()`, - // `assemble_multigrid()`, and `assemble_rhs()`. The - // `assemble_system()` function here assembles and stores the (global) - // system matrix and the right-hand side for the matrix-based - // methods. It is similar to the assembly in step-40. - // - // Note that the matrix-free method does not execute this function as it does - // not need to assemble a matrix, and it will instead assemble the right-hand - // side in assemble_rhs(). - template - void LaplaceProblem::assemble_system() - { - TimerOutput::Scope timing(computing_timer, "Assemble"); + default: + DEAL_II_NOT_IMPLEMENTED(); + } +} - const QGauss quadrature_formula(degree + 1); - FEValues fe_values(fe, - quadrature_formula, - update_values | update_gradients | - update_quadrature_points | update_JxW_values); +// @sect4{LaplaceProblem::assemble_system()} - const unsigned int dofs_per_cell = fe.n_dofs_per_cell(); - const unsigned int n_q_points = quadrature_formula.size(); +// The assembly is split into three parts: `assemble_system()`, +// `assemble_multigrid()`, and `assemble_rhs()`. The +// `assemble_system()` function here assembles and stores the (global) +// system matrix and the right-hand side for the matrix-based +// methods. It is similar to the assembly in step-40. +// +// Note that the matrix-free method does not execute this function as it does +// not need to assemble a matrix, and it will instead assemble the right-hand +// side in assemble_rhs(). 
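// In formulas, what `assemble_system()` below builds cell by cell are the
// quantities annotated in its inner loop: the matrix entries and right-hand
// side
// @f[
//   A_{ij} = \int_\Omega \epsilon(x) \, \nabla \varphi_i \cdot \nabla \varphi_j \, dx,
//   \qquad
//   F_i = \int_\Omega f(x) \, \varphi_i \, dx,
// @f]
// approximated by a quadrature sum over the points of the QGauss formula
// (hence the `fe_values.JxW(q_point)` factor), and with $\epsilon$ replaced
// by its per-cell average as computed by `coefficient.average_value()`.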
+template +void LaplaceProblem::assemble_system() +{ + TimerOutput::Scope timing(computing_timer, "Assemble"); - FullMatrix cell_matrix(dofs_per_cell, dofs_per_cell); - Vector cell_rhs(dofs_per_cell); + const QGauss quadrature_formula(degree + 1); - std::vector local_dof_indices(dofs_per_cell); + FEValues fe_values(fe, + quadrature_formula, + update_values | update_gradients | + update_quadrature_points | update_JxW_values); - const Coefficient coefficient; - RightHandSide rhs; - std::vector rhs_values(n_q_points); + const unsigned int dofs_per_cell = fe.n_dofs_per_cell(); + const unsigned int n_q_points = quadrature_formula.size(); - for (const auto &cell : dof_handler.active_cell_iterators()) - if (cell->is_locally_owned()) - { - cell_matrix = 0; - cell_rhs = 0; + FullMatrix cell_matrix(dofs_per_cell, dofs_per_cell); + Vector cell_rhs(dofs_per_cell); - fe_values.reinit(cell); + std::vector local_dof_indices(dofs_per_cell); - const double coefficient_value = - coefficient.average_value(fe_values.get_quadrature_points()); - rhs.value_list(fe_values.get_quadrature_points(), rhs_values); + const Coefficient coefficient; + RightHandSide rhs; + std::vector rhs_values(n_q_points); - for (unsigned int q_point = 0; q_point < n_q_points; ++q_point) - for (unsigned int i = 0; i < dofs_per_cell; ++i) - { - for (unsigned int j = 0; j < dofs_per_cell; ++j) - cell_matrix(i, j) += - coefficient_value * // epsilon(x) - fe_values.shape_grad(i, q_point) * // * grad phi_i(x) - fe_values.shape_grad(j, q_point) * // * grad phi_j(x) - fe_values.JxW(q_point); // * dx - - cell_rhs(i) += - fe_values.shape_value(i, q_point) * // grad phi_i(x) - rhs_values[q_point] * // * f(x) - fe_values.JxW(q_point); // * dx - } + for (const auto &cell : dof_handler.active_cell_iterators()) + if (cell->is_locally_owned()) + { + cell_matrix = 0; + cell_rhs = 0; - cell->get_dof_indices(local_dof_indices); - constraints.distribute_local_to_global(cell_matrix, - cell_rhs, - local_dof_indices, - system_matrix, - right_hand_side); - } + fe_values.reinit(cell); - system_matrix.compress(VectorOperation::add); - right_hand_side.compress(VectorOperation::add); - } + const double coefficient_value = + coefficient.average_value(fe_values.get_quadrature_points()); + rhs.value_list(fe_values.get_quadrature_points(), rhs_values); + for (unsigned int q_point = 0; q_point < n_q_points; ++q_point) + for (unsigned int i = 0; i < dofs_per_cell; ++i) + { + for (unsigned int j = 0; j < dofs_per_cell; ++j) + cell_matrix(i, j) += + coefficient_value * // epsilon(x) + fe_values.shape_grad(i, q_point) * // * grad phi_i(x) + fe_values.shape_grad(j, q_point) * // * grad phi_j(x) + fe_values.JxW(q_point); // * dx + + cell_rhs(i) += + fe_values.shape_value(i, q_point) * // grad phi_i(x) + rhs_values[q_point] * // * f(x) + fe_values.JxW(q_point); // * dx + } - // @sect4{LaplaceProblem::assemble_multigrid()} + cell->get_dof_indices(local_dof_indices); + constraints.distribute_local_to_global(cell_matrix, + cell_rhs, + local_dof_indices, + system_matrix, + right_hand_side); + } - // The following function assembles and stores the multilevel matrices for the - // matrix-based GMG method. This function is similar to the one found in - // step-16, only here it works for distributed meshes. This difference amounts - // to adding a condition that we only assemble on locally owned level cells - // and a call to compress() for each matrix that is built. 
- template - void LaplaceProblem::assemble_multigrid() - { - TimerOutput::Scope timing(computing_timer, "Assemble multigrid"); + system_matrix.compress(VectorOperation::add); + right_hand_side.compress(VectorOperation::add); +} - const QGauss quadrature_formula(degree + 1); - FEValues fe_values(fe, - quadrature_formula, - update_values | update_gradients | - update_quadrature_points | update_JxW_values); +// @sect4{LaplaceProblem::assemble_multigrid()} - const unsigned int dofs_per_cell = fe.n_dofs_per_cell(); - const unsigned int n_q_points = quadrature_formula.size(); +// The following function assembles and stores the multilevel matrices for the +// matrix-based GMG method. This function is similar to the one found in +// step-16, only here it works for distributed meshes. This difference amounts +// to adding a condition that we only assemble on locally owned level cells and +// a call to compress() for each matrix that is built. +template +void LaplaceProblem::assemble_multigrid() +{ + TimerOutput::Scope timing(computing_timer, "Assemble multigrid"); - FullMatrix cell_matrix(dofs_per_cell, dofs_per_cell); + const QGauss quadrature_formula(degree + 1); - std::vector local_dof_indices(dofs_per_cell); + FEValues fe_values(fe, + quadrature_formula, + update_values | update_gradients | + update_quadrature_points | update_JxW_values); - const Coefficient coefficient; + const unsigned int dofs_per_cell = fe.n_dofs_per_cell(); + const unsigned int n_q_points = quadrature_formula.size(); - std::vector> boundary_constraints( - triangulation.n_global_levels()); - for (unsigned int level = 0; level < triangulation.n_global_levels(); - ++level) - { - boundary_constraints[level].reinit( - dof_handler.locally_owned_mg_dofs(level), - DoFTools::extract_locally_relevant_level_dofs(dof_handler, level)); - - for (const types::global_dof_index dof_index : - mg_constrained_dofs.get_refinement_edge_indices(level)) - boundary_constraints[level].constrain_dof_to_zero(dof_index); - for (const types::global_dof_index dof_index : - mg_constrained_dofs.get_boundary_indices(level)) - boundary_constraints[level].constrain_dof_to_zero(dof_index); - boundary_constraints[level].close(); - } + FullMatrix cell_matrix(dofs_per_cell, dofs_per_cell); - for (const auto &cell : dof_handler.cell_iterators()) - if (cell->level_subdomain_id() == triangulation.locally_owned_subdomain()) - { - cell_matrix = 0; - fe_values.reinit(cell); + std::vector local_dof_indices(dofs_per_cell); - const double coefficient_value = - coefficient.average_value(fe_values.get_quadrature_points()); + const Coefficient coefficient; - for (unsigned int q_point = 0; q_point < n_q_points; ++q_point) - for (unsigned int i = 0; i < dofs_per_cell; ++i) - for (unsigned int j = 0; j < dofs_per_cell; ++j) - cell_matrix(i, j) += - coefficient_value * fe_values.shape_grad(i, q_point) * - fe_values.shape_grad(j, q_point) * fe_values.JxW(q_point); + std::vector> boundary_constraints( + triangulation.n_global_levels()); + for (unsigned int level = 0; level < triangulation.n_global_levels(); ++level) + { + boundary_constraints[level].reinit( + dof_handler.locally_owned_mg_dofs(level), + DoFTools::extract_locally_relevant_level_dofs(dof_handler, level)); + + for (const types::global_dof_index dof_index : + mg_constrained_dofs.get_refinement_edge_indices(level)) + boundary_constraints[level].constrain_dof_to_zero(dof_index); + for (const types::global_dof_index dof_index : + mg_constrained_dofs.get_boundary_indices(level)) + 
boundary_constraints[level].constrain_dof_to_zero(dof_index); + boundary_constraints[level].close(); + } - cell->get_mg_dof_indices(local_dof_indices); + for (const auto &cell : dof_handler.cell_iterators()) + if (cell->level_subdomain_id() == triangulation.locally_owned_subdomain()) + { + cell_matrix = 0; + fe_values.reinit(cell); - boundary_constraints[cell->level()].distribute_local_to_global( - cell_matrix, local_dof_indices, mg_matrix[cell->level()]); + const double coefficient_value = + coefficient.average_value(fe_values.get_quadrature_points()); + for (unsigned int q_point = 0; q_point < n_q_points; ++q_point) for (unsigned int i = 0; i < dofs_per_cell; ++i) for (unsigned int j = 0; j < dofs_per_cell; ++j) - if (mg_constrained_dofs.is_interface_matrix_entry( - cell->level(), local_dof_indices[i], local_dof_indices[j])) - mg_interface_in[cell->level()].add(local_dof_indices[i], - local_dof_indices[j], - cell_matrix(i, j)); - } - - for (unsigned int i = 0; i < triangulation.n_global_levels(); ++i) - { - mg_matrix[i].compress(VectorOperation::add); - mg_interface_in[i].compress(VectorOperation::add); + cell_matrix(i, j) += + coefficient_value * fe_values.shape_grad(i, q_point) * + fe_values.shape_grad(j, q_point) * fe_values.JxW(q_point); + + cell->get_mg_dof_indices(local_dof_indices); + + boundary_constraints[cell->level()].distribute_local_to_global( + cell_matrix, local_dof_indices, mg_matrix[cell->level()]); + + for (unsigned int i = 0; i < dofs_per_cell; ++i) + for (unsigned int j = 0; j < dofs_per_cell; ++j) + if (mg_constrained_dofs.is_interface_matrix_entry( + cell->level(), local_dof_indices[i], local_dof_indices[j])) + mg_interface_in[cell->level()].add(local_dof_indices[i], + local_dof_indices[j], + cell_matrix(i, j)); } - } + for (unsigned int i = 0; i < triangulation.n_global_levels(); ++i) + { + mg_matrix[i].compress(VectorOperation::add); + mg_interface_in[i].compress(VectorOperation::add); + } +} - // @sect4{LaplaceProblem::assemble_rhs()} - - // The final function in this triptych assembles the right-hand side - // vector for the matrix-free method -- because in the matrix-free - // framework, we don't have to assemble the matrix and can get away - // with only assembling the right hand side. We could do this by extracting - // the code from the `assemble_system()` function above that deals with the - // right hand side, but we decide instead to go all in on the matrix-free - // approach and do the assembly using that way as well. - // - // The result is a function that is similar - // to the one found in the "Use FEEvaluation::read_dof_values_plain() - // to avoid resolving constraints" subsection in the "Possibilities - // for extensions" section of step-37. - // - // The reason for this function is that the MatrixFree operators do not take - // into account non-homogeneous Dirichlet constraints, instead treating all - // Dirichlet constraints as homogeneous. To account for this, the right-hand - // side here is assembled as the residual $r_0 = f-Au_0$, where $u_0$ is a - // zero vector except in the Dirichlet values. Then when solving, we have that - // the solution is $u = u_0 + A^{-1}r_0$. This can be seen as a Newton - // iteration on a linear system with initial guess $u_0$. The CG solve in the - // `solve()` function below computes $A^{-1}r_0$ and the call to - // `constraints.distribute()` (which directly follows) adds the $u_0$. 
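// To spell out the algebra of the preceding paragraph: writing the solution
// as $u = u_0 + w$, where $u_0$ carries the Dirichlet values and $w$ vanishes
// on the Dirichlet boundary, the system $Au = f$ turns into
// @f[
//   A w = f - A u_0 = r_0,
//   \qquad \text{and therefore} \qquad
//   u = u_0 + A^{-1} r_0 .
// @f]
// The CG solver consequently only ever sees the homogeneous problem for $w$,
// which is exactly the kind of problem the matrix-free operator, with its
// homogeneous treatment of constraints, is able to apply.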
-  //
-  // Obviously, since we are considering a problem with zero Dirichlet boundary,
-  // we could have taken a similar approach to step-37 `assemble_rhs()`, but
-  // this additional work allows us to change the problem declaration if we so
-  // choose.
-  //
-  // This function has two parts in the integration loop: applying the negative
-  // of matrix $A$ to $u_0$ by submitting the negative of the gradient, and
-  // adding the right-hand side contribution by submitting the value $f$. We
-  // must be sure to use `read_dof_values_plain()` for evaluating $u_0$ as
-  // `read_dof_values()` would set all Dirichlet values to zero.
-  //
-  // Finally, the system_rhs vector is of type LA::MPI::Vector, but the
-  // MatrixFree class only works for
-  // LinearAlgebra::distributed::Vector. Therefore we must
-  // compute the right-hand side using MatrixFree functionality and then
-  // use the functions in the `ChangeVectorTypes` namespace to copy it to
-  // the correct type.
-  template <int dim, int degree>
-  void LaplaceProblem<dim, degree>::assemble_rhs()
-  {
-    TimerOutput::Scope timing(computing_timer, "Assemble right-hand side");
+// The final function in this triptych assembles the right-hand side
+// vector for the matrix-free method -- because in the matrix-free
+// framework, we don't have to assemble the matrix and can get away
+// with only assembling the right hand side. We could do this by extracting the
+// code from the `assemble_system()` function above that deals with the right
+// hand side, but we decide instead to go all in on the matrix-free approach and
+// do the assembly using that way as well.
+//
+// The result is a function that is similar
+// to the one found in the "Use FEEvaluation::read_dof_values_plain()
+// to avoid resolving constraints" subsection in the "Possibilities
+// for extensions" section of step-37.
+//
+// The reason for this function is that the MatrixFree operators do not take
+// into account non-homogeneous Dirichlet constraints, instead treating all
+// Dirichlet constraints as homogeneous. To account for this, the right-hand
+// side here is assembled as the residual $r_0 = f-Au_0$, where $u_0$ is a
+// zero vector except in the Dirichlet values. Then when solving, we have that
+// the solution is $u = u_0 + A^{-1}r_0$. This can be seen as a Newton
+// iteration on a linear system with initial guess $u_0$. The CG solve in the
+// `solve()` function below computes $A^{-1}r_0$ and the call to
+// `constraints.distribute()` (which directly follows) adds the $u_0$.
+//
+// Obviously, since we are considering a problem with zero Dirichlet boundary,
+// we could have taken a similar approach to step-37 `assemble_rhs()`, but this
+// additional work allows us to change the problem declaration if we so
+// choose.
+//
+// This function has two parts in the integration loop: applying the negative
+// of matrix $A$ to $u_0$ by submitting the negative of the gradient, and adding
+// the right-hand side contribution by submitting the value $f$. We must be sure
+// to use `read_dof_values_plain()` for evaluating $u_0$ as `read_dof_values()`
+// would set all Dirichlet values to zero.
+//
+// Finally, the system_rhs vector is of type LA::MPI::Vector, but the
+// MatrixFree class only works for
+// LinearAlgebra::distributed::Vector. Therefore we must
+// compute the right-hand side using MatrixFree functionality and then
+// use the functions in the `ChangeVectorTypes` namespace to copy it to
+// the correct type.
+template <int dim, int degree>
+void LaplaceProblem<dim, degree>::assemble_rhs()
+{
+  TimerOutput::Scope timing(computing_timer, "Assemble right-hand side");

-    MatrixFreeActiveVector solution_copy;
-    MatrixFreeActiveVector right_hand_side_copy;
-    mf_system_matrix.initialize_dof_vector(solution_copy);
-    mf_system_matrix.initialize_dof_vector(right_hand_side_copy);
+  MatrixFreeActiveVector solution_copy;
+  MatrixFreeActiveVector right_hand_side_copy;
+  mf_system_matrix.initialize_dof_vector(solution_copy);
+  mf_system_matrix.initialize_dof_vector(right_hand_side_copy);

-    solution_copy = 0.;
-    constraints.distribute(solution_copy);
-    solution_copy.update_ghost_values();
-    right_hand_side_copy = 0;
-    const Table<2, VectorizedArray<double>> &coefficient =
-      *(mf_system_matrix.get_coefficient());
+  solution_copy = 0.;
+  constraints.distribute(solution_copy);
+  solution_copy.update_ghost_values();
+  right_hand_side_copy = 0;
+  const Table<2, VectorizedArray<double>> &coefficient =
+    *(mf_system_matrix.get_coefficient());

-    RightHandSide<dim> right_hand_side_function;
+  RightHandSide<dim> right_hand_side_function;

-    FEEvaluation<dim, degree, degree + 1, 1, double> phi(
-      *mf_system_matrix.get_matrix_free());
+  FEEvaluation<dim, degree, degree + 1, 1, double> phi(
+    *mf_system_matrix.get_matrix_free());

-    for (unsigned int cell = 0;
-         cell < mf_system_matrix.get_matrix_free()->n_cell_batches();
-         ++cell)
-      {
-        phi.reinit(cell);
-        phi.read_dof_values_plain(solution_copy);
-        phi.evaluate(EvaluationFlags::gradients);
+  for (unsigned int cell = 0;
+       cell < mf_system_matrix.get_matrix_free()->n_cell_batches();
+       ++cell)
+    {
+      phi.reinit(cell);
+      phi.read_dof_values_plain(solution_copy);
+      phi.evaluate(EvaluationFlags::gradients);

-        for (const unsigned int q : phi.quadrature_point_indices())
-          {
-            phi.submit_gradient(-1.0 *
-                                  (coefficient(cell, 0) * phi.get_gradient(q)),
-                                q);
-            phi.submit_value(
-              right_hand_side_function.value(phi.quadrature_point(q)), q);
-          }
+      for (const unsigned int q : phi.quadrature_point_indices())
+        {
+          phi.submit_gradient(-1.0 *
+                                (coefficient(cell, 0) * phi.get_gradient(q)),
+                              q);
+          phi.submit_value(
+            right_hand_side_function.value(phi.quadrature_point(q)), q);
+        }

-        phi.integrate_scatter(EvaluationFlags::values |
-                                EvaluationFlags::gradients,
-                              right_hand_side_copy);
-      }
+      phi.integrate_scatter(EvaluationFlags::values |
+                              EvaluationFlags::gradients,
+                            right_hand_side_copy);
+    }

-    right_hand_side_copy.compress(VectorOperation::add);
+  right_hand_side_copy.compress(VectorOperation::add);

-    ChangeVectorTypes::copy(right_hand_side, right_hand_side_copy);
-  }
+  ChangeVectorTypes::copy(right_hand_side, right_hand_side_copy);
+}

-  // @sect4{LaplaceProblem::solve()}
+// @sect4{LaplaceProblem::solve()}

-  // Here we set up the multigrid preconditioner, test the timing of a single
-  // V-cycle, and solve the linear system. Unsurprisingly, this is one of the
-  // places where the three methods differ the most.
-  template
-  void LaplaceProblem::solve()
-  {
-    TimerOutput::Scope timing(computing_timer, "Solve");
+// Here we set up the multigrid preconditioner, test the timing of a single
+// V-cycle, and solve the linear system. Unsurprisingly, this is one of the
+// places where the three methods differ the most.
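// Before moving into solve(), a remark on the `ChangeVectorTypes::copy()`
// calls used in `assemble_rhs()` above: that namespace is defined earlier in
// this program and is not shown in this excerpt. As a rough sketch only --
// the element-wise loop below is an illustrative assumption, not the
// tutorial's actual implementation -- such a helper copies the locally owned
// range between the two vector types and finalizes the destination's
// parallel state:
//
// @code
// namespace ChangeVectorTypes
// {
//   template <typename number>
//   void copy(LA::MPI::Vector &out,
//             const LinearAlgebra::distributed::Vector<number> &in)
//   {
//     // Both vectors partition the same index space, so copying the
//     // locally owned entries one by one suffices; compress() then
//     // restores a consistent parallel state on the destination.
//     // (Illustrative only: the program's real helper may differ.)
//     for (const auto i : out.locally_owned_elements())
//       out[i] = in(i);
//     out.compress(VectorOperation::insert);
//   }
// } // namespace ChangeVectorTypes
// @endcode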
+template +void LaplaceProblem::solve() +{ + TimerOutput::Scope timing(computing_timer, "Solve"); - SolverControl solver_control(1000, 1.e-10 * right_hand_side.l2_norm()); - solver_control.enable_history_data(); + SolverControl solver_control(1000, 1.e-10 * right_hand_side.l2_norm()); + solver_control.enable_history_data(); - solution = 0.; + solution = 0.; - // The solver for the matrix-free GMG method is similar to step-37, apart - // from adding some interface matrices in complete analogy to step-16. - switch (settings.solver) - { - case Settings::gmg_mf: + // The solver for the matrix-free GMG method is similar to step-37, apart + // from adding some interface matrices in complete analogy to step-16. + switch (settings.solver) + { + case Settings::gmg_mf: + { + computing_timer.enter_subsection("Solve: Preconditioner setup"); + + MGTransferMatrixFree mg_transfer(mg_constrained_dofs); + mg_transfer.build(dof_handler); + + SolverControl coarse_solver_control(1000, 1e-12, false, false); + SolverCG coarse_solver(coarse_solver_control); + PreconditionIdentity identity; + MGCoarseGridIterativeSolver, + MatrixFreeLevelMatrix, + PreconditionIdentity> + coarse_grid_solver(coarse_solver, mf_mg_matrix[0], identity); + + using Smoother = PreconditionJacobi; + MGSmootherPrecondition + smoother; + smoother.initialize(mf_mg_matrix, + typename Smoother::AdditionalData( + settings.smoother_dampen)); + smoother.set_steps(settings.smoother_steps); + + mg::Matrix mg_m(mf_mg_matrix); + + MGLevelObject< + MatrixFreeOperators::MGInterfaceOperator> + mg_interface_matrices; + mg_interface_matrices.resize(0, triangulation.n_global_levels() - 1); + for (unsigned int level = 0; level < triangulation.n_global_levels(); + ++level) + mg_interface_matrices[level].initialize(mf_mg_matrix[level]); + mg::Matrix mg_interface(mg_interface_matrices); + + Multigrid mg( + mg_m, coarse_grid_solver, mg_transfer, smoother, smoother); + mg.set_edge_matrices(mg_interface, mg_interface); + + PreconditionMG> + preconditioner(dof_handler, mg, mg_transfer); + + // Copy the solution vector and right-hand side from LA::MPI::Vector + // to LinearAlgebra::distributed::Vector so that we can solve. + MatrixFreeActiveVector solution_copy; + MatrixFreeActiveVector right_hand_side_copy; + mf_system_matrix.initialize_dof_vector(solution_copy); + mf_system_matrix.initialize_dof_vector(right_hand_side_copy); + + ChangeVectorTypes::copy(solution_copy, solution); + ChangeVectorTypes::copy(right_hand_side_copy, right_hand_side); + computing_timer.leave_subsection("Solve: Preconditioner setup"); + + // Timing for 1 V-cycle. 
{ - computing_timer.enter_subsection("Solve: Preconditioner setup"); - - MGTransferMatrixFree mg_transfer(mg_constrained_dofs); - mg_transfer.build(dof_handler); - - SolverControl coarse_solver_control(1000, 1e-12, false, false); - SolverCG coarse_solver( - coarse_solver_control); - PreconditionIdentity identity; - MGCoarseGridIterativeSolver, - MatrixFreeLevelMatrix, - PreconditionIdentity> - coarse_grid_solver(coarse_solver, mf_mg_matrix[0], identity); - - using Smoother = PreconditionJacobi; - MGSmootherPrecondition - smoother; - smoother.initialize(mf_mg_matrix, - typename Smoother::AdditionalData( - settings.smoother_dampen)); - smoother.set_steps(settings.smoother_steps); - - mg::Matrix mg_m(mf_mg_matrix); - - MGLevelObject< - MatrixFreeOperators::MGInterfaceOperator> - mg_interface_matrices; - mg_interface_matrices.resize(0, - triangulation.n_global_levels() - 1); - for (unsigned int level = 0; - level < triangulation.n_global_levels(); - ++level) - mg_interface_matrices[level].initialize(mf_mg_matrix[level]); - mg::Matrix mg_interface( - mg_interface_matrices); - - Multigrid mg( - mg_m, coarse_grid_solver, mg_transfer, smoother, smoother); - mg.set_edge_matrices(mg_interface, mg_interface); - - PreconditionMG> - preconditioner(dof_handler, mg, mg_transfer); - - // Copy the solution vector and right-hand side from LA::MPI::Vector - // to LinearAlgebra::distributed::Vector so that we can solve. - MatrixFreeActiveVector solution_copy; - MatrixFreeActiveVector right_hand_side_copy; - mf_system_matrix.initialize_dof_vector(solution_copy); - mf_system_matrix.initialize_dof_vector(right_hand_side_copy); - - ChangeVectorTypes::copy(solution_copy, solution); - ChangeVectorTypes::copy(right_hand_side_copy, right_hand_side); - computing_timer.leave_subsection("Solve: Preconditioner setup"); - - // Timing for 1 V-cycle. - { - TimerOutput::Scope timing(computing_timer, - "Solve: 1 multigrid V-cycle"); - preconditioner.vmult(solution_copy, right_hand_side_copy); - } - solution_copy = 0.; + TimerOutput::Scope timing(computing_timer, + "Solve: 1 multigrid V-cycle"); + preconditioner.vmult(solution_copy, right_hand_side_copy); + } + solution_copy = 0.; - // Solve the linear system, update the ghost values of the solution, - // copy back to LA::MPI::Vector and distribute constraints. - { - SolverCG solver(solver_control); + // Solve the linear system, update the ghost values of the solution, + // copy back to LA::MPI::Vector and distribute constraints. + { + SolverCG solver(solver_control); - TimerOutput::Scope timing(computing_timer, "Solve: CG"); - solver.solve(mf_system_matrix, - solution_copy, - right_hand_side_copy, - preconditioner); - } + TimerOutput::Scope timing(computing_timer, "Solve: CG"); + solver.solve(mf_system_matrix, + solution_copy, + right_hand_side_copy, + preconditioner); + } - solution_copy.update_ghost_values(); - ChangeVectorTypes::copy(solution, solution_copy); - constraints.distribute(solution); + solution_copy.update_ghost_values(); + ChangeVectorTypes::copy(solution, solution_copy); + constraints.distribute(solution); - break; - } + break; + } - // Solver for the matrix-based GMG method, similar to step-16, only - // using a Jacobi smoother instead of a SOR smoother (which is not - // implemented in parallel). - case Settings::gmg_mb: - { - computing_timer.enter_subsection("Solve: Preconditioner setup"); + // Solver for the matrix-based GMG method, similar to step-16, only + // using a Jacobi smoother instead of a SOR smoother (which is not + // implemented in parallel). 
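// For reference, both GMG variants smooth with damped Jacobi: one smoothing
// step with damping parameter $\omega$ (the `smoother_dampen` setting)
// computes
// @f[
//   x^{k+1} = x^{k} + \omega D^{-1} \left( b - A x^{k} \right),
// @f]
// where $D$ is the diagonal of $A$. This is the textbook definition of the
// method rather than anything specific to this program, but it explains the
// assertion in the PETSc branch below: PETSc's Jacobi preconditioner offers
// no damping parameter, so only $\omega = 1$ is supported there.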
+ case Settings::gmg_mb: + { + computing_timer.enter_subsection("Solve: Preconditioner setup"); - MGTransferPrebuilt mg_transfer(mg_constrained_dofs); - mg_transfer.build(dof_handler); + MGTransferPrebuilt mg_transfer(mg_constrained_dofs); + mg_transfer.build(dof_handler); - SolverControl coarse_solver_control(1000, 1e-12, false, false); - SolverCG coarse_solver(coarse_solver_control); - PreconditionIdentity identity; - MGCoarseGridIterativeSolver, - MatrixType, - PreconditionIdentity> - coarse_grid_solver(coarse_solver, mg_matrix[0], identity); + SolverControl coarse_solver_control(1000, 1e-12, false, false); + SolverCG coarse_solver(coarse_solver_control); + PreconditionIdentity identity; + MGCoarseGridIterativeSolver, + MatrixType, + PreconditionIdentity> + coarse_grid_solver(coarse_solver, mg_matrix[0], identity); - using Smoother = LA::MPI::PreconditionJacobi; - MGSmootherPrecondition smoother; + using Smoother = LA::MPI::PreconditionJacobi; + MGSmootherPrecondition smoother; #ifdef USE_PETSC_LA - smoother.initialize(mg_matrix); - Assert( - settings.smoother_dampen == 1.0, - ExcNotImplemented( - "PETSc's PreconditionJacobi has no support for a damping parameter.")); + smoother.initialize(mg_matrix); + Assert( + settings.smoother_dampen == 1.0, + ExcNotImplemented( + "PETSc's PreconditionJacobi has no support for a damping parameter.")); #else - smoother.initialize(mg_matrix, settings.smoother_dampen); + smoother.initialize(mg_matrix, settings.smoother_dampen); #endif - smoother.set_steps(settings.smoother_steps); + smoother.set_steps(settings.smoother_steps); - mg::Matrix mg_m(mg_matrix); - mg::Matrix mg_in(mg_interface_in); - mg::Matrix mg_out(mg_interface_in); + mg::Matrix mg_m(mg_matrix); + mg::Matrix mg_in(mg_interface_in); + mg::Matrix mg_out(mg_interface_in); - Multigrid mg( - mg_m, coarse_grid_solver, mg_transfer, smoother, smoother); - mg.set_edge_matrices(mg_out, mg_in); + Multigrid mg( + mg_m, coarse_grid_solver, mg_transfer, smoother, smoother); + mg.set_edge_matrices(mg_out, mg_in); - PreconditionMG> - preconditioner(dof_handler, mg, mg_transfer); + PreconditionMG> + preconditioner(dof_handler, mg, mg_transfer); - computing_timer.leave_subsection("Solve: Preconditioner setup"); + computing_timer.leave_subsection("Solve: Preconditioner setup"); - // Timing for 1 V-cycle. - { - TimerOutput::Scope timing(computing_timer, - "Solve: 1 multigrid V-cycle"); - preconditioner.vmult(solution, right_hand_side); - } - solution = 0.; + // Timing for 1 V-cycle. + { + TimerOutput::Scope timing(computing_timer, + "Solve: 1 multigrid V-cycle"); + preconditioner.vmult(solution, right_hand_side); + } + solution = 0.; - // Solve the linear system and distribute constraints. - { - SolverCG solver(solver_control); + // Solve the linear system and distribute constraints. + { + SolverCG solver(solver_control); - TimerOutput::Scope timing(computing_timer, "Solve: CG"); - solver.solve(system_matrix, - solution, - right_hand_side, - preconditioner); - } + TimerOutput::Scope timing(computing_timer, "Solve: CG"); + solver.solve(system_matrix, + solution, + right_hand_side, + preconditioner); + } - constraints.distribute(solution); + constraints.distribute(solution); - break; - } + break; + } - // Solver for the AMG method, similar to step-40. - case Settings::amg: - { - computing_timer.enter_subsection("Solve: Preconditioner setup"); + // Solver for the AMG method, similar to step-40. 
+ case Settings::amg: + { + computing_timer.enter_subsection("Solve: Preconditioner setup"); - PreconditionAMG preconditioner; - PreconditionAMG::AdditionalData Amg_data; + PreconditionAMG preconditioner; + PreconditionAMG::AdditionalData Amg_data; #ifdef USE_PETSC_LA - Amg_data.symmetric_operator = true; + Amg_data.symmetric_operator = true; #else - Amg_data.elliptic = true; - Amg_data.smoother_type = "Jacobi"; - Amg_data.higher_order_elements = true; - Amg_data.smoother_sweeps = settings.smoother_steps; - Amg_data.aggregation_threshold = 0.02; + Amg_data.elliptic = true; + Amg_data.smoother_type = "Jacobi"; + Amg_data.higher_order_elements = true; + Amg_data.smoother_sweeps = settings.smoother_steps; + Amg_data.aggregation_threshold = 0.02; #endif - Amg_data.output_details = false; + Amg_data.output_details = false; - preconditioner.initialize(system_matrix, Amg_data); - computing_timer.leave_subsection("Solve: Preconditioner setup"); + preconditioner.initialize(system_matrix, Amg_data); + computing_timer.leave_subsection("Solve: Preconditioner setup"); - // Timing for 1 V-cycle. - { - TimerOutput::Scope timing(computing_timer, - "Solve: 1 multigrid V-cycle"); - preconditioner.vmult(solution, right_hand_side); - } - solution = 0.; - - // Solve the linear system and distribute constraints. - { - SolverCG solver(solver_control); + // Timing for 1 V-cycle. + { + TimerOutput::Scope timing(computing_timer, + "Solve: 1 multigrid V-cycle"); + preconditioner.vmult(solution, right_hand_side); + } + solution = 0.; - TimerOutput::Scope timing(computing_timer, "Solve: CG"); - solver.solve(system_matrix, - solution, - right_hand_side, - preconditioner); - } - constraints.distribute(solution); + // Solve the linear system and distribute constraints. + { + SolverCG solver(solver_control); - break; + TimerOutput::Scope timing(computing_timer, "Solve: CG"); + solver.solve(system_matrix, + solution, + right_hand_side, + preconditioner); } + constraints.distribute(solution); - default: - DEAL_II_ASSERT_UNREACHABLE(); - } - - pcout << " Number of CG iterations: " << solver_control.last_step() - << std::endl; - } + break; + } + default: + DEAL_II_ASSERT_UNREACHABLE(); + } - // @sect3{The error estimator} + pcout << " Number of CG iterations: " << solver_control.last_step() + << std::endl; +} - // We use the FEInterfaceValues class to assemble an error estimator to decide - // which cells to refine. See the exact definition of the cell and face - // integrals in the introduction. To use the method, we define Scratch and - // Copy objects for the MeshWorker::mesh_loop() with much of the following - // code being in essence as was set up in step-12 already (or at least similar - // in spirit). 
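// Putting the cell and face integrals together, the estimator assembled
// below is, for each cell $K$,
// @f[
//   \eta_K^2 = h_K^2 \left\| f + \epsilon \triangle u \right\|_K^2
//            + \sum_{F \subset \partial K} \frac{h_F}{2}
//              \left\| \jump{\epsilon \nabla u \cdot n} \right\|_F^2 ,
// @f]
// where the factor $1/2$ splits each face term evenly between the two cells
// sharing the face $F$; this is the `0.5 * h * jump_norm_square` assignment
// in the face worker below.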
- template - struct ScratchData - { - ScratchData(const Mapping &mapping, - const FiniteElement &fe, - const unsigned int quadrature_degree, - const UpdateFlags update_flags, - const UpdateFlags interface_update_flags) - : fe_values(mapping, fe, QGauss(quadrature_degree), update_flags) - , fe_interface_values(mapping, - fe, - QGauss(quadrature_degree), - interface_update_flags) - {} - - - ScratchData(const ScratchData &scratch_data) - : fe_values(scratch_data.fe_values.get_mapping(), - scratch_data.fe_values.get_fe(), - scratch_data.fe_values.get_quadrature(), - scratch_data.fe_values.get_update_flags()) - , fe_interface_values(scratch_data.fe_values.get_mapping(), - scratch_data.fe_values.get_fe(), - scratch_data.fe_interface_values.get_quadrature(), - scratch_data.fe_interface_values.get_update_flags()) - {} - - FEValues fe_values; - FEInterfaceValues fe_interface_values; - }; +// @sect3{The error estimator} +// We use the FEInterfaceValues class to assemble an error estimator to decide +// which cells to refine. See the exact definition of the cell and face +// integrals in the introduction. To use the method, we define Scratch and +// Copy objects for the MeshWorker::mesh_loop() with much of the following code +// being in essence as was set up in step-12 already (or at least similar in +// spirit). +template +struct ScratchData +{ + ScratchData(const Mapping &mapping, + const FiniteElement &fe, + const unsigned int quadrature_degree, + const UpdateFlags update_flags, + const UpdateFlags interface_update_flags) + : fe_values(mapping, fe, QGauss(quadrature_degree), update_flags) + , fe_interface_values(mapping, + fe, + QGauss(quadrature_degree), + interface_update_flags) + {} + + + ScratchData(const ScratchData &scratch_data) + : fe_values(scratch_data.fe_values.get_mapping(), + scratch_data.fe_values.get_fe(), + scratch_data.fe_values.get_quadrature(), + scratch_data.fe_values.get_update_flags()) + , fe_interface_values(scratch_data.fe_values.get_mapping(), + scratch_data.fe_values.get_fe(), + scratch_data.fe_interface_values.get_quadrature(), + scratch_data.fe_interface_values.get_update_flags()) + {} + + FEValues fe_values; + FEInterfaceValues fe_interface_values; +}; + + + +struct CopyData +{ + CopyData() + : cell_index(numbers::invalid_unsigned_int) + , value(0.) + {} - struct CopyData + struct FaceData { - CopyData() - : cell_index(numbers::invalid_unsigned_int) - , value(0.) 
- {} + unsigned int cell_indices[2]; + double values[2]; + }; - struct FaceData - { - unsigned int cell_indices[2]; - double values[2]; - }; + unsigned int cell_index; + double value; + std::vector face_data; +}; - unsigned int cell_index; - double value; - std::vector face_data; - }; +template +void LaplaceProblem::estimate() +{ + TimerOutput::Scope timing(computing_timer, "Estimate"); - template - void LaplaceProblem::estimate() - { - TimerOutput::Scope timing(computing_timer, "Estimate"); + VectorType temp_solution; + temp_solution.reinit(locally_owned_dofs, + locally_relevant_dofs, + mpi_communicator); + temp_solution = solution; - VectorType temp_solution; - temp_solution.reinit(locally_owned_dofs, - locally_relevant_dofs, - mpi_communicator); - temp_solution = solution; + const Coefficient coefficient; - const Coefficient coefficient; + estimated_error_square_per_cell.reinit(triangulation.n_active_cells()); - estimated_error_square_per_cell.reinit(triangulation.n_active_cells()); + using Iterator = typename DoFHandler::active_cell_iterator; - using Iterator = typename DoFHandler::active_cell_iterator; + // Assembler for cell residual $h^2 \| f + \epsilon \triangle u \|_K^2$ + auto cell_worker = [&](const Iterator &cell, + ScratchData &scratch_data, + CopyData ©_data) { + FEValues &fe_values = scratch_data.fe_values; + fe_values.reinit(cell); - // Assembler for cell residual $h^2 \| f + \epsilon \triangle u \|_K^2$ - auto cell_worker = [&](const Iterator &cell, - ScratchData &scratch_data, - CopyData ©_data) { - FEValues &fe_values = scratch_data.fe_values; - fe_values.reinit(cell); + RightHandSide rhs; + const double rhs_value = rhs.value(cell->center()); - RightHandSide rhs; - const double rhs_value = rhs.value(cell->center()); + const double nu = coefficient.value(cell->center()); - const double nu = coefficient.value(cell->center()); + std::vector> hessians(fe_values.n_quadrature_points); + fe_values.get_function_hessians(temp_solution, hessians); - std::vector> hessians(fe_values.n_quadrature_points); - fe_values.get_function_hessians(temp_solution, hessians); + copy_data.cell_index = cell->active_cell_index(); - copy_data.cell_index = cell->active_cell_index(); + double residual_norm_square = 0.; + for (unsigned k = 0; k < fe_values.n_quadrature_points; ++k) + { + const double residual = (rhs_value + nu * trace(hessians[k])); + residual_norm_square += residual * residual * fe_values.JxW(k); + } - double residual_norm_square = 0.; - for (unsigned k = 0; k < fe_values.n_quadrature_points; ++k) - { - const double residual = (rhs_value + nu * trace(hessians[k])); - residual_norm_square += residual * residual * fe_values.JxW(k); - } + copy_data.value = + cell->diameter() * cell->diameter() * residual_norm_square; + }; - copy_data.value = - cell->diameter() * cell->diameter() * residual_norm_square; - }; + // Assembler for face term $\sum_F h_F \| \jump{\epsilon \nabla u \cdot n} + // \|_F^2$ + auto face_worker = [&](const Iterator &cell, + const unsigned int &f, + const unsigned int &sf, + const Iterator &ncell, + const unsigned int &nf, + const unsigned int &nsf, + ScratchData &scratch_data, + CopyData ©_data) { + FEInterfaceValues &fe_interface_values = + scratch_data.fe_interface_values; + fe_interface_values.reinit(cell, f, sf, ncell, nf, nsf); - // Assembler for face term $\sum_F h_F \| \jump{\epsilon \nabla u \cdot n} - // \|_F^2$ - auto face_worker = [&](const Iterator &cell, - const unsigned int &f, - const unsigned int &sf, - const Iterator &ncell, - const unsigned int &nf, 
- const unsigned int &nsf, - ScratchData &scratch_data, - CopyData ©_data) { - FEInterfaceValues &fe_interface_values = - scratch_data.fe_interface_values; - fe_interface_values.reinit(cell, f, sf, ncell, nf, nsf); + copy_data.face_data.emplace_back(); + CopyData::FaceData ©_data_face = copy_data.face_data.back(); - copy_data.face_data.emplace_back(); - CopyData::FaceData ©_data_face = copy_data.face_data.back(); + copy_data_face.cell_indices[0] = cell->active_cell_index(); + copy_data_face.cell_indices[1] = ncell->active_cell_index(); - copy_data_face.cell_indices[0] = cell->active_cell_index(); - copy_data_face.cell_indices[1] = ncell->active_cell_index(); + const double coeff1 = coefficient.value(cell->center()); + const double coeff2 = coefficient.value(ncell->center()); - const double coeff1 = coefficient.value(cell->center()); - const double coeff2 = coefficient.value(ncell->center()); + std::vector> grad_u[2]; - std::vector> grad_u[2]; + for (unsigned int i = 0; i < 2; ++i) + { + grad_u[i].resize(fe_interface_values.n_quadrature_points); + fe_interface_values.get_fe_face_values(i).get_function_gradients( + temp_solution, grad_u[i]); + } - for (unsigned int i = 0; i < 2; ++i) - { - grad_u[i].resize(fe_interface_values.n_quadrature_points); - fe_interface_values.get_fe_face_values(i).get_function_gradients( - temp_solution, grad_u[i]); - } + double jump_norm_square = 0.; - double jump_norm_square = 0.; + for (unsigned int qpoint = 0; + qpoint < fe_interface_values.n_quadrature_points; + ++qpoint) + { + const double jump = + coeff1 * grad_u[0][qpoint] * fe_interface_values.normal(qpoint) - + coeff2 * grad_u[1][qpoint] * fe_interface_values.normal(qpoint); - for (unsigned int qpoint = 0; - qpoint < fe_interface_values.n_quadrature_points; - ++qpoint) - { - const double jump = - coeff1 * grad_u[0][qpoint] * fe_interface_values.normal(qpoint) - - coeff2 * grad_u[1][qpoint] * fe_interface_values.normal(qpoint); + jump_norm_square += jump * jump * fe_interface_values.JxW(qpoint); + } - jump_norm_square += jump * jump * fe_interface_values.JxW(qpoint); - } + const double h = cell->face(f)->measure(); + copy_data_face.values[0] = 0.5 * h * jump_norm_square; + copy_data_face.values[1] = copy_data_face.values[0]; + }; - const double h = cell->face(f)->measure(); - copy_data_face.values[0] = 0.5 * h * jump_norm_square; - copy_data_face.values[1] = copy_data_face.values[0]; - }; - - auto copier = [&](const CopyData ©_data) { - if (copy_data.cell_index != numbers::invalid_unsigned_int) - estimated_error_square_per_cell[copy_data.cell_index] += - copy_data.value; - - for (const auto &cdf : copy_data.face_data) - for (unsigned int j = 0; j < 2; ++j) - estimated_error_square_per_cell[cdf.cell_indices[j]] += cdf.values[j]; - }; - - const unsigned int n_gauss_points = degree + 1; - ScratchData scratch_data(mapping, - fe, - n_gauss_points, - update_hessians | update_quadrature_points | - update_JxW_values, - update_values | update_gradients | - update_JxW_values | update_normal_vectors); - CopyData copy_data; - - // We need to assemble each interior face once but we need to make sure that - // both processes assemble the face term between a locally owned and a ghost - // cell. This is achieved by setting the - // MeshWorker::assemble_ghost_faces_both flag. We need to do this, because - // we do not communicate the error estimator contributions here. 
- MeshWorker::mesh_loop(dof_handler.begin_active(), - dof_handler.end(), - cell_worker, - copier, - scratch_data, - copy_data, - MeshWorker::assemble_own_cells | - MeshWorker::assemble_ghost_faces_both | - MeshWorker::assemble_own_interior_faces_once, - /*boundary_worker=*/nullptr, - face_worker); - - const double global_error_estimate = - std::sqrt(Utilities::MPI::sum(estimated_error_square_per_cell.l1_norm(), - mpi_communicator)); - pcout << " Global error estimate: " << global_error_estimate - << std::endl; - } + auto copier = [&](const CopyData ©_data) { + if (copy_data.cell_index != numbers::invalid_unsigned_int) + estimated_error_square_per_cell[copy_data.cell_index] += copy_data.value; + for (const auto &cdf : copy_data.face_data) + for (unsigned int j = 0; j < 2; ++j) + estimated_error_square_per_cell[cdf.cell_indices[j]] += cdf.values[j]; + }; - // @sect4{LaplaceProblem::refine_grid()} + const unsigned int n_gauss_points = degree + 1; + ScratchData scratch_data(mapping, + fe, + n_gauss_points, + update_hessians | update_quadrature_points | + update_JxW_values, + update_values | update_gradients | + update_JxW_values | update_normal_vectors); + CopyData copy_data; + + // We need to assemble each interior face once but we need to make sure that + // both processes assemble the face term between a locally owned and a ghost + // cell. This is achieved by setting the + // MeshWorker::assemble_ghost_faces_both flag. We need to do this, because + // we do not communicate the error estimator contributions here. + MeshWorker::mesh_loop(dof_handler.begin_active(), + dof_handler.end(), + cell_worker, + copier, + scratch_data, + copy_data, + MeshWorker::assemble_own_cells | + MeshWorker::assemble_ghost_faces_both | + MeshWorker::assemble_own_interior_faces_once, + /*boundary_worker=*/nullptr, + face_worker); + + const double global_error_estimate = + std::sqrt(Utilities::MPI::sum(estimated_error_square_per_cell.l1_norm(), + mpi_communicator)); + pcout << " Global error estimate: " << global_error_estimate + << std::endl; +} - // We use the cell-wise estimator stored in the vector @p estimate_vector and - // refine a fixed number of cells (chosen here to roughly double the number of - // DoFs in each step). - template - void LaplaceProblem::refine_grid() - { - TimerOutput::Scope timing(computing_timer, "Refine grid"); - const double refinement_fraction = 1. / (std::pow(2.0, dim) - 1.); - parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number( - triangulation, estimated_error_square_per_cell, refinement_fraction, 0.0); +// @sect4{LaplaceProblem::refine_grid()} - triangulation.execute_coarsening_and_refinement(); - } +// We use the cell-wise estimator stored in the vector @p estimate_vector and +// refine a fixed number of cells (chosen here to roughly double the number of +// DoFs in each step). +template +void LaplaceProblem::refine_grid() +{ + TimerOutput::Scope timing(computing_timer, "Refine grid"); + const double refinement_fraction = 1. / (std::pow(2.0, dim) - 1.); + parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number( + triangulation, estimated_error_square_per_cell, refinement_fraction, 0.0); - // @sect4{LaplaceProblem::output_results()} + triangulation.execute_coarsening_and_refinement(); +} - // The output_results() function is similar to the ones found in many of the - // tutorials (see step-40 for example). 
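// A short calculation justifies the refinement fraction chosen in
// `refine_grid()` above: if a fraction $r$ of the $N$ active cells is
// refined, each such cell is replaced by its $2^{dim}$ children, so the new
// mesh has
// @f[
//   N (1 - r) + r \, 2^{dim} N = N \left( 1 + r \left( 2^{dim} - 1 \right) \right)
// @f]
// cells. This equals $2N$ precisely for $r = 1 / (2^{dim} - 1)$, the value
// assigned to `refinement_fraction`, and doubling the number of cells in
// each step roughly doubles the number of DoFs as well.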
- template - void LaplaceProblem::output_results(const unsigned int cycle) - { - TimerOutput::Scope timing(computing_timer, "Output results"); - VectorType temp_solution; - temp_solution.reinit(locally_owned_dofs, - locally_relevant_dofs, - mpi_communicator); - temp_solution = solution; +// @sect4{LaplaceProblem::output_results()} - DataOut data_out; - data_out.attach_dof_handler(dof_handler); - data_out.add_data_vector(temp_solution, "solution"); +// The output_results() function is similar to the ones found in many of the +// tutorials (see step-40 for example). +template +void LaplaceProblem::output_results(const unsigned int cycle) +{ + TimerOutput::Scope timing(computing_timer, "Output results"); - Vector subdomain(triangulation.n_active_cells()); - for (unsigned int i = 0; i < subdomain.size(); ++i) - subdomain(i) = triangulation.locally_owned_subdomain(); - data_out.add_data_vector(subdomain, "subdomain"); + VectorType temp_solution; + temp_solution.reinit(locally_owned_dofs, + locally_relevant_dofs, + mpi_communicator); + temp_solution = solution; - Vector level(triangulation.n_active_cells()); - for (const auto &cell : triangulation.active_cell_iterators()) - level(cell->active_cell_index()) = cell->level(); - data_out.add_data_vector(level, "level"); + DataOut data_out; + data_out.attach_dof_handler(dof_handler); + data_out.add_data_vector(temp_solution, "solution"); - if (estimated_error_square_per_cell.size() > 0) - data_out.add_data_vector(estimated_error_square_per_cell, - "estimated_error_square_per_cell"); + Vector subdomain(triangulation.n_active_cells()); + for (unsigned int i = 0; i < subdomain.size(); ++i) + subdomain(i) = triangulation.locally_owned_subdomain(); + data_out.add_data_vector(subdomain, "subdomain"); - data_out.build_patches(); + Vector level(triangulation.n_active_cells()); + for (const auto &cell : triangulation.active_cell_iterators()) + level(cell->active_cell_index()) = cell->level(); + data_out.add_data_vector(level, "level"); - const std::string pvtu_filename = data_out.write_vtu_with_pvtu_record( - "", "solution", cycle, mpi_communicator, 2 /*n_digits*/, 1 /*n_groups*/); + if (estimated_error_square_per_cell.size() > 0) + data_out.add_data_vector(estimated_error_square_per_cell, + "estimated_error_square_per_cell"); - pcout << " Wrote " << pvtu_filename << std::endl; - } + data_out.build_patches(); + const std::string pvtu_filename = data_out.write_vtu_with_pvtu_record( + "", "solution", cycle, mpi_communicator, 2 /*n_digits*/, 1 /*n_groups*/); - // @sect4{LaplaceProblem::run()} + pcout << " Wrote " << pvtu_filename << std::endl; +} - // As in most tutorials, this function calls the various functions defined - // above to set up, assemble, solve, and output the results. - template - void LaplaceProblem::run() - { - for (unsigned int cycle = 0; cycle < settings.n_steps; ++cycle) - { - pcout << "Cycle " << cycle << ':' << std::endl; - if (cycle > 0) - refine_grid(); - - pcout << " Number of active cells: " - << triangulation.n_global_active_cells(); - - // We only output level cell data for the GMG methods (same with DoF - // data below). Note that the partition efficiency is irrelevant for AMG - // since the level hierarchy is not distributed or used during the - // computation. 
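// The "partition efficiency" printed below is the reciprocal of
// MGTools::workload_imbalance(). Roughly speaking -- the authoritative
// definition is in the MGTools documentation, so treat this formula as a
// summary, not a specification -- the imbalance compares the cost of a
// V-cycle, which on each level is driven by the most heavily loaded
// process, against the perfectly balanced ideal:
// @f[
//   \text{imbalance} \approx
//   \frac{\sum_{\ell} \max_p n_{\ell,p}}{\frac{1}{P} \sum_{\ell} n_{\ell}},
// @f]
// where $n_{\ell,p}$ is the number of level-$\ell$ cells owned by process
// $p$, $n_\ell$ is their sum over processes, and $P$ is the number of
// processes. An efficiency of one therefore means every level of the
// hierarchy is perfectly distributed.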
- if (settings.solver == Settings::gmg_mf || - settings.solver == Settings::gmg_mb) - pcout << " (" << triangulation.n_global_levels() << " global levels)" - << std::endl - << " Partition efficiency: " - << 1.0 / MGTools::workload_imbalance(triangulation); - pcout << std::endl; - setup_system(); +// @sect4{LaplaceProblem::run()} - // Only set up the multilevel hierarchy for GMG. - if (settings.solver == Settings::gmg_mf || - settings.solver == Settings::gmg_mb) - setup_multigrid(); +// As in most tutorials, this function calls the various functions defined +// above to set up, assemble, solve, and output the results. +template +void LaplaceProblem::run() +{ + for (unsigned int cycle = 0; cycle < settings.n_steps; ++cycle) + { + pcout << "Cycle " << cycle << ':' << std::endl; + if (cycle > 0) + refine_grid(); + + pcout << " Number of active cells: " + << triangulation.n_global_active_cells(); + + // We only output level cell data for the GMG methods (same with DoF + // data below). Note that the partition efficiency is irrelevant for AMG + // since the level hierarchy is not distributed or used during the + // computation. + if (settings.solver == Settings::gmg_mf || + settings.solver == Settings::gmg_mb) + pcout << " (" << triangulation.n_global_levels() << " global levels)" + << std::endl + << " Partition efficiency: " + << 1.0 / MGTools::workload_imbalance(triangulation); + pcout << std::endl; + + setup_system(); + + // Only set up the multilevel hierarchy for GMG. + if (settings.solver == Settings::gmg_mf || + settings.solver == Settings::gmg_mb) + setup_multigrid(); + + pcout << " Number of degrees of freedom: " << dof_handler.n_dofs(); + if (settings.solver == Settings::gmg_mf || + settings.solver == Settings::gmg_mb) + { + pcout << " (by level: "; + for (unsigned int level = 0; level < triangulation.n_global_levels(); + ++level) + pcout << dof_handler.n_dofs(level) + << (level == triangulation.n_global_levels() - 1 ? ")" : + ", "); + } + pcout << std::endl; + + // For the matrix-free method, we only assemble the right-hand side. + // For both matrix-based methods, we assemble both active matrix and + // right-hand side, and only assemble the multigrid matrices for + // matrix-based GMG. + if (settings.solver == Settings::gmg_mf) + assemble_rhs(); + else /*gmg_mb or amg*/ + { + assemble_system(); + if (settings.solver == Settings::gmg_mb) + assemble_multigrid(); + } - pcout << " Number of degrees of freedom: " << dof_handler.n_dofs(); - if (settings.solver == Settings::gmg_mf || - settings.solver == Settings::gmg_mb) - { - pcout << " (by level: "; - for (unsigned int level = 0; - level < triangulation.n_global_levels(); - ++level) - pcout << dof_handler.n_dofs(level) - << (level == triangulation.n_global_levels() - 1 ? ")" : - ", "); - } - pcout << std::endl; - - // For the matrix-free method, we only assemble the right-hand side. - // For both matrix-based methods, we assemble both active matrix and - // right-hand side, and only assemble the multigrid matrices for - // matrix-based GMG. 
- if (settings.solver == Settings::gmg_mf) - assemble_rhs(); - else /*gmg_mb or amg*/ - { - assemble_system(); - if (settings.solver == Settings::gmg_mb) - assemble_multigrid(); - } + solve(); + estimate(); - solve(); - estimate(); + if (settings.output) + output_results(cycle); - if (settings.output) - output_results(cycle); + computing_timer.print_summary(); + computing_timer.reset(); + } +} - computing_timer.print_summary(); - computing_timer.reset(); - } - } -} // namespace Step50 // @sect3{The main() function} @@ -1554,7 +1538,6 @@ namespace Step50 int main(int argc, char *argv[]) { using namespace dealii; - using namespace Step50; Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1); Settings settings;