From 9b12145c7dce8d257f26cc93228570f07b2e1a79 Mon Sep 17 00:00:00 2001 From: Martin Kronbichler Date: Mon, 23 Apr 2018 11:26:02 +0200 Subject: [PATCH] Add step-59 tutorial program showing matrix-free DG. --- examples/step-59/CMakeLists.txt | 52 ++ examples/step-59/doc/builds-on | 1 + examples/step-59/doc/intro.dox | 18 + examples/step-59/doc/kind | 1 + examples/step-59/doc/results.dox | 99 +++ examples/step-59/doc/tooltip | 1 + examples/step-59/step-59.cc | 1369 ++++++++++++++++++++++++++++++ 7 files changed, 1541 insertions(+) create mode 100644 examples/step-59/CMakeLists.txt create mode 100644 examples/step-59/doc/builds-on create mode 100644 examples/step-59/doc/intro.dox create mode 100644 examples/step-59/doc/kind create mode 100644 examples/step-59/doc/results.dox create mode 100644 examples/step-59/doc/tooltip create mode 100644 examples/step-59/step-59.cc diff --git a/examples/step-59/CMakeLists.txt b/examples/step-59/CMakeLists.txt new file mode 100644 index 0000000000..6107425646 --- /dev/null +++ b/examples/step-59/CMakeLists.txt @@ -0,0 +1,52 @@ +## +# CMake script for the step-59 tutorial program: +## + +# Set the name of the project and target: +SET(TARGET "step-59") + +# Declare all source files the target consists of. Here, this is only +# the one step-X.cc file, but as you expand your project you may wish +# to add other source files as well. If your project becomes much larger, +# you may want to either replace the following statement by something like +# FILE(GLOB_RECURSE TARGET_SRC "source/*.cc") +# FILE(GLOB_RECURSE TARGET_INC "include/*.h") +# SET(TARGET_SRC ${TARGET_SRC} ${TARGET_INC}) +# or switch altogether to the large project CMakeLists.txt file discussed +# in the "CMake in user projects" page accessible from the "User info" +# page of the documentation. +SET(TARGET_SRC + ${TARGET}.cc + ) + +# Usually, you will not need to modify anything beyond this point... + +CMAKE_MINIMUM_REQUIRED(VERSION 2.8.12) + +FIND_PACKAGE(deal.II 9.0.0 QUIET + HINTS ${deal.II_DIR} ${DEAL_II_DIR} ../ ../../ $ENV{DEAL_II_DIR} + ) +IF(NOT ${deal.II_FOUND}) + MESSAGE(FATAL_ERROR "\n" + "*** Could not locate a (sufficiently recent) version of deal.II. ***\n\n" + "You may want to either pass a flag -DDEAL_II_DIR=/path/to/deal.II to cmake\n" + "or set an environment variable \"DEAL_II_DIR\" that contains this path." + ) +ENDIF() + +# +# Are all dependencies fulfilled? +# +IF(NOT DEAL_II_WITH_LAPACK) # keep in one line + MESSAGE(FATAL_ERROR " +Error! This tutorial requires a deal.II library that was configured with the following options: + DEAL_II_WITH_LAPACK = ON +However, the deal.II library found at ${DEAL_II_PATH} was configured with these options + DEAL_II_WITH_LAPACK = ${DEAL_II_WITH_LAPACK} +which conflict with the requirements." + ) +ENDIF() + +DEAL_II_INITIALIZE_CACHED_VARIABLES() +PROJECT(${TARGET}) +DEAL_II_INVOKE_AUTOPILOT() diff --git a/examples/step-59/doc/builds-on b/examples/step-59/doc/builds-on new file mode 100644 index 0000000000..8888d17a80 --- /dev/null +++ b/examples/step-59/doc/builds-on @@ -0,0 +1 @@ +step-37 diff --git a/examples/step-59/doc/intro.dox b/examples/step-59/doc/intro.dox new file mode 100644 index 0000000000..35ea5a1d9e --- /dev/null +++ b/examples/step-59/doc/intro.dox @@ -0,0 +1,18 @@ +
+<i>
+This program was contributed by Katharina Kormann and Martin
+Kronbichler.
+
+This work was partly supported by the German Research Foundation (DFG) through
+the project "High-order discontinuous Galerkin for the exa-scale" (ExaDG)
+within the priority program "Software for Exascale Computing" (SPPEXA).
+</i>
+
+<a name="Intro"></a>
+<h1>Introduction</h1>
+
+
+<h3>The symmetric interior penalty formulation for the Laplacian</h3>
+
+
+<h3>Face integration support in MatrixFree and FEFaceEvaluation</h3>
+
+
+<h3>An approximate block-Jacobi smoother using the fast diagonalization method</h3>
diff --git a/examples/step-59/doc/kind b/examples/step-59/doc/kind new file mode 100644 index 0000000000..c1d9154931 --- /dev/null +++ b/examples/step-59/doc/kind @@ -0,0 +1 @@ +techniques diff --git a/examples/step-59/doc/results.dox b/examples/step-59/doc/results.dox new file mode 100644 index 0000000000..e7cf5f779c --- /dev/null +++ b/examples/step-59/doc/results.dox @@ -0,0 +1,99 @@ +
+<h1>Results</h1>
+
+
+<h3>Program output</h3>
+ +Like in step-37, we evaluate the multigrid solver in terms of run time. In +two space dimensions with elements of degree 3, a possible output could look +as follows: +@code +Running with 12 MPI processes + +Cycle 0 +Number of degrees of freedom: 1024 +Total setup time 0.0246709 s +Time solve (12 iterations) 0.0093796 s +Verification via L2 error: 0.0155167 + +Cycle 1 +Number of degrees of freedom: 4096 +Total setup time 0.0159565 s +Time solve (12 iterations) 0.0100399 s +Verification via L2 error: 0.00130939 + +Cycle 2 +Number of degrees of freedom: 16384 +Total setup time 0.0210655 s +Time solve (12 iterations) 0.0117574 s +Verification via L2 error: 9.22924e-05 + +Cycle 3 +Number of degrees of freedom: 65536 +Total setup time 0.0570049 s +Time solve (12 iterations) 0.0228628 s +Verification via L2 error: 5.99019e-06 + +Cycle 4 +Number of degrees of freedom: 262144 +Total setup time 0.144457 s +Time solve (12 iterations) 0.0545919 s +Verification via L2 error: 3.78568e-07 + +Cycle 5 +Number of degrees of freedom: 1048576 +Total setup time 0.487433 s +Time solve (12 iterations) 0.230212 s +Verification via L2 error: 2.37412e-08 + +Cycle 6 +Number of degrees of freedom: 4194304 +Total setup time 1.87463 s +Time solve (12 iterations) 1.16517 s +Verification via L2 error: 1.48553e-09 +@endcode + +Like in step-37, the number of CG iterations remains constant with increasing +problem size. + +Not much changes if we run the program in three spatial dimensions. Since we +use uniform mesh refinement, we get eight times as many elements and +approximately eight times as many degrees of freedom with each cycle: + +@code +Running with 12 MPI processes + +Cycle 0 +Number of degrees of freedom: 512 +Total setup time 0.0297214 s +Time solve (12 iterations) 0.0137131 s +Verification via L2 error: 1.69792 + +Cycle 1 +Number of degrees of freedom: 4096 +Total setup time 0.0322271 s +Time solve (12 iterations) 0.0120323 s +Verification via L2 error: 0.346758 + +Cycle 2 +Number of degrees of freedom: 32768 +Total setup time 0.0566789 s +Time solve (12 iterations) 0.0226668 s +Verification via L2 error: 0.0231837 + +Cycle 3 +Number of degrees of freedom: 262144 +Total setup time 0.135041 s +Time solve (12 iterations) 0.0597281 s +Verification via L2 error: 0.00198009 + +Cycle 4 +Number of degrees of freedom: 2097152 +Total setup time 0.762947 s +Time solve (12 iterations) 0.502595 s +Verification via L2 error: 0.000140762 + +Cycle 5 +Number of degrees of freedom: 16777216 +Total setup time 5.63233 s +Time solve (12 iterations) 4.77959 s +Verification via L2 error: 9.16255e-06 +@endcode diff --git a/examples/step-59/doc/tooltip b/examples/step-59/doc/tooltip new file mode 100644 index 0000000000..83658ea2c4 --- /dev/null +++ b/examples/step-59/doc/tooltip @@ -0,0 +1 @@ +Matrix-free methods. Multigrid. Fast assembly techniques. diff --git a/examples/step-59/step-59.cc b/examples/step-59/step-59.cc new file mode 100644 index 0000000000..248cc96ec9 --- /dev/null +++ b/examples/step-59/step-59.cc @@ -0,0 +1,1369 @@ +/* --------------------------------------------------------------------- + * + * Copyright (C) 2018 by the deal.II authors + * + * This file is part of the deal.II library. + * + * The deal.II library is free software; you can use it, redistribute + * it, and/or modify it under the terms of the GNU Lesser General + * Public License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * The full text of the license can be found in the file LICENSE at + * the top level of the deal.II distribution. + * + * --------------------------------------------------------------------- + + * + * Authors: Katharina Kormann, Martin Kronbichler, 2018 + */ + + +// The include files are essentially the same as in step-37, with the +// exception of the finite element class FE_DGQHermite instead of FE_Q. All +// functionality for matrix-free computations on face integrals is already +// contained in `fe_evaluation.h`. +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include +#include + + +namespace Step59 +{ + using namespace dealii; + + // As in step-37, we collect the dimension and polynomial degree as + // constants here at the top of the program for simplicity. As opposed to + // step-37, we choose a really high order method this time with degree 8 + // where any implementation not using sum factorization would become + // prohibitively slow compared to the implementation with MatrixFree which + // provides an efficiency that is essentially the same as at degrees two or + // three. Furthermore, all classes in this tutorial program are templated, + // so it would be easy to select the degree at run time from an input file + // or a command-line argument by adding instantiations of the appropriate + // degrees in the `main()` function. + + const unsigned int degree_finite_element = 8; + const unsigned int dimension = 3; + + // @sect3{Equation data} + + // In analogy to step-7, we define an analytic solution that we try to + // reproduce with our discretization. Since the aim of this tutorial is to + // show matrix-free methods, we choose one of the simplest possibilities, + // namely a cosine function whose derivatives are simple enough for us to + // compute analytically. Further down, the wave number 2.4 we select here + // will be matched with the domain extent in $x$-direction that is 2.5, such + // that we obtain a periodic solution at $x = 2.5$ including $6pi$ or three + // full wave revolutions in the cosine. The first function defines the + // solution and its gradient for expressing the analytic solution for the + // Dirichlet and Neumann boundary conditions, respectively. Furthermore, a + // class representing the negative Laplacian of the solution is used to + // represent the right hand side (forcing) function that we use to match the + // given analytic solution in the discretized version (manufactured + // solution). 
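+  // Before defining these classes, it is worth convincing ourselves of the
+  // manufactured-solution relation: each cosine factor of
+  // $u(\mathbf{x}) = \prod_{d} \cos(2.4\pi x_d)$ contributes a factor
+  // $(2.4\pi)^2$ under the negative Laplacian, so
+  // $-\Delta u = \mathrm{dim}\,(2.4\pi)^2\, u$, which is exactly what the
+  // RightHandSide class below returns. The following self-contained sketch
+  // (the helper is our own and is not used by the program) verifies this
+  // relation against a second-order finite difference:
+
+  template <int dim>
+  bool verify_manufactured_solution(const Point<dim> &p)
+  {
+    const double arg = numbers::PI * 2.4;
+    const auto u = [arg](const Point<dim> &x) {
+      double val = 1.;
+      for (unsigned int d = 0; d < dim; ++d)
+        val *= std::cos(arg * x[d]);
+      return val;
+    };
+
+    // Central second differences in each coordinate direction approximate
+    // the negative Laplacian up to O(h^2)
+    const double h = 1e-4;
+    double minus_laplacian = 0.;
+    for (unsigned int d = 0; d < dim; ++d)
+      {
+        Point<dim> x_plus = p, x_minus = p;
+        x_plus[d] += h;
+        x_minus[d] -= h;
+        minus_laplacian -= (u(x_plus) - 2. * u(p) + u(x_minus)) / (h * h);
+      }
+
+    // Compare against the analytic value dim * (2.4*pi)^2 * u(p)
+    return std::abs(minus_laplacian - dim * arg * arg * u(p)) <
+           1e-4 * (1. + std::abs(minus_laplacian));
+  }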
+ + template + class Solution : public Function + { + public: + Solution () : Function() {} + + virtual double value (const Point &p, + const unsigned int = 0) const override final + { + double val = 1.; + for (unsigned int d=0; d gradient (const Point &p, + const unsigned int = 0) const override final + { + const double arg = numbers::PI * 2.4; + Tensor<1,dim> grad; + for (unsigned int d=0; d + class RightHandSide : public Function + { + public: + RightHandSide () : Function() {} + + virtual double value (const Point &p, + const unsigned int = 0) const override final + { + const double arg = numbers::PI * 2.4; + double val = 1.; + for (unsigned int d=0; d + class LaplaceOperator : public Subscriptor + { + public: + typedef number value_type; + + LaplaceOperator () = default; + + void initialize(std::shared_ptr > data); + + void clear(); + + types::global_dof_index m() const; + + void initialize_dof_vector(LinearAlgebra::distributed::Vector &vec) const; + + std::shared_ptr > + get_matrix_free() const; + + void vmult(LinearAlgebra::distributed::Vector &dst, + const LinearAlgebra::distributed::Vector &src) const; + + void Tvmult(LinearAlgebra::distributed::Vector &dst, + const LinearAlgebra::distributed::Vector &src) const; + + number get_penalty_factor() const + { + return 1.0 * fe_degree * (fe_degree+1); + } + + private: + void apply_cell (const MatrixFree &data, + LinearAlgebra::distributed::Vector &dst, + const LinearAlgebra::distributed::Vector &src, + const std::pair &cell_range) const; + + void apply_face (const MatrixFree &data, + LinearAlgebra::distributed::Vector &dst, + const LinearAlgebra::distributed::Vector &src, + const std::pair &face_range) const; + + void apply_boundary (const MatrixFree &data, + LinearAlgebra::distributed::Vector &dst, + const LinearAlgebra::distributed::Vector &src, + const std::pair &face_range) const; + + std::shared_ptr > data; + }; + + + + // The `%PreconditionBlockJacobi` class defines our custom preconditioner for + // this problem. As opposed to step-37 which was based on the matrix + // diagonal, we here compute an approximate inversion of the diagonal blocks + // in the discontinuous Galerkin method by using the so-called fast + // diagonalization method discussed in the introduction. + + template + class PreconditionBlockJacobi + { + public: + typedef number value_type; + + void clear() + { + cell_matrices.clear(); + } + + void initialize(const LaplaceOperator &op); + + void vmult(LinearAlgebra::distributed::Vector &dst, + const LinearAlgebra::distributed::Vector &src) const; + + void Tvmult(LinearAlgebra::distributed::Vector &dst, + const LinearAlgebra::distributed::Vector &src) const + { + vmult(dst, src); + } + + private: + std::shared_ptr > data; + std::vector, fe_degree+1> > cell_matrices; + }; + + + + // This free-standing function is used in both the `LaplaceOperator` and + // `%PreconditionBlockJacobi` classes to adjust the ghost range. This function + // is necessary because some of the vectors that the `vmult()` functions are + // supplied with are not initialized properly with + // `LaplaceOperator::initialize_dof_vector` that includes the correct layout + // of ghost entries, but instead comes from the MGTransferMatrixFree class + // that has no notion on the ghost selection of the matrix-free classes. To + // avoid index confusion, we must adjust the ghost range before actually + // doing something with these vectors. 
Since the vectors are kept around in + // the multigrid smoother and transfer classes, a vector whose ghost range + // has once been adjusted will remain in this state throughout the lifetime + // of the object, so we can use a shortcut at the start of the function to + // see whether the partitioner object of the distributed vector, which is + // stored as a shared pointer, is the same as the layout expected by + // MatrixFree, which is stored in a data structure accessed by + // MatrixFree::get_dof_info(0), where the 0 indicates the DoFHandler number + // from which this was extracted; we only use a single DoFHandler in + // MatrixFree, so the only valid number is 0 here. + + template + void + adjust_ghost_range_if_necessary (const MatrixFree &data, + const LinearAlgebra::distributed::Vector &vec) + { + if (vec.get_partitioner().get() == data.get_dof_info(0).vector_partitioner.get()) + return; + + LinearAlgebra::distributed::Vector copy_vec(vec); + const_cast &>(vec). + reinit(data.get_dof_info(0).vector_partitioner); + const_cast &>(vec). + copy_locally_owned_data_from(copy_vec); + } + + + + // The next five functions to clear and initialize the `LaplaceOperator` + // class, to return the shared pointer holding the MatrixFree data + // container, as well as the correct initialization of the vector and + // operator sizes are the same as in step-37 or rather + // MatrixFreeOperators::Base. + template + void + LaplaceOperator::clear () + { + data.reset(); + } + + + + template + void + LaplaceOperator + ::initialize (std::shared_ptr > data) + { + this->data = data; + } + + + + template + std::shared_ptr > + LaplaceOperator + ::get_matrix_free () const + { + return data; + } + + + + template + void + LaplaceOperator + ::initialize_dof_vector (LinearAlgebra::distributed::Vector &vec) const + { + data->initialize_dof_vector(vec); + } + + + + template + types::global_dof_index + LaplaceOperator::m() const + { + Assert(data.get() != nullptr, ExcNotInitialized()); + return data->get_dof_handler().n_dofs(); + } + + + + // This function implements the action of the LaplaceOperator on a vector + // `src` and stores the result in the vector `dst`. When compared to + // step-37, there are four new features present in this call. + // + // The first new feature is the `adjust_ghost_range_if_necessary` function + // mentioned above that is needed to fit the vectors to the layout expected + // by FEEvaluation and FEFaceEvaluation in the cell and face functions. + // + // The second new feature is the fact that we do not implement a + // `vmult_add()` function as we did in step-37 (through the virtual function + // MatrixFreeOperators::Base::vmult_add()), but directly implement a + // `vmult()` functionality. Since both cell and face integrals will sum into + // the destination vector, we must of course zero the vector somewhere. For + // DG elements, we are given two options – one is to use + // FEEvaluation::set_dof_values() instead of + // FEEvaluation::distribute_local_to_global() in the `apply_cell` function + // below. This works because the loop layout in MatrixFree is such that cell + // integrals always touch a given vector entry before the face + // integrals. However, this really only works for fully discontinuous bases + // where every cell has its own degrees of freedom, without any sharing with + // neighboring results. An alternative setup, the one chosen here, is to let + // the MatrixFree::loop() take care of zeroing the vector. 
This can be + // thought of as simply calling `dst = 0;` somewhere in the code. The + // implementation is more involved for supported vectors such as + // `LinearAlgebra::distributed::Vector`, because we aim to not zero the + // whole vector at once. Doing the zero operation on a small enough pieces + // of a few thousands of vector entries has the advantage that the vector + // entries that get zeroed remain in caches before they are accessed again + // in FEEvaluation::distribute_local_to_global() and + // FEFaceEvaluation::distribute_local_to_global(). Since matrix-free + // operator evaluation is really fast, just zeroing a large vector can + // amount to up to a 25% of the operator evaluation time, and we obviously + // want to avoid this cost. This option of zeroing the vector is also + // available for MatrixFree::cell_loop and for continuous bases, even though + // it was not used in the step-37 or step-48 tutorial programs. + // + // The third new feature is the way we provide the functions to compute on + // cells, inner faces, and boundary faces: The class MatrixFree has a + // function called `loop` that takes three function pointers to the three + // cases, allowing to separate the implementations of different things. As + // explained in step-37, these function pointers can be `std::function` + // objects or member functions of a class. In this case, we use pointers to + // member functions. + // + // The final new feature are the last two arguments of type + // MatrixFree::DataAccessOnFaces that can be given to + // MatrixFree::loop(). This class passes the type of data access for face + // integrals to the MPI data exchange routines + // LinearAlgebra::distributed::Vector::update_ghost_values() and + // LinearAlgebra::distributed::Vector::compress() of the parallel + // vectors. The purpose is to not send all degrees of freedom of a + // neighboring element, but to reduce the amount of data to what is really + // needed for the computations at hand. The data exchange is a real + // bottleneck in particular for high-degree DG methods, therefore a more + // restrictive way of exchange is often beneficial. The enum field + // MatrixFree::DataAccessOnFaces can take the value `none`, which means that + // no face integrals at all are done, which would be analogous to + // MatrixFree::cell_loop(), the value `values` meaning that only shape + // function values (but no derivatives) are used on faces, and the value + // `gradients` when also first derivatives on faces are accessed besides the + // values. A value `unspecified` means that all degrees of freedom will be + // exchanged for the faces that are located at the processor boundaries and + // designated to be worked on at the local processor. + // + // To see how the data can be reduced, think of the case of the nodal + // element FE_DGQ with node points on the element surface, where only + // $(k+1)^{d-1}$ degrees of freedom contribute to the values on a face for + // polynomial degree $k$ in $d$ space dimensions, out of the $(k+1)^d$ + // degrees of freedom of a cell. A similar reduction is also possible for + // the interior penalty method that evaluates values and first derivatives + // on the faces. When using a Hermite-like basis in 1D, only up to two basis + // functions contribute to the value and derivative. The class FE_DGQHermite + // implements a tensor product of this concept, as discussed in the + // introduction. 
Thus, only $2(k+1)^{d-1}$ degrees of freedom must be + // exchanged for each face, which is a clear win once $k$ gets larger than + // four or five. Note that this reduced exchange of FE_DGQHermite is valid + // also on meshes with curved boundaries, as the derivatives are taken on + // the reference element, whereas the geometry only mixes them on the + // inside. Thus, this is different from the attempt to obtain $C^1$ + // continuity with continuous Hermite-type shape functions where the + // non-Cartesian case changes the picture significantly. Obviously, on + // non-Cartesian meshes the derivatives also include tangential derivatives + // of shape functions beyond the normal derivative, but those only need the + // function values on the element surface, too. Should the element not + // provide any compression, the loop automatically exchanges all entries for + // the affected cells. + + template + void + LaplaceOperator + ::vmult (LinearAlgebra::distributed::Vector &dst, + const LinearAlgebra::distributed::Vector &src) const + { + adjust_ghost_range_if_necessary(*data, dst); + adjust_ghost_range_if_necessary(*data, src); + data->loop (&LaplaceOperator::apply_cell, + &LaplaceOperator::apply_face, + &LaplaceOperator::apply_boundary, + this, dst, src, /*zero_dst =*/ true, + MatrixFree::DataAccessOnFaces::gradients, + MatrixFree::DataAccessOnFaces::gradients); + } + + + + // Since the Laplacian is symmetric, the `Tvmult()` (needed by the multigrid + // smoother interfaces) operation is simply forwarded to the `vmult()` case. + + template + void + LaplaceOperator + ::Tvmult (LinearAlgebra::distributed::Vector &dst, + const LinearAlgebra::distributed::Vector &src) const + { + vmult(dst, src); + } + + + + // The cell operation is very similar to step-37. We do not use a + // coefficient here, though. The second difference is that we replaced the + // two steps of FEEvaluation::read_dof_values() followed by + // FEEvaluation::evaluate() by a single function call + // FEEvaluation::gather_evaluate() which internally calls the sequence of + // the two individual methods. Likewise, FEEvaluation::integrate_scatter() + // implements the sequence of FEEvaluation::integrate() followed by + // FEEvaluation::distribute_local_to_global(). In this case, these new + // functions merely save two lines of code. However, we use them for the + // analogy with FEFaceEvaluation where they are more important as + // explained below. + + template + void + LaplaceOperator + ::apply_cell (const MatrixFree &data, + LinearAlgebra::distributed::Vector &dst, + const LinearAlgebra::distributed::Vector &src, + const std::pair &cell_range) const + { + FEEvaluation phi(data); + for (unsigned int cell=cell_range.first; cell + void + LaplaceOperator + ::apply_face (const MatrixFree &data, + LinearAlgebra::distributed::Vector &dst, + const LinearAlgebra::distributed::Vector &src, + const std::pair &face_range) const + { + FEFaceEvaluation phi_inner(data, true); + FEFaceEvaluation phi_outer(data, false); + for (unsigned int face=face_range.first; face inverse_length_normal_to_face + = 0.5 * (std::abs((phi_inner.get_normal_vector(0)* + phi_inner.inverse_jacobian(0))[dim-1]) + + + std::abs((phi_outer.get_normal_vector(0)* + phi_outer.inverse_jacobian(0))[dim-1])); + const VectorizedArray sigma = inverse_length_normal_to_face * get_penalty_factor(); + + // In the loop over the quadrature points, we eventually compute all + // contributions to the interior penalty scheme. 
According to the + // formulas in the introduction, the value of the test function gets + // multiplied by the difference of the jump in the solution times the + // penalty parameter and the average of the normal derivative in real + // space. Since the two evaluators for interior and exterior sides get + // different signs due to the jump, we pass the result with a + // different sign here. The normal derivative of the test function + // gets multiplied by the negative jump in the solution between the + // interior and exterior side. This term, coined adjoint consistency + // term, must also include the factor of $\frac{1}{2}$ in the code in + // accordance with its relation to the primal consistency term that + // gets the factor of one half due to the average in the test function + // slot. + for (unsigned int q=0; q solution_jump + = (phi_inner.get_value(q)-phi_outer.get_value(q)); + const VectorizedArray average_normal_derivative + = (phi_inner.get_normal_derivative(q) + + phi_outer.get_normal_derivative(q)) * number(0.5); + const VectorizedArray test_by_value + = solution_jump * sigma - average_normal_derivative; + + phi_inner.submit_value(test_by_value, q); + phi_outer.submit_value(-test_by_value, q); + + phi_inner.submit_normal_derivative(-solution_jump * number(0.5), q); + phi_outer.submit_normal_derivative(-solution_jump * number(0.5), q); + } + + // Once we are done with the loop over quadrature points, we can do + // the sum factorization operations for the integration loops on faces + // and sum the results into the result vector, using the + // `integrate_scatter` function. The name `scatter` reflects the + // distribution of the vector data into scattered positions in the + // vector using the same pattern as in `gather_evaluate`. Like before, + // the combined integrate + write operation allows us to reduce the + // data access. + phi_inner.integrate_scatter (true, true, dst); + phi_outer.integrate_scatter (true, true, dst); + } + } + + + + // The boundary face function follows by and large the interior face + // function. The only difference is the fact that we do not have a separate + // FEFaceEvaluation object that provides us with exterior values $u^+$, but + // we must define them from the boundary conditions and interior values + // $u^-$. As explained in the introduction, we use $u^+ = -u^- + 2 + // g_\text{D}$ and $\mathbf{n}^-\cdot \nabla u^+ = \mathbf{n}^-\cdot \nabla + // u^-$ on Dirichlet boundaries and $u^+=u^-$ and $\mathbf{n}^-\cdot \nabla + // u^+ = -\mathbf{n}^-\cdot \nabla u^- + 2 g_\text{N}$ on Neumann + // boundaries. Since this operation implements the homogeneous part, i.e., + // the matrix-vector product, we must neglect the boundary functions + // $g_\text{D}$ and $g_\text{N}$ here, and added them to the right hand side + // in `LaplaceProblem::compute_rhs()`. Note that due to extension of the + // solution $u^-$ to the exterior via $u^+$, we can keep all factors $0.5$ + // the same as in the inner face function, see also the discussion in + // step-39. + // + // There is one catch at this point: The implementation below uses a boolean + // variable `is_dirichlet` to switch between the Dirichlet and the Neumann + // cases. However, we solve a problem where we also want to impose periodic + // boundary conditions on some boundaries, namely along those in the $x$ + // direction. One might wonder how those conditions should be handled + // here. 
The answer is that MatrixFree automatically treats periodic + // boundaries as what they are technically, namely an inner face where the + // solution values of two adjacent cells meet and must be treated by proper + // numerical fluxes. Thus, all the faces on the periodic boundaries will + // appear in the `apply_face()` function and not in this one. + + template + void + LaplaceOperator + ::apply_boundary (const MatrixFree &data, + LinearAlgebra::distributed::Vector &dst, + const LinearAlgebra::distributed::Vector &src, + const std::pair &face_range) const + { + FEFaceEvaluation phi_inner(data, true); + for (unsigned int face=face_range.first; face inverse_length_normal_to_face + = std::abs((phi_inner.get_normal_vector(0)* + phi_inner.inverse_jacobian(0))[dim-1]); + const VectorizedArray sigma = inverse_length_normal_to_face * get_penalty_factor(); + + const bool is_dirichlet = (data.get_boundary_id(face) == 0); + + for (unsigned int q=0; q u_inner = phi_inner.get_value(q); + const VectorizedArray u_outer = is_dirichlet ? -u_inner : u_inner; + const VectorizedArray normal_derivative_inner = phi_inner.get_normal_derivative(q); + const VectorizedArray normal_derivative_outer + = is_dirichlet ? normal_derivative_inner : -normal_derivative_inner; + const VectorizedArray solution_jump = (u_inner - u_outer); + const VectorizedArray average_normal_derivative + = (normal_derivative_inner + normal_derivative_outer) * number(0.5); + const VectorizedArray test_by_value + = solution_jump * sigma - average_normal_derivative; + phi_inner.submit_normal_derivative(-solution_jump * number(0.5), q); + phi_inner.submit_value(test_by_value, q); + } + phi_inner.integrate_scatter (true, true, dst); + } + } + + + + // Next we turn to the preconditioner initialization. As explained in the + // introduction, we want to construct an (approximate) inverse of the cell + // matrices from a product of 1D mass and Laplace matrices. Our first task + // is to compute the 1D matrices, which we do by first creating a 1D finite + // element. Instead of anticipating FE_DGQHermite<1> here, we get the finite + // element's name from DoFHandler, replace the argument (2 or 3) by 1 + // to create a 1D name, and construct the 1D element by using FETools. + + template + void + PreconditionBlockJacobi + ::initialize (const LaplaceOperator &op) + { + data = op.get_matrix_free(); + + std::string name = data->get_dof_handler().get_fe().get_name(); + name.replace(name.find('<')+1, 1, "1"); + std::unique_ptr > fe_1d = FETools::get_fe_by_name<1>(name); + + // As for computing the 1D matrices on the unit element, we simply write + // down what a typical assembly procedure over rows and columns of the + // matrix as well as the quadrature points would do. We select the same + // Laplace matrices once and for all using the coefficients 0.5 for + // interior faces (but possibly scaled differently in different directions + // as a result of the mesh). Thus, we make a slight mistake at the + // Dirichlet boundary (where the correct factor would be 1 for the + // derivative terms and 2 for the penalty term, see step-39) or at the + // Neumann boundary where the factor should be zero. Since we only use + // this class as a smoother inside a multigrid scheme, this error is not + // going to have any significant effect and merely affects smoothing + // quality. 
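+    // To restate the fast diagonalization method in formulas before we use
+    // it: let $S$ collect the generalized eigenvectors of the 1D problem
+    // $A_\mathrm{1d} s = \lambda M_\mathrm{1d} s$, normalized such that
+    // $S^\mathrm{T} M_\mathrm{1d} S = I$. Then the 2D cell matrix
+    // $L = A_\mathrm{1d} \otimes M_\mathrm{1d} + M_\mathrm{1d} \otimes A_\mathrm{1d}$
+    // admits the inverse
+    // $L^{-1} = (S \otimes S)(\Lambda \otimes I + I \otimes \Lambda)^{-1}
+    // (S^\mathrm{T} \otimes S^\mathrm{T})$,
+    // where $\Lambda$ is the diagonal matrix of generalized eigenvalues; the
+    // analogous formula with three Kronecker factors holds in 3D. This is
+    // what TensorProductMatrixSymmetricSum evaluates once we hand it the 1D
+    // mass and Laplace matrices assembled next.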
+ const unsigned int N = fe_degree+1; + FullMatrix laplace_unscaled(N, N); + std::array >,dim> mass_matrices; + std::array >,dim> laplace_matrices; + for (unsigned int d=0; d quadrature(N); + for (unsigned int i=0; ishape_value(i, quadrature.point(q)) * + fe_1d->shape_value(j, quadrature.point(q)) + )* quadrature.weight(q); + sum_laplace += (fe_1d->shape_grad(i, quadrature.point(q))[0] * + fe_1d->shape_grad(j, quadrature.point(q))[0] + )* quadrature.weight(q); + } + for (unsigned int d=0; dshape_value(i, Point<1>()) * + fe_1d->shape_value(j, Point<1>()) * op.get_penalty_factor() + + + 0.5*fe_1d->shape_grad(i, Point<1>())[0] * + fe_1d->shape_value(j, Point<1>()) + + + 0.5*fe_1d->shape_grad(j, Point<1>())[0] * + fe_1d->shape_value(i, Point<1>())); + + sum_laplace += (1.*fe_1d->shape_value(i, Point<1>(1.0)) * + fe_1d->shape_value(j, Point<1>(1.0)) * op.get_penalty_factor() + - + 0.5*fe_1d->shape_grad(i, Point<1>(1.0))[0] * + fe_1d->shape_value(j, Point<1>(1.0)) + - + 0.5*fe_1d->shape_grad(j, Point<1>(1.0))[0] * + fe_1d->shape_value(i, Point<1>(1.0))); + + laplace_unscaled(i,j) = sum_laplace; + } + + // Next, we go through the cells and pass the scaled matrices to + // TensorProductMatrixSymmetricSum to actually compute the generalized + // eigenvalue problem for representing the inverse: Since the matrix + // approximation is constructed as $A\otimes M + M\otimes A$ and the + // weights are constant for each element, we can apply all weights on the + // Laplace matrix and simply keep the mass matrices unscaled. In the loop + // over cells, we want to make use of the geometry compression provided by + // the MatrixFree class and check if the current geometry is the same as + // on the last cell batch, in which case there is nothing to do. This + // compression can be accessed by + // FEEvaluation::get_mapping_data_index_offset() once `reinit()` has been + // called. + // + // Once we have accessed the inverse Jacobian through the FEEvaluation + // access function (we take the one for the zeroth quadrature point as + // they should be the same on all quadrature points for a Cartesian cell), + // we check that it is diagonal and then extract the determinant of the + // original Jacobian, i.e., the inverse of the determinant of the inverse + // Jacobian, and set the weight as $\text{det}(J) / h_d^2$ according to + // the 1D Laplacian times $d-1$ copies of the mass matrix. + cell_matrices.clear(); + FEEvaluation phi(*data); + unsigned int old_mapping_data_index = numbers::invalid_unsigned_int; + for (unsigned int cell=0; celln_cell_batches(); ++cell) + { + phi.reinit(cell); + + if (phi.get_mapping_data_index_offset() == + old_mapping_data_index) + continue; + + Tensor<2,dim,VectorizedArray> inverse_jacobian + = phi.inverse_jacobian(0); + + for (unsigned int d=0; d::n_array_elements; ++v) + AssertThrow(inverse_jacobian[d][e][v] == 0., + ExcNotImplemented()); + + VectorizedArray jacobian_determinant = inverse_jacobian[0][0]; + for (unsigned int e=1; e scaling_factor + = inverse_jacobian[d][d] * inverse_jacobian[d][d] * jacobian_determinant; + + // Once we know the factor by which we should scale the Laplace + // matrix, we apply this weight to the unscaled DG Laplace matrix + // and send the array to the class TensorProductMatrixSymmetricSum + // for computing the generalized eigenvalue problem mentioned in + // the introduction. 
+ + for (unsigned int i=0; i + void + PreconditionBlockJacobi + ::vmult (LinearAlgebra::distributed::Vector &dst, + const LinearAlgebra::distributed::Vector &src) const + { + adjust_ghost_range_if_necessary(*data, dst); + adjust_ghost_range_if_necessary(*data, src); + + FEEvaluation phi(*data); + for (unsigned int cell=0; celln_cell_batches(); ++cell) + { + phi.reinit(cell); + phi.read_dof_values(src); + cell_matrices[phi.get_mapping_data_index_offset()] + .apply_inverse(ArrayView> + (phi.begin_dof_values(), phi.dofs_per_cell), + ArrayView> + (phi.begin_dof_values(), phi.dofs_per_cell)); + phi.set_dof_values(dst); + } + } + + + + // The definition of the LaplaceProblem class is very similar to + // step-37. One difference is the fact that we add the element degree as a + // template argument to the class, which would allow us to more easily + // include more than one degree in the same program by creating different + // instances in the `main()` function. The second difference is the + // selection of the element, FE_DGQHermite, which is specialized for this + // kind of equations. + + template + class LaplaceProblem + { + public: + LaplaceProblem (); + void run (); + + private: + void setup_system (); + void compute_rhs (); + void solve (); + void analyze_results () const; + +#ifdef DEAL_II_WITH_P4EST + parallel::distributed::Triangulation triangulation; +#else + Triangulation triangulation; +#endif + + FE_DGQHermite fe; + DoFHandler dof_handler; + + typedef LaplaceOperator SystemMatrixType; + SystemMatrixType system_matrix; + + typedef LaplaceOperator LevelMatrixType; + MGLevelObject mg_matrices; + + LinearAlgebra::distributed::Vector solution; + LinearAlgebra::distributed::Vector system_rhs; + + double setup_time; + ConditionalOStream pcout; + ConditionalOStream time_details; + }; + + + + template + LaplaceProblem::LaplaceProblem () + : +#ifdef DEAL_II_WITH_P4EST + triangulation(MPI_COMM_WORLD, + Triangulation::limit_level_difference_at_vertices, + parallel::distributed::Triangulation::construct_multigrid_hierarchy), +#else + triangulation (Triangulation::limit_level_difference_at_vertices), +#endif + fe (fe_degree), + dof_handler (triangulation), + setup_time(0.), + pcout (std::cout, Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0), + time_details (std::cout, false && + Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0) + {} + + + + // The setup function differs in two aspects from step-37. The first is that + // we do not need to interpolate any constraints for the discontinuous + // ansatz space, and simply pass a dummy ConstraintMatrix object into + // Matrixfree::reinit(). The second change arises because we need to tell + // MatrixFree to also initialize the data structures for faces. We do this + // by setting update flags for the inner and boundary faces, + // respectively. On the boundary faces, we need both the function values, + // their gradients, JxW values (for integration), the normal vectors, and + // quadrature points (for the evaluation of the boundary conditions), + // whereas we only need shape function values, gradients, JxW values, and + // normal vectors for interior faces. The face data structures in MatrixFree + // are always built as soon as one of `mapping_update_flags_inner_faces` or + // `mapping_update_flags_boundary_faces` are different from the default + // value `update_default` of UpdateFlags. 
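+  // In its minimal form, the switch that turns on the face data structures
+  // looks as follows (a sketch; the full set of flags used by this program
+  // appears in the function below):
+  //
+  // @code
+  // typename MatrixFree<dim,double>::AdditionalData additional_data;
+  // additional_data.mapping_update_flags_inner_faces =
+  //   update_gradients | update_JxW_values;
+  // @endcode
+  //
+  // Leaving both face fields at `update_default` instead keeps a cell-only
+  // setup as in step-37.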
+ + template + void LaplaceProblem::setup_system () + { + Timer time; + setup_time = 0; + + system_matrix.clear(); + mg_matrices.clear_elements(); + + dof_handler.distribute_dofs (fe); + dof_handler.distribute_mg_dofs (); + + pcout << "Number of degrees of freedom: " + << dof_handler.n_dofs() + << std::endl; + + setup_time += time.wall_time(); + time_details << "Distribute DoFs " + << time.wall_time() << " s" << std::endl; + time.restart(); + + ConstraintMatrix dummy; + dummy.close(); + + { + typename MatrixFree::AdditionalData additional_data; + additional_data.tasks_parallel_scheme = + MatrixFree::AdditionalData::none; + additional_data.mapping_update_flags = (update_gradients | update_JxW_values + | update_quadrature_points); + additional_data.mapping_update_flags_inner_faces = (update_gradients | + update_JxW_values | + update_normal_vectors); + additional_data.mapping_update_flags_boundary_faces = (update_gradients | + update_JxW_values | + update_normal_vectors | + update_quadrature_points); + std::shared_ptr > + system_mf_storage(new MatrixFree()); + system_mf_storage->reinit (dof_handler, dummy, QGauss<1>(fe.degree+1), + additional_data); + system_matrix.initialize (system_mf_storage); + } + + system_matrix.initialize_dof_vector(solution); + system_matrix.initialize_dof_vector(system_rhs); + + setup_time += time.wall_time(); + time_details << "Setup matrix-free system " + << time.wall_time() << " s" << std::endl; + time.restart(); + + const unsigned int nlevels = triangulation.n_global_levels(); + mg_matrices.resize(0, nlevels-1); + + for (unsigned int level=0; level::AdditionalData additional_data; + additional_data.tasks_parallel_scheme = + MatrixFree::AdditionalData::none; + additional_data.mapping_update_flags = (update_gradients | update_JxW_values); + additional_data.mapping_update_flags_inner_faces = (update_gradients | update_JxW_values); + additional_data.mapping_update_flags_boundary_faces = (update_gradients | update_JxW_values); + additional_data.level_mg_handler = level; + std::shared_ptr > + mg_mf_storage_level(new MatrixFree()); + mg_mf_storage_level->reinit(dof_handler, dummy, + QGauss<1>(fe.degree+1), additional_data); + + mg_matrices[level].initialize(mg_mf_storage_level); + } + setup_time += time.wall_time(); + time_details << "Setup matrix-free levels " + << time.wall_time() << " s" << std::endl; + } + + + + // The computation of the right hand side is a bit more complicated than in + // step-37. The cell term now consists of the negative Laplacian of the + // analytical solution, `RightHandSide`, for which we need to first split up + // the Point of VectorizedArray fields, i.e., a batch of points, into a + // single point by evaluating all lanes in the VectorizedArray + // separately. Remember that the number of lanes depends on the hardware; it + // could be 1 for systems that do not offer vectorization (or where deal.II + // does not have intrinsics), but it could also be 8 or 16 on AVX-512 of + // recent Intel architectures. 
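+  // The per-lane evaluation pattern just described, pulled out into a
+  // self-contained sketch (the helper name is our own; `compute_rhs()`
+  // below inlines the same loop): split a batch of points into scalar
+  // points, one per SIMD lane, evaluate the scalar Function, and gather the
+  // lane results back into a VectorizedArray.
+
+  template <int dim>
+  VectorizedArray<double>
+  evaluate_function_per_lane(const Function<dim> &function,
+                             const Point<dim,VectorizedArray<double>> &point_batch)
+  {
+    VectorizedArray<double> result = VectorizedArray<double>();
+    for (unsigned int v=0; v<VectorizedArray<double>::n_array_elements; ++v)
+      {
+        // Extract lane v into a scalar point
+        Point<dim> single_point;
+        for (unsigned int d=0; d<dim; ++d)
+          single_point[d] = point_batch[d][v];
+        result[v] = function.value(single_point);
+      }
+    return result;
+  }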
+ template + void + LaplaceProblem::compute_rhs () + { + Timer time; + system_rhs = 0; + const MatrixFree &data = *system_matrix.get_matrix_free(); + FEEvaluation phi(data); + RightHandSide rhs_func; + Solution exact_solution; + for (unsigned int cell=0; cell rhs_val = VectorizedArray(); + Point > point_batch = phi.quadrature_point(q); + for (unsigned int v=0; v::n_array_elements; ++v) + { + Point single_point; + for (unsigned int d=0; d phi_face(data, true); + for (unsigned int face=data.n_inner_face_batches(); + face inverse_length_normal_to_face + = std::abs((phi_face.get_normal_vector(0)*phi_face.inverse_jacobian(0))[dim-1]); + const VectorizedArray sigma = inverse_length_normal_to_face * system_matrix.get_penalty_factor(); + + for (unsigned int q=0; q test_value = VectorizedArray(), + test_normal_derivative = VectorizedArray(); + Point > point_batch = phi_face.quadrature_point(q); + + for (unsigned int v=0; v::n_array_elements; ++v) + { + Point single_point; + for (unsigned int d=0; d normal; + for (unsigned int d=0; d + void LaplaceProblem::solve () + { + Timer time; + MGTransferMatrixFree mg_transfer; + mg_transfer.build(dof_handler); + setup_time += time.wall_time(); + time_details << "MG build transfer time " << time.wall_time() << " s\n"; + time.restart(); + + typedef PreconditionChebyshev, + PreconditionBlockJacobi > SmootherType; + mg::SmootherRelaxation > + mg_smoother; + MGLevelObject smoother_data; + smoother_data.resize(0, triangulation.n_global_levels()-1); + for (unsigned int level = 0; level 0) + { + smoother_data[level].smoothing_range = 15.; + smoother_data[level].degree = 2; + smoother_data[level].eig_cg_n_iterations = 10; + } + else + { + smoother_data[0].smoothing_range = 2e-2; + smoother_data[0].degree = numbers::invalid_unsigned_int; + smoother_data[0].eig_cg_n_iterations = mg_matrices[0].m(); + } + smoother_data[level].preconditioner.reset(new PreconditionBlockJacobi()); + smoother_data[level].preconditioner->initialize(mg_matrices[level]); + } + mg_smoother.initialize(mg_matrices, smoother_data); + + MGCoarseGridApplySmoother > mg_coarse; + mg_coarse.initialize(mg_smoother); + + mg::Matrix > mg_matrix(mg_matrices); + + Multigrid > mg(mg_matrix, + mg_coarse, + mg_transfer, + mg_smoother, + mg_smoother); + + PreconditionMG, + MGTransferMatrixFree > + preconditioner(dof_handler, mg, mg_transfer); + + SolverControl solver_control (10000, 1e-12*system_rhs.l2_norm()); + SolverCG > cg (solver_control); + setup_time += time.wall_time(); + time_details << "MG build smoother time " << time.wall_time() << "s\n"; + pcout << "Total setup time " << setup_time + << " s\n"; + + time.reset(); + time.start(); + cg.solve (system_matrix, solution, system_rhs, preconditioner); + + pcout << "Time solve (" + << solver_control.last_step() + << " iterations) " + << time.wall_time() << " s" + << std::endl; + } + + + + // Since we have solved a problem with analytic solution, we want to verify + // the correctness of our implementation by computing the L2 error of the + // numerical result against the analytic solution. 
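+  // In formulas, the parallel error computation assembles
+  // $\|u - u_h\|_{L_2(\Omega)} = \Bigl(\sum_K \|u - u_h\|^2_{L_2(K)}\Bigr)^{1/2}$:
+  // VectorTools::integrate_difference() fills a vector with the cell-wise
+  // norms $\|u - u_h\|_{L_2(K)}$ on the locally owned cells, `norm_sqr()`
+  // sums their squares, Utilities::MPI::sum() accumulates the partial sums
+  // over all ranks, and the square root is taken at the very end.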
+ + template + void LaplaceProblem::analyze_results () const + { + Vector error_per_cell(triangulation.n_active_cells()); + VectorTools::integrate_difference (dof_handler, + solution, + Solution(), + error_per_cell, + QGauss(fe.degree+2), + VectorTools::L2_norm); + pcout << "Verification via L2 error: " + << std::sqrt(Utilities::MPI::sum(error_per_cell.norm_sqr(), MPI_COMM_WORLD)) + << std::endl; + } + + + + // The `run()` function sets up the initial grid and then runs the multigrid + // program in the usual way. As a domain, we choose a rectangle with + // periodic boundary conditions in the $x$-direction, a Dirichlet condition + // on the front face in $y$ direction (i.e., the face with index number 2, + // with boundary id equal to 0), and Neumann conditions on the back face as + // well as the two faces in $z$ direction for the 3D case (with boundary id + // equal to 1). The extent of the domain is a bit different in the $x$ + // direction (where we want to achieve a periodic solution given the + // definition of `Solution`) as compared to the $y$ and $z$ directions. + + template + void LaplaceProblem::run () + { + const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD); + pcout << "Running with " << n_ranks + << " MPI process" << (n_ranks > 1 ? "es" : "") + << ", element " << fe.get_name() + << std::endl << std::endl; + for (unsigned int cycle=0; cycle<9-dim; ++cycle) + { + pcout << "Cycle " << cycle << std::endl; + + if (cycle == 0) + { + Point upper_right; + upper_right[0] = 2.5; + for (unsigned int d=1; d(), upper_right); + triangulation.begin_active()->face(0)->set_boundary_id(10); + triangulation.begin_active()->face(1)->set_boundary_id(11); + triangulation.begin_active()->face(2)->set_boundary_id(0); + for (unsigned int f=3; f::faces_per_cell; ++f) + triangulation.begin_active()->face(f)->set_boundary_id(1); + + std::vector::cell_iterator> > + periodic_faces; + GridTools::collect_periodic_faces(triangulation, 10, 11, 0, periodic_faces); + triangulation.add_periodicity(periodic_faces); + + triangulation.refine_global (6-2*dim); + } + triangulation.refine_global (1); + setup_system (); + compute_rhs (); + solve (); + analyze_results (); + pcout << std::endl; + }; + } +} + + + +// There is nothing unexpected in the `main()` function. We call `MPI_Init()` +// through the `MPI_InitFinalize` class, pass on the two parameters on the +// dimension and the degree set at the top of the file, and run the Laplace +// problem. + +int main (int argc, char *argv[]) +{ + try + { + using namespace Step59; + + Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1); + + LaplaceProblem laplace_problem; + laplace_problem.run (); + } + catch (std::exception &exc) + { + std::cerr << std::endl << std::endl + << "----------------------------------------------------" + << std::endl; + std::cerr << "Exception on processing: " << std::endl + << exc.what() << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; + return 1; + } + catch (...) + { + std::cerr << std::endl << std::endl + << "----------------------------------------------------" + << std::endl; + std::cerr << "Unknown exception!" << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; + return 1; + } + + return 0; +} -- 2.39.5