From 1037b386f6a14853f132e9477eb6bf4ff1e4b251 Mon Sep 17 00:00:00 2001
From: Martin Kronbichler
Date: Mon, 12 Jun 2023 16:41:37 +0200
Subject: [PATCH] Extend documentation of the results section of step-59

---
 doc/doxygen/references.bib                  | 25 +++++++++++
 examples/step-59/doc/intro.dox              |  9 ++--
 examples/step-59/doc/results.dox            | 48 ++++++++++++++++++---
 include/deal.II/lac/tensor_product_matrix.h |  5 +--
 4 files changed, 71 insertions(+), 16 deletions(-)

diff --git a/doc/doxygen/references.bib b/doc/doxygen/references.bib
index 78f904cd4d..85cb424cc7 100644
--- a/doc/doxygen/references.bib
+++ b/doc/doxygen/references.bib
@@ -2181,3 +2181,28 @@
 year={1979},
 publisher={Elsevier}
 }
+
+@article{fehn2020hybrid,
+  doi = {10.1016/j.jcp.2020.109538},
+  url = {https://doi.org/10.1016/j.jcp.2020.109538},
+  year = {2020},
+  publisher = {Elsevier {BV}},
+  volume = {415},
+  pages = {109538},
+  author = {N. Fehn and P. Munch and W. A. Wall and M. Kronbichler},
+  title = {Hybrid multigrid methods for high-order discontinuous {G}alerkin discretizations},
+  journal = {Journal of Computational Physics}
+}
+
+@article{antonietti2016uniform,
+  doi = {10.1007/s10915-016-0259-9},
+  url = {https://doi.org/10.1007/s10915-016-0259-9},
+  year = {2016},
+  publisher = {Springer Science and Business Media {LLC}},
+  volume = {70},
+  number = {2},
+  pages = {608--630},
+  author = {P. F. Antonietti and M. Sarti and M. Verani and L. T. Zikatanov},
+  title = {A Uniform Additive {S}chwarz Preconditioner for High-Order Discontinuous {G}alerkin Approximations of Elliptic Problems},
+  journal = {Journal of Scientific Computing}
+}
diff --git a/examples/step-59/doc/intro.dox b/examples/step-59/doc/intro.dox
index e5523b22bb..f78b7930f5 100644
--- a/examples/step-59/doc/intro.dox
+++ b/examples/step-59/doc/intro.dox
@@ -244,7 +244,7 @@ only holds for Cartesian cells and constant coefficients, which is a pretty
 narrow case, we refrain from pursuing this idea.
 
 Interestingly, the exact inverse of the matrix $L$ can be found through tensor
-products due to a method introduced by Lynch et al. @cite Lynch1964 from 1964,
+products due to a method introduced by Lynch et al. @cite Lynch1964 in 1964,
 @f{align*}{
 L^{-1} &= S_1 \otimes S_0 (\Lambda_1 \otimes I + I \otimes \Lambda_0)^{-1}
 S_1^\mathrm T \otimes S_0^\mathrm T,
@@ -271,9 +271,8 @@ edges of different refinement, as explained in step-39. One thing we do,
 however, is to still wrap our block-Jacobi preconditioner inside
 PreconditionChebyshev. That class relieves us from finding an appropriate
 relaxation parameter (which would be around 0.7 in 2D and 0.5 in 3D for the
-block-Jacobi smoother), and often increases smoothing efficiency a bit over
-plain Jacobi smoothing in that it enables lower the time to solution when
-setting the degree of the Chebyshev polynomial to one or two.
+block-Jacobi smoother), and often increases smoothing efficiency somewhat over
+plain Jacobi smoothing, especially when using several iterations.
 
 Note that the block-Jacobi smoother has an additional benefit: The fast
 diagonalization method can also be interpreted as a change from the
@@ -282,6 +281,6 @@ Laplacian is diagonal. Thus, it cancels the effect of the basis, and we get the
 same iteration counts irrespective of whether we use FE_DGQHermite or FE_DGQ.
 This is in contrast to using the PreconditionChebyshev class with only the
 diagonal (a point-Jacobi scheme), where FE_DGQ and FE_DGQHermite do indeed
-behave differently and FE_DGQ needs 2-5 less iterations than FE_DGQHermite,
+behave differently and FE_DGQ needs fewer iterations than FE_DGQHermite,
 despite the modification made to the Hermite-like shape functions to ensure a
 good conditioning.
diff --git a/examples/step-59/doc/results.dox b/examples/step-59/doc/results.dox
index d85f116caa..cfc396dff1 100644
--- a/examples/step-59/doc/results.dox
+++ b/examples/step-59/doc/results.dox
@@ -333,12 +333,46 @@ the effectiveness of these basis functions.

 <h3> Possibilities for extension </h3>

-As mentioned in the introduction, the fast diagonalization method is tied to a
-Cartesian mesh with constant coefficients. If we wanted to solve
-variable-coefficient problems, we would need to invest a bit more time in the
-design of the smoother parameters by selecting proper generalizations (e.g.,
-approximating the inverse on the nearest box-shaped element).
+As mentioned in the introduction, the fast diagonalization method as realized
+here is tied to a Cartesian mesh with constant coefficients. When dealing with
+meshes that contain deformed cells or with variable coefficients, it is common
+to determine a nearby Cartesian mesh cell as an approximation. This can be
+done with the class TensorProductMatrixSymmetricSumCollection. Here, one can
+insert cell matrices similarly to the PreconditionBlockJacobi::initialize()
+function of this tutorial program; a sketch of this approach is given at the
+end of this section. The benefit of the collection class is that cells on
+which the coefficient of the PDE has the same value can re-use the same
+Laplacian matrix, which reduces the memory consumption for the inverse
+matrices. As compared to the algorithm implemented in this tutorial program,
+one would define the length scales as the distances between opposing
+faces. For continuous elements, the code project "Cache-optimized and
+low-overhead implementations of multigrid smoothers for high-order FEM
+computations" presents this computation. For discontinuous elements with the
+SIP-DG discretization used here, there is currently no infrastructure in
+deal.II to automatically generate the 1D matrices, whereas for continuous
+elements we provide
+TensorProductMatrixCreator::create_laplace_tensor_product_matrix().
 
 Another way of extending the program would be to include support for adaptive
-meshes, for which interface operations at edges of different refinement
-level become necessary, as discussed in step-39.
+meshes. While the classical approach of defining interface operations at edges
+of different refinement level, as discussed in step-39, is one possibility,
+for Poisson-type problems another option is typically more beneficial. Using
+the class MGTransferGlobalCoarsening, which is explained in the step-75
+tutorial program, one can deal with meshes of hanging nodes on all levels. An
+algorithmic improvement can be obtained by combining the discontinuous
+function space with the auxiliary continuous finite element space of the same
+polynomial degree. This idea, introduced by Antonietti et al.
+@cite antonietti2016uniform in 2016, allows making the multigrid convergence
+independent of the penalty parameter. As demonstrated by Fehn et al.
+@cite fehn2020hybrid, this also gives considerably lower iteration counts than
+a multigrid solver directly working on levels with discontinuous function
+spaces. The latter work also proposes p-multigrid techniques and the
+combination with algebraic multigrid coarse spaces as a means to efficiently
+solve Poisson problems with high-order discontinuous Galerkin discretizations
+on complicated geometries, representing the current state of the art for
+simple Poisson-type problems. The class MGTransferGlobalCoarsening provides
+features for each of these three coarsening variants: the
+discontinuous-continuous auxiliary function concept, p-multigrid, and
+traditional h-multigrid. The main ingredient is to define an appropriate
+MGTwoLevelTransfer object and call
+MGTwoLevelTransfer::reinit_geometric_transfer() or
+MGTwoLevelTransfer::reinit_polynomial_transfer(), respectively; a
+corresponding configuration sketch is given at the end of this section.
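+
+The following sketch indicates how such a collection could be filled and used
+inside a block-Jacobi smoother similar to the one of this tutorial. It is only
+a rough outline: the names @p matrix_free, @p fe_degree, @p dim, and @p number
+are assumed to be available as in this program, and the exact signatures of
+the reserve(), insert(), finalize(), and apply_inverse() functions should be
+checked against the documentation of TensorProductMatrixSymmetricSumCollection
+rather than taken from this snippet.
+@code
+// A collection of fast-diagonalization data, one entry per cell batch of the
+// MatrixFree object; identical 1D matrices are stored only once, which is
+// what reduces the memory consumption mentioned above.
+TensorProductMatrixSymmetricSumCollection<dim, VectorizedArray<number>>
+  collection;
+
+collection.reserve(matrix_free.n_cell_batches());
+for (unsigned int cell = 0; cell < matrix_free.n_cell_batches(); ++cell)
+  {
+    std::array<Table<2, VectorizedArray<number>>, dim> mass_matrix;
+    std::array<Table<2, VectorizedArray<number>>, dim> laplace_matrix;
+    // ... fill the 1D matrices from the cell extents and the 1D shape data,
+    // similarly to PreconditionBlockJacobi::initialize() ...
+    collection.insert(cell, mass_matrix, laplace_matrix);
+  }
+collection.finalize();
+
+// Later, in the cell loop of the smoother (compare
+// PreconditionBlockJacobi::vmult()), the stored inverse of each cell batch is
+// applied to the cell-local coefficients:
+for (unsigned int cell = 0; cell < matrix_free.n_cell_batches(); ++cell)
+  {
+    const unsigned int n_dofs = Utilities::pow(fe_degree + 1, dim);
+    AlignedVector<VectorizedArray<number>> local_src(n_dofs);
+    AlignedVector<VectorizedArray<number>> local_dst(n_dofs);
+    // ... gather the source coefficients of the cell into local_src ...
+    collection.apply_inverse(
+      cell,
+      ArrayView<VectorizedArray<number>>(local_dst.begin(), n_dofs),
+      ArrayView<const VectorizedArray<number>>(local_src.begin(), n_dofs));
+    // ... scatter local_dst back into the destination vector ...
+  }
+@endcode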
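+
+Another sketch, again only a rough outline rather than a tested recipe, shows
+how a single polynomial coarsening step could be set up with these classes.
+The DoFHandler objects, the AffineConstraints objects (which may simply be
+empty for DG elements), and the scalar type @p number are assumed to exist in
+the surrounding code, and the exact signatures should be checked against the
+documentation of MGTwoLevelTransfer and MGTransferGlobalCoarsening:
+@code
+using VectorType = LinearAlgebra::distributed::Vector<number>;
+
+// One two-level transfer object per pair of consecutive levels; here a single
+// pair consisting of a coarse level (index 0) and a fine level (index 1).
+MGLevelObject<MGTwoLevelTransfer<dim, VectorType>> transfers(0, 1);
+transfers[1].reinit_polynomial_transfer(dof_handler_fine,
+                                        dof_handler_coarse,
+                                        constraints_fine,
+                                        constraints_coarse);
+
+// Collect the two-level transfers into a transfer operator that can be handed
+// to the Multigrid framework.
+MGTransferGlobalCoarsening<dim, VectorType> transfer(transfers);
+@endcode
+For geometric (h-)coarsening between two meshes of different refinement, one
+would instead call MGTwoLevelTransfer::reinit_geometric_transfer() with the
+DoFHandler objects of the two meshes.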
diff --git a/include/deal.II/lac/tensor_product_matrix.h b/include/deal.II/lac/tensor_product_matrix.h
index 57745af67d..a7e7ef32d2 100644
--- a/include/deal.II/lac/tensor_product_matrix.h
+++ b/include/deal.II/lac/tensor_product_matrix.h
@@ -64,10 +64,7 @@ class FullMatrix;
  * $\text{size}(M)^{3d}$ for setting up the inverse of $L$.
  *
  * Interestingly, the exact inverse of the matrix $L$ can be found through
- * tensor products due to an article by R. E. Lynch, J. R. Rice,
- * D. H. Thomas, Direct solution of partial difference equations by tensor
- * product methods, Numerische Mathematik 6, 185-199 from 1964,
+ * tensor products due to the work of Lynch et al. from 1964 @cite Lynch1964,
  * @f{align*}{
  * L^{-1} &= S_1 \otimes S_0 (\Lambda_1 \otimes I + I \otimes \Lambda_0)^{-1}
  * S_1^\mathrm T \otimes S_0^\mathrm T,
-- 
2.39.5