From 302c9f7c1feafaca84b86af916bf3f648cd27a0e Mon Sep 17 00:00:00 2001 From: Marc Fehling Date: Tue, 11 May 2021 19:37:59 -0600 Subject: [PATCH] Tutorial for parallel hp-FEM. --- doc/doxygen/references.bib | 43 + doc/doxygen/tutorial/tutorial.h.in | 22 +- doc/news/changes/major/20210415step-75 | 4 + examples/step-27/doc/results.dox | 12 +- examples/step-75/CMakeLists.txt | 55 + examples/step-75/doc/builds-on | 1 + examples/step-75/doc/intro.dox | 272 ++++ examples/step-75/doc/kind | 1 + examples/step-75/doc/results.dox | 178 +++ examples/step-75/doc/tooltip | 2 + examples/step-75/step-75.cc | 1537 ++++++++++++++++++++ include/deal.II/base/geometric_utilities.h | 2 + include/deal.II/distributed/cell_weights.h | 2 + include/deal.II/distributed/tria.h | 2 + include/deal.II/grid/grid_generator.h | 2 + 15 files changed, 2126 insertions(+), 9 deletions(-) create mode 100644 doc/news/changes/major/20210415step-75 create mode 100644 examples/step-75/CMakeLists.txt create mode 100644 examples/step-75/doc/builds-on create mode 100644 examples/step-75/doc/intro.dox create mode 100644 examples/step-75/doc/kind create mode 100644 examples/step-75/doc/results.dox create mode 100644 examples/step-75/doc/tooltip create mode 100644 examples/step-75/step-75.cc diff --git a/doc/doxygen/references.bib b/doc/doxygen/references.bib index 4af204cd8e..6995d6dc59 100644 --- a/doc/doxygen/references.bib +++ b/doc/doxygen/references.bib @@ -1050,6 +1050,49 @@ year = {2008}, } +% ------------------------------------ +% Step 75 +% ------------------------------------ + +@book{brenner2008, + title = {The {{Mathematical Theory}} of {{Finite Element Methods}}}, + author = {Brenner, Susanne and Scott, Ridgway}, + year = {2008}, + edition = {3}, + publisher = {{Springer-Verlag}}, + location = {{New York}}, + url = {https://www.springer.com/de/book/9780387759333}, + isbn = {978-0-387-75933-3}, + series = {Texts in {{Applied Mathematics}}} +} + +@article{mitchell2010hpmg, + title = {The \textit{hp}‐multigrid method applied to \textit{hp}‐adaptive refinement of triangular grids}, + author = {Mitchell, William F.}, + year = {2010}, + month = {Apr}, + journal = {Numerical Linear Algebra with Applications}, + volume = {17}, + pages = {211--228}, + issn = {1070-5325}, + doi = {10.1002/nla.700}, + number = {2-3} +} + +@article{mitchell2014hp, + title = {A {{Comparison}} of \textit{hp}-{{Adaptive Strategies}} for {{Elliptic Partial Differential Equations}}}, + author = {Mitchell, William F. and McClain, Marjorie A.}, + year = {2014}, + month = {Oct}, + journal = {ACM Trans. Math. Softw.}, + volume = {41}, + pages = {2/1-39}, + issn = {0098-3500}, + doi = {10.1145/2629459}, + number = {1} +} + + % ------------------------------------ % Step 76 % ------------------------------------ diff --git a/doc/doxygen/tutorial/tutorial.h.in b/doc/doxygen/tutorial/tutorial.h.in index 8e0b09a18a..28c0e8bc4b 100644 --- a/doc/doxygen/tutorial/tutorial.h.in +++ b/doc/doxygen/tutorial/tutorial.h.in @@ -622,6 +622,13 @@ * * * + * step-75 + * Solving the Laplace equation on hp-adaptive meshes with MatrixFree + * methods on thousands of processors. + *
Keywords: parallel::distributed::Triangulation, hp::Refinement, MatrixFree + * + * + * * step-76 * Like step-67, but for demonstrating MPI-3.0 shared-memory features. *
Keywords: MatrixFree, MatrixFree::cell_loop() @@ -740,7 +747,8 @@ * step-50, * step-55, * step-71, - * step-72 + * step-72, + * step-75 * * * @@ -798,7 +806,8 @@ * step-59, * step-67, * step-69, - * step-70 + * step-70, + * step-75 * * * @@ -888,7 +897,8 @@ * * * step-27, - * step-46 + * step-46, + * step-75 * * * @@ -1037,7 +1047,8 @@ * step-50, * step-56, * step-59, - * step-63 + * step-63, + * step-75 * * * @@ -1053,7 +1064,8 @@ * step-42, * step-50, * step-55, - * step-59 + * step-59, + * step-75 * * * diff --git a/doc/news/changes/major/20210415step-75 b/doc/news/changes/major/20210415step-75 new file mode 100644 index 0000000000..8b036135be --- /dev/null +++ b/doc/news/changes/major/20210415step-75 @@ -0,0 +1,4 @@ +New: The step-75 tutorial program shows how to solve a simple Laplace +equation in parallel with hp-adaptive and MatrixFree methods. +
+(Marc Fehling, Peter Munch, Wolfgang Bangerth, 2021/04/15) diff --git a/examples/step-27/doc/results.dox b/examples/step-27/doc/results.dox index 8d43f29d73..2285c905ad 100644 --- a/examples/step-27/doc/results.dox +++ b/examples/step-27/doc/results.dox @@ -247,10 +247,10 @@ adaptation type, of which a few are already implemented in deal.II: The transition from the Fourier strategy to the Legendre one is quite simple: You just need to change the series expansion class and the corresponding smoothness estimation function to be part of the proper namespaces - FESeries::Legendre and SmoothnessEstimator::Legendre. For the theoretical - background of this strategy, consult the general documentation of the - SmoothnessEstimator::Legendre namespace, as well as @cite mavriplis1994hp , - @cite eibner2007hp and @cite davydov2017hp. + FESeries::Legendre and SmoothnessEstimator::Legendre. This strategy is used + in step-75. For the theoretical background of this strategy, consult the + general documentation of the SmoothnessEstimator::Legendre namespace, as well + as @cite mavriplis1994hp , @cite eibner2007hp and @cite davydov2017hp .
  • Refinement history: The last strategy is quite different from the other two. In theory, we know how the error will converge @@ -295,3 +295,7 @@ parallel::distributed::Triangulation classes. If you feel eager to try it, we recommend reading step-18 for the former and step-40 for the latter case first for further background information on the topic, and then come back to this tutorial to try out your newly acquired skills. + +We go one step further in step-75: Here, we combine hp-adapative and +MatrixFree methods in combination with +parallel::distributed::Triangulation objects. diff --git a/examples/step-75/CMakeLists.txt b/examples/step-75/CMakeLists.txt new file mode 100644 index 0000000000..213e541684 --- /dev/null +++ b/examples/step-75/CMakeLists.txt @@ -0,0 +1,55 @@ +## +# CMake script for the step-75 tutorial program: +## + +# Set the name of the project and target: +SET(TARGET "step-75") + +# Declare all source files the target consists of. Here, this is only +# the one step-X.cc file, but as you expand your project you may wish +# to add other source files as well. If your project becomes much larger, +# you may want to either replace the following statement by something like +# FILE(GLOB_RECURSE TARGET_SRC "source/*.cc") +# FILE(GLOB_RECURSE TARGET_INC "include/*.h") +# SET(TARGET_SRC ${TARGET_SRC} ${TARGET_INC}) +# or switch altogether to the large project CMakeLists.txt file discussed +# in the "CMake in user projects" page accessible from the "User info" +# page of the documentation. +SET(TARGET_SRC + ${TARGET}.cc + ) + +# Usually, you will not need to modify anything beyond this point... + +CMAKE_MINIMUM_REQUIRED(VERSION 3.1.0) + +FIND_PACKAGE(deal.II 9.3.0 + HINTS ${deal.II_DIR} ${DEAL_II_DIR} ../ ../../ $ENV{DEAL_II_DIR} + ) +IF(NOT ${deal.II_FOUND}) + MESSAGE(FATAL_ERROR "\n" + "*** Could not locate a (sufficiently recent) version of deal.II. ***\n\n" + "You may want to either pass a flag -DDEAL_II_DIR=/path/to/deal.II to cmake\n" + "or set an environment variable \"DEAL_II_DIR\" that contains this path." + ) +ENDIF() + +# +# Are all dependencies fulfilled? +# +IF(NOT DEAL_II_WITH_P4EST OR NOT DEAL_II_WITH_TRILINOS) # keep in one line + MESSAGE(FATAL_ERROR " +Error! This tutorial requires a deal.II library that was configured with the following options: + DEAL_II_WITH_P4EST = ON + DEAL_II_WITH_TRILINOS = ON +However, the deal.II library found at ${DEAL_II_PATH} was configured with these options + DEAL_II_WITH_P4EST = ${DEAL_II_WITH_P4EST} + DEAL_II_WITH_TRILINOS = ${DEAL_II_WITH_TRILINOS} +which conflict with the requirements." + ) +ENDIF() + +DEAL_II_INITIALIZE_CACHED_VARIABLES() +SET(CLEAN_UP_FILES *.log *.gmv *.gnuplot *.gpl *.eps *.pov *.vtk *.ucd *.d2 *.vtu *.pvtu) +PROJECT(${TARGET}) +DEAL_II_INVOKE_AUTOPILOT() diff --git a/examples/step-75/doc/builds-on b/examples/step-75/doc/builds-on new file mode 100644 index 0000000000..64d53f9f25 --- /dev/null +++ b/examples/step-75/doc/builds-on @@ -0,0 +1 @@ +step-27 step-37 step-40 diff --git a/examples/step-75/doc/intro.dox b/examples/step-75/doc/intro.dox new file mode 100644 index 0000000000..b45d906520 --- /dev/null +++ b/examples/step-75/doc/intro.dox @@ -0,0 +1,272 @@ +
    + +This program was contributed by Marc Fehling, Peter Munch and +Wolfgang Bangerth. +
    +This material is based upon work partly supported by the National +Science Foundation under Award No. DMS-1821210, EAR-1550901, and +OAC-1835673. Any opinions, findings, and conclusions or recommendations +expressed in this publication are those of the authors and do not +necessarily reflect the views of the National Science Foundation. +
    +Peter Munch would like to thank Timo Heister, Martin Kronbichler, and +Laura Prieto Saavedra for many very interesting discussions. +
    + + +@note As a prerequisite of this program, you need to have the p4est +library and the Trilinos library installed. The installation of deal.II +together with these additional libraries is described in the README file. + + + + +

    Introduction

In the finite element context, more degrees of freedom usually yield a
more accurate solution, but they also require more computational effort.

Throughout previous tutorials, we found ways to distribute degrees of
freedom effectively by aligning the grid resolution locally with the
complexity of the solution (adaptive mesh refinement, step-6). This
approach is particularly effective if we not only adapt the grid, but
also locally adjust the polynomial degree of the associated finite
element on each cell (hp-adaptation, step-27).

In addition, assigning more processes to run your program simultaneously
helps to tackle the computational workload in less time. Depending on
the hardware architecture of your machine, your program must either be
prepared for the case that all processes have access to the same memory
(shared memory, step-18), or that processes are hosted on several
independent nodes (distributed memory, step-40).

In the high-performance computing segment, memory access turns out to be
the current bottleneck on supercomputers. We can avoid storing matrices
altogether by computing the effect of matrix-vector products on the fly
with MatrixFree methods (step-37). They can be used for geometric
multigrid methods (step-50) and also for polynomial multigrid methods to
speed up solving the system of equations tremendously.

This tutorial combines all of these techniques and presents a
state-of-the-art way to solve a simple Laplace problem: utilizing both
hp-adaptation and matrix-free hybrid multigrid methods on machines with
distributed memory.

    Load balancing

For parallel applications in FEM, we partition the grid into
subdomains (also known as domain decomposition), which are assigned to
processes. This partitioning happens on active cells in deal.II as
demonstrated in step-40. There, each cell has the same finite element
and the same number of degrees of freedom assigned, and thus
approximately the same workload. To balance the workload among all
processes, we have to balance the number of cells on all participating
processes.

With hp-adaptive methods, this is no longer the case: the finite element
type may vary from cell to cell and consequently also the number of
degrees of freedom. Matching the number of cells does not yield a
balanced workload. In the matrix-free context, the workload can be
assumed to be proportional to the number of degrees of freedom of each
process, since in the best case only the source and the destination
vector have to be loaded.

One could balance the workload by assigning weights to every cell which
are proportional to the number of degrees of freedom, and balance the
sum of all weights among all processes. Assigning individual weights to
each cell can be realized with the class parallel::CellWeights, which we
will use later.
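To make this concrete, the following sketch shows how such a weighting
could be attached. The numeric values are only illustrative here (the
program later takes them from its parameter struct), and the
parallel::CellWeights object has to be kept alive for as long as the
weights should be applied:

@code
// Attach a weight of the form a * (n_dofs)^b to every cell. The weights
// are consumed automatically whenever the triangulation is
// repartitioned, e.g., during execute_coarsening_and_refinement().
const std::pair<float, float> weighting_parameters = {1e6, 1.};

const parallel::CellWeights<dim> cell_weights(
  dof_handler,
  parallel::CellWeights<dim>::ndofs_weighting(weighting_parameters));
@endcode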

    hp-decision indicators

With hp-adaptive methods, we not only have to decide which cells we want
to refine or coarsen, but we also have the choice of how we want to do
that: either by adjusting the grid resolution or the polynomial degree
of the finite element.

We will again base the decision about which cells to adapt on (a
posteriori) computed error estimates of the current solution, e.g.,
using the KellyErrorEstimator. We will similarly decide how to adapt
with (a posteriori) computed smoothness estimates: large polynomial
degrees work best on smooth parts of the solution, while fine grid
resolutions are favorable on irregular parts. In step-27, we presented a
way to calculate smoothness estimates based on the decay of Fourier
coefficients. Let us take this opportunity to present an alternative
that follows the same idea, but with Legendre coefficients.

We will briefly present the idea of this new technique, but limit its
description to 1D for simplicity. Suppose $u_\text{hp}(x)$ is a finite
element function defined on a cell $K$ as
@f[
u_\text{hp}(x) = \sum c_i \varphi_i(x)
@f]
where each $\varphi_i(x)$ is a shape function.
We can equivalently represent $u_\text{hp}(x)$ in the basis of Legendre
polynomials $P_k$ as
@f[
u_\text{hp}(x) = \sum l_k P_k(x).
@f]
Our goal is to obtain a mapping between the finite element coefficients
$c_i$ and the Legendre coefficients $l_k$. We will accomplish this by
writing the problem as an $L^2$-projection of $u_\text{hp}(x)$ onto the
Legendre basis. Each coefficient $l_k$ can be calculated via
@f[
l_k = \int_K u_\text{hp}(x) P_k(x) dx.
@f]
By construction, the Legendre polynomials are orthogonal under the
$L^2$-inner product on $K$. Additionally, we assume that they have been
normalized, so their inner products can be written as
@f[
\int_K P_i(x) P_j(x) dx = \det(J_K) \, \delta_{ij}
@f]
where $\delta_{ij}$ is the Kronecker delta, and $J_K$ is the Jacobian of
the mapping from $\hat{K}$ to $K$, which (in this tutorial) is assumed
to be constant (i.e., the mapping must be affine).

Hence, combining all these assumptions, the projection matrix for
expressing $u_\text{hp}(x)$ in the Legendre basis is just $\det(J_K) \,
\mathbb{I}$ -- that is, $\det(J_K)$ times the identity matrix. Let $F_K$
be the mapping from the reference cell $\hat{K}$ to $K$. The entries of
the right-hand side of the projection system are, therefore,
@f[
\int_K u_\text{hp}(x) P_k(x) dx
= \det(J_K) \int_{\hat{K}} u_\text{hp}(F_K(\hat{x})) P_k(F_K(\hat{x})) d\hat{x}.
@f]
Recalling the shape function representation of $u_\text{hp}(x)$, we can
write this as $\det(J_K) \, \mathbf{C} \, \mathbf{c}$, where
$\mathbf{C}$ is the change-of-basis matrix with entries
@f[
\int_K P_i(x) \varphi_j(x) dx
= \det(J_K) \int_{\hat{K}} P_i(F_K(\hat{x})) \varphi_j(F_K(\hat{x})) d\hat{x}
= \det(J_K) \int_{\hat{K}} \hat{P}_i(\hat{x}) \hat{\varphi}_j(\hat{x}) d\hat{x}
\dealcoloneq \det(J_K) \, C_{ij}
@f]
so the values of $\mathbf{C}$ can be written independently of
$K$ by factoring $\det(J_K)$ out front after transforming to reference
coordinates. Hence, putting it all together, the projection problem can
be written as
@f[
\det(J_K) \, \mathbb{I} \, \mathbf{l} = \det(J_K) \, \mathbf{C} \, \mathbf{c}
@f]
which can be rewritten as simply
@f[
\mathbf{l} = \mathbf{C} \, \mathbf{c}.
@f]
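To make the change-of-basis matrix a little more tangible, consider a
small worked example that is not part of the actual implementation:
linear elements on the reference interval $\hat{K} = [0,1]$ with shape
functions $\hat{\varphi}_0(\hat{x}) = 1 - \hat{x}$ and
$\hat{\varphi}_1(\hat{x}) = \hat{x}$, together with the (here
$L^2$-normalized, shifted) Legendre polynomials
$\hat{P}_0(\hat{x}) = 1$ and
$\hat{P}_1(\hat{x}) = \sqrt{3} \, (2 \hat{x} - 1)$. Evaluating
$C_{ij} = \int_{\hat{K}} \hat{P}_i(\hat{x}) \hat{\varphi}_j(\hat{x})
d\hat{x}$ yields
@f[
\mathbf{C} =
\begin{pmatrix}
\frac{1}{2} & \frac{1}{2} \\
-\frac{\sqrt{3}}{6} & \frac{\sqrt{3}}{6}
\end{pmatrix}
, \qquad
\mathbf{l} = \mathbf{C} \, \mathbf{c} =
\begin{pmatrix}
\frac{c_0 + c_1}{2} \\
\frac{\sqrt{3}}{6} \, (c_1 - c_0)
\end{pmatrix}
.
@f]
As expected, a constant function ($c_0 = c_1$) has a vanishing
first-order coefficient $l_1$, while its mean value is carried by $l_0$.
Note that the FESeries::Legendre class may use a different normalization
of the Legendre polynomials; only the decay behavior of the coefficients
matters for what follows.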
At this point, we need to emphasize that most finite element
applications use unstructured meshes for which the mapping is almost
always non-affine. Put another way: the assumption that $J_K$ is
constant across the cell is not true for general meshes. Hence, a
correct calculation of $l_k$ requires not only that we calculate the
corresponding transformation matrix $\mathbf{C}$ for every single cell,
but that we also define a set of Legendre-like orthogonal functions on a
cell $K$ which may have an arbitrary and very complex geometry. The
second part, in particular, is computationally very expensive. The
current implementation of the FESeries transformation classes relies on
the simplification resulting from having a constant Jacobian to increase
performance and thus only yields correct results for affine mappings.
The transformation is only used for the purpose of smoothness estimation
to decide on the type of adaptation, which is not a critical component
of a finite element program. Apart from that, this circumstance does not
pose a problem for this tutorial as we only use square-shaped cells.

Eibner and Melenk @cite eibner2007hp argued that a function is analytic,
i.e., representable by a power series, if and only if the absolute
values of the Legendre coefficients decay exponentially with increasing
index $k$:
@f[
\exists C,\sigma > 0 : \quad \forall k \in \mathbb{N}_0 : \quad |l_k|
\leq C \exp\left( - \sigma k \right) .
@f]
The rate of decay $\sigma$ can be interpreted as a measure of the
smoothness of that function. We can obtain it as the slope of a linear
regression fit of the transformation coefficients:
@f[
\ln(|l_k|) \sim \ln(C) - \sigma k .
@f]

We will perform this fit on each cell $K$ to get a local estimate for
the smoothness of the finite element approximation. The decay rate
$\sigma_K$ then acts as the decision indicator for hp-adaptation. For a
finite element on a cell $K$ with a polynomial degree $p$, calculating
the coefficients for $k \leq (p+1)$ proved to be a reasonable choice to
estimate smoothness. You can find a more detailed and
dimension-independent description in @cite fehling2020.

All of the above is already implemented in the FESeries::Legendre class
and the SmoothnessEstimator::Legendre namespace. With the error
estimates and smoothness indicators, we are then left to flag the cells
for actual refinement and coarsening. Some functions from the
parallel::distributed::GridRefinement and hp::Refinement namespaces will
help us with that later.
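Stripped of all the parallel bookkeeping that the actual program needs,
the smoothness estimation interface boils down to a few calls. In the
following sketch, the DoFHandler, triangulation, FECollection, and
solution vector refer to members defined later in this program, while
`legendre` and `smoothness_indicators` are local stand-ins; all optional
parameters are left at their default values:

@code
// Set up the Legendre series expansion once, matched to the
// FECollection in use; the tutorial program stores it as a class member.
FESeries::Legendre<dim> legendre =
  SmoothnessEstimator::Legendre::default_fe_series(fe_collection);

// After solving, estimate the decay rate sigma_K on every locally owned
// cell. Large values indicate a smooth solution and thus favor
// p-adaptation over h-adaptation.
Vector<float> smoothness_indicators(triangulation.n_active_cells());
SmoothnessEstimator::Legendre::coefficient_decay(legendre,
                                                 dof_handler,
                                                 locally_relevant_solution,
                                                 smoothness_indicators);

// Together with the error estimates, these indicators are later handed
// to functions like hp::Refinement::p_adaptivity_fixed_number() to flag
// cells for p-adaptation.
@endcode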

    Hybrid geometric multigrid

Finite element matrices are typically very sparse. Additionally,
hp-adaptive methods correspond to matrices with highly variable numbers
of nonzero entries per row. Some state-of-the-art preconditioners, like
the algebraic multigrid (AMG) ones as used in step-40, behave poorly in
these circumstances.

We will thus rely on a matrix-free hybrid multigrid preconditioner.
step-50 has already demonstrated the superiority of geometric multigrid
methods when combined with the MatrixFree framework. Their application
to hp-adaptive FEM requires some additional work, though, since the
children of a cell might have different polynomial degrees. As a remedy,
we first perform a p-relaxation to linear elements (similar to Mitchell
@cite mitchell2010hpmg) and then perform h-relaxation in the usual
manner. On the coarsest level, we apply an algebraic multigrid solver.
The combination of p-multigrid, h-multigrid, and AMG turns the solver
into a hybrid multigrid solver.

We will create a custom hybrid multigrid preconditioner that meets the
special level requirements described above with the help of the existing
global-coarsening infrastructure, namely the class
MGTransferGlobalCoarsening.
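The two coarsening sequences that make up this level hierarchy can be
sketched with two helper functions from the
MGTransferGlobalCoarseningTools namespace. This is a simplified excerpt
of what the solver setup later in this program does; the maximum
polynomial degree is hard-coded here purely for illustration:

@code
// h-levels: a sequence of increasingly coarser triangulations, with the
// currently active triangulation as the finest entry.
const std::vector<std::shared_ptr<const Triangulation<dim>>>
  coarse_grid_triangulations =
    MGTransferGlobalCoarseningTools::create_geometric_coarsening_sequence(
      triangulation);

// p-levels: a sequence of polynomial degrees between linear elements
// and the given maximum degree, e.g., 1, 2, 3, 4.
const std::vector<unsigned int> degrees =
  MGTransferGlobalCoarseningTools::create_polynomial_coarsening_sequence(
    /*max_degree=*/4,
    MGTransferGlobalCoarseningTools::PolynomialCoarseningSequenceType::
      decrease_by_one);
@endcode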

    The test case

For elliptic equations, every reentrant corner typically gives rise to a
singularity @cite brenner2008. We can use this circumstance to put our
hp-decision algorithms to a test: on all cells to be adapted, we would
prefer a fine grid near the singularity, and a high polynomial degree
otherwise.

As the simplest elliptic problem to solve under these conditions, we
choose the Laplace equation on an L-shaped domain with its reentrant
corner located at the origin of the coordinate system.

To be able to determine the actual error, we manufacture a boundary
value problem with a known solution. On the above-mentioned domain, one
solution to the Laplace equation is, in polar coordinates
$(r, \varphi)$,
@f[
u_\text{sol} = r^{2/3} \sin(2/3 \varphi).
@f]

See also @cite brenner2008 or @cite mitchell2014hp. The solution looks as follows:
    + Analytic solution. +
    + +The singularity becomes obvious by investigating the solution's gradient +in the vicinity of the reentrant corner, i.e., the origin +@f[ +\left\| \nabla u_\text{sol} \right\|_{2} = 2/3 r^{-1/3} , \quad +\lim\limits_{r \rightarrow 0} \left\| \nabla u_\text{sol} \right\|_{2} = +\infty . +@f] + +As we know where the singularity will be located, we expect that our +hp-decision algorithm decides for a fine grid resolution in this +particular region, and high polynomial degree anywhere else. + +So let's see if that is actually the case, and how hp-adaptation +performs compared to pure h-adaptation. But first let us have a detailed +look at the actual code. diff --git a/examples/step-75/doc/kind b/examples/step-75/doc/kind new file mode 100644 index 0000000000..c1d9154931 --- /dev/null +++ b/examples/step-75/doc/kind @@ -0,0 +1 @@ +techniques diff --git a/examples/step-75/doc/results.dox b/examples/step-75/doc/results.dox new file mode 100644 index 0000000000..677282951b --- /dev/null +++ b/examples/step-75/doc/results.dox @@ -0,0 +1,178 @@ +

    Results

    + +When you run the program with the given parameters on four processes in +release mode, your terminal output should look like this: +@code +Running with Trilinos on 4 MPI rank(s)... +Calculating transformation matrices... +Cycle 0: + Number of active cells: 3072 + by partition: 768 768 768 768 + Number of degrees of freedom: 12545 + by partition: 3201 3104 3136 3104 + Number of constraints: 542 + by partition: 165 74 138 165 + Frequencies of poly. degrees: 2:3072 + Solved in 7 iterations. + + ++---------------------------------------------+------------+------------+ +| Total wallclock time elapsed since start | 0.598s | | +| | | | +| Section | no. calls | wall time | % of total | ++---------------------------------+-----------+------------+------------+ +| calculate transformation | 1 | 0.0533s | 8.9% | +| compute indicators | 1 | 0.0177s | 3% | +| initialize grid | 1 | 0.0397s | 6.6% | +| output results | 1 | 0.0844s | 14% | +| setup system | 1 | 0.0351s | 5.9% | +| solve system | 1 | 0.362s | 61% | ++---------------------------------+-----------+------------+------------+ + + +Cycle 1: + Number of active cells: 3351 + by partition: 875 761 843 872 + Number of degrees of freedom: 18223 + by partition: 4535 4735 4543 4410 + Number of constraints: 1202 + by partition: 303 290 326 283 + Frequencies of poly. degrees: 2:2523 3:828 + Solved in 7 iterations. + + ++---------------------------------------------+------------+------------+ +| Total wallclock time elapsed since start | 0.442s | | +| | | | +| Section | no. calls | wall time | % of total | ++---------------------------------+-----------+------------+------------+ +| adapt resolution | 1 | 0.0189s | 4.3% | +| compute indicators | 1 | 0.0135s | 3% | +| output results | 1 | 0.064s | 14% | +| setup system | 1 | 0.0232s | 5.2% | +| solve system | 1 | 0.322s | 73% | ++---------------------------------+-----------+------------+------------+ + + +... + + +Cycle 7: + Number of active cells: 5610 + by partition: 1324 1483 1482 1321 + Number of degrees of freedom: 82062 + by partition: 21116 19951 20113 20882 + Number of constraints: 14383 + by partition: 3825 3225 3557 3776 + Frequencies of poly. degrees: 2:1130 3:1283 4:2727 5:465 6:5 + Solved in 7 iterations. + + ++---------------------------------------------+------------+------------+ +| Total wallclock time elapsed since start | 0.932s | | +| | | | +| Section | no. calls | wall time | % of total | ++---------------------------------+-----------+------------+------------+ +| adapt resolution | 1 | 0.0182s | 1.9% | +| compute indicators | 1 | 0.0173s | 1.9% | +| output results | 1 | 0.0572s | 6.1% | +| setup system | 1 | 0.0252s | 2.7% | +| solve system | 1 | 0.813s | 87% | ++---------------------------------+-----------+------------+------------+ +@endcode + +When running the code with more processes, you will notice slight +differences in the number of active cells and degrees of freedom. This +is due to the fact that solver and preconditioner depend on the +partitioning of the problem, which might yield to slight differences of +the solution in the last digits and ultimately yields to different +adaptation behavior. + +Furthermore, the number of iterations for the solver stays about the +same in all cycles despite hp-adaptation, indicating the robustness of +the proposed algorithms and promising good scalability for even larger +problem sizes and on more processes. + +Let us have a look at the graphical output of the program. 
After all +refinement cycles in the given parameter configuration, the actual +discretized function space looks like the following with its +partitioning on twelve processes on the left and the polynomial degrees +of finite elements on the right. In the left picture, each color +represents a unique subdomain. In the right picture, the lightest color +corresponds to the polynomial degree two and the darkest one corresponds +to degree six: + +
    +
    + Partitioning after seven refinements. +
    +
    + Local approximation degrees after seven refinements. +
    +
    + + + + +

    Possibilities for extensions

    + +

    Different hp-decision strategies

The deal.II library offers multiple strategies to decide which type of
adaptation to impose on cells: either adjust the grid resolution or
change the polynomial degree. We only presented the Legendre
coefficient decay strategy in this tutorial, while step-27
demonstrated the Fourier equivalent of the same idea.

See the "possibilities for extensions" section of step-27 for an
overview of these strategies, or the corresponding documentation
for a detailed description.

There, another strategy is mentioned that has not been shown in any
tutorial so far: the strategy based on refinement history. The usage of
this method for parallel distributed applications is trickier than for
the others, so we will highlight the challenges that come along with it.
We need information about the final state of refinement flags, and we
need to transfer the predicted errors across refined meshes. For the
former, we need to attach the hp::Refinement::predict_error() function
to the Triangulation::Signals::post_p4est_refinement signal in a way
that it will be called after the
hp::Refinement::limit_p_level_difference() function. At this stage, all
refinement flags and future FE indices are finally set and a reliable
prediction of the error is possible. The predicted error then needs to
be transferred across refined meshes with the aid of
parallel::distributed::CellDataTransfer; a sketch of both steps is given
at the end of this subsection.

Try implementing one of these strategies in this tutorial and observe
the subtle changes to the results. You will notice that all strategies
are capable of identifying the singularity near the reentrant corner
and will perform $h$-refinement in this region, while preferring
$p$-refinement in the bulk domain. A detailed comparison of these
strategies is presented in @cite fehling2020 .
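A minimal sketch of these two steps, which is not part of the tutorial
program, could look as follows. It reuses the member
`estimated_error_per_cell` of this program, introduces a hypothetical
member `predicted_errors` of type Vector<float>, and glosses over the
exact interplay of the signal and the data packing inside
Triangulation::execute_coarsening_and_refinement():

@code
// Predict errors once all refinement flags and future FE indices are in
// their final state, i.e., after limit_p_level_difference() has run.
triangulation.signals.post_p4est_refinement.connect(
  [&]() {
    const parallel::distributed::TemporarilyMatchRefineFlags<dim>
      refine_modifier(triangulation);
    predicted_errors.reinit(triangulation.n_active_cells());
    hp::Refinement::predict_error(dof_handler,
                                  estimated_error_per_cell,
                                  predicted_errors);
  },
  boost::signals2::at_back);

// Transfer the predicted errors to the adapted mesh, where they serve
// as a reference for the error estimates of the next cycle.
parallel::distributed::CellDataTransfer<dim, dim, Vector<float>>
  cell_data_transfer(triangulation);
cell_data_transfer.prepare_for_coarsening_and_refinement(predicted_errors);

triangulation.execute_coarsening_and_refinement();

Vector<float> transferred_errors(triangulation.n_active_cells());
cell_data_transfer.unpack(transferred_errors);
@endcode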

    Solve with matrix-based methods

This tutorial focuses solely on matrix-free strategies. All hp-adaptive
algorithms, however, also work with matrix-based approaches in the
parallel distributed context.

To create a system matrix, you can either use the
LaplaceOperator::get_system_matrix() function, or use an
assemble_system() function similar to the one of step-27.
You can then pass the system matrix to the solver as usual.

You can time both the matrix-based and the matrix-free implementation,
quantify the speed-up, and convince yourself which variant is faster.
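For the first option, a minimal sketch could look like the following.
The SolverControl settings and the choice of an AMG preconditioner are
assumptions made purely for illustration; `laplace_operator`,
`constraints`, and `system_rhs` refer to the members of this program:

@code
// Retrieve the system matrix from the matrix-free operator; it is built
// lazily on the first call to get_system_matrix().
const TrilinosWrappers::SparseMatrix &system_matrix =
  laplace_operator.get_system_matrix();

TrilinosWrappers::PreconditionAMG preconditioner;
preconditioner.initialize(system_matrix);

LinearAlgebra::distributed::Vector<double> solution;
laplace_operator.initialize_dof_vector(solution);

SolverControl solver_control(system_rhs.size(),
                             1e-12 * system_rhs.l2_norm());
SolverCG<LinearAlgebra::distributed::Vector<double>> cg(solver_control);
cg.solve(system_matrix, solution, system_rhs, preconditioner);

constraints.distribute(solution);
@endcode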

    Multigrid variants

    + +For sake of simplicity, we have restricted ourselves to a single type of +coarse-grid solver (CG with AMG), smoother (Chebyshev smoother with +point Jacobi preconditioner), and geometric-coarsening scheme (global +coarsening) within the multigrid algorithm. Feel free to try out +alternatives and investigate their performance and robustness. diff --git a/examples/step-75/doc/tooltip b/examples/step-75/doc/tooltip new file mode 100644 index 0000000000..7d7fde4889 --- /dev/null +++ b/examples/step-75/doc/tooltip @@ -0,0 +1,2 @@ +Solving the Laplace equation on hp-adaptive meshes with a MatrixFree hybrid +multigrid approach on thousands of processors. diff --git a/examples/step-75/step-75.cc b/examples/step-75/step-75.cc new file mode 100644 index 0000000000..443fb4399e --- /dev/null +++ b/examples/step-75/step-75.cc @@ -0,0 +1,1537 @@ +/* --------------------------------------------------------------------- + * + * Copyright (C) 2021 by the deal.II authors + * + * This file is part of the deal.II library. + * + * The deal.II library is free software; you can use it, redistribute + * it, and/or modify it under the terms of the GNU Lesser General + * Public License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * The full text of the license can be found in the file LICENSE.md at + * the top level directory of deal.II. + * + * --------------------------------------------------------------------- + + * + * Author: Marc Fehling, Colorado State University, 2021 + * Peter Munch, Technical University of Munich and Helmholtz-Zentrum + * hereon, 2021 + * Wolfgang Bangerth, Colorado State University, 2021 + */ + + +// @sect3{Include files} +// +// The following include files have been used and discussed in previous tutorial +// programs, especially in step-27 and step-40. +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include + +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + +// For load balancing we will assign individual weights on cells, and for that +// we will use the class parallel::CellWeights. +#include + +// The solution function requires a transformation from Cartesian to polar +// coordinates. The GeometricUtilities::Coordinates namespace provides the +// necessary tools. +#include +#include + +// The following include files will enable the MatrixFree functionality. +#include +#include +#include + +// We will use LinearAlgebra::distributed::Vector for linear algebra operations. +#include + +// We are left to include the files needed by the multigrid solver. +#include +#include +#include +#include +#include +#include +#include + +namespace Step75 +{ + using namespace dealii; + + // @sect3{The Solution class template} + + // We have an analytic solution to the scenario at our disposal. We will use + // this solution to impose boundary conditions for the numerical solution of + // the problem. The formulation of the solution requires a transformation to + // polar coordinates. To transform from Cartesian to spherical coordinates, we + // will use a helper function from the GeometricUtilities::Coordinates + // namespace. The first two coordinates of this transformation correspond to + // polar coordinates in the x-y-plane. 
+ template + class Solution : public Function + { + public: + Solution() + : Function() + {} + + virtual double value(const Point &p, + const unsigned int /*component*/) const override + { + const std::array p_sphere = + GeometricUtilities::Coordinates::to_spherical(p); + + constexpr const double alpha = 2. / 3.; + return std::pow(p_sphere[0], alpha) * std::sin(alpha * p_sphere[1]); + } + }; + + + + // @sect3{Parameters} + + // For this tutorial, we will use a simplified set of parameters. It is also + // possible to use a ParameterHandler class here, but to keep this tutorial + // short we decided on using simple structs. The actual intention of all these + // parameters will be described in the upcoming classes at their respective + // location where they are used. + // + // The following parameter set controls the coarse-grid solver, the smoothers, + // and the inter-grid transfer scheme of the multigrid mechanism. + // We populate it with default parameters. + struct MultigridParameters + { + struct + { + std::string type = "cg_with_amg"; + unsigned int maxiter = 10000; + double abstol = 1e-20; + double reltol = 1e-4; + unsigned int smoother_sweeps = 1; + unsigned int n_cycles = 1; + std::string smoother_type = "ILU"; + } coarse_solver; + + struct + { + std::string type = "chebyshev"; + double smoothing_range = 20; + unsigned int degree = 5; + unsigned int eig_cg_n_iterations = 20; + } smoother; + + struct + { + MGTransferGlobalCoarseningTools::PolynomialCoarseningSequenceType + p_sequence = MGTransferGlobalCoarseningTools:: + PolynomialCoarseningSequenceType::decrease_by_one; + bool perform_h_transfer = true; + } transfer; + }; + + + + // This is the general parameter struct for the problem class. You will find + // this struct divided into several categories, including general runtime + // parameters, level limits, refine and coarsen fractions, as well as + // parameters for cell weighting. It also contains an instance of the above + // struct for multigrid parameters which will be passed to the multigrid + // algorithm. + struct Parameters + { + unsigned int n_cycles = 8; + double tolerance_factor = 1e-12; + + MultigridParameters mg_data; + + unsigned int min_h_level = 5; + unsigned int max_h_level = 12; + unsigned int min_p_degree = 2; + unsigned int max_p_degree = 6; + unsigned int max_p_level_difference = 1; + + double refine_fraction = 0.3; + double coarsen_fraction = 0.03; + double p_refine_fraction = 0.9; + double p_coarsen_fraction = 0.9; + + double weighting_factor = 1e6; + double weighting_exponent = 1.; + }; + + + + // @sect3{Matrix-free Laplace operator} + + // This is a matrix-free implementation of the Laplace operator that will + // basically take over the part of the `assemble_system()` function from other + // tutorials. The meaning of all member functions will be explained at their + // definition later. + // + // We will use the FEEvaluation class to evaluate the solution vector + // at the quadrature points and to perform the integration. In contrast to + // other tutorials, the template arguments `degree` is set to $-1$ and + // `number of quadrature in 1D` to $0$. In this case, FEEvaluation selects + // dynamically the correct polynomial degree and number of quadrature + // points. Here, we introduce an alias to FEEvaluation with the correct + // template parameters so that we do not have to worry about them later on. 
+ template + class LaplaceOperator : public Subscriptor + { + public: + using VectorType = LinearAlgebra::distributed::Vector; + + using FECellIntegrator = FEEvaluation; + + LaplaceOperator() = default; + + LaplaceOperator(const hp::MappingCollection &mapping, + const DoFHandler & dof_handler, + const hp::QCollection & quad, + const AffineConstraints & constraints, + VectorType & system_rhs); + + void reinit(const hp::MappingCollection &mapping, + const DoFHandler & dof_handler, + const hp::QCollection & quad, + const AffineConstraints & constraints, + VectorType & system_rhs); + + types::global_dof_index m() const; + + number el(unsigned int, unsigned int) const; + + void initialize_dof_vector(VectorType &vec) const; + + void vmult(VectorType &dst, const VectorType &src) const; + + void Tvmult(VectorType &dst, const VectorType &src) const; + + const TrilinosWrappers::SparseMatrix &get_system_matrix() const; + + void compute_inverse_diagonal(VectorType &diagonal) const; + + private: + void do_cell_integral_local(FECellIntegrator &integrator) const; + + void do_cell_integral_global(FECellIntegrator &integrator, + VectorType & dst, + const VectorType &src) const; + + + void do_cell_integral_range( + const MatrixFree & matrix_free, + VectorType & dst, + const VectorType & src, + const std::pair &range) const; + + MatrixFree matrix_free; + + // To solve the equation system on the coarsest level with an AMG + // preconditioner, we need an actual system matrix on the coarsest level. + // For this purpose, we provide a mechanism that optionally computes a + // matrix from the matrix-free formulation, for which we introduce a + // dedicated SparseMatrix object. In the default case, this matrix stays + // empty. Once `get_system_matrix()` is called, this matrix is filled (lazy + // allocation). Since this is a `const` function, we need the "mutable" + // keyword here. We also need a the constraints object to build the matrix. + AffineConstraints constraints; + mutable TrilinosWrappers::SparseMatrix system_matrix; + }; + + + + // The following section contains functions to initialize and reinitialize + // the class. In particular, these functions initialize the internal + // MatrixFree instance. For sake of simplicity, we also compute the system + // right-hand-side vector. + template + LaplaceOperator::LaplaceOperator( + const hp::MappingCollection &mapping, + const DoFHandler & dof_handler, + const hp::QCollection & quad, + const AffineConstraints & constraints, + VectorType & system_rhs) + { + this->reinit(mapping, dof_handler, quad, constraints, system_rhs); + } + + + + template + void LaplaceOperator::reinit( + const hp::MappingCollection &mapping, + const DoFHandler & dof_handler, + const hp::QCollection & quad, + const AffineConstraints & constraints, + VectorType & system_rhs) + { + // Clear internal data structures (in the case that the operator is reused). + this->system_matrix.clear(); + + // Copy the constraints, since they might be needed for computation of the + // system matrix later on. + this->constraints.copy_from(constraints); + + // Set up MatrixFree. At the quadrature points, we only need to evaluate + // the gradient of the solution and test with the gradient of the shape + // functions so that we only need to set the flag `update_gradients`. + typename MatrixFree::AdditionalData data; + data.mapping_update_flags = update_gradients; + + matrix_free.reinit(mapping, dof_handler, constraints, quad, data); + + // Compute the right-hand side vector. 
For this purpose, we set up a second + // MatrixFree instance that uses a modified AffineConstraints not containing + // the constraints due to Dirichlet-boundary conditions. This modified + // operator is applied to a vector with only the Dirichlet values set. The + // result is the negative right-hand-side vector. + { + AffineConstraints constraints_without_dbc; + + IndexSet locally_relevant_dofs; + DoFTools::extract_locally_relevant_dofs(dof_handler, + locally_relevant_dofs); + constraints_without_dbc.reinit(locally_relevant_dofs); + + DoFTools::make_hanging_node_constraints(dof_handler, + constraints_without_dbc); + constraints_without_dbc.close(); + + VectorType b, x; + + this->initialize_dof_vector(system_rhs); + + MatrixFree matrix_free; + matrix_free.reinit( + mapping, dof_handler, constraints_without_dbc, quad, data); + + matrix_free.initialize_dof_vector(b); + matrix_free.initialize_dof_vector(x); + + constraints.distribute(x); + + matrix_free.cell_loop(&LaplaceOperator::do_cell_integral_range, + this, + b, + x); + + constraints.set_zero(b); + + system_rhs -= b; + } + } + + + + // The following functions are implicitly needed by the multigrid algorithm, + // including the smoothers. + + // Since we do not have a matrix, query the DoFHandler for the number of + // degrees of freedom. + template + types::global_dof_index LaplaceOperator::m() const + { + return matrix_free.get_dof_handler().n_dofs(); + } + + + + // Access a particular element in the matrix. This function is neither + // needed nor implemented, however, is required to compile the program. + template + number LaplaceOperator::el(unsigned int, unsigned int) const + { + Assert(false, ExcNotImplemented()); + return 0; + } + + + + // Initialize the given vector. We simply delegate the task to the + // MatrixFree function with the same name. + template + void + LaplaceOperator::initialize_dof_vector(VectorType &vec) const + { + matrix_free.initialize_dof_vector(vec); + } + + + + // Perform an operator evaluation by looping with the help of MatrixFree + // over all cells and evaluating the effect of the cell integrals (see also: + // `do_cell_integral_local()` and `do_cell_integral_global()`). + template + void LaplaceOperator::vmult(VectorType & dst, + const VectorType &src) const + { + this->matrix_free.cell_loop( + &LaplaceOperator::do_cell_integral_range, this, dst, src, true); + } + + + + // Perform the transposed operator evaluation. Since we are considering + // symmetric "matrices", this function can simply delegate it task to vmult(). + template + void LaplaceOperator::Tvmult(VectorType & dst, + const VectorType &src) const + { + this->vmult(dst, src); + } + + + + // Since we do not have a system matrix, we cannot loop over the the + // diagonal entries of the matrix. Instead, we compute the diagonal by + // performing a sequence of operator evaluations to unit basis vectors. + // For this purpose, an optimized function from the MatrixFreeTools + // namespace is used. The inversion is performed manually afterwards. + template + void LaplaceOperator::compute_inverse_diagonal( + VectorType &diagonal) const + { + MatrixFreeTools::compute_diagonal(matrix_free, + diagonal, + &LaplaceOperator::do_cell_integral_local, + this); + + for (auto &i : diagonal) + i = (std::abs(i) > 1.0e-10) ? (1.0 / i) : 1.0; + } + + + + // In the matrix-free context, no system matrix is set up during + // initialization of this class. As a consequence, it has to be computed + // here if it should be requested. 
Since the matrix is only computed in + // this tutorial for linear elements (on the coarse grid), this is + // acceptable. + // The matrix entries are obtained via sequence of operator evaluations. + // For this purpose, the optimized function MatrixFreeTools::compute_matrix() + // is used. The matrix will only be computed if it has not been set up yet + // (lazy allocation). + template + const TrilinosWrappers::SparseMatrix & + LaplaceOperator::get_system_matrix() const + { + if (system_matrix.m() == 0 && system_matrix.n() == 0) + { + const auto &dof_handler = this->matrix_free.get_dof_handler(); + + TrilinosWrappers::SparsityPattern dsp( + dof_handler.locally_owned_dofs(), + dof_handler.get_triangulation().get_communicator()); + + DoFTools::make_sparsity_pattern(dof_handler, dsp, this->constraints); + + dsp.compress(); + system_matrix.reinit(dsp); + + MatrixFreeTools::compute_matrix( + matrix_free, + constraints, + system_matrix, + &LaplaceOperator::do_cell_integral_local, + this); + } + + return this->system_matrix; + } + + + + // Perform cell integral on a cell batch without gathering and scattering + // the values. This function is needed for the MatrixFreeTools functions + // since these functions operate directly on the buffers of FEEvaluation. + template + void LaplaceOperator::do_cell_integral_local( + FECellIntegrator &integrator) const + { + integrator.evaluate(EvaluationFlags::gradients); + + for (unsigned int q = 0; q < integrator.n_q_points; ++q) + integrator.submit_gradient(integrator.get_gradient(q), q); + + integrator.integrate(EvaluationFlags::gradients); + } + + + + // Same as above but with access to the global vectors. + template + void LaplaceOperator::do_cell_integral_global( + FECellIntegrator &integrator, + VectorType & dst, + const VectorType &src) const + { + integrator.gather_evaluate(src, EvaluationFlags::gradients); + + for (unsigned int q = 0; q < integrator.n_q_points; ++q) + integrator.submit_gradient(integrator.get_gradient(q), q); + + integrator.integrate_scatter(EvaluationFlags::gradients, dst); + } + + + + // This function loops over all cell batches within a cell-batch range and + // calls the above function. + template + void LaplaceOperator::do_cell_integral_range( + const MatrixFree & matrix_free, + VectorType & dst, + const VectorType & src, + const std::pair &range) const + { + FECellIntegrator integrator(matrix_free, range); + + for (unsigned cell = range.first; cell < range.second; ++cell) + { + integrator.reinit(cell); + + do_cell_integral_global(integrator, dst, src); + } + } + + + + // @sect3{Solver and preconditioner} + + // @sect4{Conjugate-gradient solver with multigrid preconditioner} + + // This function solves the equation system with a sequence of provided + // multigrid objects. It is meant to be treated as general as possible, hence + // the multitude of template parameters. 
+ template + static void + mg_solve(SolverControl & solver_control, + VectorType & dst, + const VectorType & src, + const MultigridParameters &mg_data, + const DoFHandler & dof, + const SystemMatrixType & fine_matrix, + const MGLevelObject> &mg_matrices, + const MGTransferType & mg_transfer) + { + AssertThrow(mg_data.coarse_solver.type == "cg_with_amg", + ExcNotImplemented()); + AssertThrow(mg_data.smoother.type == "chebyshev", ExcNotImplemented()); + + const unsigned int min_level = mg_matrices.min_level(); + const unsigned int max_level = mg_matrices.max_level(); + + using SmootherPreconditionerType = DiagonalMatrix; + using SmootherType = PreconditionChebyshev; + using PreconditionerType = PreconditionMG; + + // We initialize level operators and Chebyshev smoothers here. + mg::Matrix mg_matrix(mg_matrices); + + MGLevelObject smoother_data( + min_level, max_level); + + for (unsigned int level = min_level; level <= max_level; level++) + { + smoother_data[level].preconditioner = + std::make_shared(); + mg_matrices[level]->compute_inverse_diagonal( + smoother_data[level].preconditioner->get_vector()); + smoother_data[level].smoothing_range = mg_data.smoother.smoothing_range; + smoother_data[level].degree = mg_data.smoother.degree; + smoother_data[level].eig_cg_n_iterations = + mg_data.smoother.eig_cg_n_iterations; + } + + MGSmootherPrecondition + mg_smoother; + mg_smoother.initialize(mg_matrices, smoother_data); + + // Next, we initialize the coarse-grid solver. We use conjugate-gradient + // method with AMG as preconditioner. + ReductionControl coarse_grid_solver_control(mg_data.coarse_solver.maxiter, + mg_data.coarse_solver.abstol, + mg_data.coarse_solver.reltol, + false, + false); + SolverCG coarse_grid_solver(coarse_grid_solver_control); + + std::unique_ptr> mg_coarse; + + TrilinosWrappers::PreconditionAMG precondition_amg; + TrilinosWrappers::PreconditionAMG::AdditionalData amg_data; + amg_data.smoother_sweeps = mg_data.coarse_solver.smoother_sweeps; + amg_data.n_cycles = mg_data.coarse_solver.n_cycles; + amg_data.smoother_type = mg_data.coarse_solver.smoother_type.c_str(); + + precondition_amg.initialize(mg_matrices[min_level]->get_system_matrix(), + amg_data); + + mg_coarse = + std::make_unique, + LevelMatrixType, + decltype(precondition_amg)>>( + coarse_grid_solver, *mg_matrices[min_level], precondition_amg); + + // Finally, we create the Multigrid object, convert it to a preconditioner, + // and use it inside of a conjugate-gradient solver to solve the linear + // system of equations. + Multigrid mg( + mg_matrix, *mg_coarse, mg_transfer, mg_smoother, mg_smoother); + + PreconditionerType preconditioner(dof, mg, mg_transfer); + + SolverCG(solver_control) + .solve(fine_matrix, dst, src, preconditioner); + } + + + + // @sect4{Hybrid polynomial/geometric-global-coarsening multigrid preconditioner} + + // The above function deals with the actual solution for a given sequence of + // multigrid objects. This functions creates the actual multigrid levels, in + // particular the operators, and the transfer operator as a + // MGTransferGlobalCoarsening object. + template + void solve_with_gmg(SolverControl & solver_control, + const OperatorType & system_matrix, + VectorType & dst, + const VectorType & src, + const MultigridParameters & mg_data, + const hp::MappingCollection mapping_collection, + const DoFHandler & dof_handler, + const hp::QCollection & quadrature_collection) + { + // Create a DoFHandler and operator for each multigrid level, + // as well as, create transfer operators. 
To be able to + // set up the operators, we need a set of DoFHandler that we create + // via global coarsening of p or h. For latter, we need also a sequence + // of Triangulation objects that are obtained by + // Triangulation::coarsen_global(). + // + // In case no h-transfer is requested, we provide an empty deleter for the + // `emplace_back()` function, since the Triangulation of our DoFHandler is + // an external field and its destructor is called somewhere else. + MGLevelObject> dof_handlers; + MGLevelObject> operators; + MGLevelObject> transfers; + + std::vector>> + coarse_grid_triangulations; + if (mg_data.transfer.perform_h_transfer) + coarse_grid_triangulations = + MGTransferGlobalCoarseningTools::create_geometric_coarsening_sequence( + dof_handler.get_triangulation()); + else + coarse_grid_triangulations.emplace_back( + const_cast *>(&(dof_handler.get_triangulation())), + [](auto &) {}); + + // Determine the total number of levels for the multigrid operation and + // allocate sufficient memory for all levels. + const unsigned int n_h_levels = coarse_grid_triangulations.size() - 1; + + const auto get_max_active_fe_degree = [&](const auto &dof_handler) { + unsigned int max = 0; + + for (auto &cell : dof_handler.active_cell_iterators()) + if (cell->is_locally_owned()) + max = + std::max(max, dof_handler.get_fe(cell->active_fe_index()).degree); + + return Utilities::MPI::max(max, MPI_COMM_WORLD); + }; + + const unsigned int n_p_levels = + MGTransferGlobalCoarseningTools::create_polynomial_coarsening_sequence( + get_max_active_fe_degree(dof_handler), mg_data.transfer.p_sequence) + .size(); + + std::map fe_index_for_degree; + for (unsigned int i = 0; i < dof_handler.get_fe_collection().size(); ++i) + { + const unsigned int degree = dof_handler.get_fe(i).degree; + Assert(fe_index_for_degree.find(degree) == fe_index_for_degree.end(), + ExcMessage("FECollection does not contain unique degrees.")); + fe_index_for_degree[degree] = i; + } + + unsigned int minlevel = 0; + unsigned int minlevel_p = n_h_levels; + unsigned int maxlevel = n_h_levels + n_p_levels - 1; + + dof_handlers.resize(minlevel, maxlevel); + operators.resize(minlevel, maxlevel); + transfers.resize(minlevel, maxlevel); + + // Loop from the minimum (coarsest) to the maximum (finest) level and set up + // DoFHandler accordingly. We start with the h-levels, where we distribute + // on increasingly finer meshes linear elements. + for (unsigned int l = 0; l < n_h_levels; ++l) + { + dof_handlers[l].reinit(*coarse_grid_triangulations[l]); + dof_handlers[l].distribute_dofs(dof_handler.get_fe_collection()); + } + + // After we reached the finest mesh, we will adjust the polynomial degrees + // on each level. We reverse iterate over our data structure and start at + // the finest mesh that contains all information about the active FE + // indices. We then lower the polynomial degree of each cell level by level. 
+ for (unsigned int i = 0, l = maxlevel; i < n_p_levels; ++i, --l) + { + dof_handlers[l].reinit(dof_handler.get_triangulation()); + + if (l == maxlevel) // finest level + { + auto &dof_handler_mg = dof_handlers[l]; + + auto cell_other = dof_handler.begin_active(); + for (auto &cell : dof_handler_mg.active_cell_iterators()) + { + if (cell->is_locally_owned()) + cell->set_active_fe_index(cell_other->active_fe_index()); + cell_other++; + } + } + else // coarse level + { + auto &dof_handler_fine = dof_handlers[l + 1]; + auto &dof_handler_coarse = dof_handlers[l + 0]; + + auto cell_other = dof_handler_fine.begin_active(); + for (auto &cell : dof_handler_coarse.active_cell_iterators()) + { + const unsigned int next_degree = + MGTransferGlobalCoarseningTools:: + create_next_polynomial_coarsening_degree( + cell_other->get_fe().degree, mg_data.transfer.p_sequence); + Assert(fe_index_for_degree.find(next_degree) != + fe_index_for_degree.end(), + ExcMessage("Next polynomial degree in sequence " + "does not exist in FECollection.")); + + if (cell->is_locally_owned()) + cell->set_active_fe_index(fe_index_for_degree[next_degree]); + cell_other++; + } + } + + dof_handlers[l].distribute_dofs(dof_handler.get_fe_collection()); + } + + // Next, we will create all data structures additionally needed on each + // multigrid level. This involves determining constraints with homogeneous + // Dirichlet boundary conditions, and building the operator just like on the + // active level. + MGLevelObject> + constraints(minlevel, maxlevel); + + for (unsigned int level = minlevel; level <= maxlevel; ++level) + { + const auto &dof_handler = dof_handlers[level]; + auto & constraint = constraints[level]; + + IndexSet locally_relevant_dofs; + DoFTools::extract_locally_relevant_dofs(dof_handler, + locally_relevant_dofs); + constraint.reinit(locally_relevant_dofs); + + DoFTools::make_hanging_node_constraints(dof_handler, constraint); + VectorTools::interpolate_boundary_values(mapping_collection, + dof_handler, + 0, + Functions::ZeroFunction(), + constraint); + constraint.close(); + + VectorType dummy; + + operators[level] = std::make_unique(mapping_collection, + dof_handler, + quadrature_collection, + constraint, + dummy); + } + + // Set up intergrid operators and collect transfer operators within a single + // operator as needed by the Multigrid solver class. + for (unsigned int level = minlevel; level < minlevel_p; ++level) + transfers[level + 1].reinit_geometric_transfer(dof_handlers[level + 1], + dof_handlers[level], + constraints[level + 1], + constraints[level]); + + for (unsigned int level = minlevel_p; level < maxlevel; ++level) + transfers[level + 1].reinit_polynomial_transfer(dof_handlers[level + 1], + dof_handlers[level], + constraints[level + 1], + constraints[level]); + + MGTransferGlobalCoarsening transfer( + transfers, [&](const auto l, auto &vec) { + operators[l]->initialize_dof_vector(vec); + }); + + // Finally, proceed to solve the problem with multigrid. + mg_solve(solver_control, + dst, + src, + mg_data, + dof_handler, + system_matrix, + operators, + transfer); + } + + + + // @sect3{The LaplaceProblem class template} + + // Now we will finally declare the main class of this program, which solves + // the Laplace equation on subsequently refined function spaces. Its structure + // will look familiar as it is similar to the main classes of step-27 and + // step-40. 
There are basically just two additions: + // - The SparseMatrix object that would hold the system matrix has been + // replaced by an object of the LaplaceOperator class for the MatrixFree + // formulation. + // - An object of parallel::CellWeights, which will help us with load + // balancing, has been added. + template + class LaplaceProblem + { + public: + LaplaceProblem(const Parameters ¶meters); + + void run(); + + private: + void initialize_grid(); + void setup_system(); + void print_diagnostics(); + void solve_system(); + void compute_indicators(); + void adapt_resolution(); + void output_results(const unsigned int cycle); + + MPI_Comm mpi_communicator; + + const Parameters prm; + + parallel::distributed::Triangulation triangulation; + DoFHandler dof_handler; + + hp::MappingCollection mapping_collection; + hp::FECollection fe_collection; + hp::QCollection quadrature_collection; + hp::QCollection face_quadrature_collection; + + IndexSet locally_owned_dofs; + IndexSet locally_relevant_dofs; + + AffineConstraints constraints; + + LaplaceOperator laplace_operator; + LinearAlgebra::distributed::Vector locally_relevant_solution; + LinearAlgebra::distributed::Vector system_rhs; + + std::unique_ptr> legendre; + std::unique_ptr> cell_weights; + + Vector estimated_error_per_cell; + Vector hp_decision_indicators; + + ConditionalOStream pcout; + TimerOutput computing_timer; + }; + + + + // @sect3{The LaplaceProblem class implementation} + + // @sect4{Constructor} + + // The constructor starts with an initializer list that looks similar to the + // one of step-40. We again prepare the ConditionalOStream object to allow + // only the first process to output anything over the console, and initialize + // the computing timer properly. + template + LaplaceProblem::LaplaceProblem(const Parameters ¶meters) + : mpi_communicator(MPI_COMM_WORLD) + , prm(parameters) + , triangulation(mpi_communicator) + , dof_handler(triangulation) + , pcout(std::cout, + (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)) + , computing_timer(mpi_communicator, + pcout, + TimerOutput::summary, + TimerOutput::wall_times) + { + Assert(prm.min_h_level <= prm.max_h_level, + ExcMessage( + "Triangulation level limits have been incorrectly set up.")); + Assert(prm.min_p_degree <= prm.max_p_degree, + ExcMessage("FECollection degrees have been incorrectly set up.")); + + // We need to prepare the data structures for the hp-functionality in the + // actual body of the constructor, and create corresponding objects for + // every degree in the specified range from the parameter struct. As we are + // only dealing with non-distorted rectangular cells, a linear mapping + // object is sufficient in this context. + // + // In the Parameters struct, we provide ranges for levels on which the + // function space is operating with a reasonable resolution. The multigrid + // algorithm requires linear elements on the coarsest possible level. So we + // start with the lowest polynomial degree and fill the collection with + // consecutively higher degrees until the user-specified maximum is + // reached. 
+    mapping_collection.push_back(MappingQ1<dim>());
+
+    for (unsigned int degree = 1; degree <= prm.max_p_degree; ++degree)
+      {
+        fe_collection.push_back(FE_Q<dim>(degree));
+        quadrature_collection.push_back(QGauss<dim>(degree + 1));
+        face_quadrature_collection.push_back(QGauss<dim - 1>(degree + 1));
+      }
+
+    // As our FECollection contains more finite elements than we want to use
+    // for the finite element approximation of our solution, we would like to
+    // limit the range on which active FE indices can operate. For this, the
+    // FECollection class allows us to register a hierarchy that determines the
+    // succeeding and preceding finite element in case of p-refinement and
+    // p-coarsening, respectively. All functions in the hp::Refinement
+    // namespace consult this hierarchy to determine future FE indices. We will
+    // register such a hierarchy that only works on finite elements with
+    // polynomial degrees in the proposed range [min_p_degree, max_p_degree].
+    const unsigned int min_fe_index = prm.min_p_degree - 1;
+    fe_collection.set_hierarchy(
+      /*next_index=*/
+      [](const typename hp::FECollection<dim> &fe_collection,
+         const unsigned int                    fe_index) -> unsigned int {
+        return ((fe_index + 1) < fe_collection.size()) ? fe_index + 1 :
+                                                         fe_index;
+      },
+      /*previous_index=*/
+      [min_fe_index](const typename hp::FECollection<dim> &,
+                     const unsigned int fe_index) -> unsigned int {
+        Assert(fe_index >= min_fe_index,
+               ExcMessage("Finite element is not part of hierarchy!"));
+        return (fe_index > min_fe_index) ? fe_index - 1 : fe_index;
+      });
+
+    // We initialize the FESeries::Legendre object in the default configuration
+    // for smoothness estimation.
+    legendre = std::make_unique<FESeries::Legendre<dim>>(
+      SmoothnessEstimator::Legendre::default_fe_series(fe_collection));
+
+    // The next part is going to be tricky. During execution of refinement, a
+    // few hp-algorithms need to interfere with the actual refinement process
+    // on the Triangulation object. We do this by connecting several functions
+    // to Triangulation::Signals: signals will be called at different stages
+    // during the actual refinement process and trigger all connected
+    // functions. We require this functionality for load balancing and to limit
+    // the polynomial degrees of neighboring cells.
+    //
+    // For the former, we would like to assign a weight to every cell that is
+    // proportional to the number of degrees of freedom of its future finite
+    // element. The library offers a class parallel::CellWeights that allows us
+    // to easily attach individual weights at the right place during the
+    // refinement process, i.e., after all refine and coarsen flags have been
+    // set correctly for hp-adaptation and right before repartitioning for load
+    // balancing is about to happen. Functions can be registered that will
+    // attach weights of the form $a (n_\text{dofs})^b$ with a provided pair of
+    // parameters $(a,b)$. We register such a function in the following. Every
+    // cell will be charged with a constant weight at creation, which is a
+    // value of 1000 (see Triangulation::Signals::cell_weight).
+    //
+    // For load balancing, efficient solvers like the one we use should scale
+    // linearly with the number of degrees of freedom owned. Further, to
+    // increase the impact of the weights we would like to attach, we make sure
+    // that the individual weight exceeds this base weight by orders of
+    // magnitude. We set the parameters for cell weighting correspondingly: a
+    // large weighting factor of $10^6$ and an exponent of $1$.
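+    // To make these two numbers concrete (an illustrative calculation only,
+    // not code from this program): with the weighting function
+    // $w(K) = a \, (n_\text{dofs,K})^b$ and the parameters $a=10^6$, $b=1$, a
+    // two-dimensional cell carrying a degree-2 element (9 degrees of freedom)
+    // would be assigned a weight of
+    // @code
+    //   const double weight = 1e6 * std::pow(9., 1.); // = 9,000,000
+    // @endcode
+    // while a degree-5 cell with 36 degrees of freedom would receive
+    // 36,000,000. Both dwarf the base weight of 1000, so the partitioning is
+    // effectively driven by the number of degrees of freedom rather than by
+    // the number of cells.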
+    cell_weights = std::make_unique<parallel::CellWeights<dim>>(
+      dof_handler,
+      parallel::CellWeights<dim>::ndofs_weighting(
+        {prm.weighting_factor, prm.weighting_exponent}));
+
+    // In h-adaptive applications, we ensure a 2:1 mesh balance by limiting the
+    // difference of refinement levels of neighboring cells to one. With the
+    // second call in the following code snippet, we will ensure the same for
+    // p-levels on neighboring cells: levels of future finite elements are not
+    // allowed to differ by more than a specified difference. The function
+    // hp::Refinement::limit_p_level_difference takes care of this, but needs
+    // to be connected to a very specific signal in the parallel context. The
+    // issue is that we need to know how the mesh will be actually refined to
+    // set future FE indices accordingly. As we ask the p4est oracle to perform
+    // refinement, we need to ensure that the Triangulation has been updated
+    // with the adaptation flags of the oracle first. An instantiation of
+    // parallel::distributed::TemporarilyMatchRefineFlags does exactly that for
+    // the duration of its life. Thus, we will create an object of this class
+    // right before limiting the p-level difference, and connect the
+    // corresponding lambda function to the signal
+    // Triangulation::Signals::post_p4est_refinement, which will be triggered
+    // after the oracle has been refined, but before the Triangulation is
+    // refined. Furthermore, we specify that this function will be connected to
+    // the front of the signal, to ensure that the modification is performed
+    // before any other function connected to the same signal.
+    triangulation.signals.post_p4est_refinement.connect(
+      [&, min_fe_index]() {
+        const parallel::distributed::TemporarilyMatchRefineFlags<dim>
+          refine_modifier(triangulation);
+        hp::Refinement::limit_p_level_difference(dof_handler,
+                                                 prm.max_p_level_difference,
+                                                 /*contains=*/min_fe_index);
+      },
+      boost::signals2::at_front);
+  }
+
+
+
+  // @sect4{LaplaceProblem::initialize_grid}
+
+  // For an L-shaped domain, we could use the function GridGenerator::hyper_L()
+  // as demonstrated in step-50. However, in the 2D case, that particular
+  // function removes the first quadrant, while we need the fourth quadrant
+  // removed in our scenario. Thus, we will use a different function
+  // GridGenerator::subdivided_hyper_L() which gives us more options to create
+  // the mesh. Furthermore, we formulate the function call in a way that it
+  // also generates a 3D mesh: the 2D L-shaped domain will basically be
+  // elongated by 1 in the positive z-direction.
+  //
+  // We first pretend to build a GridGenerator::subdivided_hyper_rectangle().
+  // The parameters that we need to provide are Point objects for the lower
+  // left and top right corners, as well as the number of repetitions that the
+  // base mesh will have in each direction. We provide them for the first two
+  // dimensions and treat the higher third dimension separately.
+  //
+  // To create an L-shaped domain, we need to remove the excess cells. For
+  // this, we specify the cells_to_remove accordingly. We would like to remove
+  // one cell from the negative end in every direction, except in the
+  // x-direction, where the cell at the positive end gets removed.
+  //
+  // In the end, we supply the number of initial refinements that corresponds
+  // to the supplied minimal grid refinement level. Further, we set the initial
+  // active FE indices accordingly.
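+  // Spelled out for the two-dimensional case with the values used below, the
+  // grid creation boils down to the following call (an illustrative sketch
+  // only; the actual implementation works for both 2D and 3D):
+  // @code
+  //   GridGenerator::subdivided_hyper_L(triangulation,
+  //                                     {2u, 2u},           // repetitions
+  //                                     Point<2>(-1., -1.), // bottom left
+  //                                     Point<2>(1., 1.),   // top right
+  //                                     {-1, 1});           // cells to remove
+  // @endcode
+  // The negative entry in the last argument indicates that the cell to remove
+  // in the x-direction is taken from the positive end of the domain, which
+  // leaves us with the missing fourth quadrant.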
+  template <int dim>
+  void LaplaceProblem<dim>::initialize_grid()
+  {
+    TimerOutput::Scope t(computing_timer, "initialize grid");
+
+    std::vector<unsigned int> repetitions(dim);
+    Point<dim>                bottom_left, top_right;
+    for (unsigned int d = 0; d < dim; ++d)
+      if (d < 2)
+        {
+          repetitions[d] = 2;
+          bottom_left[d] = -1.;
+          top_right[d]   = 1.;
+        }
+      else
+        {
+          repetitions[d] = 1;
+          bottom_left[d] = 0.;
+          top_right[d]   = 1.;
+        }
+
+    std::vector<int> cells_to_remove(dim, 1);
+    cells_to_remove[0] = -1;
+
+    GridGenerator::subdivided_hyper_L(
+      triangulation, repetitions, bottom_left, top_right, cells_to_remove);
+
+    triangulation.refine_global(prm.min_h_level);
+
+    const unsigned int min_fe_index = prm.min_p_degree - 1;
+    for (const auto &cell : dof_handler.active_cell_iterators())
+      if (cell->is_locally_owned())
+        cell->set_active_fe_index(min_fe_index);
+  }
+
+
+
+  // @sect4{LaplaceProblem::setup_system}
+
+  // This function looks exactly like the one of step-40, but you will notice
+  // the absence of the system matrix as well as the scaffold that surrounds
+  // it. Instead, we will initialize the MatrixFree formulation of the
+  // laplace_operator here. For boundary conditions, we will use the Solution
+  // class introduced earlier in this tutorial.
+  template <int dim>
+  void LaplaceProblem<dim>::setup_system()
+  {
+    TimerOutput::Scope t(computing_timer, "setup system");
+
+    dof_handler.distribute_dofs(fe_collection);
+
+    locally_owned_dofs = dof_handler.locally_owned_dofs();
+    DoFTools::extract_locally_relevant_dofs(dof_handler,
+                                            locally_relevant_dofs);
+
+    locally_relevant_solution.reinit(locally_owned_dofs,
+                                     locally_relevant_dofs,
+                                     mpi_communicator);
+    system_rhs.reinit(locally_owned_dofs, mpi_communicator);
+
+    constraints.clear();
+    constraints.reinit(locally_relevant_dofs);
+    DoFTools::make_hanging_node_constraints(dof_handler, constraints);
+    VectorTools::interpolate_boundary_values(
+      mapping_collection, dof_handler, 0, Solution<dim>(), constraints);
+    constraints.close();
+
+    laplace_operator.reinit(mapping_collection,
+                            dof_handler,
+                            quadrature_collection,
+                            constraints,
+                            system_rhs);
+  }
+
+
+
+  // @sect4{LaplaceProblem::print_diagnostics}
+
+  // This is a function that prints additional diagnostics about the equation
+  // system and its partitioning. In addition to the usual global number of
+  // active cells and degrees of freedom, we also output their local
+  // equivalents. For an orderly output, we will communicate the local
+  // quantities with a Utilities::MPI::gather operation to the first process,
+  // which will then output all information. Output of local quantities is
+  // limited to the first 8 processes to avoid cluttering the terminal.
+  //
+  // Furthermore, we would like to print the frequencies of the polynomial
+  // degrees in the numerical discretization. Since this information is only
+  // stored locally, we will count the finite elements on locally owned cells
+  // and later communicate them via Utilities::MPI::sum.
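+  // The frequency count in the last block of this function relies on the
+  // element-wise variant of Utilities::MPI::sum. As a minimal sketch of that
+  // behavior (an illustration with made-up numbers, not part of the program):
+  // if two processes hold the local counts {2, 0, 1} and {1, 3, 0}, then after
+  // @code
+  //   Utilities::MPI::sum(n_fe_indices, mpi_communicator, n_fe_indices);
+  // @endcode
+  // every process holds the global counts {3, 3, 1}. Note that the same array
+  // serves as both input and output here, which this function supports.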
+  template <int dim>
+  void LaplaceProblem<dim>::print_diagnostics()
+  {
+    const unsigned int first_n_processes =
+      std::min<unsigned int>(8,
+                             Utilities::MPI::n_mpi_processes(mpi_communicator));
+    const bool output_cropped =
+      first_n_processes < Utilities::MPI::n_mpi_processes(mpi_communicator);
+
+    {
+      pcout << " Number of active cells: "
+            << triangulation.n_global_active_cells() << std::endl
+            << " by partition: ";
+
+      std::vector<unsigned int> n_active_cells_per_subdomain =
+        Utilities::MPI::gather(mpi_communicator,
+                               triangulation.n_locally_owned_active_cells());
+      for (unsigned int i = 0; i < first_n_processes; ++i)
+        pcout << ' ' << n_active_cells_per_subdomain[i];
+      if (output_cropped)
+        pcout << " ...";
+      pcout << std::endl;
+    }
+
+    {
+      pcout << " Number of degrees of freedom: " << dof_handler.n_dofs()
+            << std::endl
+            << " by partition: ";
+
+      std::vector<types::global_dof_index> n_dofs_per_subdomain =
+        Utilities::MPI::gather(mpi_communicator,
+                               dof_handler.n_locally_owned_dofs());
+      for (unsigned int i = 0; i < first_n_processes; ++i)
+        pcout << ' ' << n_dofs_per_subdomain[i];
+      if (output_cropped)
+        pcout << " ...";
+      pcout << std::endl;
+    }
+
+    {
+      std::vector<types::global_dof_index> n_constraints_per_subdomain =
+        Utilities::MPI::gather(mpi_communicator, constraints.n_constraints());
+
+      pcout << " Number of constraints: "
+            << std::accumulate(n_constraints_per_subdomain.begin(),
+                               n_constraints_per_subdomain.end(),
+                               0)
+            << std::endl
+            << " by partition: ";
+      for (unsigned int i = 0; i < first_n_processes; ++i)
+        pcout << ' ' << n_constraints_per_subdomain[i];
+      if (output_cropped)
+        pcout << " ...";
+      pcout << std::endl;
+    }
+
+    {
+      std::vector<unsigned int> n_fe_indices(fe_collection.size(), 0);
+      for (const auto &cell : dof_handler.active_cell_iterators())
+        if (cell->is_locally_owned())
+          n_fe_indices[cell->active_fe_index()]++;
+
+      Utilities::MPI::sum(n_fe_indices, mpi_communicator, n_fe_indices);
+
+      pcout << " Frequencies of poly. degrees:";
+      for (unsigned int i = 0; i < fe_collection.size(); ++i)
+        if (n_fe_indices[i] > 0)
+          pcout << ' ' << fe_collection[i].degree << ":" << n_fe_indices[i];
+      pcout << std::endl;
+    }
+  }
+
+
+
+  // @sect4{LaplaceProblem::solve_system}
+
+  // The scaffold around the solution is similar to the one of step-40. We
+  // prepare a vector that matches the requirements of MatrixFree and collect
+  // the locally relevant degrees of freedom after we have solved the equation
+  // system. The solution happens with the function introduced earlier.
+  template <int dim>
+  void LaplaceProblem<dim>::solve_system()
+  {
+    TimerOutput::Scope t(computing_timer, "solve system");
+
+    LinearAlgebra::distributed::Vector<double> completely_distributed_solution;
+    laplace_operator.initialize_dof_vector(completely_distributed_solution);
+
+    SolverControl solver_control(system_rhs.size(),
+                                 prm.tolerance_factor * system_rhs.l2_norm());
+
+    solve_with_gmg(solver_control,
+                   laplace_operator,
+                   completely_distributed_solution,
+                   system_rhs,
+                   prm.mg_data,
+                   mapping_collection,
+                   dof_handler,
+                   quadrature_collection);
+
+    pcout << " Solved in " << solver_control.last_step() << " iterations."
+          << std::endl;
+
+    constraints.distribute(completely_distributed_solution);
+
+    locally_relevant_solution.copy_locally_owned_data_from(
+      completely_distributed_solution);
+    locally_relevant_solution.update_ghost_values();
+  }
+
+
+
+  // @sect4{LaplaceProblem::compute_indicators}
+
+  // This function contains only a part of the typical refine_grid function
+  // from other tutorials and is new in that sense. Here, we will only
+  // calculate all indicators for adaptation without actually refining the
+  // grid.
+  // We do this for the purpose of writing all indicators to the file system,
+  // so we store them for later.
+  //
+  // Since we are dealing with an elliptic problem, we will make use of the
+  // KellyErrorEstimator again, but with a slight difference. Modifying the
+  // scaling factor of the underlying face integrals to be dependent on the
+  // actual polynomial degree of the neighboring elements is favorable in
+  // hp-adaptive applications @cite davydov2017hp. We can do this by specifying
+  // the very last parameter from the additional ones you notice. The others
+  // are actually just the defaults.
+  //
+  // For the purpose of hp-adaptation, we will calculate smoothness estimates
+  // with the strategy presented in the tutorial introduction and use the
+  // implementation in SmoothnessEstimator::Legendre. In the Parameters struct,
+  // we set the minimal polynomial degree to 2 as it seems that the smoothness
+  // estimation algorithms have trouble with linear elements.
+  template <int dim>
+  void LaplaceProblem<dim>::compute_indicators()
+  {
+    TimerOutput::Scope t(computing_timer, "compute indicators");
+
+    estimated_error_per_cell.grow_or_shrink(triangulation.n_active_cells());
+    KellyErrorEstimator<dim>::estimate(
+      dof_handler,
+      face_quadrature_collection,
+      std::map<types::boundary_id, const Function<dim> *>(),
+      locally_relevant_solution,
+      estimated_error_per_cell,
+      /*component_mask=*/ComponentMask(),
+      /*coefficients=*/nullptr,
+      /*n_threads=*/numbers::invalid_unsigned_int,
+      /*subdomain_id=*/numbers::invalid_subdomain_id,
+      /*material_id=*/numbers::invalid_material_id,
+      /*strategy=*/
+      KellyErrorEstimator<dim>::Strategy::face_diameter_over_twice_max_degree);
+
+    hp_decision_indicators.grow_or_shrink(triangulation.n_active_cells());
+    SmoothnessEstimator::Legendre::coefficient_decay(*legendre,
+                                                     dof_handler,
+                                                     locally_relevant_solution,
+                                                     hp_decision_indicators);
+  }
+
+
+
+  // @sect4{LaplaceProblem::adapt_resolution}
+
+  // With the previously calculated indicators, we will finally flag all cells
+  // for adaptation and also execute refinement in this function. As in
+  // previous tutorials, we will use the "fixed number" strategy, but now for
+  // hp-adaptation.
+  template <int dim>
+  void LaplaceProblem<dim>::adapt_resolution()
+  {
+    TimerOutput::Scope t(computing_timer, "adapt resolution");
+
+    // First, we will set refine and coarsen flags based on the error estimates
+    // on each cell. There is nothing new here.
+    //
+    // We will use general refine and coarsen fractions that have been
+    // elaborated in the other deal.II tutorials: using the fixed number
+    // strategy, we will flag 30% of all cells for refinement and 3% for
+    // coarsening, as provided in the Parameters struct.
+    parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number(
+      triangulation,
+      estimated_error_per_cell,
+      prm.refine_fraction,
+      prm.coarsen_fraction);
+
+    // Next, we will make all adjustments for hp-adaptation. We want to refine
+    // and coarsen those cells flagged in the previous step, but need to decide
+    // if we would like to do it by adjusting the grid resolution or the
+    // polynomial degree.
+    //
+    // The next function call sets future FE indices according to the
+    // previously calculated smoothness indicators as p-adaptation indicators.
+    // These indices will only be set on those cells that have refine or
+    // coarsen flags assigned.
+    //
+    // For the p-adaptation fractions, we will take an educated guess.
+    // Since we only expect a single singularity in our scenario, i.e., in the
+    // origin of the domain, and a smooth solution anywhere else, we would like
+    // to strongly prefer p-adaptation over h-adaptation. This is reflected in
+    // our choice of a fraction of 90% for both p-refinement and p-coarsening.
+    hp::Refinement::p_adaptivity_fixed_number(dof_handler,
+                                              hp_decision_indicators,
+                                              prm.p_refine_fraction,
+                                              prm.p_coarsen_fraction);
+
+    // At this stage, we have both the future FE indices and the classic refine
+    // and coarsen flags set, of which the latter will be interpreted by
+    // Triangulation::execute_coarsening_and_refinement() for h-adaptation.
+    // We would like to only impose one type of adaptation on cells, which is
+    // what the next function will sort out for us. In short, on cells which
+    // have both types of indicators assigned, we will favor the p-adaptation
+    // one and remove the h-adaptation one.
+    hp::Refinement::choose_p_over_h(dof_handler);
+
+    // After setting all indicators, we will remove those that exceed the
+    // specified limits of the provided level ranges in the Parameters struct.
+    // This limitation naturally arises for p-adaptation as the number of
+    // supplied finite elements is limited. In addition, we registered a custom
+    // hierarchy for p-adaptation in the constructor. Now, we need to do this
+    // manually in the h-adaptive context like in step-31.
+    //
+    // We will iterate over all cells on the designated min and max levels and
+    // remove the corresponding flags. As an alternative, we could also flag
+    // these cells for p-adaptation by setting future FE indices accordingly
+    // instead of simply clearing the refine and coarsen flags.
+    Assert(triangulation.n_levels() >= prm.min_h_level + 1 &&
+             triangulation.n_levels() <= prm.max_h_level + 1,
+           ExcInternalError());
+
+    if (triangulation.n_levels() > prm.max_h_level)
+      for (const auto &cell :
+           triangulation.active_cell_iterators_on_level(prm.max_h_level))
+        cell->clear_refine_flag();
+
+    for (const auto &cell :
+         triangulation.active_cell_iterators_on_level(prm.min_h_level))
+      cell->clear_coarsen_flag();
+
+    // In the end, we are left to execute coarsening and refinement. Here, not
+    // only the grid will be updated, but also all previous future FE indices
+    // will become active.
+    //
+    // Remember that we have attached functions to triangulation signals in the
+    // constructor, which will be triggered in this function call. So there is
+    // even more happening: weighted repartitioning will be performed to ensure
+    // load balancing, and the difference of p-levels between neighboring cells
+    // will be limited.
+    triangulation.execute_coarsening_and_refinement();
+  }
+
+
+
+  // @sect4{LaplaceProblem::output_results}
+
+  // Writing results to the file system in parallel applications works exactly
+  // like in step-40. In addition to the data containers that we prepared
+  // throughout the tutorial, we would also like to write out the polynomial
+  // degree of each finite element on the grid as well as the subdomain each
+  // cell belongs to. We prepare necessary containers for this in the scope of
+  // this function.
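+  // The call to DataOut::write_vtu_with_pvtu_record() at the end of this
+  // function bundles the parallel output into a fixed number of files. To make
+  // the trailing arguments of the call used below explicit (an annotated
+  // sketch of that same call, with the meaning of each argument given as a
+  // comment):
+  // @code
+  //   data_out.write_vtu_with_pvtu_record(
+  //     "./",              // destination directory
+  //     "solution",        // base name of the output files
+  //     cycle,             // counter that becomes part of the file name
+  //     mpi_communicator,  // communicator of all participating processes
+  //     2,                 // number of digits used for the counter
+  //     1);                // number of .vtu files to group the data into
+  // @endcode
+  // With these values, each cycle produces a single .vtu file plus a .pvtu
+  // record that references it, independently of how many processes are
+  // involved.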
+  template <int dim>
+  void LaplaceProblem<dim>::output_results(const unsigned int cycle)
+  {
+    TimerOutput::Scope t(computing_timer, "output results");
+
+    Vector<float> fe_degrees(triangulation.n_active_cells());
+    for (const auto &cell : dof_handler.active_cell_iterators())
+      if (cell->is_locally_owned())
+        fe_degrees(cell->active_cell_index()) = cell->get_fe().degree;
+
+    Vector<float> subdomain(triangulation.n_active_cells());
+    for (auto &subd : subdomain)
+      subd = triangulation.locally_owned_subdomain();
+
+    DataOut<dim> data_out;
+    data_out.attach_dof_handler(dof_handler);
+    data_out.add_data_vector(locally_relevant_solution, "solution");
+    data_out.add_data_vector(fe_degrees, "fe_degree");
+    data_out.add_data_vector(subdomain, "subdomain");
+    data_out.add_data_vector(estimated_error_per_cell, "error");
+    data_out.add_data_vector(hp_decision_indicators, "hp_indicator");
+    data_out.build_patches(mapping_collection);
+
+    data_out.write_vtu_with_pvtu_record(
+      "./", "solution", cycle, mpi_communicator, 2, 1);
+  }
+
+
+
+  // @sect4{LaplaceProblem::run}
+
+  // The actual run function again looks very similar to the one of step-40.
+  // The only addition is the bracketed section that precedes the actual cycle
+  // loop. Here, we will pre-calculate the Legendre transformation matrices. In
+  // general, these will be calculated on the fly via lazy allocation whenever
+  // a certain matrix is needed. For timing purposes however, we would like to
+  // calculate them all at once before the actual time measurement begins. We
+  // will thus designate their calculation to their own scope.
+  template <int dim>
+  void LaplaceProblem<dim>::run()
+  {
+    pcout << "Running with Trilinos on "
+          << Utilities::MPI::n_mpi_processes(mpi_communicator)
+          << " MPI rank(s)..." << std::endl;
+
+    {
+      pcout << "Calculating transformation matrices..." << std::endl;
+      TimerOutput::Scope t(computing_timer, "calculate transformation");
+      legendre->precalculate_all_transformation_matrices();
+    }
+
+    for (unsigned int cycle = 0; cycle < prm.n_cycles; ++cycle)
+      {
+        pcout << "Cycle " << cycle << ':' << std::endl;
+
+        if (cycle == 0)
+          initialize_grid();
+        else
+          adapt_resolution();
+
+        setup_system();
+
+        print_diagnostics();
+
+        solve_system();
+
+        compute_indicators();
+
+        if (Utilities::MPI::n_mpi_processes(mpi_communicator) <= 32)
+          output_results(cycle);
+
+        computing_timer.print_summary();
+        computing_timer.reset();
+
+        pcout << std::endl;
+      }
+  }
+} // namespace Step75
+
+
+
+// @sect4{main()}
+
+// The final function is the main function that will ultimately create and run
+// a LaplaceProblem instantiation. Its structure is similar to that of most
+// other tutorial programs.
+int main(int argc, char *argv[])
+{
+  try
+    {
+      using namespace dealii;
+      using namespace Step75;
+
+      Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+      Parameters prm;
+      LaplaceProblem<2> laplace_problem(prm);
+      laplace_problem.run();
+    }
+  catch (std::exception &exc)
+    {
+      std::cerr << std::endl
+                << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+      std::cerr << "Exception on processing: " << std::endl
+                << exc.what() << std::endl
+                << "Aborting!" << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+
+      return 1;
+    }
+  catch (...)
+    {
+      std::cerr << std::endl
+                << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+      std::cerr << "Unknown exception!" << std::endl
+                << "Aborting!"
<< std::endl + << "----------------------------------------------------" + << std::endl; + return 1; + } + + return 0; +} diff --git a/include/deal.II/base/geometric_utilities.h b/include/deal.II/base/geometric_utilities.h index 8b7c5771af..0ea6e108bd 100644 --- a/include/deal.II/base/geometric_utilities.h +++ b/include/deal.II/base/geometric_utilities.h @@ -51,6 +51,8 @@ namespace GeometricUtilities * \theta &= {\rm atan}(y/x) \\ * \phi &= {\rm acos} (z/r) * @f} + * + * The use of this function is demonstrated in step-75. */ template std::array diff --git a/include/deal.II/distributed/cell_weights.h b/include/deal.II/distributed/cell_weights.h index 5cb72bc069..a19ff4e253 100644 --- a/include/deal.II/distributed/cell_weights.h +++ b/include/deal.II/distributed/cell_weights.h @@ -73,6 +73,8 @@ namespace parallel * {1000, 1})); * @endcode * + * The use of this class is demonstrated in step-75. + * * @note See Triangulation::Signals::cell_weight for more information on * weighting and load balancing. * diff --git a/include/deal.II/distributed/tria.h b/include/deal.II/distributed/tria.h index 27a9661790..44a7b37202 100644 --- a/include/deal.II/distributed/tria.h +++ b/include/deal.II/distributed/tria.h @@ -1034,6 +1034,8 @@ namespace parallel * the triangulation is still unchanged. After the modification, all * refine and coarsen flags describe how the traingulation will acutally * be refined. + * + * The use of this class is demonstrated in step-75. */ template class TemporarilyMatchRefineFlags : public Subscriptor diff --git a/include/deal.II/grid/grid_generator.h b/include/deal.II/grid/grid_generator.h index 69c06d0680..2da9f9590c 100644 --- a/include/deal.II/grid/grid_generator.h +++ b/include/deal.II/grid/grid_generator.h @@ -1115,6 +1115,8 @@ namespace GridGenerator * denotes cutting away cells in the reverse direction, so right to left, * top to bottom, and back to front. * + * A demonstration of this grid can be found in step-75. + * * This function may be used to generate a mesh for a backward * facing step, a useful domain for benchmark problems in fluid dynamics. * The first image is a backward facing step in 3D, generated by -- 2.39.5