From 654e89aa21bc7b9cd16c522c2c1191f109990a25 Mon Sep 17 00:00:00 2001 From: tcclevenger Date: Mon, 20 Apr 2020 11:12:11 -0400 Subject: [PATCH] update intro/changes --- doc/doxygen/tutorial/tutorial.h.in | 17 +- doc/news/changes/major/20200420step50 | 8 + examples/step-50/amg_2d.prm | 17 +- examples/step-50/amg_3d.prm | 19 + examples/step-50/doc/intro.dox | 186 +++--- examples/step-50/doc/results.dox | 387 +++++++++--- examples/step-50/doc/tooltip | 2 +- examples/step-50/gmg_2d.prm | 12 - examples/step-50/gmg_mb_2d.prm | 19 + examples/step-50/gmg_mb_3d.prm | 19 + examples/step-50/gmg_mf_2d.prm | 19 + examples/step-50/gmg_mf_3d.prm | 19 + examples/step-50/step-50.cc | 822 ++++++++++++++++++++------ 13 files changed, 1182 insertions(+), 364 deletions(-) create mode 100644 doc/news/changes/major/20200420step50 create mode 100644 examples/step-50/amg_3d.prm delete mode 100644 examples/step-50/gmg_2d.prm create mode 100644 examples/step-50/gmg_mb_2d.prm create mode 100644 examples/step-50/gmg_mb_3d.prm create mode 100644 examples/step-50/gmg_mf_2d.prm create mode 100644 examples/step-50/gmg_mf_3d.prm diff --git a/doc/doxygen/tutorial/tutorial.h.in b/doc/doxygen/tutorial/tutorial.h.in index 52a222bbd5..de48ed344d 100644 --- a/doc/doxygen/tutorial/tutorial.h.in +++ b/doc/doxygen/tutorial/tutorial.h.in @@ -465,6 +465,12 @@ * * * + * step-50 + * Geometric multigrid on adaptive meshes distributed in parallel. + *
Keywords: Multigrid, MGLevelObject, MGConstrainedDoFs, IndexSet, MGTools, PreconditionMG, FEInterfaceValues, MeshWorker::mesh_loop() + * + * + * * step-51 * Solving the convection-diffusion equation with a hybridizable * discontinuous Galerkin method using face elements. @@ -668,9 +674,11 @@ * step-31, * step-32, * step-33, + * step-40, * step-41, * step-42, * step-43, + * step-50, * step-55 * * @@ -682,6 +690,7 @@ * step-17, * step-18, * step-40, + * step-50, * step-55 * * @@ -691,7 +700,9 @@ * * * step-32, + * step-40, * step-42, + * step-50, * step-55 * * @@ -720,6 +731,7 @@ * step-37, * step-40, * step-42, + * step-50, * step-55, * step-59, * step-67, @@ -766,7 +778,8 @@ * step-6, * step-9, * step-14, - * step-39 + * step-39, + * step-50 * * * @@ -937,6 +950,7 @@ * step-41, * step-42, * step-43, + * step-50, * step-56, * step-59, * step-63 @@ -953,6 +967,7 @@ * step-37, * step-40, * step-42, + * step-50, * step-55, * step-59 * diff --git a/doc/news/changes/major/20200420step50 b/doc/news/changes/major/20200420step50 new file mode 100644 index 0000000000..7cce146b82 --- /dev/null +++ b/doc/news/changes/major/20200420step50 @@ -0,0 +1,8 @@ +New: The Step-50 tutorial has been updated. We discuss how to +use the multilevel preconditioner from step-16 in parallel and +give a comparison of parallel scaling between GMG and AMG for +a 3D Laplace example where adaptive refinement comes from a +residual-based, cell-wise a posteriori error estimator. +
+(Thomas C. Clevenger, Timo Heister, 2020/04/20) + diff --git a/examples/step-50/amg_2d.prm b/examples/step-50/amg_2d.prm index 426f17c651..6343a8eaee 100644 --- a/examples/step-50/amg_2d.prm +++ b/examples/step-50/amg_2d.prm @@ -1,12 +1,19 @@ # Listing of Parameters # --------------------- -set assembler = AMG +# The problem dimension. set dim = 2 + # Number of adaptive refinement steps. -set n_steps = 20 +set n_steps = 16 + +# Output graphical results. set output = true -# Select how to refine. Options: global|kelly|estimator -set refinement type = estimator +# Dampen factor for the smoother. set smoother dampen = 1.0 -set smoother steps = 2 + +# Number of smoother steps. +set smoother steps = 1 + +# Switch between GMG and AMG. +set solver = AMG diff --git a/examples/step-50/amg_3d.prm b/examples/step-50/amg_3d.prm new file mode 100644 index 0000000000..9131f07641 --- /dev/null +++ b/examples/step-50/amg_3d.prm @@ -0,0 +1,19 @@ +# Listing of Parameters +# --------------------- +# The problem dimension. +set dim = 3 + +# Number of adaptive refinement steps. +set n_steps = 12 + +# Output graphical results. +set output = true + +# Dampen factor for the smoother. +set smoother dampen = 1.0 + +# Number of smoother steps. +set smoother steps = 1 + +# Switch between GMG and AMG. +set solver = AMG diff --git a/examples/step-50/doc/intro.dox b/examples/step-50/doc/intro.dox index e597798731..eaffbcbd42 100644 --- a/examples/step-50/doc/intro.dox +++ b/examples/step-50/doc/intro.dox @@ -1,90 +1,122 @@
-
-This program has evolved from a version originally written by Guido
-Kanschat in 2003. It has undergone significant revisions by Bärbel
-Janssen, Guido Kanschat and Wolfgang Bangerth in 2009 and 2010 to demonstrate
-multigrid algorithms on adaptively refined meshes.
+
+This program was contributed by Thomas C. Clevenger and Timo Heister.
+
+This material is based upon work partly supported by the National
+Science Foundation Awards DMS-1901529, OAC-2015848, and EAR-1925575, by the
+Computational Infrastructure in Geodynamics initiative (CIG), through the NSF
+under Awards EAR-0949446 and EAR-1550901, and by The University of
+California -- Davis.
+@note As a prerequisite of this program, you need to have both p4est and either the PETSc +or Trilinos library installed. The installation of deal.II together with these additional +libraries is described in the README file. + +
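+
+If in doubt, the configuration can also be checked at compile time via the
+macros deal.II exports from its configuration header (a sketch; the tutorial
+itself relies on the build system for this check):
+@code
+#include <deal.II/base/config.h>
+
+#if !defined(DEAL_II_WITH_P4EST) || \
+  !(defined(DEAL_II_WITH_PETSC) || defined(DEAL_II_WITH_TRILINOS))
+#  error "This program requires p4est and either PETSc or Trilinos."
+#endif
+@endcode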

<h3>Introduction</h3>

-This example shows the basic usage of the multilevel functions in -deal.II. It solves the same problem as used in step-6, -but demonstrating the things one has to provide when using multigrid -as a preconditioner. In particular, this requires that we define a -hierarchy of levels, provide transfer operators from one level to the -next and back, and provide representations of the Laplace operator on -each level. - -In order to allow sufficient flexibility in conjunction with systems of -differential equations and block preconditioners, quite a few different objects -have to be created before starting the multilevel method, although -most of what needs to be done is provided by deal.II itself. These are - -Most of these objects will only be needed inside the function that -actually solves the linear system. There, these objects are combined -in an object of type Multigrid, containing the implementation of the -V-cycle, which is in turn used by the preconditioner PreconditionMG, -ready for plug-in into a linear solver of the LAC library. - -The multilevel method in deal.II follows in many respects the outlines -of the various publications by James Bramble, Joseph Pasciak and -Jinchao Xu (i.e. the "BPX" framework). In order to understand many of -the options, a rough familiarity with their work is quite helpful. - -However, in comparison to this framework, the implementation in -deal.II has to take into account the fact that we want to solve linear -systems on adaptively refined meshes. This leads to the complication -that it isn't quite as clear any more what exactly a "level" in a -multilevel hierarchy of a mesh is. The following image shows what we -consider to be a "level": - -

- @image html "hanging_nodes.png" "" -

-
-In other words, the fine level in this mesh consists only of the
-degrees of freedom that are defined on the refined cells, but does not
-extend to that part of the domain that is not refined. While this
-guarantees that the overall effort grows as ${\cal O}(N)$ as necessary
-for optimal multigrid complexity, it leads to problems when defining
-where to smooth and what boundary conditions to pose for the operators
-defined on individual levels if the level boundary is not an external
-boundary. These questions are discussed in detail in the
-@ref mg_paper "Multigrid paper by Janssen and Kanschat" that describes
-the implementation in deal.II.
-
+This example shows the usage of the multilevel functions in deal.II on
+distributed meshes and gives a comparison between geometric and algebraic
+multigrid methods. The algebraic multigrid (AMG) preconditioner is the same
+as that used in step-40, and the geometric multigrid (GMG) preconditioner is
+based on the one used in step-16. Here we discuss the changes needed for
+parallel computations.
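+
+The key ingredient, as done in the LaplaceProblem constructor further down,
+is to ask parallel::distributed::Triangulation to also build and partition
+the hierarchy of coarser level meshes (a minimal sketch of the construction
+used below):
+@code
+parallel::distributed::Triangulation<dim> triangulation(
+  mpi_communicator,
+  Triangulation<dim>::limit_level_difference_at_vertices,
+  parallel::distributed::Triangulation<dim>::construct_multigrid_hierarchy);
+@endcode
+Without the construct_multigrid_hierarchy setting only the active cells are
+partitioned, which is all the AMG preconditioner requires.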

<h3>The testcase</h3>

-The problem we solve here is exactly the same as in
-step-6, the only difference being the solver we use
-here. You may want to look there for a definition of what we solve,
-right hand side and boundary conditions. Obviously, the program would
-also work if we changed the geometry and other pieces of data that
-defines this particular problem.
-
-The things that are new are all those parts that concern the
-multigrid. In particular, this includes the following members of the
-main class:
-- LaplaceProblem::mg_dof_handler
-- LaplaceProblem::mg_sparsity
-- LaplaceProblem::mg_matrices
-- LaplaceProblem::mg_interface_matrices_up
-- LaplaceProblem::assemble_multigrid ()
-- LaplaceProblem::solve ()
-Take a look at these functions.
+We consider the variable-coefficient Laplacian weak formulation
+@f{align*}
+ (\epsilon \nabla u, \nabla v) = (f,v) \quad \forall v \in V_h
+@f}
+on the domain $\Omega = [-1,1]^\text{dim} \setminus [0,1]^\text{dim}$ (an
+L-shaped domain for 2D and a Fichera corner for 3D) with $\epsilon = 1$ if
+$\min(x,y,z)>-\frac{1}{2}$ and $\epsilon = 100$ otherwise. The boundary
+conditions are $u=0$ on the whole boundary and the right-hand side is $f=1$.
+We use continuous Q2 elements to discretize $V_h$ and use a residual-based,
+cell-wise a posteriori error estimator
+$e(K) = e_{\text{cell}}(K) + e_{\text{face}}(K)$ from _CITE EST PAPER_ with
+@f{align*}
+ e_{\text{cell}}(K) = h^2 \| f + \epsilon \triangle u \|_K^2, \qquad
+ e_{\text{face}}(K) = \sum_F h_F \| [ \epsilon \nabla u \cdot n ] \|_F^2.
+@f}
+The following figure visualizes the solution and refinement for 2D, and, for
+3D, the solution (left) and a slice for $x$ close to the center of the domain
+showing the adaptively refined mesh (right).
+
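+In the program below, the discontinuous coefficient is implemented along the
+following lines (a sketch of the scalar version; the program also defines a
+vectorized overload used by the matrix-free operator):
+@code
+template <int dim>
+double Coefficient<dim>::value(const Point<dim> &p, const unsigned int) const
+{
+  // epsilon = 100 in the region where min(x,y,z) < -1/2, and 1 elsewhere:
+  for (int d = 0; d < dim; ++d)
+    if (p[d] < -0.5)
+      return 100.0;
+  return 1.0;
+}
+@endcode
+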
<h3>Workload imbalance</h3>

+For the active mesh, we use the parallel::distributed::Triangulation class as
+done in step-40, which uses functionality in the external library
+p4est for the distribution of the active cells
+among processors. For the non-active cells in the multilevel hierarchy,
+deal.II implements what we will refer to as the "first-child rule": for each
+cell in the hierarchy, we recursively assign the parent of a cell to the
+owner of the first child cell. The following figures give an example of such
+a distribution. Here the left image represents the active cells for a sample
+2D mesh partitioned using a space-filling curve (similar to p4est), the
+center image gives the tree representation of the active mesh, and the right
+image gives the multilevel hierarchy of cells. The colors and numbers
+represent the different processors. The circular nodes in the tree are the
+non-active cells which are distributed using the "first-child rule".
+
+
+Included among the output to screen in this example is the value
+"Workload imbalance" given by the function MGTools::workload_imbalance().
+This value, which equals $1/\mathbb{E}$ for the partition efficiency
+$\mathbb{E}$ defined below, quantifies the overhead produced by not having a
+perfect work balance on each level of the multigrid hierarchy (as is evident
+from the example above).
+
+For defining $\mathbb{E}$, let $N_{\ell}$ be the number of cells on level
+$\ell$ (both active and non-active cells) and let $N_{\ell,p}$ be the number
+of those cells owned by processor $p$. Assuming that the workload for any one
+processor is proportional to the number of cells owned by that processor, the
+optimal workload per processor is given by
+@f{align*}
+W_{\text{opt}}=\frac1{n_{p}}\sum_{\ell}\sum_{p}N_{\ell,p}=\frac1{n_{p}}\sum_{\ell} N_{\ell}.
+@f}
+Next, assuming a synchronization of work on each level (i.e., on each level
+of a vcycle, work must be completed by all processors before moving on to the
+next level), the limiting effort on each level is given by
+@f{align*}
+W_\ell = \max_{p} N_{\ell,p},
+@f}
+and the total parallel complexity is
+@f{align*}
+W = \sum_{\ell} W_\ell.
+@f}
+Then we define $\mathbb{E}$ as the ratio of the optimal partition to the
+parallel complexity of the current partition
+@f{align*}
+  \mathbb{E} = \frac{W_{\text{opt}}}{W}.
+@f}
+For the example distribution above, we have
+@f{align*}
+W_{\text{opt}}&=\frac{1}{n_p}\sum_{\ell} N_{\ell} = \frac{1}{3} \left(1+4+4\right)= 3 \qquad
+\\
+W &= \sum_\ell W_\ell = 1 + 2 + 3 = 6
+\\
+\mathbb{E} &= \frac{W_{\text{opt}}}{W} = \frac12.
+@f}
+
+_CITE MG PAPER_ contains a full discussion of the partition efficiency model
+and the effect the imbalance has on the GMG vcycle timing. In summary, the
+value of $\mathbb{E}$ is highly dependent on the type of mesh refinement used
+and has the optimal value $\mathbb{E} = 1$ for globally refined meshes.
+Typically for adaptively refined meshes, the number of processors used to
+distribute a single mesh has a negative impact on $\mathbb{E}$, but only up
+to a leveling-off point where the imbalance remains relatively constant for
+an increasing number of processors, and further refinement has very little
+impact on $\mathbb{E}$. Finally, $1/\mathbb{E}$ was shown to give an accurate
+representation of the slowdown in parallel scaling expected for the timing of
+a vcycle.
diff --git a/examples/step-50/doc/results.dox b/examples/step-50/doc/results.dox
index 2d5fe0deb0..edbe4f8c48 100644
--- a/examples/step-50/doc/results.dox
+++ b/examples/step-50/doc/results.dox
@@ -1,98 +1,311 @@

<h1>Results</h1>

-The output that this program generates is, of course, the same as that -of step-6, so you may see there for more results. On the -other hand, since no tutorial program is a good one unless it has at -least one colorful picture, here is, again, the solution: +When you run the program, the screen output should look like the following: +@code +Cycle 0: + Number of active cells: 56 (2 global levels) + Workload imbalance: 1.14286 + Number of degrees of freedom: 665 (by level: 117, 665) + Number of CG iterations: 10 -When run, the output of this program is -
-Cycle 0:
-   Number of active cells:       20
-   Number of degrees of freedom: 25 (by level: 8, 25)
-   7 CG iterations needed to obtain convergence.
++---------------------------------------------+------------+------------+
+| Total wallclock time elapsed since start    |    0.0536s |            |
+|                                             |            |            |
+| Section                         | no. calls |  wall time | % of total |
++---------------------------------+-----------+------------+------------+
+| Assemble                        |         1 |    0.0026s |       4.8% |
+| Assemble multigrid              |         1 |   0.00303s |       5.6% |
+| Estimate                        |         1 |    0.0273s |        51% |
+| Setup                           |         1 |   0.00477s |       8.9% |
+| Setup multigrid                 |         1 |   0.00539s |        10% |
+| Solve                           |         1 |   0.00801s |        15% |
+| Solve: 1 GMG vcycle             |         1 |  0.000655s |       1.2% |
+| Solve: CG                       |         1 |   0.00472s |       8.8% |
+| Solve: GMG preconditioner setup |         1 |   0.00232s |       4.3% |
++---------------------------------+-----------+------------+------------+
+
 Cycle 1:
-   Number of active cells:       44
-   Number of degrees of freedom: 57 (by level: 8, 25, 48)
-   8 CG iterations needed to obtain convergence.
+   Number of active cells:       126 (3 global levels)
+   Workload imbalance:           1.17483
+   Number of degrees of freedom: 1672 (by level: 117, 665, 1100)
+   Number of CG iterations:      11
+
+
++---------------------------------------------+------------+------------+
+| Total wallclock time elapsed since start    |    0.0861s |            |
+|                                             |            |            |
+| Section                         | no. calls |  wall time | % of total |
++---------------------------------+-----------+------------+------------+
+| Assemble                        |         1 |   0.00578s |       6.7% |
+| Assemble multigrid              |         1 |   0.00745s |       8.7% |
+| Estimate                        |         1 |    0.0281s |        33% |
+| Refine grid                     |         1 |   0.00992s |        12% |
+| Setup                           |         1 |   0.00878s |        10% |
+| Setup multigrid                 |         1 |    0.0115s |        13% |
+| Solve                           |         1 |    0.0144s |        17% |
+| Solve: 1 GMG vcycle             |         1 |  0.000868s |         1% |
+| Solve: CG                       |         1 |   0.00879s |        10% |
+| Solve: GMG preconditioner setup |         1 |   0.00414s |       4.8% |
++---------------------------------+-----------+------------+------------+
+
 Cycle 2:
-   Number of active cells:       92
-   Number of degrees of freedom: 117 (by level: 8, 25, 80, 60)
-   9 CG iterations needed to obtain convergence.
-Cycle 3:
-   Number of active cells:       188
-   Number of degrees of freedom: 221 (by level: 8, 25, 80, 200)
-   12 CG iterations needed to obtain convergence.
-Cycle 4:
-   Number of active cells:       416
-   Number of degrees of freedom: 485 (by level: 8, 25, 89, 288, 280)
-   13 CG iterations needed to obtain convergence.
-Cycle 5:
-   Number of active cells:       800
-   Number of degrees of freedom: 925 (by level: 8, 25, 89, 288, 784, 132)
-   14 CG iterations needed to obtain convergence.
-Cycle 6:
-   Number of active cells:       1628
-   Number of degrees of freedom: 1865 (by level: 8, 25, 89, 304, 1000, 1164, 72)
-   14 CG iterations needed to obtain convergence.
-Cycle 7:
-   Number of active cells:       3194
-   Number of degrees of freedom: 3603 (by level: 8, 25, 89, 328, 1032, 2200, 1392)
-   16 CG iterations needed to obtain convergence.
-
-That's not perfect — we would have hoped for a constant number
-of iterations rather than one that increases as we get more and more
-degrees of freedom — but it is also not far away. The reason for
-this is easy enough to understand, however: since we have a strongly
-varying coefficient, the operators that we assembly by quadrature on
-the lower levels become worse and worse approximations of the operator
-on the finest level. Consequently, even if we had perfect solvers on
-the coarser levels, they would not be good preconditioners on the
-finest level. This theory is easily tested by comparing results when
-we use a constant coefficient: in that case, the number of iterations
-remains constant at 9 after the first three or four refinement steps.
-
-We can also compare what this program produces with how @ref step_5
-"step-5" performed. To solve the same problem as in step-5, the only
-two changes that are necessary are (i) to replace the body of the
-function LaplaceProblem::refine_grid by a call to
-triangulation.refine_global(1), and (ii) to use the same
-SolverControl object and tolerance as in step-5 — the rest of the
-program remains unchanged. In that case, here is how the solvers used
-in step-5 and the multigrid solver used in the current program
-compare:
-
+.
+.
+.
+@endcode
+Here, the timing of the `solve()` function is split into three parts: setting
+up the multigrid preconditioner, execution of a single multigrid vcycle, and
+the CG solver. The vcycle that is timed is not necessary for the overall solve
+and is only meant to give insight into the different costs of AMG and GMG.
+Note also that, when using the AMG solver, "Workload imbalance" is not
+included in the output since a hierarchy of coarse meshes is not required.
+
+In addition to the AMG and GMG solvers in this tutorial, we include timings
+from a third, matrix-free (MF) GMG solver for the same problem (see the
+possible extensions section for a discussion of what the matrix-free solver
+requires). We will refer to the GMG solver in this tutorial as the
+matrix-based (MB) GMG solver.
+
+The following table gives the timings for setup, assembly, and solve for GMG
+and AMG on up to 256M DoFs and 7168 processors.
-<table align="center">
-  <tr><th>cells</th><th>step-5</th><th>step-16</th></tr>
-  <tr><td>20</td><td>13</td><td>6</td></tr>
-  <tr><td>80</td><td>17</td><td>7</td></tr>
-  <tr><td>320</td><td>29</td><td>9</td></tr>
-  <tr><td>1280</td><td>51</td><td>10</td></tr>
-  <tr><td>5120</td><td>94</td><td>11</td></tr>
-  <tr><td>20480</td><td>180</td><td>13</td></tr>
-</table>
+<table align="center" class="doxtable">
+<tr>
+  <th></th>
+  <th>Procs</th>
+  <th>Cycle</th>
+  <th>DoFs</th>
+  <th>Imbalance</th>
+  <th>Setup</th>
+  <th>Setup GMG</th>
+  <th>Assemble</th>
+  <th>Assemble GMG</th>
+  <th>Solve</th>
+</tr>
+<tr>
+  <th rowspan="4">MF-GMG</th>
+  <td>112</td><td>13</td><td>4M</td><td>0.37</td>
+  <td></td><td></td><td></td><td></td><td></td>
+</tr>
+<tr>
+  <td>448</td><td>15</td><td>16M</td><td>0.29</td>
+  <td></td><td></td><td></td><td></td><td></td>
+</tr>
+<tr>
+  <td>1792</td><td>17</td><td>65M</td><td>0.22</td>
+  <td></td><td></td><td></td><td></td><td></td>
+</tr>
+<tr>
+  <td>7168</td><td>19</td><td>256M</td><td>0.16</td>
+  <td></td><td></td><td></td><td></td><td></td>
+</tr>
+<tr>
+  <th rowspan="4">MB-GMG</th>
+  <td>112</td><td>13</td><td>4M</td><td>0.37</td>
+  <td></td><td></td><td></td><td></td><td></td>
+</tr>
+<tr>
+  <td>448</td><td>15</td><td>16M</td><td>0.29</td>
+  <td></td><td></td><td></td><td></td><td></td>
+</tr>
+<tr>
+  <td>1792</td><td>17</td><td>65M</td><td>0.22</td>
+  <td></td><td></td><td></td><td></td><td></td>
+</tr>
+<tr>
+  <td>7168</td><td>19</td><td>256M</td><td>0.16</td>
+  <td></td><td></td><td></td><td></td><td></td>
+</tr>
+<tr>
+  <th rowspan="4">AMG</th>
+  <td>112</td><td>13</td><td>4M</td><td>-</td>
+  <td></td><td></td><td></td><td></td><td></td>
+</tr>
+<tr>
+  <td>448</td><td>15</td><td>16M</td><td>-</td>
+  <td></td><td></td><td></td><td></td><td></td>
+</tr>
+<tr>
+  <td>1792</td><td>17</td><td>65M</td><td>-</td>
+  <td></td><td></td><td></td><td></td><td></td>
+</tr>
+<tr>
+  <td>7168</td><td>19</td><td>256M</td><td>-</td>
+  <td></td><td></td><td></td><td></td><td></td>
+</tr>
+</table>
-This isn't only fewer iterations than in step-5 (each of which
-is, however, much more expensive) but more importantly, the number of
-iterations also grows much more slowly under mesh refinement (again,
-it would be almost constant if the coefficient was constant rather
-than strongly varying as chosen here). This justifies the common
-observation that, whenever possible, multigrid methods should be used
-for second order problems.
+
+The following figure gives the strong scaling for each method for cycles 16
+(32M DoFs) and 19 (256M DoFs) on 56 to 28672 processors.
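+
+The "Workload imbalance" reported in the screen output is the value returned
+by MGTools::workload_imbalance(). A minimal sketch of querying it, assuming
+the triangulation was built with
+parallel::distributed::Triangulation::construct_multigrid_hierarchy:
+@code
+// Requires the distributed level hierarchy, i.e., a triangulation created
+// with the construct_multigrid_hierarchy setting:
+const double workload_imbalance = MGTools::workload_imbalance(triangulation);
+pcout << "   Workload imbalance: " << workload_imbalance << std::endl;
+@endcode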

<h3>Possible extensions</h3>

-A close inspection of this program's performance shows that it is mostly -dominated by matrix-vector operations. step-37 shows one way -how this can be avoided by working with matrix-free methods. - -Another avenue would be to use algebraic multigrid methods. The -geometric multigrid method used here can at times be a bit awkward to -implement because it needs all those additional data structures, and -it becomes even more difficult if the program is to run in %parallel on -machines coupled through MPI, for example. In that case, it would be -simpler if one could use a black-box preconditioner that uses some -sort of multigrid hierarchy for good performance but can figure out -level matrices and similar things out by itself. Algebraic multigrid -methods do exactly this, and we will use them in -step-31 for the solution of a Stokes problem. +

<h4>Add matrix-free GMG preconditioner</h4>

+The results above include timings from a matrix-free GMG preconditioner +which is not currently a part of this tutorial. See step-37 for an example +of such a preconditioner for the Laplace equation. + +It should be noted that the MatrixFree class is only compatible with the +dealii::LinearAlgebra::distributed::Vector class, while this tutorial uses either +PETSc or Trilinos vectors. It may be of use to define functions which copy between +two types of vectors, for example, for Trilinos vectors one could use the following: +@code +namespace ChangeVectorTypes +{ + void import(TrilinosWrappers::MPI::Vector & out, + const dealii::LinearAlgebra::ReadWriteVector &rwv, + const VectorOperation::values operation) + { + Assert(out.size() == rwv.size(), + ExcMessage( + "Both vectors need to have the same size for import() to work!")); + + Assert(out.locally_owned_elements() == rwv.get_stored_elements(), + ExcNotImplemented()); + + if (operation == VectorOperation::insert) + { + for (const auto idx : out.locally_owned_elements()) + out[idx] = rwv[idx]; + } + else if (operation == VectorOperation::add) + { + for (const auto idx : out.locally_owned_elements()) + out[idx] += rwv[idx]; + } + else + AssertThrow(false, ExcNotImplemented()); + + out.compress(operation); + } + + + void copy(TrilinosWrappers::MPI::Vector & out, + const dealii::LinearAlgebra::distributed::Vector &in) + { + dealii::LinearAlgebra::ReadWriteVector rwv( + out.locally_owned_elements()); + rwv.import(in, VectorOperation::insert); + // This import function doesn't exist until after dealii 9.0 + // Implemented above + import(out, rwv, VectorOperation::insert); + } + + void copy(dealii::LinearAlgebra::distributed::Vector &out, + const TrilinosWrappers::MPI::Vector & in) + { + dealii::LinearAlgebra::ReadWriteVector rwv; + rwv.reinit(in); + out.import(rwv, VectorOperation::insert); + } +} +@endcode + diff --git a/examples/step-50/doc/tooltip b/examples/step-50/doc/tooltip index 2fd65590a0..c7b7df11ca 100644 --- a/examples/step-50/doc/tooltip +++ b/examples/step-50/doc/tooltip @@ -1 +1 @@ -Multigrid on adaptive meshes. +Multigrid on adaptive meshes distributed in parallel. diff --git a/examples/step-50/gmg_2d.prm b/examples/step-50/gmg_2d.prm deleted file mode 100644 index 60e5257fdb..0000000000 --- a/examples/step-50/gmg_2d.prm +++ /dev/null @@ -1,12 +0,0 @@ -# Listing of Parameters -# --------------------- -set assembler = GMG -set dim = 2 -# Number of adaptive refinement steps. -set n_steps = 20 -set output = true - -# Select how to refine. Options: global|kelly|estimator -set refinement type = estimator -set smoother dampen = 1.0 -set smoother steps = 1 diff --git a/examples/step-50/gmg_mb_2d.prm b/examples/step-50/gmg_mb_2d.prm new file mode 100644 index 0000000000..59b264d54c --- /dev/null +++ b/examples/step-50/gmg_mb_2d.prm @@ -0,0 +1,19 @@ +# Listing of Parameters +# --------------------- +# The problem dimension. +set dim = 2 + +# Number of adaptive refinement steps. +set n_steps = 16 + +# Output graphical results. +set output = true + +# Dampen factor for the smoother. +set smoother dampen = 1.0 + +# Number of smoother steps. +set smoother steps = 1 + +# Switch between GMG and AMG. +set solver = MB diff --git a/examples/step-50/gmg_mb_3d.prm b/examples/step-50/gmg_mb_3d.prm new file mode 100644 index 0000000000..a9d4f86db8 --- /dev/null +++ b/examples/step-50/gmg_mb_3d.prm @@ -0,0 +1,19 @@ +# Listing of Parameters +# --------------------- +# The problem dimension. +set dim = 3 + +# Number of adaptive refinement steps. 
+set n_steps = 12 + +# Output graphical results. +set output = true + +# Dampen factor for the smoother. +set smoother dampen = 1.0 + +# Number of smoother steps. +set smoother steps = 1 + +# Switch between GMG and AMG. +set solver = MB diff --git a/examples/step-50/gmg_mf_2d.prm b/examples/step-50/gmg_mf_2d.prm new file mode 100644 index 0000000000..e4a3e20aad --- /dev/null +++ b/examples/step-50/gmg_mf_2d.prm @@ -0,0 +1,19 @@ +# Listing of Parameters +# --------------------- +# The problem dimension. +set dim = 2 + +# Number of adaptive refinement steps. +set n_steps = 16 + +# Output graphical results. +set output = true + +# Dampen factor for the smoother. +set smoother dampen = 1.0 + +# Number of smoother steps. +set smoother steps = 1 + +# Switch between GMG and AMG. +set solver = MF diff --git a/examples/step-50/gmg_mf_3d.prm b/examples/step-50/gmg_mf_3d.prm new file mode 100644 index 0000000000..d0d1a35b1d --- /dev/null +++ b/examples/step-50/gmg_mf_3d.prm @@ -0,0 +1,19 @@ +# Listing of Parameters +# --------------------- +# The problem dimension. +set dim = 3 + +# Number of adaptive refinement steps. +set n_steps = 12 + +# Output graphical results. +set output = true + +# Dampen factor for the smoother. +set smoother dampen = 1.0 + +# Number of smoother steps. +set smoother steps = 1 + +# Switch between GMG and AMG. +set solver = MF diff --git a/examples/step-50/step-50.cc b/examples/step-50/step-50.cc index 355ccb7090..10bf160960 100644 --- a/examples/step-50/step-50.cc +++ b/examples/step-50/step-50.cc @@ -42,6 +42,9 @@ #include #include #include +#include +#include +#include #include #include #include @@ -49,6 +52,7 @@ #include #include #include +#include #include #include #include @@ -80,6 +84,41 @@ namespace LA using namespace dealii; + + +#ifdef USE_PETSC_LA +// No ChangeVectorTypes::copy() for PETSc vector types. +// Vector::import() needs to be implemented. +#else +/** + * Matrix-free operators must use deal.II defined vectors, rest of the code is + * based on Trilinos vectors. 
+ */ +namespace ChangeVectorTypes +{ + template + void copy(TrilinosWrappers::MPI::Vector &out, + const dealii::LinearAlgebra::distributed::Vector &in) + { + dealii::LinearAlgebra::ReadWriteVector rwv( + out.locally_owned_elements()); + rwv.import(in, VectorOperation::insert); + out.import(rwv, VectorOperation::insert); + } + + template + void copy(dealii::LinearAlgebra::distributed::Vector &out, + const TrilinosWrappers::MPI::Vector &in) + { + dealii::LinearAlgebra::ReadWriteVector rwv; + rwv.reinit(in); + out.import(rwv, VectorOperation::insert); + } +} // namespace ChangeVectorTypes +#endif + + + template class RightHandSide : public Function { @@ -99,6 +138,10 @@ class Coefficient : public Function public: virtual double value(const Point & p, const unsigned int component = 0) const override; + + template + VectorizedArray value(const Point> &p, + const unsigned int component = 0) const; }; @@ -114,6 +157,32 @@ double Coefficient::value(const Point &p, const unsigned int) const } +template +template +VectorizedArray +Coefficient::value(const Point> &p, + const unsigned int) const +{ + VectorizedArray return_value = VectorizedArray(); + for (unsigned int i = 0; i < VectorizedArray::size(); ++i) + { + bool found = false; + for (int d = 0; d < dim; ++d) + if (p[d][i] < -0.5) + { + return_value[i] = 100.0; + found = true; + break; + } + + if (!found) + return_value[i] = 1.0; + } + + return return_value; +} + + void average(std::vector &values) { @@ -128,16 +197,196 @@ void average(std::vector &values) +/** + * Matrix-free Laplace operator + */ +template +class LaplaceOperator + : public MatrixFreeOperators::Base> +{ +public: + LaplaceOperator(); + + void clear() override; + + void evaluate_coefficient(const Coefficient &coefficient_function); + Table<1, VectorizedArray> get_coefficient_table(); + + virtual void compute_diagonal() override; + +private: + virtual void apply_add( + LinearAlgebra::distributed::Vector & dst, + const LinearAlgebra::distributed::Vector &src) const override; + + void + local_apply(const MatrixFree & data, + LinearAlgebra::distributed::Vector & dst, + const LinearAlgebra::distributed::Vector &src, + const std::pair &cell_range) const; + + void local_compute_diagonal( + const MatrixFree & data, + LinearAlgebra::distributed::Vector & dst, + const unsigned int & dummy, + const std::pair &cell_range) const; + + Table<1, VectorizedArray> coefficient; +}; + + +template +LaplaceOperator::LaplaceOperator() + : MatrixFreeOperators::Base>() +{} + + +template +void LaplaceOperator::clear() +{ + coefficient.reinit(TableIndices<1>(0)); + MatrixFreeOperators::Base>:: + clear(); +} + + +template +void LaplaceOperator::evaluate_coefficient( + const Coefficient &coefficient_function) +{ + const unsigned int n_cells = this->data->n_macro_cells(); + FEEvaluation phi(*this->data); + + coefficient.reinit(TableIndices<1>(n_cells)); + for (unsigned int cell = 0; cell < n_cells; ++cell) + { + phi.reinit(cell); + + VectorizedArray averaged_value(0); + for (unsigned int q = 0; q < phi.n_q_points; ++q) + averaged_value += coefficient_function.value(phi.quadrature_point(q)); + averaged_value /= phi.n_q_points; + + coefficient(cell) = averaged_value; + } +} + + +template +Table<1, VectorizedArray> +LaplaceOperator::get_coefficient_table() +{ + return coefficient; +} + + +template +void LaplaceOperator::local_apply( + const MatrixFree & data, + LinearAlgebra::distributed::Vector & dst, + const LinearAlgebra::distributed::Vector &src, + const std::pair & cell_range) const +{ + FEEvaluation 
phi(data); + + for (unsigned int cell = cell_range.first; cell < cell_range.second; ++cell) + { + AssertDimension(coefficient.size(0), data.n_macro_cells()); + + phi.reinit(cell); + phi.read_dof_values(src); + phi.evaluate(false, true); + for (unsigned int q = 0; q < phi.n_q_points; ++q) + phi.submit_gradient(coefficient(cell) * phi.get_gradient(q), q); + phi.integrate(false, true); + phi.distribute_local_to_global(dst); + } +} + + +template +void LaplaceOperator::apply_add( + LinearAlgebra::distributed::Vector & dst, + const LinearAlgebra::distributed::Vector &src) const +{ + this->data->cell_loop(&LaplaceOperator::local_apply, this, dst, src); +} + + +template +void LaplaceOperator::compute_diagonal() +{ + this->inverse_diagonal_entries.reset( + new DiagonalMatrix>()); + LinearAlgebra::distributed::Vector &inverse_diagonal = + this->inverse_diagonal_entries->get_vector(); + this->data->initialize_dof_vector(inverse_diagonal); + unsigned int dummy = 0; + this->data->cell_loop(&LaplaceOperator::local_compute_diagonal, + this, + inverse_diagonal, + dummy); + + this->set_constrained_entries_to_one(inverse_diagonal); + + for (unsigned int i = 0; i < inverse_diagonal.local_size(); ++i) + { + Assert(inverse_diagonal.local_element(i) > 0., + ExcMessage("No diagonal entry in a positive definite operator " + "should be zero")); + inverse_diagonal.local_element(i) = + 1. / inverse_diagonal.local_element(i); + } +} + + +template +void LaplaceOperator::local_compute_diagonal( + const MatrixFree & data, + LinearAlgebra::distributed::Vector &dst, + const unsigned int &, + const std::pair &cell_range) const +{ + FEEvaluation phi(data); + + AlignedVector> diagonal(phi.dofs_per_cell); + + for (unsigned int cell = cell_range.first; cell < cell_range.second; ++cell) + { + AssertDimension(coefficient.size(0), data.n_macro_cells()); + + phi.reinit(cell); + for (unsigned int i = 0; i < phi.dofs_per_cell; ++i) + { + for (unsigned int j = 0; j < phi.dofs_per_cell; ++j) + phi.submit_dof_value(VectorizedArray(), j); + phi.submit_dof_value(make_vectorized_array(1.), i); + + phi.evaluate(false, true); + for (unsigned int q = 0; q < phi.n_q_points; ++q) + phi.submit_gradient(coefficient(cell) * phi.get_gradient(q), q); + phi.integrate(false, true); + diagonal[i] = phi.get_dof_value(i); + } + for (unsigned int i = 0; i < phi.dofs_per_cell; ++i) + phi.submit_dof_value(diagonal[i], i); + phi.distribute_local_to_global(dst); + } +} + + + struct Settings { bool try_parse(const std::string &prm_filename); - enum AssembleEnum + enum SolverType { - gmg, + gmg_mb, + gmg_mf, amg - } assembler; - std::string assembler_text; + } solver; int dimension; double smoother_dampen; @@ -149,10 +398,15 @@ struct Settings template class LaplaceProblem { - typedef LA::MPI::SparseMatrix MatrixType; - typedef LA::MPI::Vector VectorType; - typedef LA::MPI::PreconditionAMG PreconditionAMG; - typedef LA::MPI::PreconditionJacobi PreconditionJacobi; + using MatrixType = LA::MPI::SparseMatrix; + using VectorType = LA::MPI::Vector; + using PreconditionAMG = LA::MPI::PreconditionAMG; + using PreconditionJacobi = LA::MPI::PreconditionJacobi; + + using MatrixFreeLevelMatrix = LaplaceOperator; + using MatrixFreeActiveMatrix = LaplaceOperator; + using MatrixFreeLevelVector = LinearAlgebra::distributed::Vector; + using MatrixFreeActiveVector = LinearAlgebra::distributed::Vector; public: LaplaceProblem(const Settings &settings); @@ -163,6 +417,7 @@ private: void setup_multigrid(); void assemble_system(); void assemble_multigrid(); + void 
assemble_rhs_for_matrix_free(); void solve(); void estimate(); void refine_grid(); @@ -183,15 +438,18 @@ private: IndexSet locally_relevant_set; AffineConstraints constraints; - MatrixType system_matrix; - VectorType solution; - VectorType right_hand_side; - Vector estimate_vector; + MatrixType system_matrix; + MatrixFreeActiveMatrix mf_system_matrix; + VectorType solution; + VectorType right_hand_side; + Vector estimate_vector; MGLevelObject mg_matrix; MGLevelObject mg_interface_in; MGConstrainedDoFs mg_constrained_dofs; + MGLevelObject mf_mg_matrix; + TimerOutput computing_timer; }; @@ -203,16 +461,16 @@ LaplaceProblem::LaplaceProblem(const Settings &settings) , pcout(std::cout, (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)) , triangulation(mpi_communicator, Triangulation::limit_level_difference_at_vertices, - (settings.assembler == Settings::amg) ? + (settings.solver == Settings::amg) ? parallel::distributed::Triangulation::default_setting : parallel::distributed::Triangulation< dim>::construct_multigrid_hierarchy) , mapping() , fe(2) , dof_handler(triangulation) - , computing_timer(pcout, TimerOutput::summary, TimerOutput::wall_times) + , computing_timer(pcout, TimerOutput::never, TimerOutput::wall_times) { - GridGenerator::hyper_L(triangulation, -1, 1, /*colorize*/ false); + GridGenerator::hyper_L(triangulation, -1., 1., /*colorize*/ false); triangulation.refine_global(1); } @@ -222,7 +480,7 @@ bool Settings::try_parse(const std::string &prm_filename) ParameterHandler prm; prm.declare_entry("dim", "2", Patterns::Integer(), "The problem dimension."); prm.declare_entry("n_steps", - "20", + "10", Patterns::Integer(0), "Number of adaptive refinement steps."); prm.declare_entry("smoother dampen", @@ -230,36 +488,46 @@ bool Settings::try_parse(const std::string &prm_filename) Patterns::Double(0.0), "Dampen factor for the smoother."); prm.declare_entry("smoother steps", - "2", + "1", Patterns::Integer(1), "Number of smoother steps."); - prm.declare_entry("assembler", - "GMG", - Patterns::Selection("GMG|AMG"), - "Switch between GMG and AMG."); + prm.declare_entry( + "solver", + "MF", + Patterns::Selection("MF|MB|AMG"), + "Switch between matrix-free GMG, matrix-based GMG, and AMG."); prm.declare_entry("output", "false", Patterns::Bool(), "Output graphical results."); + if (prm_filename.size() == 0) + { + // No .prm file provided? Print the default values and exit. + if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0) + prm.print_parameters(std::cout, ParameterHandler::Text); + return false; + } + try { prm.parse_input(prm_filename); } - catch (...) 
+  catch (std::exception &e)
     {
       if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
-        prm.print_parameters(std::cout, ParameterHandler::Text);
+        std::cerr << e.what() << std::endl;
       return false;
     }
 
-  if (prm.get("assembler") == "GMG")
-    this->assembler = gmg;
-  else if (prm.get("assembler") == "AMG")
-    this->assembler = amg;
+  if (prm.get("solver") == "MF")
+    this->solver = gmg_mf;
+  else if (prm.get("solver") == "MB")
+    this->solver = gmg_mb;
+  else if (prm.get("solver") == "AMG")
+    this->solver = amg;
   else
     AssertThrow(false, ExcNotImplemented());
-  this->assembler_text = prm.get("assembler");
 
   this->dimension = prm.get_integer("dim");
   this->n_steps   = prm.get_integer("n_steps");
@@ -291,28 +559,48 @@ void LaplaceProblem::setup_system()
     mapping, dof_handler, 0, Functions::ZeroFunction(), constraints);
   constraints.close();
 
-#ifdef USE_PETSC_LA
-  DynamicSparsityPattern dsp(locally_relevant_set);
-  DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints);
-
-  SparsityTools::distribute_sparsity_pattern(dsp,
-                                             locally_owned_set,
-                                             mpi_communicator,
-                                             locally_relevant_set);
-  system_matrix.reinit(locally_owned_set,
-                       locally_owned_set,
-                       dsp,
-                       mpi_communicator);
+  if (settings.solver == Settings::gmg_mf)
+    {
+      typename MatrixFree::AdditionalData additional_data;
+      additional_data.tasks_parallel_scheme =
+        MatrixFree::AdditionalData::none;
+      additional_data.mapping_update_flags =
+        (update_gradients | update_JxW_values | update_quadrature_points);
+      std::shared_ptr> mf_storage(
+        new MatrixFree());
+      mf_storage->reinit(dof_handler,
+                         constraints,
+                         QGauss<1>(fe.degree + 1),
+                         additional_data);
+      mf_system_matrix.initialize(mf_storage);
+      mf_system_matrix.evaluate_coefficient(Coefficient());
+    }
+  else
+    {
+#ifdef USE_PETSC_LA
+      DynamicSparsityPattern dsp(locally_relevant_set);
+      DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints);
+
+      SparsityTools::distribute_sparsity_pattern(dsp,
+                                                 locally_owned_set,
+                                                 mpi_communicator,
+                                                 locally_relevant_set);
+
+      system_matrix.reinit(locally_owned_set,
+                           locally_owned_set,
+                           dsp,
+                           mpi_communicator);
 #else
-  TrilinosWrappers::SparsityPattern dsp(locally_owned_set,
-                                        locally_owned_set,
-                                        locally_relevant_set,
-                                        MPI_COMM_WORLD);
-  DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints);
-  dsp.compress();
-  system_matrix.reinit(dsp);
+      TrilinosWrappers::SparsityPattern dsp(locally_owned_set,
+                                            locally_owned_set,
+                                            locally_relevant_set,
+                                            MPI_COMM_WORLD);
+      DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints);
+      dsp.compress();
+      system_matrix.reinit(dsp);
 #endif
+    }
 }
 
@@ -330,77 +618,120 @@ void LaplaceProblem::setup_multigrid()
   mg_constrained_dofs.make_zero_boundary_constraints(dof_handler, bset);
 
   const unsigned int n_levels = triangulation.n_global_levels();
-  mg_matrix.resize(0, n_levels - 1);
-  mg_matrix.clear_elements();
-  mg_interface_in.resize(0, n_levels - 1);
-  mg_interface_in.clear_elements();
+  if (settings.solver == Settings::gmg_mf)
+    {
+      mf_mg_matrix.resize(0, n_levels - 1);
 
-  for (unsigned int level = 0; level < n_levels; ++level)
+      for (unsigned int level = 0; level < n_levels; ++level)
+        {
+          IndexSet relevant_dofs;
+          DoFTools::extract_locally_relevant_level_dofs(dof_handler,
+                                                        level,
+                                                        relevant_dofs);
+          AffineConstraints level_constraints;
+          level_constraints.reinit(relevant_dofs);
+          level_constraints.add_lines(
+            mg_constrained_dofs.get_boundary_indices(level));
+          level_constraints.close();
+
+          typename MatrixFree::AdditionalData additional_data;
+          additional_data.tasks_parallel_scheme =
MatrixFree::AdditionalData::none; + additional_data.mapping_update_flags = + (update_gradients | update_JxW_values | update_quadrature_points); + additional_data.mg_level = level; + std::shared_ptr> mf_storage_level( + new MatrixFree()); + mf_storage_level->reinit(dof_handler, + level_constraints, + QGauss<1>(fe.degree + 1), + additional_data); + + mf_mg_matrix[level].initialize(mf_storage_level, + mg_constrained_dofs, + level); + + mf_mg_matrix[level].evaluate_coefficient(Coefficient()); + mf_mg_matrix[level].compute_diagonal(); + } + } + else { - IndexSet dofset; - DoFTools::extract_locally_relevant_level_dofs(dof_handler, level, dofset); + mg_matrix.resize(0, n_levels - 1); + mg_matrix.clear_elements(); + mg_interface_in.resize(0, n_levels - 1); + mg_interface_in.clear_elements(); - { + for (unsigned int level = 0; level < n_levels; ++level) + { + IndexSet dofset; + DoFTools::extract_locally_relevant_level_dofs(dof_handler, + level, + dofset); + + { #ifdef USE_PETSC_LA - DynamicSparsityPattern dsp(dofset); - MGTools::make_sparsity_pattern(dof_handler, dsp, level); - dsp.compress(); - SparsityTools::distribute_sparsity_pattern( - dsp, - dof_handler.locally_owned_mg_dofs(level), - mpi_communicator, - dofset); - - mg_matrix[level].reinit(dof_handler.locally_owned_mg_dofs(level), - dof_handler.locally_owned_mg_dofs(level), - dsp, - mpi_communicator); + DynamicSparsityPattern dsp(dofset); + MGTools::make_sparsity_pattern(dof_handler, dsp, level); + dsp.compress(); + SparsityTools::distribute_sparsity_pattern( + dsp, + dof_handler.locally_owned_mg_dofs(level), + mpi_communicator, + dofset); + + mg_matrix[level].reinit(dof_handler.locally_owned_mg_dofs(level), + dof_handler.locally_owned_mg_dofs(level), + dsp, + mpi_communicator); #else - TrilinosWrappers::SparsityPattern dsp( - dof_handler.locally_owned_mg_dofs(level), - dof_handler.locally_owned_mg_dofs(level), - dofset, - mpi_communicator); - MGTools::make_sparsity_pattern(dof_handler, dsp, level); - - dsp.compress(); - mg_matrix[level].reinit(dsp); + TrilinosWrappers::SparsityPattern dsp( + dof_handler.locally_owned_mg_dofs(level), + dof_handler.locally_owned_mg_dofs(level), + dofset, + mpi_communicator); + MGTools::make_sparsity_pattern(dof_handler, dsp, level); + + dsp.compress(); + mg_matrix[level].reinit(dsp); #endif - } + } - { + { #ifdef USE_PETSC_LA - DynamicSparsityPattern dsp(dofset); - MGTools::make_interface_sparsity_pattern(dof_handler, - mg_constrained_dofs, - dsp, - level); - dsp.compress(); - SparsityTools::distribute_sparsity_pattern( - dsp, - dof_handler.locally_owned_mg_dofs(level), - mpi_communicator, - dofset); - - mg_interface_in[level].reinit(dof_handler.locally_owned_mg_dofs(level), - dof_handler.locally_owned_mg_dofs(level), - dsp, - mpi_communicator); + DynamicSparsityPattern dsp(dofset); + MGTools::make_interface_sparsity_pattern(dof_handler, + mg_constrained_dofs, + dsp, + level); + dsp.compress(); + SparsityTools::distribute_sparsity_pattern( + dsp, + dof_handler.locally_owned_mg_dofs(level), + mpi_communicator, + dofset); + + mg_interface_in[level].reinit( + dof_handler.locally_owned_mg_dofs(level), + dof_handler.locally_owned_mg_dofs(level), + dsp, + mpi_communicator); #else - TrilinosWrappers::SparsityPattern dsp( - dof_handler.locally_owned_mg_dofs(level), - dof_handler.locally_owned_mg_dofs(level), - dofset, - mpi_communicator); - - MGTools::make_interface_sparsity_pattern(dof_handler, - mg_constrained_dofs, - dsp, - level); - dsp.compress(); - mg_interface_in[level].reinit(dsp); + 
TrilinosWrappers::SparsityPattern dsp( + dof_handler.locally_owned_mg_dofs(level), + dof_handler.locally_owned_mg_dofs(level), + dofset, + mpi_communicator); + + MGTools::make_interface_sparsity_pattern(dof_handler, + mg_constrained_dofs, + dsp, + level); + dsp.compress(); + mg_interface_in[level].reinit(dsp); #endif - } + } + } } } @@ -550,6 +881,71 @@ void LaplaceProblem::assemble_multigrid() } +template +void LaplaceProblem::assemble_rhs_for_matrix_free() +{ + TimerOutput::Scope timing(computing_timer, "Assemble right hand side"); + + MatrixFreeActiveVector solution_copy; + MatrixFreeActiveVector right_hand_side_copy; + mf_system_matrix.initialize_dof_vector(solution_copy); + mf_system_matrix.initialize_dof_vector(right_hand_side_copy); + + solution_copy = 0.; + constraints.distribute(solution_copy); + solution_copy.update_ghost_values(); + right_hand_side_copy = 0; + const Table<1, VectorizedArray> coefficient_table = + mf_system_matrix.get_coefficient_table(); + + RightHandSide right_hand_side_function; + + FEEvaluation phi(*mf_system_matrix.get_matrix_free()); + + for (unsigned int cell = 0; + cell < mf_system_matrix.get_matrix_free()->n_macro_cells(); + ++cell) + { + phi.reinit(cell); + phi.read_dof_values_plain(solution_copy); + phi.evaluate(false, true, false); + + for (unsigned int q = 0; q < phi.n_q_points; ++q) + { + // Submit gradient + phi.submit_gradient(-1.0 * + (coefficient_table(cell) * phi.get_gradient(q)), + q); + + // Submit RHS value + VectorizedArray rhs_value = + make_vectorized_array(1.0); + for (unsigned int i = 0; i < VectorizedArray::size(); ++i) + { + Point p; + for (unsigned int d = 0; d < dim; ++d) + p(d) = phi.quadrature_point(q)(d)[i]; + + rhs_value[i] = right_hand_side_function.value(p); + } + phi.submit_value(rhs_value, q); + } + + phi.integrate(true, true); + phi.distribute_local_to_global(right_hand_side_copy); + } + + right_hand_side_copy.compress(VectorOperation::add); +#ifdef USE_PETSC_LA + AssertThrow(false, + ExcMessage("CopyVectorTypes::copy() not implemented for " + "PETSc vector types.")); +#else + ChangeVectorTypes::copy(right_hand_side, right_hand_side_copy); +#endif +} + + template void LaplaceProblem::solve() { @@ -557,63 +953,114 @@ void LaplaceProblem::solve() SolverControl solver_control(1000, 1.e-10 * right_hand_side.l2_norm()); solver_control.enable_history_data(); - SolverCG solver(solver_control); solution = 0.; - if (settings.assembler == Settings::amg) + if (settings.solver == Settings::gmg_mf) { - computing_timer.enter_subsection("Solve: AMG preconditioner setup"); + computing_timer.enter_subsection("Solve: Preconditioner setup"); - PreconditionAMG prec; - PreconditionAMG::AdditionalData Amg_data; + MGTransferMatrixFree mg_transfer(mg_constrained_dofs); + mg_transfer.build(dof_handler); + + SolverControl coarse_solver_control(1000, 1e-12, false, false); + SolverCG coarse_solver(coarse_solver_control); + PreconditionIdentity identity; + MGCoarseGridIterativeSolver, + MatrixFreeLevelMatrix, + PreconditionIdentity> + coarse_grid_solver(coarse_solver, mf_mg_matrix[0], identity); + + using Smoother = dealii::PreconditionJacobi; + MGSmootherPrecondition + smoother; + smoother.initialize(mf_mg_matrix, + typename Smoother::AdditionalData( + settings.smoother_dampen)); + smoother.set_steps(settings.smoother_steps); + + mg::Matrix mg_m(mf_mg_matrix); + + MGLevelObject< + MatrixFreeOperators::MGInterfaceOperator> + mg_interface_matrices; + mg_interface_matrices.resize(0, triangulation.n_global_levels() - 1); + for (unsigned int level = 0; 
level < triangulation.n_global_levels(); + ++level) + mg_interface_matrices[level].initialize(mf_mg_matrix[level]); + mg::Matrix mg_interface(mg_interface_matrices); + + Multigrid mg( + mg_m, coarse_grid_solver, mg_transfer, smoother, smoother); + mg.set_edge_matrices(mg_interface, mg_interface); + + PreconditionMG> + preconditioner(dof_handler, mg, mg_transfer); + + MatrixFreeActiveVector solution_copy; + MatrixFreeActiveVector right_hand_side_copy; + mf_system_matrix.initialize_dof_vector(solution_copy); + mf_system_matrix.initialize_dof_vector(right_hand_side_copy); #ifdef USE_PETSC_LA - Amg_data.symmetric_operator = true; + AssertThrow(false, + ExcMessage("CopyVectorTypes::copy() not implemented for " + "PETSc vector types.")); #else - Amg_data.elliptic = true; - Amg_data.smoother_type = "Jacobi"; - Amg_data.higher_order_elements = true; - Amg_data.smoother_sweeps = settings.smoother_steps; - Amg_data.aggregation_threshold = 0.02; + ChangeVectorTypes::copy(solution_copy, solution); + ChangeVectorTypes::copy(right_hand_side_copy, right_hand_side); #endif + computing_timer.leave_subsection("Solve: Preconditioner setup"); - Amg_data.output_details = false; - - prec.initialize(system_matrix, Amg_data); - computing_timer.leave_subsection("Solve: AMG preconditioner setup"); - + // Timing 1 vcycle { - TimerOutput::Scope timing(computing_timer, "Solve: 1 AMG vcycle"); - prec.vmult(solution, right_hand_side); + TimerOutput::Scope timing(computing_timer, "Solve: 1 multigrid vcycle"); + preconditioner.vmult(solution_copy, right_hand_side_copy); } - solution = 0.; + solution_copy = 0.; { + SolverCG solver(solver_control); + TimerOutput::Scope timing(computing_timer, "Solve: CG"); - solver.solve(system_matrix, solution, right_hand_side, prec); + solver.solve(mf_system_matrix, + solution_copy, + right_hand_side_copy, + preconditioner); } + + solution_copy.update_ghost_values(); +#ifdef USE_PETSC_LA + AssertThrow(false, + ExcMessage("CopyVectorTypes::copy() not implemented for " + "PETSc vector types.")); +#else + ChangeVectorTypes::copy(solution, solution_copy); +#endif constraints.distribute(solution); } - else + else if (settings.solver == Settings::gmg_mb) { - computing_timer.enter_subsection("Solve: GMG preconditioner setup"); + computing_timer.enter_subsection("Solve: Preconditioner setup"); MGTransferPrebuilt mg_transfer(mg_constrained_dofs); mg_transfer.build(dof_handler); - MatrixType & coarse_matrix = mg_matrix[0]; SolverControl coarse_solver_control(1000, 1e-12, false, false); SolverCG coarse_solver(coarse_solver_control); PreconditionIdentity identity; - MGCoarseGridIterativeSolver, MatrixType, PreconditionIdentity> - coarse_grid_solver(coarse_solver, coarse_matrix, identity); + coarse_grid_solver(coarse_solver, mg_matrix[0], identity); - typedef LA::MPI::PreconditionJacobi Smoother; + using Smoother = LA::MPI::PreconditionJacobi; MGSmootherPrecondition smoother; #ifdef USE_PETSC_LA @@ -640,31 +1087,62 @@ void LaplaceProblem::solve() PreconditionMG> preconditioner(dof_handler, mg, mg_transfer); - computing_timer.leave_subsection("Solve: GMG preconditioner setup"); + computing_timer.leave_subsection("Solve: Preconditioner setup"); { - TimerOutput::Scope timing(computing_timer, "Solve: 1 GMG vcycle"); + TimerOutput::Scope timing(computing_timer, "Solve: 1 multigrid vcycle"); preconditioner.vmult(solution, right_hand_side); } solution = 0.; { + SolverCG solver(solver_control); + TimerOutput::Scope timing(computing_timer, "Solve: CG"); solver.solve(system_matrix, solution, right_hand_side, 
preconditioner); } constraints.distribute(solution); } + else + { + computing_timer.enter_subsection("Solve: Preconditioner setup"); - double rate = solver_control.final_reduction(); - { - double r0 = right_hand_side.l2_norm(); - double rn = solver_control.last_value(); - rate = 1.0 / solver_control.last_step() * log(r0 / rn) / log(10); - } + PreconditionAMG preconditioner; + PreconditionAMG::AdditionalData Amg_data; + +#ifdef USE_PETSC_LA + Amg_data.symmetric_operator = true; +#else + Amg_data.elliptic = true; + Amg_data.smoother_type = "Jacobi"; + Amg_data.higher_order_elements = true; + Amg_data.smoother_sweeps = settings.smoother_steps; + Amg_data.aggregation_threshold = 0.02; +#endif + + Amg_data.output_details = false; - pcout << " CG iterations: " << solver_control.last_step() - << ", iters: " << 10.0 / rate << ", rate: " << rate << std::endl; + preconditioner.initialize(system_matrix, Amg_data); + computing_timer.leave_subsection("Solve: Preconditioner setup"); + + { + TimerOutput::Scope timing(computing_timer, "Solve: 1 multigrid vcycle"); + preconditioner.vmult(solution, right_hand_side); + } + solution = 0.; + + { + SolverCG solver(solver_control); + + TimerOutput::Scope timing(computing_timer, "Solve: CG"); + solver.solve(system_matrix, solution, right_hand_side, preconditioner); + } + constraints.distribute(solution); + } + + pcout << " Number of CG iterations: " << solver_control.last_step() + << std::endl; } @@ -743,7 +1221,7 @@ void LaplaceProblem::estimate() auto cell_worker = [&](const Iterator & cell, ScratchData &scratch_data, CopyData & copy_data) { - // assemble cell residual $h^2 \| f + \epsilon \triangle u \|_K^2$ + /*assemble cell residual $h^2 \| f + \epsilon \triangle u \|_K^2$*/ FEValues &fe_values = scratch_data.fe_values; fe_values.reinit(cell); @@ -776,7 +1254,7 @@ void LaplaceProblem::estimate() const unsigned int &nsf, ScratchData & scratch_data, CopyData & copy_data) { - // face term $\sum_F h_F \| [ \epsilon \nabla u \cdot n ] \|_F^2$ + /* face term $\sum_F h_F \| [ \epsilon \nabla u \cdot n ] \|_F^2$*/ FEInterfaceValues &fe_interface_values = scratch_data.fe_interface_values; @@ -790,7 +1268,7 @@ void LaplaceProblem::estimate() const double nu1 = coefficient.value(cell->center()); const double nu2 = coefficient.value(ncell->center()); - const double h = cell->face(f)->measure(); // TODO: FEIV.measure + const double h = cell->face(f)->measure(); std::vector> grad_u[2]; @@ -871,14 +1349,13 @@ void LaplaceProblem::output_results(const unsigned int cycle) { TimerOutput::Scope timing(computing_timer, "Output results"); - DataOut data_out; - VectorType temp_solution; temp_solution.reinit(locally_owned_set, locally_relevant_set, mpi_communicator); temp_solution = solution; + DataOut data_out; data_out.attach_dof_handler(dof_handler); data_out.add_data_vector(temp_solution, "solution"); Vector subdomain(triangulation.n_active_cells()); @@ -896,32 +1373,10 @@ void LaplaceProblem::output_results(const unsigned int cycle) data_out.build_patches(0); - const std::string filename = - ("solution-" + Utilities::int_to_string(cycle, 5) + "." 
+ - Utilities::int_to_string(triangulation.locally_owned_subdomain(), 4) + - ".vtu"); - std::ofstream output(filename.c_str()); - data_out.write_vtu(output); + const std::string master = data_out.write_vtu_with_pvtu_record( + "", "solution", cycle, mpi_communicator, 2 /*n_digits*/, 1 /*n_groups*/); - if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0) - { - std::vector filenames; - for (unsigned int i = 0; - i < Utilities::MPI::n_mpi_processes(mpi_communicator); - ++i) - filenames.push_back(std::string("solution-") + - Utilities::int_to_string(cycle, 5) + "." + - Utilities::int_to_string(i, 4) + ".vtu"); - const std::string pvtu_master_filename = - ("solution-" + Utilities::int_to_string(cycle, 5) + ".pvtu"); - std::ofstream pvtu_master(pvtu_master_filename.c_str()); - data_out.write_pvtu_record(pvtu_master, filenames); - - const std::string visit_master_filename = - ("solution-" + Utilities::int_to_string(cycle, 5) + ".visit"); - std::ofstream visit_master(visit_master_filename.c_str()); - DataOutBase::write_visit_record(visit_master, filenames); - } + pcout << " Wrote " << master << std::endl; } @@ -936,7 +1391,7 @@ void LaplaceProblem::run() pcout << " Number of active cells: " << triangulation.n_global_active_cells(); - if (settings.assembler == Settings::gmg) + if (settings.solver != Settings::amg) pcout << " (" << triangulation.n_global_levels() << " global levels)" << std::endl << " Workload imbalance: " @@ -944,11 +1399,11 @@ void LaplaceProblem::run() pcout << std::endl; setup_system(); - if (settings.assembler == Settings::gmg) + if (settings.solver != Settings::amg) setup_multigrid(); pcout << " Number of degrees of freedom: " << dof_handler.n_dofs(); - if (settings.assembler != Settings::amg) + if (settings.solver != Settings::amg) { pcout << " (by level: "; for (unsigned int level = 0; level < triangulation.n_global_levels(); @@ -959,9 +1414,14 @@ void LaplaceProblem::run() } pcout << std::endl; - assemble_system(); - if (settings.assembler == Settings::gmg) - assemble_multigrid(); + if (settings.solver == Settings::gmg_mf) + assemble_rhs_for_matrix_free(); + else + { + assemble_system(); + if (settings.solver == Settings::gmg_mb) + assemble_multigrid(); + } solve(); estimate(); @@ -977,8 +1437,8 @@ void LaplaceProblem::run() int main(int argc, char *argv[]) { - dealii::Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1); using namespace dealii; + Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1); Settings settings; if (!settings.try_parse((argc > 1) ? (argv[1]) : "")) -- 2.39.5