https://gitweb.dealii.org/ - dealii.git/commitdiff
update intro/changes
author tcclevenger <tcleven@clemson.edu>
Mon, 20 Apr 2020 15:12:11 +0000 (11:12 -0400)
committer Timo Heister <timo.heister@gmail.com>
Sat, 9 May 2020 17:41:55 +0000 (13:41 -0400)
13 files changed:
doc/doxygen/tutorial/tutorial.h.in
doc/news/changes/major/20200420step50 [new file with mode: 0644]
examples/step-50/amg_2d.prm
examples/step-50/amg_3d.prm [new file with mode: 0644]
examples/step-50/doc/intro.dox
examples/step-50/doc/results.dox
examples/step-50/doc/tooltip
examples/step-50/gmg_2d.prm [deleted file]
examples/step-50/gmg_mb_2d.prm [new file with mode: 0644]
examples/step-50/gmg_mb_3d.prm [new file with mode: 0644]
examples/step-50/gmg_mf_2d.prm [new file with mode: 0644]
examples/step-50/gmg_mf_3d.prm [new file with mode: 0644]
examples/step-50/step-50.cc

index 52a222bbd56091029f220f9447cfe212b7fed32c..de48ed344d6421e1395edc3c8d05fe50e4be3f73 100644 (file)
  *       </td></tr>
  *
  *   <tr valign="top">
+ *       <td>step-50</td>
+ *       <td> Geometric multigrid on adaptive meshes distributed in parallel.
+ *       <br/> Keywords: Multigrid, MGLevelObject, MGConstrainedDoFs, IndexSet, MGTools, PreconditionMG, FEInterfaceValues, MeshWorker::mesh_loop()
+ *       </td></tr>
+ *
+ *   <tr valign="top">
  *       <td>step-51</td>
  *       <td> Solving the convection-diffusion equation with a hybridizable
  *       discontinuous Galerkin method using face elements.
  *       step-31,
  *       step-32,
  *       step-33,
+ *       step-40,
  *       step-41,
  *       step-42,
  *       step-43,
+ *       step-50,
  *       step-55
  *     </td>
  *   </tr>
  *       step-17,
  *       step-18,
  *       step-40,
+ *       step-50,
  *       step-55
  *     </td>
  *   </tr>
  *     </td>
  *     <td>
  *       step-32,
+ *       step-40,
  *       step-42,
+ *       step-50,
  *       step-55
  *     </td>
  *   </tr>
  *       step-37,
  *       step-40,
  *       step-42,
+ *       step-50,
  *       step-55,
  *       step-59,
  *       step-67,
  *       step-6,
  *       step-9,
  *       step-14,
- *       step-39
+ *       step-39,
+ *       step-50
  *     </td>
  *   </tr>
  *
  *       step-41,
  *       step-42,
  *       step-43,
+ *       step-50,
  *       step-56,
  *       step-59,
  *       step-63
  *       step-37,
  *       step-40,
  *       step-42,
+ *       step-50,
  *       step-55,
  *       step-59
  *     </td>
diff --git a/doc/news/changes/major/20200420step50 b/doc/news/changes/major/20200420step50
new file mode 100644 (file)
index 0000000..7cce146
--- /dev/null
@@ -0,0 +1,8 @@
+New: The step-50 tutorial has been updated. We discuss how to
+use the multilevel preconditioner from step-16 in parallel and
+give a comparison of parallel scaling between GMG and AMG for
+a 3D Laplace example where adaptive refinement comes from a
+residual-based, cell-wise a posteriori error estimator.
+<br>
+(Thomas C. Clevenger, Timo Heister, 2020/04/20)
+
index 426f17c65138b4f8925063f3ac19def48a8f86b5..6343a8eaee8a7c319bc7f9e51ea18e1e41fdc0c2 100644 (file)
@@ -1,12 +1,19 @@
 # Listing of Parameters
 # ---------------------
-set assembler       = AMG
+# The problem dimension.
 set dim             = 2
+
 # Number of adaptive refinement steps.
-set n_steps         = 20
+set n_steps         = 16
+
+# Output graphical results.
 set output          = true
 
-# Select how to refine. Options: global|kelly|estimator
-set refinement type = estimator
+# Damping factor for the smoother.
 set smoother dampen = 1.0
-set smoother steps  = 2
+
+# Number of smoother steps.
+set smoother steps  = 1
+
+# Switch between matrix-free GMG, matrix-based GMG, and AMG.
+set solver          = AMG
diff --git a/examples/step-50/amg_3d.prm b/examples/step-50/amg_3d.prm
new file mode 100644 (file)
index 0000000..9131f07
--- /dev/null
@@ -0,0 +1,19 @@
+# Listing of Parameters
+# ---------------------
+# The problem dimension.
+set dim             = 3
+
+# Number of adaptive refinement steps.
+set n_steps         = 12
+
+# Output graphical results.
+set output          = true
+
+# Damping factor for the smoother.
+set smoother dampen = 1.0
+
+# Number of smoother steps.
+set smoother steps  = 1
+
+# Switch between matrix-free GMG, matrix-based GMG, and AMG.
+set solver          = AMG
index e597798731f9038944b4df4b487caccbf7f041e9..eaffbcbd427e8b9fe5fb94b89ddb3d6f1e05f189 100644 (file)
 <br>
 
-<i>This program has evolved from a version originally written by Guido
-Kanschat in 2003. It has undergone significant revisions by B&auml;rbel
-Janssen, Guido Kanschat and Wolfgang Bangerth in 2009 and 2010 to demonstrate
-multigrid algorithms on adaptively refined meshes.
+<i>
+This program was contributed by Thomas C. Clevenger and Timo Heister.
+<br>
+This material is based upon work partly supported by National
+Science Foundation Awards DMS-1901529, OAC-2015848, and EAR-1925575, by the
+Computational Infrastructure in Geodynamics initiative (CIG), through the NSF under
+Awards EAR-0949446 and EAR-1550901, and by The University of California -- Davis.
 </i>
 
 
+@note As a prerequisite of this program, you need to have both p4est and either the PETSc
+or Trilinos library installed. The installation of deal.II together with these additional
+libraries is described in the <a href="../../readme.html" target="body">README</a> file.
+
+
 <a name="Intro"></a>
 <h1>Introduction</h1>
 
 
-This example shows the basic usage of the multilevel functions in
-deal.II. It solves the same problem as used in step-6,
-but demonstrating the things one has to provide when using multigrid
-as a preconditioner. In particular, this requires that we define a
-hierarchy of levels, provide transfer operators from one level to the
-next and back, and provide representations of the Laplace operator on
-each level.
-
-In order to allow sufficient flexibility in conjunction with systems of
-differential equations and block preconditioners, quite a few different objects
-have to be created before starting the multilevel method, although
-most of what needs to be done is provided by deal.II itself. These are
-<ul>
-<li>An the object handling transfer between grids; we use the
-    MGTransferPrebuilt class for this that does almost all of the work
-    inside the library.
-<li>The solver on the coarsest level; here, we use MGCoarseGridHouseholder.
-<li>The smoother on all other levels, which in our case will be the
-    MGSmootherRelaxation class using SOR as the underlying method
-<li>And mg::Matrix, a class having a special level multiplication, i.e. we
-    basically store one matrix per grid level and allow multiplication
-    with it.
-</ul>
-Most of these objects will only be needed inside the function that
-actually solves the linear system. There, these objects are combined
-in an object of type Multigrid, containing the implementation of the
-V-cycle, which is in turn used by the preconditioner PreconditionMG,
-ready for plug-in into a linear solver of the LAC library.
-
-The multilevel method in deal.II follows in many respects the outlines
-of the various publications by James Bramble, Joseph Pasciak and
-Jinchao Xu (i.e. the "BPX" framework). In order to understand many of
-the options, a rough familiarity with their work is quite helpful.
-
-However, in comparison to this framework, the implementation in
-deal.II has to take into account the fact that we want to solve linear
-systems on adaptively refined meshes. This leads to the complication
-that it isn't quite as clear any more what exactly a "level" in a
-multilevel hierarchy of a mesh is. The following image shows what we
-consider to be a "level":
-
-<p align="center">
-  @image html "hanging_nodes.png" ""
-</p>
-
-In other words, the fine level in this mesh consists only of the
-degrees of freedom that are defined on the refined cells, but does not
-extend to that part of the domain that is not refined. While this
-guarantees that the overall effort grows as ${\cal O}(N)$ as necessary
-for optimal multigrid complexity, it leads to problems when defining
-where to smooth and what boundary conditions to pose for the operators
-defined on individual levels if the level boundary is not an external
-boundary. These questions are discussed in detail in the
-@ref mg_paper "Multigrid paper by Janssen and Kanschat" that describes
-the implementation in deal.II.
-
+This example shows the usage of the multilevel functions in deal.II on distributed
+meshes and gives a comparison between geometric and algebraic multigrid methods.
+The algebraic multigrid (AMG) preconditioner is the same as that used in step-40,
+and the geometric multigrid (GMG) preconditioner is based on the one used in
+step-16. Here we discuss the changes needed for parallel computations.
 
 
 <h3>The testcase</h3>
 
-The problem we solve here is exactly the same as in
-step-6, the only difference being the solver we use
-here. You may want to look there for a definition of what we solve,
-right hand side and boundary conditions. Obviously, the program would
-also work if we changed the geometry and other pieces of data that
-defines this particular problem.
-
-The things that are new are all those parts that concern the
-multigrid. In particular, this includes the following members of the
-main class:
-- <code>LaplaceProblem::mg_dof_handler</code>
-- <code>LaplaceProblem::mg_sparsity</code>
-- <code>LaplaceProblem::mg_matrices</code>
-- <code>LaplaceProblem::mg_interface_matrices_up</code>
-- <code>LaplaceProblem::assemble_multigrid ()</code>
-- <code>LaplaceProblem::solve ()</code>
-Take a look at these functions.
+We consider the variable-coefficient Laplacian weak formulation
+@f{align*}
+ (\epsilon \nabla u, \nabla v) = (f,v) \quad \forall v \in V_h
+@f}
+on the domain $\Omega = [-1,1]^\text{dim} \setminus [0,1]^\text{dim}$ (an L-shaped domain for 2D and a Fichera corner for 3D) with
+$\epsilon = 1$ if $\min(x,y,z)>-\frac{1}{2}$ and $\epsilon = 100$ otherwise. The
+boundary conditions are $u=0$ on the whole boundary and the right-hand side is $f=1$.
+We use continuous Q2 elements to discretize $V_h$ and use a residual-based, cell-wise a
+posteriori error estimator $e(K) = e_{\text{cell}}(K) + e_{\text{face}}(K)$ from
+_CITE EST PAPER_ with
+@f{align*}
+ e_{\text{cell}}(K) = h^2 \| f + \epsilon \triangle u \|_K^2, \qquad
+ e_{\text{face}}(K) = \sum_F h_F \| [ \epsilon \nabla u \cdot n ] \|_F^2.
+@f}
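+
+The coefficient $\epsilon$ is implemented in step-50.cc; as a minimal sketch,
+its scalar version (consistent with the vectorized overload added in this
+commit) could read
+@code
+template <int dim>
+double Coefficient<dim>::value(const Point<dim> &p, const unsigned int) const
+{
+  // epsilon = 100 where some coordinate is below -1/2, and 1 elsewhere.
+  for (int d = 0; d < dim; ++d)
+    if (p[d] < -0.5)
+      return 100.0;
+  return 1.0;
+}
+@endcode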
+The following figure visualizes the solution and refinement for 2D:
+<img src="https://www.dealii.org/images/steps/developer/step-50-2d-solution.png" alt="">
+In 3D, the solution (left) and a slice for $x$ close to the
+center of the domain showing the adaptively refined mesh (right) are depicted here:
+<table width="60%" align="center">
+  <tr>
+    <td align="center">
+      <img src="https://www.dealii.org/images/steps/developer/step-50-3d-solution.png" alt="">
+    </td>
+    <td align="center">
+      <img src="https://www.dealii.org/images/steps/developer/step-50-refinement.png" alt="">
+    </td>
+  </tr>
+</table>
+
+
+<h3>Workload imbalance</h3>
+For the active mesh, we use the parallel::distributed::Triangulation class as done
+in step-40, which uses functionality in the external library
+<a href="http://www.p4est.org/">p4est</a> for the distribution of the active cells
+among processors. For the non-active cells in the multilevel hierarchy, deal.II
+implements what we will refer to as the "first-child rule" where, for each cell
+in the hierarchy, we recursively assign the parent of a cell to the owner of the
+first child cell. The following figures give an example of such a distribution. Here
+the left image represents the active cells for a sample 2D mesh partitioned using a
+space-filling curve (similar to p4est), the center image gives the tree representation
+of the active mesh, and the right image gives the multilevel hierarchy of cells. The
+colors and numbers represent the different processors. The circular nodes in the tree
+are the non-active cells, which are distributed using the "first-child rule".
+
+<img src="https://www.dealii.org/images/steps/developer/step-50-workload-example.png" alt="">
+
+The screen output of this example includes a value "Workload imbalance"
+given by the function MGTools::workload_imbalance(). This value, which will be denoted
+by $\mathbb{E}$, quantifies the overhead produced by not having a perfect work balance
+on each level of the multigrid hierarchy (as is evident from the example above).
+
+To define $\mathbb{E}$, let $N_{\ell}$ be the number of cells on level $\ell$
+(both active and non-active cells) and $N_{\ell,p}$ the number of those cells
+owned by processor $p$. Assuming that the workload for any one processor is
+proportional to the number of cells owned by that processor, the optimal
+workload per processor is given by
+@f{align*}
+W_{\text{opt}}=\frac1{n_{p}}\sum_{\ell}\sum_{p}N_{\ell,p}=\frac1{n_{p}}\sum_{\ell} N_{\ell}.
+@f}
+Next, assuming a synchronization of work on each level (i.e., on each level of a vcycle,
+work must be completed by all processors before moving on to the next level), the
+limiting effort on each level is given by
+@f{align*}
+W_\ell = \max_{p} N_{\ell,p},
+@f}
+and the total parallel complexity is
+@f{align*}
+W = \sum_{\ell} W_\ell.
+@f}
+Then we define $\mathbb{E}$ as the ratio of the optimal workload to the parallel
+complexity of the current partition:
+@f{align*}
+  \mathbb{E} = \frac{W_{\text{opt}}}{W}.
+@f}
+For the example distribution above, we have
+@f{align*}
+W_{\text{opt}}&=\frac{1}{n_p}\sum_{\ell} N_{\ell} = \frac{1}{3} \left(1+4+4\right)= 3 \qquad
+\\
+W &= \sum_\ell W_\ell = 1 + 2 + 3 = 6
+\\
+\mathbb{E} &= \frac{W_{\text{opt}}}{W} = \frac12.
+@f}
+
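+The value $\mathbb{E}$ is computed for us by MGTools::workload_imbalance(), but
+it can also be assembled by hand from the per-level cell counts. A minimal
+sketch (not the library's implementation) could look like
+@code
+template <int dim>
+double estimate_workload_imbalance(
+  const parallel::distributed::Triangulation<dim> &tria)
+{
+  const MPI_Comm     comm   = tria.get_communicator();
+  const unsigned int n_proc = Utilities::MPI::n_mpi_processes(comm);
+
+  double w_opt = 0.;
+  double w     = 0.;
+  for (unsigned int level = 0; level < tria.n_global_levels(); ++level)
+    {
+      // N(level,p): cells on this level owned by the current processor.
+      unsigned int n_owned = 0;
+      for (const auto &cell : tria.cell_iterators_on_level(level))
+        if (cell->level_subdomain_id() == tria.locally_owned_subdomain())
+          ++n_owned;
+
+      // W_opt accumulates sum_p N(level,p) / n_p, W the per-level maximum.
+      w_opt += Utilities::MPI::sum(n_owned, comm) / double(n_proc);
+      w += Utilities::MPI::max<double>(n_owned, comm);
+    }
+  return w_opt / w; // = W_opt / W
+}
+@endcode
+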
+_CITE MG PAPER_ contains a full discussion of the partition efficiency model
+and the effect the imbalance has on the GMG vcycle timing. In summary, the value
+of $\mathbb{E}$ is highly dependent on the type of mesh refinement used and has
+optimal value $\mathbb{E} = 1$ for globally refined meshes. Typically for adaptively
+refined meshes, increasing the number of processors used to distribute a single mesh
+has a negative impact on $\mathbb{E}$, but only up to a leveling-off point beyond
+which the imbalance remains relatively constant for an increasing number of
+processors, and further refinement has very little impact on $\mathbb{E}$.
+Finally, $1/\mathbb{E}$ was shown to give an accurate representation of the
+slowdown in parallel scaling expected for the timing of a vcycle.
index 2d5fe0deb0319f2aac680d70f2f427388784947c..edbe4f8c48512c2bd2215bd3c8d6531a7c86bf82 100644 (file)
 <h1>Results</h1>
 
-The output that this program generates is, of course, the same as that
-of step-6, so you may see there for more results. On the
-other hand, since no tutorial program is a good one unless it has at
-least one colorful picture, here is, again, the solution:
+When you run the program, the screen output should look like the following:
+@code
+Cycle 0:
+   Number of active cells:       56 (2 global levels)
+   Workload imbalance:           1.14286
+   Number of degrees of freedom: 665 (by level: 117, 665)
+   Number of CG iterations:      10
 
 
-When run, the output of this program is
-<pre>
-Cycle 0:
-   Number of active cells:       20
-   Number of degrees of freedom: 25 (by level: 8, 25)
-   7 CG iterations needed to obtain convergence.
++---------------------------------------------+------------+------------+
+| Total wallclock time elapsed since start    |    0.0536s |            |
+|                                             |            |            |
+| Section                         | no. calls |  wall time | % of total |
++---------------------------------+-----------+------------+------------+
+| Assemble                        |         1 |    0.0026s |       4.8% |
+| Assemble multigrid              |         1 |   0.00303s |       5.6% |
+| Estimate                        |         1 |    0.0273s |        51% |
+| Setup                           |         1 |   0.00477s |       8.9% |
+| Setup multigrid                 |         1 |   0.00539s |        10% |
+| Solve                           |         1 |   0.00801s |        15% |
+| Solve: 1 GMG vcycle             |         1 |  0.000655s |       1.2% |
+| Solve: CG                       |         1 |   0.00472s |       8.8% |
+| Solve: GMG preconditioner setup |         1 |   0.00232s |       4.3% |
++---------------------------------+-----------+------------+------------+
+
 Cycle 1:
-   Number of active cells:       44
-   Number of degrees of freedom: 57 (by level: 8, 25, 48)
-   8 CG iterations needed to obtain convergence.
+   Number of active cells:       126 (3 global levels)
+   Workload imbalance:           1.17483
+   Number of degrees of freedom: 1672 (by level: 117, 665, 1100)
+   Number of CG iterations:      11
+
+
++---------------------------------------------+------------+------------+
+| Total wallclock time elapsed since start    |    0.0861s |            |
+|                                             |            |            |
+| Section                         | no. calls |  wall time | % of total |
++---------------------------------+-----------+------------+------------+
+| Assemble                        |         1 |   0.00578s |       6.7% |
+| Assemble multigrid              |         1 |   0.00745s |       8.7% |
+| Estimate                        |         1 |    0.0281s |        33% |
+| Refine grid                     |         1 |   0.00992s |        12% |
+| Setup                           |         1 |   0.00878s |        10% |
+| Setup multigrid                 |         1 |    0.0115s |        13% |
+| Solve                           |         1 |    0.0144s |        17% |
+| Solve: 1 GMG vcycle             |         1 |  0.000868s |         1% |
+| Solve: CG                       |         1 |   0.00879s |        10% |
+| Solve: GMG preconditioner setup |         1 |   0.00414s |       4.8% |
++---------------------------------+-----------+------------+------------+
+
 Cycle 2:
-   Number of active cells:       92
-   Number of degrees of freedom: 117 (by level: 8, 25, 80, 60)
-   9 CG iterations needed to obtain convergence.
-Cycle 3:
-   Number of active cells:       188
-   Number of degrees of freedom: 221 (by level: 8, 25, 80, 200)
-   12 CG iterations needed to obtain convergence.
-Cycle 4:
-   Number of active cells:       416
-   Number of degrees of freedom: 485 (by level: 8, 25, 89, 288, 280)
-   13 CG iterations needed to obtain convergence.
-Cycle 5:
-   Number of active cells:       800
-   Number of degrees of freedom: 925 (by level: 8, 25, 89, 288, 784, 132)
-   14 CG iterations needed to obtain convergence.
-Cycle 6:
-   Number of active cells:       1628
-   Number of degrees of freedom: 1865 (by level: 8, 25, 89, 304, 1000, 1164, 72)
-   14 CG iterations needed to obtain convergence.
-Cycle 7:
-   Number of active cells:       3194
-   Number of degrees of freedom: 3603 (by level: 8, 25, 89, 328, 1032, 2200, 1392)
-   16 CG iterations needed to obtain convergence.
-</pre>
-That's not perfect &mdash; we would have hoped for a constant number
-of iterations rather than one that increases as we get more and more
-degrees of freedom &mdash; but it is also not far away. The reason for
-this is easy enough to understand, however: since we have a strongly
-varying coefficient, the operators that we assembly by quadrature on
-the lower levels become worse and worse approximations of the operator
-on the finest level. Consequently, even if we had perfect solvers on
-the coarser levels, they would not be good preconditioners on the
-finest level. This theory is easily tested by comparing results when
-we use a constant coefficient: in that case, the number of iterations
-remains constant at 9 after the first three or four refinement steps.
-
-We can also compare what this program produces with how @ref step_5
-"step-5" performed. To solve the same problem as in step-5, the only
-two changes that are necessary are (i) to replace the body of the
-function <code>LaplaceProblem::refine_grid</code> by a call to
-<code>triangulation.refine_global(1)</code>, and (ii) to use the same
-SolverControl object and tolerance as in step-5 &mdash; the rest of the
-program remains unchanged. In that case, here is how the solvers used
-in step-5 and the multigrid solver used in the current program
-compare:
-<table align="center">
-<tr><th>cells</th><th>step-5</th><th>step-16</th></tr>
-<tr><td>20</td>   <td>13</td> <td>6</td> </tr>
-<tr><td>80</td>   <td>17</td> <td>7</td> </tr>
-<tr><td>320</td>  <td>29</td> <td>9</td> </tr>
-<tr><td>1280</td> <td>51</td> <td>10</td> </tr>
-<tr><td>5120</td> <td>94</td> <td>11</td> </tr>
-<tr><td>20480</td><td>180</td><td>13</td></tr>
+.
+.
+.
+@endcode
+Here, the timing of the `solve()` function is split into three parts: setting
+up the multigrid preconditioner, execution of a single multigrid vcycle, and
+the CG solver. The vcycle that is timed is unnecessary for the overall solve
+and is only meant to give an insight into the different costs of AMG and GMG.
+It should also be noted that when using the AMG solver, "Workload imbalance"
+is not included in the output since the hierarchy of coarse meshes is not
+required.
+
+In addition to the AMG and GMG solvers in this tutorial, we include timings
+from a third, matrix-free (MF) GMG solver on the same problem (see the possible
+extensions section for a discussion of what is required for the matrix-free
+solver). We will refer to the GMG solver in this tutorial as the matrix-based
+(MB) GMG solver.
+
+The following table gives the timings for setup, assembly, and solve for GMG and AMG
+on up to 256M DoFs and 7168 processors.
+<table align="center" class="doxtable">
+<tr>
+  <th></th>
+  <th>Procs</th>
+  <th>Cycle</th>
+  <th>DoFs</th>
+  <th>Imbalance</th>
+  <th></th>
+  <th>Setup</th>
+  <th>Setup GMG</th>
+  <th>Assemble</th>
+  <th>Assemble GMG</th>
+  <th>Solve</th>
+</tr>
+<tr>
+  <th>MF-GMG</th>
+  <td>112</td>
+  <td>13</td>
+  <td>4M</td>
+  <td>0.37</td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+</tr>
+<tr>
+  <th></th>
+  <td>448</td>
+  <td>15</td>
+  <td>16M</td>
+  <td>0.29</td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+</tr>
+<tr>
+  <th></th>
+  <td>1792</td>
+  <td>17</td>
+  <td>65M</td>
+  <td>0.22</td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+</tr>
+<tr>
+  <th></th>
+  <td>7168</td>
+  <td>19</td>
+  <td>256M</td>
+  <td>0.16</td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+</tr>
+<tr>
+  <th>MB-GMG</th>
+  <td>112</td>
+  <td>13</td>
+  <td>4M</td>
+  <td>0.37</td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+</tr>
+<tr>
+  <th></th>
+  <td>448</td>
+  <td>15</td>
+  <td>16M</td>
+  <td>0.29</td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+</tr>
+<tr>
+  <th></th>
+  <td>1792</td>
+  <td>17</td>
+  <td>65M</td>
+  <td>0.22</td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+</tr>
+<tr>
+  <th></th>
+  <td>7168</td>
+  <td>19</td>
+  <td>256M</td>
+  <td>0.16</td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+</tr>
+<tr>
+  <th>AMG</th>
+  <td>112</td>
+  <td>13</td>
+  <td>4M</td>
+  <td>-</td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+</tr>
+<tr>
+  <th></th>
+  <td>448</td>
+  <td>15</td>
+  <td>16M</td>
+  <td>-</td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+</tr>
+<tr>
+  <th></th>
+  <td>1792</td>
+  <td>17</td>
+  <td>65M</td>
+  <td>-</td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+</tr>
+<tr>
+  <th></th>
+  <td>7168</td>
+  <td>19</td>
+  <td>256M</td>
+  <td>-</td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+  <td></td>
+</tr>
 </table>
-This isn't only fewer iterations than in step-5 (each of which
-is, however, much more expensive) but more importantly, the number of
-iterations also grows much more slowly under mesh refinement (again,
-it would be almost constant if the coefficient was constant rather
-than strongly varying as chosen here). This justifies the common
-observation that, whenever possible, multigrid methods should be used
-for second order problems.
+
+The following figure gives the strong scaling for each method for cycles 16
+(32M DoFs) and 19 (256M DoFs) on 56 to 28672 processors.
+<img src="https://www.dealii.org/images/steps/developer/step-50-strong-scaling.png" alt="">
 
 
 <h3> Possible extensions </h3>
 
-A close inspection of this program's performance shows that it is mostly
-dominated by matrix-vector operations. step-37 shows one way
-how this can be avoided by working with matrix-free methods.
-
-Another avenue would be to use algebraic multigrid methods. The
-geometric multigrid method used here can at times be a bit awkward to
-implement because it needs all those additional data structures, and
-it becomes even more difficult if the program is to run in %parallel on
-machines coupled through MPI, for example. In that case, it would be
-simpler if one could use a black-box preconditioner that uses some
-sort of multigrid hierarchy for good performance but can figure out
-level matrices and similar things out by itself. Algebraic multigrid
-methods do exactly this, and we will use them in
-step-31 for the solution of a Stokes problem.
+<h4>Add matrix-free GMG preconditioner</h4>
+The results above include timings from a matrix-free GMG preconditioner,
+which is not currently part of this tutorial. See step-37 for an example
+of such a preconditioner for the Laplace equation.
+
+It should be noted that the MatrixFree class is only compatible with the
+dealii::LinearAlgebra::distributed::Vector class, while this tutorial uses either
+PETSc or Trilinos vectors. It may be useful to define functions which copy between
+the two types of vectors; for example, for Trilinos vectors one could use the
+following:
+@code
+namespace ChangeVectorTypes
+{
+  void import(TrilinosWrappers::MPI::Vector &                       out,
+              const dealii::LinearAlgebra::ReadWriteVector<double> &rwv,
+              const VectorOperation::values                         operation)
+  {
+    Assert(out.size() == rwv.size(),
+           ExcMessage(
+             "Both vectors need to have the same size for import() to work!"));
+
+    Assert(out.locally_owned_elements() == rwv.get_stored_elements(),
+           ExcNotImplemented());
+
+    if (operation == VectorOperation::insert)
+      {
+        for (const auto idx : out.locally_owned_elements())
+          out[idx] = rwv[idx];
+      }
+    else if (operation == VectorOperation::add)
+      {
+        for (const auto idx : out.locally_owned_elements())
+          out[idx] += rwv[idx];
+      }
+    else
+      AssertThrow(false, ExcNotImplemented());
+
+    out.compress(operation);
+  }
+
+
+  void copy(TrilinosWrappers::MPI::Vector &                           out,
+            const dealii::LinearAlgebra::distributed::Vector<double> &in)
+  {
+    dealii::LinearAlgebra::ReadWriteVector<double> rwv(
+      out.locally_owned_elements());
+    rwv.import(in, VectorOperation::insert);
+    // This import() function does not exist until after deal.II 9.0,
+    // so we use the version implemented above.
+    import(out, rwv, VectorOperation::insert);
+  }
+
+  void copy(dealii::LinearAlgebra::distributed::Vector<double> &out,
+            const TrilinosWrappers::MPI::Vector &               in)
+  {
+    dealii::LinearAlgebra::ReadWriteVector<double> rwv;
+    rwv.reinit(in);
+    out.import(rwv, VectorOperation::insert);
+  }
+}
+@endcode
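+
+As a hypothetical usage sketch (the vector names below are illustrative), one
+would then copy a matrix-free solution into a Trilinos vector via
+@code
+LinearAlgebra::distributed::Vector<double> mf_solution; // filled by the MF solver
+TrilinosWrappers::MPI::Vector              solution;    // used by the rest of the code
+// ... solve ...
+ChangeVectorTypes::copy(solution, mf_solution);
+@endcode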
+
index 2fd65590a0f71d7d58ebe166a64b99346b9f72f4..c7b7df11ca861c3e6b1f845510891d5586ce70fa 100644 (file)
@@ -1 +1 @@
-Multigrid on adaptive meshes.
+Multigrid on adaptive meshes distributed in parallel.
diff --git a/examples/step-50/gmg_2d.prm b/examples/step-50/gmg_2d.prm
deleted file mode 100644 (file)
index 60e5257..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-# Listing of Parameters
-# ---------------------
-set assembler       = GMG
-set dim             = 2
-# Number of adaptive refinement steps.
-set n_steps         = 20
-set output          = true
-
-# Select how to refine. Options: global|kelly|estimator
-set refinement type = estimator
-set smoother dampen = 1.0
-set smoother steps  = 1
diff --git a/examples/step-50/gmg_mb_2d.prm b/examples/step-50/gmg_mb_2d.prm
new file mode 100644 (file)
index 0000000..59b264d
--- /dev/null
@@ -0,0 +1,19 @@
+# Listing of Parameters
+# ---------------------
+# The problem dimension.
+set dim             = 2
+
+# Number of adaptive refinement steps.
+set n_steps         = 16
+
+# Output graphical results.
+set output          = true
+
+# Damping factor for the smoother.
+set smoother dampen = 1.0
+
+# Number of smoother steps.
+set smoother steps  = 1
+
+# Switch between matrix-free GMG, matrix-based GMG, and AMG.
+set solver          = MB
diff --git a/examples/step-50/gmg_mb_3d.prm b/examples/step-50/gmg_mb_3d.prm
new file mode 100644 (file)
index 0000000..a9d4f86
--- /dev/null
@@ -0,0 +1,19 @@
+# Listing of Parameters
+# ---------------------
+# The problem dimension.
+set dim             = 3
+
+# Number of adaptive refinement steps.
+set n_steps         = 12
+
+# Output graphical results.
+set output          = true
+
+# Damping factor for the smoother.
+set smoother dampen = 1.0
+
+# Number of smoother steps.
+set smoother steps  = 1
+
+# Switch between matrix-free GMG, matrix-based GMG, and AMG.
+set solver          = MB
diff --git a/examples/step-50/gmg_mf_2d.prm b/examples/step-50/gmg_mf_2d.prm
new file mode 100644 (file)
index 0000000..e4a3e20
--- /dev/null
@@ -0,0 +1,19 @@
+# Listing of Parameters
+# ---------------------
+# The problem dimension.
+set dim             = 2
+
+# Number of adaptive refinement steps.
+set n_steps         = 16
+
+# Output graphical results.
+set output          = true
+
+# Damping factor for the smoother.
+set smoother dampen = 1.0
+
+# Number of smoother steps.
+set smoother steps  = 1
+
+# Switch between matrix-free GMG, matrix-based GMG, and AMG.
+set solver          = MF
diff --git a/examples/step-50/gmg_mf_3d.prm b/examples/step-50/gmg_mf_3d.prm
new file mode 100644 (file)
index 0000000..d0d1a35
--- /dev/null
@@ -0,0 +1,19 @@
+# Listing of Parameters
+# ---------------------
+# The problem dimension.
+set dim             = 3
+
+# Number of adaptive refinement steps.
+set n_steps         = 12
+
+# Output graphical results.
+set output          = true
+
+# Damping factor for the smoother.
+set smoother dampen = 1.0
+
+# Number of smoother steps.
+set smoother steps  = 1
+
+# Switch between matrix-free GMG, matrix-based GMG, and AMG.
+set solver          = MF
index 355ccb7090b854bb75f382de763f9dbb207fd785..10bf160960fb158b7e19ccd1fbe9acf4aac09a46 100644 (file)
@@ -42,6 +42,9 @@
 #include <deal.II/lac/dynamic_sparsity_pattern.h>
 #include <deal.II/lac/solver_cg.h>
 #include <deal.II/lac/generic_linear_algebra.h>
+#include <deal.II/matrix_free/matrix_free.h>
+#include <deal.II/matrix_free/operators.h>
+#include <deal.II/matrix_free/fe_evaluation.h>
 #include <deal.II/multigrid/mg_coarse.h>
 #include <deal.II/multigrid/mg_constrained_dofs.h>
 #include <deal.II/multigrid/mg_matrix.h>
@@ -49,6 +52,7 @@
 #include <deal.II/multigrid/mg_tools.h>
 #include <deal.II/multigrid/mg_transfer.h>
 #include <deal.II/multigrid/multigrid.h>
+#include <deal.II/multigrid/mg_transfer_matrix_free.h>
 #include <deal.II/numerics/data_out.h>
 #include <deal.II/numerics/vector_tools.h>
 #include <deal.II/fe/fe_interface_values.h>
@@ -80,6 +84,41 @@ namespace LA
 
 using namespace dealii;
 
+
+
+#ifdef USE_PETSC_LA
+// No ChangeVectorTypes::copy() for PETSc vector types.
+// Vector::import() needs to be implemented.
+#else
+/**
+ * Matrix-free operators must use deal.II's own vector types; the rest of the
+ * code is based on Trilinos vectors.
+ */
+namespace ChangeVectorTypes
+{
+  template <typename number>
+  void copy(TrilinosWrappers::MPI::Vector &out,
+            const dealii::LinearAlgebra::distributed::Vector<number> &in)
+  {
+    dealii::LinearAlgebra::ReadWriteVector<double> rwv(
+      out.locally_owned_elements());
+    rwv.import(in, VectorOperation::insert);
+    out.import(rwv, VectorOperation::insert);
+  }
+
+  template <typename number>
+  void copy(dealii::LinearAlgebra::distributed::Vector<number> &out,
+            const TrilinosWrappers::MPI::Vector &in)
+  {
+    dealii::LinearAlgebra::ReadWriteVector<double> rwv;
+    rwv.reinit(in);
+    out.import(rwv, VectorOperation::insert);
+  }
+} // namespace ChangeVectorTypes
+#endif
+
+
+
 template <int dim>
 class RightHandSide : public Function<dim>
 {
@@ -99,6 +138,10 @@ class Coefficient : public Function<dim>
 public:
   virtual double value(const Point<dim> & p,
                        const unsigned int component = 0) const override;
+
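+  // Evaluate the coefficient for a whole batch of points at once (one value
+  // per SIMD lane), as required by the matrix-free framework below.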
+  template <typename number>
+  VectorizedArray<number> value(const Point<dim, VectorizedArray<number>> &p,
+                                const unsigned int component = 0) const;
 };
 
 
@@ -114,6 +157,32 @@ double Coefficient<dim>::value(const Point<dim> &p, const unsigned int) const
 }
 
 
+template <int dim>
+template <typename number>
+VectorizedArray<number>
+Coefficient<dim>::value(const Point<dim, VectorizedArray<number>> &p,
+                        const unsigned int) const
+{
+  VectorizedArray<number> return_value = VectorizedArray<number>();
+  for (unsigned int i = 0; i < VectorizedArray<number>::size(); ++i)
+    {
+      bool found = false;
+      for (int d = 0; d < dim; ++d)
+        if (p[d][i] < -0.5)
+          {
+            return_value[i] = 100.0;
+            found           = true;
+            break;
+          }
+
+      if (!found)
+        return_value[i] = 1.0;
+    }
+
+  return return_value;
+}
+
+
 
 void average(std::vector<double> &values)
 {
@@ -128,16 +197,196 @@ void average(std::vector<double> &values)
 
 
 
+/**
+ * Matrix-free Laplace operator
+ */
+template <int dim, int fe_degree, typename number>
+class LaplaceOperator
+  : public MatrixFreeOperators::Base<dim,
+                                     LinearAlgebra::distributed::Vector<number>>
+{
+public:
+  LaplaceOperator();
+
+  void clear() override;
+
+  void evaluate_coefficient(const Coefficient<dim> &coefficient_function);
+  Table<1, VectorizedArray<number>> get_coefficient_table();
+
+  virtual void compute_diagonal() override;
+
+private:
+  virtual void apply_add(
+    LinearAlgebra::distributed::Vector<number> &      dst,
+    const LinearAlgebra::distributed::Vector<number> &src) const override;
+
+  void
+  local_apply(const MatrixFree<dim, number> &                   data,
+              LinearAlgebra::distributed::Vector<number> &      dst,
+              const LinearAlgebra::distributed::Vector<number> &src,
+              const std::pair<unsigned int, unsigned int> &cell_range) const;
+
+  void local_compute_diagonal(
+    const MatrixFree<dim, number> &              data,
+    LinearAlgebra::distributed::Vector<number> & dst,
+    const unsigned int &                         dummy,
+    const std::pair<unsigned int, unsigned int> &cell_range) const;
+
+  Table<1, VectorizedArray<number>> coefficient;
+};
+
+
+template <int dim, int fe_degree, typename number>
+LaplaceOperator<dim, fe_degree, number>::LaplaceOperator()
+  : MatrixFreeOperators::Base<dim, LinearAlgebra::distributed::Vector<number>>()
+{}
+
+
+template <int dim, int fe_degree, typename number>
+void LaplaceOperator<dim, fe_degree, number>::clear()
+{
+  coefficient.reinit(TableIndices<1>(0));
+  MatrixFreeOperators::Base<dim, LinearAlgebra::distributed::Vector<number>>::
+    clear();
+}
+
+
+template <int dim, int fe_degree, typename number>
+void LaplaceOperator<dim, fe_degree, number>::evaluate_coefficient(
+  const Coefficient<dim> &coefficient_function)
+{
+  const unsigned int n_cells = this->data->n_macro_cells();
+  FEEvaluation<dim, fe_degree, fe_degree + 1, 1, number> phi(*this->data);
+
+  coefficient.reinit(TableIndices<1>(n_cells));
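+  // Store one coefficient value per batch of cells, obtained by averaging
+  // the variable coefficient over the quadrature points of the batch.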
+  for (unsigned int cell = 0; cell < n_cells; ++cell)
+    {
+      phi.reinit(cell);
+
+      VectorizedArray<number> averaged_value(0);
+      for (unsigned int q = 0; q < phi.n_q_points; ++q)
+        averaged_value += coefficient_function.value(phi.quadrature_point(q));
+      averaged_value /= phi.n_q_points;
+
+      coefficient(cell) = averaged_value;
+    }
+}
+
+
+template <int dim, int fe_degree, typename number>
+Table<1, VectorizedArray<number>>
+LaplaceOperator<dim, fe_degree, number>::get_coefficient_table()
+{
+  return coefficient;
+}
+
+
+template <int dim, int fe_degree, typename number>
+void LaplaceOperator<dim, fe_degree, number>::local_apply(
+  const MatrixFree<dim, number> &                   data,
+  LinearAlgebra::distributed::Vector<number> &      dst,
+  const LinearAlgebra::distributed::Vector<number> &src,
+  const std::pair<unsigned int, unsigned int> &     cell_range) const
+{
+  FEEvaluation<dim, fe_degree, fe_degree + 1, 1, number> phi(data);
+
+  for (unsigned int cell = cell_range.first; cell < cell_range.second; ++cell)
+    {
+      AssertDimension(coefficient.size(0), data.n_macro_cells());
+
+      phi.reinit(cell);
+      phi.read_dof_values(src);
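+      // Evaluate gradients only (flags: values=false, gradients=true),
+      // matching the weak form (epsilon grad u, grad v).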
+      phi.evaluate(false, true);
+      for (unsigned int q = 0; q < phi.n_q_points; ++q)
+        phi.submit_gradient(coefficient(cell) * phi.get_gradient(q), q);
+      phi.integrate(false, true);
+      phi.distribute_local_to_global(dst);
+    }
+}
+
+
+template <int dim, int fe_degree, typename number>
+void LaplaceOperator<dim, fe_degree, number>::apply_add(
+  LinearAlgebra::distributed::Vector<number> &      dst,
+  const LinearAlgebra::distributed::Vector<number> &src) const
+{
+  this->data->cell_loop(&LaplaceOperator::local_apply, this, dst, src);
+}
+
+
+template <int dim, int fe_degree, typename number>
+void LaplaceOperator<dim, fe_degree, number>::compute_diagonal()
+{
+  this->inverse_diagonal_entries.reset(
+    new DiagonalMatrix<LinearAlgebra::distributed::Vector<number>>());
+  LinearAlgebra::distributed::Vector<number> &inverse_diagonal =
+    this->inverse_diagonal_entries->get_vector();
+  this->data->initialize_dof_vector(inverse_diagonal);
+  unsigned int dummy = 0;
+  this->data->cell_loop(&LaplaceOperator::local_compute_diagonal,
+                        this,
+                        inverse_diagonal,
+                        dummy);
+
+  this->set_constrained_entries_to_one(inverse_diagonal);
+
+  for (unsigned int i = 0; i < inverse_diagonal.local_size(); ++i)
+    {
+      Assert(inverse_diagonal.local_element(i) > 0.,
+             ExcMessage("No diagonal entry in a positive definite operator "
+                        "should be zero"));
+      inverse_diagonal.local_element(i) =
+        1. / inverse_diagonal.local_element(i);
+    }
+}
+
+
+template <int dim, int fe_degree, typename number>
+void LaplaceOperator<dim, fe_degree, number>::local_compute_diagonal(
+  const MatrixFree<dim, number> &             data,
+  LinearAlgebra::distributed::Vector<number> &dst,
+  const unsigned int &,
+  const std::pair<unsigned int, unsigned int> &cell_range) const
+{
+  FEEvaluation<dim, fe_degree, fe_degree + 1, 1, number> phi(data);
+
+  AlignedVector<VectorizedArray<number>> diagonal(phi.dofs_per_cell);
+
+  for (unsigned int cell = cell_range.first; cell < cell_range.second; ++cell)
+    {
+      AssertDimension(coefficient.size(0), data.n_macro_cells());
+
+      phi.reinit(cell);
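+      // Apply the operator to each unit basis vector in turn and keep the
+      // i-th entry of the result: this yields the diagonal without
+      // assembling the full level matrix.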
+      for (unsigned int i = 0; i < phi.dofs_per_cell; ++i)
+        {
+          for (unsigned int j = 0; j < phi.dofs_per_cell; ++j)
+            phi.submit_dof_value(VectorizedArray<number>(), j);
+          phi.submit_dof_value(make_vectorized_array<number>(1.), i);
+
+          phi.evaluate(false, true);
+          for (unsigned int q = 0; q < phi.n_q_points; ++q)
+            phi.submit_gradient(coefficient(cell) * phi.get_gradient(q), q);
+          phi.integrate(false, true);
+          diagonal[i] = phi.get_dof_value(i);
+        }
+      for (unsigned int i = 0; i < phi.dofs_per_cell; ++i)
+        phi.submit_dof_value(diagonal[i], i);
+      phi.distribute_local_to_global(dst);
+    }
+}
+
+
+
 struct Settings
 {
   bool try_parse(const std::string &prm_filename);
 
-  enum AssembleEnum
+  enum SolverType
   {
-    gmg,
+    gmg_mb,
+    gmg_mf,
     amg
-  } assembler;
-  std::string assembler_text;
+  } solver;
 
   int          dimension;
   double       smoother_dampen;
@@ -149,10 +398,15 @@ struct Settings
 template <int dim>
 class LaplaceProblem
 {
-  typedef LA::MPI::SparseMatrix       MatrixType;
-  typedef LA::MPI::Vector             VectorType;
-  typedef LA::MPI::PreconditionAMG    PreconditionAMG;
-  typedef LA::MPI::PreconditionJacobi PreconditionJacobi;
+  using MatrixType         = LA::MPI::SparseMatrix;
+  using VectorType         = LA::MPI::Vector;
+  using PreconditionAMG    = LA::MPI::PreconditionAMG;
+  using PreconditionJacobi = LA::MPI::PreconditionJacobi;
+
+  using MatrixFreeLevelMatrix  = LaplaceOperator<dim, 2, float>;
+  using MatrixFreeActiveMatrix = LaplaceOperator<dim, 2, double>;
+  using MatrixFreeLevelVector  = LinearAlgebra::distributed::Vector<float>;
+  using MatrixFreeActiveVector = LinearAlgebra::distributed::Vector<double>;
 
 public:
   LaplaceProblem(const Settings &settings);
@@ -163,6 +417,7 @@ private:
   void setup_multigrid();
   void assemble_system();
   void assemble_multigrid();
+  void assemble_rhs_for_matrix_free();
   void solve();
   void estimate();
   void refine_grid();
@@ -183,15 +438,18 @@ private:
   IndexSet                  locally_relevant_set;
   AffineConstraints<double> constraints;
 
-  MatrixType     system_matrix;
-  VectorType     solution;
-  VectorType     right_hand_side;
-  Vector<double> estimate_vector;
+  MatrixType             system_matrix;
+  MatrixFreeActiveMatrix mf_system_matrix;
+  VectorType             solution;
+  VectorType             right_hand_side;
+  Vector<double>         estimate_vector;
 
   MGLevelObject<MatrixType> mg_matrix;
   MGLevelObject<MatrixType> mg_interface_in;
   MGConstrainedDoFs         mg_constrained_dofs;
 
+  MGLevelObject<MatrixFreeLevelMatrix> mf_mg_matrix;
+
   TimerOutput computing_timer;
 };
 
@@ -203,16 +461,16 @@ LaplaceProblem<dim>::LaplaceProblem(const Settings &settings)
   , pcout(std::cout, (Utilities::MPI::this_mpi_process(mpi_communicator) == 0))
   , triangulation(mpi_communicator,
                   Triangulation<dim>::limit_level_difference_at_vertices,
-                  (settings.assembler == Settings::amg) ?
+                  (settings.solver == Settings::amg) ?
                     parallel::distributed::Triangulation<dim>::default_setting :
                     parallel::distributed::Triangulation<
                       dim>::construct_multigrid_hierarchy)
   , mapping()
   , fe(2)
   , dof_handler(triangulation)
-  , computing_timer(pcout, TimerOutput::summary, TimerOutput::wall_times)
+  , computing_timer(pcout, TimerOutput::never, TimerOutput::wall_times)
 {
-  GridGenerator::hyper_L(triangulation, -1, 1, /*colorize*/ false);
+  GridGenerator::hyper_L(triangulation, -1., 1., /*colorize*/ false);
   triangulation.refine_global(1);
 }
 
@@ -222,7 +480,7 @@ bool Settings::try_parse(const std::string &prm_filename)
   ParameterHandler prm;
   prm.declare_entry("dim", "2", Patterns::Integer(), "The problem dimension.");
   prm.declare_entry("n_steps",
-                    "20",
+                    "10",
                     Patterns::Integer(0),
                     "Number of adaptive refinement steps.");
   prm.declare_entry("smoother dampen",
@@ -230,36 +488,46 @@ bool Settings::try_parse(const std::string &prm_filename)
                     Patterns::Double(0.0),
                     "Dampen factor for the smoother.");
   prm.declare_entry("smoother steps",
-                    "2",
+                    "1",
                     Patterns::Integer(1),
                     "Number of smoother steps.");
-  prm.declare_entry("assembler",
-                    "GMG",
-                    Patterns::Selection("GMG|AMG"),
-                    "Switch between GMG and AMG.");
+  prm.declare_entry(
+    "solver",
+    "MF",
+    Patterns::Selection("MF|MB|AMG"),
+    "Switch between matrix-free GMG,  matrix-based GMG, and AMG.");
   prm.declare_entry("output",
                     "false",
                     Patterns::Bool(),
                     "Output graphical results.");
 
+  if (prm_filename.size() == 0)
+    {
+      // No .prm file provided? Print the default values and exit.
+      if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
+        prm.print_parameters(std::cout, ParameterHandler::Text);
+      return false;
+    }
+
   try
     {
       prm.parse_input(prm_filename);
     }
-  catch (...)
+  catch (std::exception &e)
     {
       if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
-        prm.print_parameters(std::cout, ParameterHandler::Text);
+        std::cerr << e.what() << std::endl;
       return false;
     }
 
-  if (prm.get("assembler") == "GMG")
-    this->assembler = gmg;
-  else if (prm.get("assembler") == "AMG")
-    this->assembler = amg;
+  if (prm.get("solver") == "MF")
+    this->solver = gmg_mf;
+  else if (prm.get("solver") == "MB")
+    this->solver = gmg_mb;
+  else if (prm.get("solver") == "AMG")
+    this->solver = amg;
   else
     AssertThrow(false, ExcNotImplemented());
-  this->assembler_text = prm.get("assembler");
 
   this->dimension       = prm.get_integer("dim");
   this->n_steps         = prm.get_integer("n_steps");
@@ -291,28 +559,48 @@ void LaplaceProblem<dim>::setup_system()
     mapping, dof_handler, 0, Functions::ZeroFunction<dim>(), constraints);
   constraints.close();
 
-#ifdef USE_PETSC_LA
-  DynamicSparsityPattern dsp(locally_relevant_set);
-  DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints);
-
-  SparsityTools::distribute_sparsity_pattern(dsp,
-                                             locally_owned_set,
-                                             mpi_communicator,
-                                             locally_relevant_set);
 
-  system_matrix.reinit(locally_owned_set,
-                       locally_owned_set,
-                       dsp,
-                       mpi_communicator);
+  if (settings.solver == Settings::gmg_mf)
+    {
+      typename MatrixFree<dim, double>::AdditionalData additional_data;
+      additional_data.tasks_parallel_scheme =
+        MatrixFree<dim, double>::AdditionalData::none;
+      additional_data.mapping_update_flags =
+        (update_gradients | update_JxW_values | update_quadrature_points);
+      std::shared_ptr<MatrixFree<dim, double>> mf_storage(
+        new MatrixFree<dim, double>());
+      mf_storage->reinit(dof_handler,
+                         constraints,
+                         QGauss<1>(fe.degree + 1),
+                         additional_data);
+      mf_system_matrix.initialize(mf_storage);
+      mf_system_matrix.evaluate_coefficient(Coefficient<dim>());
+    }
+  else
+    {
+#ifdef USE_PETSC_LA
+      DynamicSparsityPattern dsp(locally_relevant_set);
+      DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints);
+
+      SparsityTools::distribute_sparsity_pattern(dsp,
+                                                 locally_owned_set,
+                                                 mpi_communicator,
+                                                 locally_relevant_set);
+
+      system_matrix.reinit(locally_owned_set,
+                           locally_owned_set,
+                           dsp,
+                           mpi_communicator);
 #else
-  TrilinosWrappers::SparsityPattern dsp(locally_owned_set,
-                                        locally_owned_set,
-                                        locally_relevant_set,
-                                        MPI_COMM_WORLD);
-  DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints);
-  dsp.compress();
-  system_matrix.reinit(dsp);
+      TrilinosWrappers::SparsityPattern dsp(locally_owned_set,
+                                            locally_owned_set,
+                                            locally_relevant_set,
+                                            MPI_COMM_WORLD);
+      DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints);
+      dsp.compress();
+      system_matrix.reinit(dsp);
 #endif
+    }
 }
 
 
@@ -330,77 +618,120 @@ void LaplaceProblem<dim>::setup_multigrid()
   mg_constrained_dofs.make_zero_boundary_constraints(dof_handler, bset);
 
   const unsigned int n_levels = triangulation.n_global_levels();
-  mg_matrix.resize(0, n_levels - 1);
-  mg_matrix.clear_elements();
-  mg_interface_in.resize(0, n_levels - 1);
-  mg_interface_in.clear_elements();
+  if (settings.solver == Settings::gmg_mf)
+    {
+      mf_mg_matrix.resize(0, n_levels - 1);
 
-  for (unsigned int level = 0; level < n_levels; ++level)
+      for (unsigned int level = 0; level < n_levels; ++level)
+        {
+          IndexSet relevant_dofs;
+          DoFTools::extract_locally_relevant_level_dofs(dof_handler,
+                                                        level,
+                                                        relevant_dofs);
+          AffineConstraints<double> level_constraints;
+          level_constraints.reinit(relevant_dofs);
+          level_constraints.add_lines(
+            mg_constrained_dofs.get_boundary_indices(level));
+          level_constraints.close();
+
+          typename MatrixFree<dim, float>::AdditionalData additional_data;
+          additional_data.tasks_parallel_scheme =
+            MatrixFree<dim, float>::AdditionalData::none;
+          additional_data.mapping_update_flags =
+            (update_gradients | update_JxW_values | update_quadrature_points);
+          additional_data.mg_level = level;
+          std::shared_ptr<MatrixFree<dim, float>> mf_storage_level(
+            new MatrixFree<dim, float>());
+          mf_storage_level->reinit(dof_handler,
+                                   level_constraints,
+                                   QGauss<1>(fe.degree + 1),
+                                   additional_data);
+
+          mf_mg_matrix[level].initialize(mf_storage_level,
+                                         mg_constrained_dofs,
+                                         level);
+
+          mf_mg_matrix[level].evaluate_coefficient(Coefficient<dim>());
+          mf_mg_matrix[level].compute_diagonal();
+        }
+    }
+  else
     {
-      IndexSet dofset;
-      DoFTools::extract_locally_relevant_level_dofs(dof_handler, level, dofset);
+      mg_matrix.resize(0, n_levels - 1);
+      mg_matrix.clear_elements();
+      mg_interface_in.resize(0, n_levels - 1);
+      mg_interface_in.clear_elements();
 
-      {
+      for (unsigned int level = 0; level < n_levels; ++level)
+        {
+          IndexSet dofset;
+          DoFTools::extract_locally_relevant_level_dofs(dof_handler,
+                                                        level,
+                                                        dofset);
+
+          {
 #ifdef USE_PETSC_LA
-        DynamicSparsityPattern dsp(dofset);
-        MGTools::make_sparsity_pattern(dof_handler, dsp, level);
-        dsp.compress();
-        SparsityTools::distribute_sparsity_pattern(
-          dsp,
-          dof_handler.locally_owned_mg_dofs(level),
-          mpi_communicator,
-          dofset);
-
-        mg_matrix[level].reinit(dof_handler.locally_owned_mg_dofs(level),
-                                dof_handler.locally_owned_mg_dofs(level),
-                                dsp,
-                                mpi_communicator);
+            DynamicSparsityPattern dsp(dofset);
+            MGTools::make_sparsity_pattern(dof_handler, dsp, level);
+            dsp.compress();
+            SparsityTools::distribute_sparsity_pattern(
+              dsp,
+              dof_handler.locally_owned_mg_dofs(level),
+              mpi_communicator,
+              dofset);
+
+            mg_matrix[level].reinit(dof_handler.locally_owned_mg_dofs(level),
+                                    dof_handler.locally_owned_mg_dofs(level),
+                                    dsp,
+                                    mpi_communicator);
 #else
-        TrilinosWrappers::SparsityPattern dsp(
-          dof_handler.locally_owned_mg_dofs(level),
-          dof_handler.locally_owned_mg_dofs(level),
-          dofset,
-          mpi_communicator);
-        MGTools::make_sparsity_pattern(dof_handler, dsp, level);
-
-        dsp.compress();
-        mg_matrix[level].reinit(dsp);
+            TrilinosWrappers::SparsityPattern dsp(
+              dof_handler.locally_owned_mg_dofs(level),
+              dof_handler.locally_owned_mg_dofs(level),
+              dofset,
+              mpi_communicator);
+            MGTools::make_sparsity_pattern(dof_handler, dsp, level);
+
+            dsp.compress();
+            mg_matrix[level].reinit(dsp);
 #endif
-      }
+          }
 
-      {
+          {
 #ifdef USE_PETSC_LA
-        DynamicSparsityPattern dsp(dofset);
-        MGTools::make_interface_sparsity_pattern(dof_handler,
-                                                 mg_constrained_dofs,
-                                                 dsp,
-                                                 level);
-        dsp.compress();
-        SparsityTools::distribute_sparsity_pattern(
-          dsp,
-          dof_handler.locally_owned_mg_dofs(level),
-          mpi_communicator,
-          dofset);
-
-        mg_interface_in[level].reinit(dof_handler.locally_owned_mg_dofs(level),
-                                      dof_handler.locally_owned_mg_dofs(level),
-                                      dsp,
-                                      mpi_communicator);
+            DynamicSparsityPattern dsp(dofset);
+            MGTools::make_interface_sparsity_pattern(dof_handler,
+                                                     mg_constrained_dofs,
+                                                     dsp,
+                                                     level);
+            dsp.compress();
+            SparsityTools::distribute_sparsity_pattern(
+              dsp,
+              dof_handler.locally_owned_mg_dofs(level),
+              mpi_communicator,
+              dofset);
+
+            mg_interface_in[level].reinit(
+              dof_handler.locally_owned_mg_dofs(level),
+              dof_handler.locally_owned_mg_dofs(level),
+              dsp,
+              mpi_communicator);
 #else
-        TrilinosWrappers::SparsityPattern dsp(
-          dof_handler.locally_owned_mg_dofs(level),
-          dof_handler.locally_owned_mg_dofs(level),
-          dofset,
-          mpi_communicator);
-
-        MGTools::make_interface_sparsity_pattern(dof_handler,
-                                                 mg_constrained_dofs,
-                                                 dsp,
-                                                 level);
-        dsp.compress();
-        mg_interface_in[level].reinit(dsp);
+            TrilinosWrappers::SparsityPattern dsp(
+              dof_handler.locally_owned_mg_dofs(level),
+              dof_handler.locally_owned_mg_dofs(level),
+              dofset,
+              mpi_communicator);
+
+            MGTools::make_interface_sparsity_pattern(dof_handler,
+                                                     mg_constrained_dofs,
+                                                     dsp,
+                                                     level);
+            dsp.compress();
+            mg_interface_in[level].reinit(dsp);
 #endif
-      }
+          }
+        }
     }
 }
 
@@ -550,6 +881,71 @@ void LaplaceProblem<dim>::assemble_multigrid()
 }
 
 
+template <int dim>
+void LaplaceProblem<dim>::assemble_rhs_for_matrix_free()
+{
+  TimerOutput::Scope timing(computing_timer, "Assemble right hand side");
+
+  MatrixFreeActiveVector solution_copy;
+  MatrixFreeActiveVector right_hand_side_copy;
+  mf_system_matrix.initialize_dof_vector(solution_copy);
+  mf_system_matrix.initialize_dof_vector(right_hand_side_copy);
+
+  solution_copy = 0.;
+  constraints.distribute(solution_copy);
+  solution_copy.update_ghost_values();
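+  // Assemble the right hand side in residual form, f(v) - a(u_c,v), where
+  // u_c carries the constrained values (homogeneous here); this pattern also
+  // works unchanged for inhomogeneous constraints.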
+  right_hand_side_copy = 0;
+  const Table<1, VectorizedArray<double>> coefficient_table =
+    mf_system_matrix.get_coefficient_table();
+
+  RightHandSide<dim> right_hand_side_function;
+
+  FEEvaluation<dim, 2, 3, 1, double> phi(*mf_system_matrix.get_matrix_free());
+
+  for (unsigned int cell = 0;
+       cell < mf_system_matrix.get_matrix_free()->n_macro_cells();
+       ++cell)
+    {
+      phi.reinit(cell);
+      phi.read_dof_values_plain(solution_copy);
+      phi.evaluate(false, true, false);
+
+      for (unsigned int q = 0; q < phi.n_q_points; ++q)
+        {
+          // Submit the gradient term -(coefficient * grad u_0), which moves
+          // the constrained solution values onto the right-hand side.
+          phi.submit_gradient(-1.0 *
+                                (coefficient_table(cell) * phi.get_gradient(q)),
+                              q);
+
+          // Evaluate the right-hand side function lane by lane, since it
+          // cannot act on vectorized points directly, then submit the values.
+          VectorizedArray<double> rhs_value =
+            make_vectorized_array<double>(1.0);
+          for (unsigned int i = 0; i < VectorizedArray<double>::size(); ++i)
+            {
+              Point<dim> p;
+              for (unsigned int d = 0; d < dim; ++d)
+                p(d) = phi.quadrature_point(q)(d)[i];
+
+              rhs_value[i] = right_hand_side_function.value(p);
+            }
+          phi.submit_value(rhs_value, q);
+        }
+
+      phi.integrate(true, true);
+      phi.distribute_local_to_global(right_hand_side_copy);
+    }
+
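+  // Accumulate the contributions that were written to non-owned (ghost)
+  // entries on their owning MPI ranks.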
+  right_hand_side_copy.compress(VectorOperation::add);
+#ifdef USE_PETSC_LA
+  AssertThrow(false,
+              ExcMessage("ChangeVectorTypes::copy() not implemented for "
+                         "PETSc vector types."));
+#else
+  ChangeVectorTypes::copy(right_hand_side, right_hand_side_copy);
+#endif
+}
+
+
 template <int dim>
 void LaplaceProblem<dim>::solve()
 {
@@ -557,63 +953,114 @@ void LaplaceProblem<dim>::solve()
 
   SolverControl solver_control(1000, 1.e-10 * right_hand_side.l2_norm());
   solver_control.enable_history_data();
-  SolverCG<VectorType> solver(solver_control);
 
   solution = 0.;
 
-  if (settings.assembler == Settings::amg)
+  if (settings.solver == Settings::gmg_mf)
     {
-      computing_timer.enter_subsection("Solve: AMG preconditioner setup");
+      computing_timer.enter_subsection("Solve: Preconditioner setup");
 
-      PreconditionAMG                 prec;
-      PreconditionAMG::AdditionalData Amg_data;
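+      // The level matrices and transfer operate in single precision (float):
+      // the V-cycle only acts as a preconditioner, so reduced precision is
+      // acceptable and roughly halves the memory traffic.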
+      MGTransferMatrixFree<dim, float> mg_transfer(mg_constrained_dofs);
+      mg_transfer.build(dof_handler);
+
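+      // The coarsest level is solved with plain (unpreconditioned) CG to a
+      // tight tolerance; the coarse problem is small enough for this to be
+      // cheap.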
+      SolverControl coarse_solver_control(1000, 1e-12, false, false);
+      SolverCG<MatrixFreeLevelVector> coarse_solver(coarse_solver_control);
+      PreconditionIdentity            identity;
+      MGCoarseGridIterativeSolver<MatrixFreeLevelVector,
+                                  SolverCG<MatrixFreeLevelVector>,
+                                  MatrixFreeLevelMatrix,
+                                  PreconditionIdentity>
+        coarse_grid_solver(coarse_solver, mf_mg_matrix[0], identity);
+
+      using Smoother = dealii::PreconditionJacobi<MatrixFreeLevelMatrix>;
+      MGSmootherPrecondition<MatrixFreeLevelMatrix,
+                             Smoother,
+                             MatrixFreeLevelVector>
+        smoother;
+      smoother.initialize(mf_mg_matrix,
+                          typename Smoother::AdditionalData(
+                            settings.smoother_dampen));
+      smoother.set_steps(settings.smoother_steps);
+
+      mg::Matrix<MatrixFreeLevelVector> mg_m(mf_mg_matrix);
+
+      MGLevelObject<
+        MatrixFreeOperators::MGInterfaceOperator<MatrixFreeLevelMatrix>>
+        mg_interface_matrices;
+      mg_interface_matrices.resize(0, triangulation.n_global_levels() - 1);
+      for (unsigned int level = 0; level < triangulation.n_global_levels();
+           ++level)
+        mg_interface_matrices[level].initialize(mf_mg_matrix[level]);
+      mg::Matrix<MatrixFreeLevelVector> mg_interface(mg_interface_matrices);
+
+      Multigrid<MatrixFreeLevelVector> mg(
+        mg_m, coarse_grid_solver, mg_transfer, smoother, smoother);
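+      // On adaptively refined meshes, the interface (edge) matrices couple
+      // degrees of freedom across refinement edges, as in the local-smoothing
+      // multigrid of step-16.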
+      mg.set_edge_matrices(mg_interface, mg_interface);
+
+      PreconditionMG<dim,
+                     MatrixFreeLevelVector,
+                     MGTransferMatrixFree<dim, float>>
+        preconditioner(dof_handler, mg, mg_transfer);
+
+      MatrixFreeActiveVector solution_copy;
+      MatrixFreeActiveVector right_hand_side_copy;
+      mf_system_matrix.initialize_dof_vector(solution_copy);
+      mf_system_matrix.initialize_dof_vector(right_hand_side_copy);
 
 #ifdef USE_PETSC_LA
-      Amg_data.symmetric_operator = true;
+      AssertThrow(false,
+                  ExcMessage("ChangeVectorTypes::copy() not implemented for "
+                             "PETSc vector types."));
 #else
-      Amg_data.elliptic              = true;
-      Amg_data.smoother_type         = "Jacobi";
-      Amg_data.higher_order_elements = true;
-      Amg_data.smoother_sweeps       = settings.smoother_steps;
-      Amg_data.aggregation_threshold = 0.02;
+      ChangeVectorTypes::copy(solution_copy, solution);
+      ChangeVectorTypes::copy(right_hand_side_copy, right_hand_side);
 #endif
+      computing_timer.leave_subsection("Solve: Preconditioner setup");
 
-      Amg_data.output_details = false;
-
-      prec.initialize(system_matrix, Amg_data);
-      computing_timer.leave_subsection("Solve: AMG preconditioner setup");
-
+      // Time a single multigrid V-cycle applied as the preconditioner.
       {
-        TimerOutput::Scope timing(computing_timer, "Solve: 1 AMG vcycle");
-        prec.vmult(solution, right_hand_side);
+        TimerOutput::Scope timing(computing_timer, "Solve: 1 multigrid vcycle");
+        preconditioner.vmult(solution_copy, right_hand_side_copy);
       }
-      solution = 0.;
+      solution_copy = 0.;
 
       {
+        SolverCG<MatrixFreeActiveVector> solver(solver_control);
+
         TimerOutput::Scope timing(computing_timer, "Solve: CG");
-        solver.solve(system_matrix, solution, right_hand_side, prec);
+        solver.solve(mf_system_matrix,
+                     solution_copy,
+                     right_hand_side_copy,
+                     preconditioner);
       }
+
+      solution_copy.update_ghost_values();
+#ifdef USE_PETSC_LA
+      AssertThrow(false,
+                  ExcMessage("ChangeVectorTypes::copy() not implemented for "
+                             "PETSc vector types."));
+#else
+      ChangeVectorTypes::copy(solution, solution_copy);
+#endif
       constraints.distribute(solution);
     }
-  else
+  else if (settings.solver == Settings::gmg_mb)
     {
-      computing_timer.enter_subsection("Solve: GMG preconditioner setup");
+      computing_timer.enter_subsection("Solve: Preconditioner setup");
 
       MGTransferPrebuilt<VectorType> mg_transfer(mg_constrained_dofs);
       mg_transfer.build(dof_handler);
 
-      MatrixType &         coarse_matrix = mg_matrix[0];
       SolverControl        coarse_solver_control(1000, 1e-12, false, false);
       SolverCG<VectorType> coarse_solver(coarse_solver_control);
       PreconditionIdentity identity;
-
       MGCoarseGridIterativeSolver<VectorType,
                                   SolverCG<VectorType>,
                                   MatrixType,
                                   PreconditionIdentity>
-        coarse_grid_solver(coarse_solver, coarse_matrix, identity);
+        coarse_grid_solver(coarse_solver, mg_matrix[0], identity);
 
-      typedef LA::MPI::PreconditionJacobi                      Smoother;
+      using Smoother = LA::MPI::PreconditionJacobi;
       MGSmootherPrecondition<MatrixType, Smoother, VectorType> smoother;
 
 #ifdef USE_PETSC_LA
@@ -640,31 +1087,62 @@ void LaplaceProblem<dim>::solve()
       PreconditionMG<dim, VectorType, MGTransferPrebuilt<VectorType>>
         preconditioner(dof_handler, mg, mg_transfer);
 
-      computing_timer.leave_subsection("Solve: GMG preconditioner setup");
+      computing_timer.leave_subsection("Solve: Preconditioner setup");
 
       {
-        TimerOutput::Scope timing(computing_timer, "Solve: 1 GMG vcycle");
+        TimerOutput::Scope timing(computing_timer, "Solve: 1 multigrid vcycle");
         preconditioner.vmult(solution, right_hand_side);
       }
       solution = 0.;
 
       {
+        SolverCG<VectorType> solver(solver_control);
+
         TimerOutput::Scope timing(computing_timer, "Solve: CG");
         solver.solve(system_matrix, solution, right_hand_side, preconditioner);
       }
 
       constraints.distribute(solution);
     }
+  else
+    {
+      computing_timer.enter_subsection("Solve: Preconditioner setup");
 
-  double rate = solver_control.final_reduction();
-  {
-    double r0 = right_hand_side.l2_norm();
-    double rn = solver_control.last_value();
-    rate      = 1.0 / solver_control.last_step() * log(r0 / rn) / log(10);
-  }
+      PreconditionAMG                 preconditioner;
+      PreconditionAMG::AdditionalData Amg_data;
+
+#ifdef USE_PETSC_LA
+      Amg_data.symmetric_operator = true;
+#else
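+      // Trilinos ML settings: mark the operator as elliptic and smooth with
+      // Jacobi sweeps, mirroring the smoother used in the GMG variants.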
+      Amg_data.elliptic = true;
+      Amg_data.smoother_type = "Jacobi";
+      Amg_data.higher_order_elements = true;
+      Amg_data.smoother_sweeps = settings.smoother_steps;
+      Amg_data.aggregation_threshold = 0.02;
+#endif
+
+      Amg_data.output_details = false;
 
-  pcout << "   CG iterations: " << solver_control.last_step()
-        << ", iters: " << 10.0 / rate << ", rate: " << rate << std::endl;
+      preconditioner.initialize(system_matrix, Amg_data);
+      computing_timer.leave_subsection("Solve: Preconditioner setup");
+
+      {
+        TimerOutput::Scope timing(computing_timer, "Solve: 1 multigrid vcycle");
+        preconditioner.vmult(solution, right_hand_side);
+      }
+      solution = 0.;
+
+      {
+        SolverCG<VectorType> solver(solver_control);
+
+        TimerOutput::Scope timing(computing_timer, "Solve: CG");
+        solver.solve(system_matrix, solution, right_hand_side, preconditioner);
+      }
+      constraints.distribute(solution);
+    }
+
+  pcout << "   Number of CG iterations:      " << solver_control.last_step()
+        << std::endl;
 }
 
 
@@ -743,7 +1221,7 @@ void LaplaceProblem<dim>::estimate()
   auto cell_worker = [&](const Iterator &  cell,
                          ScratchData<dim> &scratch_data,
                          CopyData &        copy_data) {
-    // assemble cell residual $h^2 \| f + \epsilon \triangle u \|_K^2$
+    /* assemble cell residual $h^2 \| f + \epsilon \triangle u \|_K^2$ */
 
     FEValues<dim> &fe_values = scratch_data.fe_values;
     fe_values.reinit(cell);
@@ -776,7 +1254,7 @@ void LaplaceProblem<dim>::estimate()
                          const unsigned int &nsf,
                          ScratchData<dim> &  scratch_data,
                          CopyData &          copy_data) {
-    // face term $\sum_F h_F \| [ \epsilon \nabla u \cdot n ] \|_F^2$
+    /* face term $\sum_F h_F \| [ \epsilon \nabla u \cdot n ] \|_F^2$ */
 
     FEInterfaceValues<dim> &fe_interface_values =
       scratch_data.fe_interface_values;
@@ -790,7 +1268,7 @@ void LaplaceProblem<dim>::estimate()
 
     const double nu1 = coefficient.value(cell->center());
     const double nu2 = coefficient.value(ncell->center());
-    const double h   = cell->face(f)->measure(); // TODO: FEIV.measure
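+    // The face measure (length in 2d, area in 3d) serves as the scaling h_F.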
+    const double h   = cell->face(f)->measure();
 
     std::vector<Tensor<1, dim>> grad_u[2];
 
@@ -871,14 +1349,13 @@ void LaplaceProblem<dim>::output_results(const unsigned int cycle)
 {
   TimerOutput::Scope timing(computing_timer, "Output results");
 
-  DataOut<dim> data_out;
-
   VectorType temp_solution;
   temp_solution.reinit(locally_owned_set,
                        locally_relevant_set,
                        mpi_communicator);
   temp_solution = solution;
 
+  DataOut<dim> data_out;
   data_out.attach_dof_handler(dof_handler);
   data_out.add_data_vector(temp_solution, "solution");
   Vector<float> subdomain(triangulation.n_active_cells());
@@ -896,32 +1373,10 @@ void LaplaceProblem<dim>::output_results(const unsigned int cycle)
 
   data_out.build_patches(0);
 
-  const std::string filename =
-    ("solution-" + Utilities::int_to_string(cycle, 5) + "." +
-     Utilities::int_to_string(triangulation.locally_owned_subdomain(), 4) +
-     ".vtu");
-  std::ofstream output(filename.c_str());
-  data_out.write_vtu(output);
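+  // write_vtu_with_pvtu_record() replaces the hand-rolled output below: the
+  // ranks write into n_groups .vtu files, and the name of the accompanying
+  // .pvtu record is returned.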
+  const std::string master = data_out.write_vtu_with_pvtu_record(
+    "", "solution", cycle, mpi_communicator, 2 /*n_digits*/, 1 /*n_groups*/);
 
-  if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
-    {
-      std::vector<std::string> filenames;
-      for (unsigned int i = 0;
-           i < Utilities::MPI::n_mpi_processes(mpi_communicator);
-           ++i)
-        filenames.push_back(std::string("solution-") +
-                            Utilities::int_to_string(cycle, 5) + "." +
-                            Utilities::int_to_string(i, 4) + ".vtu");
-      const std::string pvtu_master_filename =
-        ("solution-" + Utilities::int_to_string(cycle, 5) + ".pvtu");
-      std::ofstream pvtu_master(pvtu_master_filename.c_str());
-      data_out.write_pvtu_record(pvtu_master, filenames);
-
-      const std::string visit_master_filename =
-        ("solution-" + Utilities::int_to_string(cycle, 5) + ".visit");
-      std::ofstream visit_master(visit_master_filename.c_str());
-      DataOutBase::write_visit_record(visit_master, filenames);
-    }
+  pcout << "   Wrote " << master << std::endl;
 }
 
 
@@ -936,7 +1391,7 @@ void LaplaceProblem<dim>::run()
 
       pcout << "   Number of active cells:       "
             << triangulation.n_global_active_cells();
-      if (settings.assembler == Settings::gmg)
+      if (settings.solver != Settings::amg)
         pcout << " (" << triangulation.n_global_levels() << " global levels)"
               << std::endl
               << "   Workload imbalance:           "
@@ -944,11 +1399,11 @@ void LaplaceProblem<dim>::run()
       pcout << std::endl;
 
       setup_system();
-      if (settings.assembler == Settings::gmg)
+      if (settings.solver != Settings::amg)
         setup_multigrid();
 
       pcout << "   Number of degrees of freedom: " << dof_handler.n_dofs();
-      if (settings.assembler != Settings::amg)
+      if (settings.solver != Settings::amg)
         {
           pcout << " (by level: ";
           for (unsigned int level = 0; level < triangulation.n_global_levels();
@@ -959,9 +1414,14 @@ void LaplaceProblem<dim>::run()
         }
       pcout << std::endl;
 
-      assemble_system();
-      if (settings.assembler == Settings::gmg)
-        assemble_multigrid();
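+      // The matrix-free variant only needs a right-hand side; the
+      // matrix-based variants assemble the system matrix and, for GMG, the
+      // level and interface matrices as well.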
+      if (settings.solver == Settings::gmg_mf)
+        assemble_rhs_for_matrix_free();
+      else
+        {
+          assemble_system();
+          if (settings.solver == Settings::gmg_mb)
+            assemble_multigrid();
+        }
 
       solve();
       estimate();
@@ -977,8 +1437,8 @@ void LaplaceProblem<dim>::run()
 
 int main(int argc, char *argv[])
 {
-  dealii::Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
   using namespace dealii;
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
 
   Settings settings;
   if (!settings.try_parse((argc > 1) ? (argv[1]) : ""))
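
The solver variant (gmg_mf, gmg_mb, or amg) is selected entirely through the
parameter file passed as the first command-line argument. As a minimal usage
sketch, assuming the executable is built as step-50 and run from a directory
containing the .prm files added by this commit:

    mpirun -n 4 ./step-50 gmg_mf_2d.prm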
