https://gitweb.dealii.org/ - dealii.git/commitdiff
Add results
author    tcclevenger <tcleven@clemson.edu>
Thu, 23 Apr 2020 21:53:42 +0000 (17:53 -0400)
committer Timo Heister <timo.heister@gmail.com>
Sat, 9 May 2020 17:41:55 +0000 (13:41 -0400)
doc/doxygen/tutorial/tutorial.h.in
examples/step-50/doc/builds-on
examples/step-50/doc/intro.dox
examples/step-50/doc/results.dox
examples/step-50/step-50.cc

index de48ed344d6421e1395edc3c8d05fe50e4be3f73..a087a46422e5ed92f54c38ac2455a907ee6c691f 100644 (file)
  *   <tr valign="top">
  *       <td>step-50</td>
  *       <td> Geometric multigrid on adaptive meshes distributed in parallel.
- *       <br/> Keywords: Multigrid, MGLevelObject, MGConstrainedDoFs, IndexSet, MGTools, PreconditionMG, FEInterfaceValues, MeshWorker::mesh_loop()
+ *       <br/> Keywords: Multigrid, MGLevelObject, MGConstrainedDoFs, IndexSet, MGTools, PreconditionMG, MatrixFree, FEInterfaceValues, MeshWorker::mesh_loop()
  *       </td></tr>
  *
  *   <tr valign="top">
index 79df0eea06d4503d784a1f387a9070c0514770ee..d18176116de77aaa9201d97332720006340b323c 100644 (file)
@@ -1 +1 @@
-step-16 step-40
+step-16 step-37 step-40
index eaffbcbd427e8b9fe5fb94b89ddb3d6f1e05f189..a383912a9b730df482167d8c5ef9921170bea06b 100644 (file)
@@ -19,12 +19,11 @@ libraries is described in the <a href="../../readme.html" target="body">README</
 <h1>Introduction</h1>
 
 
-This example shows the usage of the multilevel functions in deal.II on distributed meshes
-and gives a comparison between geometric and algebraic multigrid methods. The algebraic
-multigrid (AMG) preconditioner is the same used in step-40, and the geometric multigrid
-(GMG) preconditioner is based on the one used in step-16. Here we discuss the
-necessary changes needed for parallel computations.
-
+This example shows the usage of the multilevel functions in deal.II on distributed
+meshes and gives a comparison between geometric and algebraic multigrid methods.
+The algebraic multigrid (AMG) preconditioner is the same as the one used in step-40. Two geometric
+multigrid (GMG) preconditioners are considered: a matrix-based version similar to that
+in step-16 (but for parallel computations) and a matrix-free version discussed in step-37.
 
 <h3>The testcase</h3>
 
@@ -32,30 +31,31 @@ We consider the variable-coefficient Laplacian weak formulation
 @f{align*}
  (\epsilon \nabla u, \nabla v) = (f,v) \quad \forall v \in V_h
 @f}
-on the domain $\Omega = [-1,1]^\text{dim} \setminus [0,1]^\text{dim}$ (an L-shaped domain for 2D and a Fichera corner for 3D) with
-$\epsilon = 1$ if $\min(x,y,z)>-\frac{1}{2}$ and $\epsilon = 100$ otherwise. The
-boundary conditions are $u=0$ on the whole boundary and the right-hand side is $f=1$.
-We use continuous Q2 elements to discretize $V_h$ and use a residual-based, cell-wise a
-posteriori error estimator $e(K) = e_{\text{cell}}(K) + e_{\text{face}}(K)$ from
-_CITE EST PAPER_ with
+on the domain $\Omega = [-1,1]^\text{dim} \setminus [0,1]^\text{dim}$ (an L-shaped domain
+for 2D and a Fichera corner for 3D) with $\epsilon = 1$ if $\min(x,y,z)>-\frac{1}{2}$ and
+$\epsilon = 100$ otherwise. The boundary conditions are $u=0$ on the whole boundary and
+the right-hand side is $f=1$. We use continuous Q2 elements to discretize $V_h$ and use a
+residual-based, cell-wise a posteriori error estimator
+$e(K) = e_{\text{cell}}(K) + e_{\text{face}}(K)$ from _CITE EST PAPER_ with
 @f{align*}
  e_{\text{cell}}(K) = h^2 \| f + \epsilon \triangle u \|_K^2, \qquad
  e_{\text{face}}(K) = \sum_F h_F \| [ \epsilon \nabla u \cdot n ] \|_F^2.
 @f}
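+Both terms are assembled in the <code>estimate()</code> function of the program
+using MeshWorker::mesh_loop(); as an illustration, a minimal sketch of the cell
+term (assuming, inside a cell worker, an FEValues object <code>fe_values</code>
+initialized with update_hessians and update_JxW_values, the number of quadrature
+points <code>n_q_points</code>, a ghosted solution vector
+<code>locally_relevant_solution</code>, and cell-wise constant values
+<code>rhs_value</code> and <code>epsilon</code>) could look like
+@code
+std::vector<Tensor<2, dim>> hessians(n_q_points);
+fe_values.get_function_hessians(locally_relevant_solution, hessians);
+
+// Accumulate h^2 * || f + epsilon * Delta u ||_K^2 over the cell K.
+double e_cell = 0;
+for (unsigned int q = 0; q < n_q_points; ++q)
+  e_cell += cell->diameter() * cell->diameter() *
+            Utilities::fixed_power<2>(rhs_value +
+                                      epsilon * trace(hessians[q])) *
+            fe_values.JxW(q);
+@endcode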
-The following figure visualizes the solution and refinement for 2D
-<img src="https://www.dealii.org/images/steps/developer/step-50-2d-solution.png" alt="">
-and for 3D, the solution(left) and a slice for $x$ close to the
-center of the domain showing the adaptively refined mesh (right) are depicted here
+The following figure visualizes the solution and refinement for 2D:
+<img width="400px" src="https://www.dealii.org/images/steps/developer/step-50-2d-solution.png" alt="">
+In 3D, the solution looks similar (see below): the left image shows the
+solution and the right image shows a slice for $x$ close to the center of the
+domain with the adaptively refined mesh.
 <table width="60%" align="center">
   <tr>
     <td align="center">
-      <img src="https://www.dealii.org/images/steps/developer/step-50-3d-solution.png" alt="">
+      <img width="400px" src="https://www.dealii.org/images/steps/developer/step-50-3d-solution.png" alt="">
     </td>
     <td align="center">
-      <img src="https://www.dealii.org/images/steps/developer/step-50-refinement.png" alt="">
+      <img width="400px" src="https://www.dealii.org/images/steps/developer/step-50-refinement.png" alt="">
     </td>
   </tr>
 </table>
+Both in 2D and 3D you can see the adaptive refinement picking up the corner
+singularity and the inner singularity where the viscosity jumps, while the
+interface along the line that separates the two viscosities is (correctly)
+not refined as it is resolved adequately.
 
 
 <h3>Workload imbalance</h3>
@@ -72,21 +72,24 @@ of the active mesh, and the right image gives the multilevel hierarchy of cells.
 colors and numbers represent the different processors. The circular nodes in the tree
 are the non-active cells which are distributed using the ``first-child rule''.
 
-<img src="https://www.dealii.org/images/steps/developer/step-50-workload-example.png" alt="">
+<img width="400px" src="https://www.dealii.org/images/steps/developer/step-50-workload-example.png" alt="">
 
-Included among the output to screen in this example is a value ``Workload imbalance''
-given by the function MGTools::workload_imbalance(). This value, which will be denoted
+Included among the output to screen in this example is a value ``Partition efficiency''
+given by 1/MGTools::workload_imbalance(). This value, which will be denoted
 by $\mathbb{E}$, quantifies the overhead produced by not having a perfect work balance
 on each level of the multigrid hierarchy (as is evident from the example above).
 
-For defining $\mathbb{E}$, let $N_{\ell}$ be the number of cells on level $\ell$
-(both active and non-active cells) and $N_{\ell,p}$ of the subset owned by processor
+For defining $\mathbb{E}$, it is important to note that, as we are using local smoothing
+to define the multigrid hierarchy (see the @ref mg_paper "multigrid paper" for a description of
+local smoothing), the refinement level of a cell corresponds to that cell's multigrid
+level. Now, let $N_{\ell}$ be the number of cells on level $\ell$
+(both active and non-active cells) and $N_{\ell,p}$ be the subset owned by process
 $p$. Assuming that the workload for any one processor is proportional to the number
 of cells owned by that processor, the optimal workload per processor is given by
 @f{align*}
-W_{\text{opt}}=\frac1{n_{p}}\sum_{\ell}\sum_{p}N_{\ell,p}=\frac1{n_{p}}\sum_{\ell} N_{\ell}.
+W_{\text{opt}} = \sum_{\ell}\frac1{n_{p}}\sum_{p}N_{\ell,p}=\frac1{n_{p}}\sum_{\ell} N_{\ell}.
 @f}
-Next, assuming a synchronization of work on each level (i.e., on each level of a vcycle,
+Next, assuming a synchronization of work on each level (i.e., on each level of a V-cycle,
 work must be completed by all processors before moving on to the next level), the
 limiting effort on each level is given by
 @f{align*}
@@ -109,14 +112,25 @@ W &= \sum_\ell W_\ell = 1 + 2 + 3 = 6
 \\
 \mathbb{E} &= \frac{W_{\text{opt}}}{W} = \frac12.
 @f}
+The value MGTools::workload_imbalance()$= 1/\mathbb{E}$ then represents the factor
+by which we expect timings for GMG methods (vmults, assembly, etc.) to increase
+due to the imbalance of the mesh partition.
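+A minimal sketch of how these quantities are obtained in the program (the same
+call is used in <code>run()</code> below):
+@code
+// Imbalance factor of the multigrid hierarchy; it is at least 1,
+// with 1 corresponding to a perfectly balanced partition.
+const double imbalance = MGTools::workload_imbalance(triangulation);
+// The partition efficiency \mathbb{E} discussed above.
+const double efficiency = 1.0 / imbalance;
+@endcode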
 
 _CITE MG PAPER_ contains a full discussion of the partition efficiency model
-and the effect the imbalance has on the GMG vcycle timing. In summary, the value
-of $\mathbb{E}$ is highly dependent on the type a mesh refinement used and has
-optimal value $\mathbb{E} = 1$ for globally refined meshes. Typically for adaptively
+and the effect the imbalance has on the GMG V-cycle timing. In summary, the value
+of $\mathbb{E}$ is highly dependent on the degree of local mesh refinement used and has
+an optimal value $\mathbb{E} \approx 1$ for globally refined meshes. Typically for adaptively
 refined meshes, the number of processors used to distribute a single mesh has a
 negative impact on $\mathbb{E}$ but only up to a leveling off point, where the imbalance
 remains relatively constant for an increasing number of processors, and further refinement
 has very little impact on $\mathbb{E}$. Finally, $1/\mathbb{E}$ was shown to give an
 accurate representation of the slowdown in parallel scaling expected for the timing of
-a vcycle.
+a V-cycle.
+
+It should be noted that there is potential for some asynchronous work between
+multigrid levels, specifically with purely nearest-neighbor MPI communication,
+and an adaptive mesh could be constructed such that the efficiency model (which
+assumes synchronization over levels) would far overestimate the V-cycle slowdown
+because the asynchronous work ``covers up'' the imbalance. However, for most
+realistic adaptive meshes the expectation is that this asynchronous work will
+only cover up a very small portion of the imbalance and that the efficiency
+model will describe the slowdown well.
index edbe4f8c48512c2bd2215bd3c8d6531a7c86bf82..06caf6bbec9e01b1c0542fcb6094d53be571816c 100644 (file)
@@ -7,22 +7,23 @@ Cycle 0:
    Workload imbalance:           1.14286
    Number of degrees of freedom: 665 (by level: 117, 665)
    Number of CG iterations:      10
+   Wrote solution_00.pvtu
 
 
 +---------------------------------------------+------------+------------+
-| Total wallclock time elapsed since start    |    0.0536s |            |
+| Total wallclock time elapsed since start    |    0.0457s |            |
 |                                             |            |            |
 | Section                         | no. calls |  wall time | % of total |
 +---------------------------------+-----------+------------+------------+
-| Assemble                        |         1 |    0.0026s |       4.8% |
-| Assemble multigrid              |         1 |   0.00303s |       5.6% |
-| Estimate                        |         1 |    0.0273s |        51% |
-| Setup                           |         1 |   0.00477s |       8.9% |
-| Setup multigrid                 |         1 |   0.00539s |        10% |
-| Solve                           |         1 |   0.00801s |        15% |
-| Solve: 1 GMG vcycle             |         1 |  0.000655s |       1.2% |
-| Solve: CG                       |         1 |   0.00472s |       8.8% |
-| Solve: GMG preconditioner setup |         1 |   0.00232s |       4.3% |
+| Assemble right hand side        |         1 |  0.000241s |      0.53% |
+| Estimate                        |         1 |    0.0288s |        63% |
+| Output results                  |         1 |   0.00219s |       4.8% |
+| Setup                           |         1 |   0.00264s |       5.8% |
+| Setup multigrid                 |         1 |   0.00261s |       5.7% |
+| Solve                           |         1 |   0.00355s |       7.8% |
+| Solve: 1 multigrid vcycle       |         1 |  0.000315s |      0.69% |
+| Solve: CG                       |         1 |   0.00186s |       4.1% |
+| Solve: Preconditioner setup     |         1 |  0.000968s |       2.1% |
 +---------------------------------+-----------+------------+------------+
 
 Cycle 1:
@@ -30,23 +31,24 @@ Cycle 1:
    Workload imbalance:           1.17483
    Number of degrees of freedom: 1672 (by level: 117, 665, 1100)
    Number of CG iterations:      11
+   Wrote solution_01.pvtu
 
 
 +---------------------------------------------+------------+------------+
-| Total wallclock time elapsed since start    |    0.0861s |            |
+| Total wallclock time elapsed since start    |    0.0433s |            |
 |                                             |            |            |
 | Section                         | no. calls |  wall time | % of total |
 +---------------------------------+-----------+------------+------------+
-| Assemble                        |         1 |   0.00578s |       6.7% |
-| Assemble multigrid              |         1 |   0.00745s |       8.7% |
-| Estimate                        |         1 |    0.0281s |        33% |
-| Refine grid                     |         1 |   0.00992s |        12% |
-| Setup                           |         1 |   0.00878s |        10% |
-| Setup multigrid                 |         1 |    0.0115s |        13% |
-| Solve                           |         1 |    0.0144s |        17% |
-| Solve: 1 GMG vcycle             |         1 |  0.000868s |         1% |
-| Solve: CG                       |         1 |   0.00879s |        10% |
-| Solve: GMG preconditioner setup |         1 |   0.00414s |       4.8% |
+| Assemble right hand side        |         1 |  0.000286s |      0.66% |
+| Estimate                        |         1 |    0.0272s |        63% |
+| Output results                  |         1 |   0.00333s |       7.7% |
+| Refine grid                     |         1 |   0.00196s |       4.5% |
+| Setup                           |         1 |    0.0023s |       5.3% |
+| Setup multigrid                 |         1 |   0.00262s |         6% |
+| Solve                           |         1 |   0.00549s |        13% |
+| Solve: 1 multigrid vcycle       |         1 |  0.000343s |      0.79% |
+| Solve: CG                       |         1 |   0.00293s |       6.8% |
+| Solve: Preconditioner setup     |         1 |   0.00174s |         4% |
 +---------------------------------+-----------+------------+------------+
 
 Cycle 2:
@@ -54,258 +56,160 @@ Cycle 2:
 .
 .
 @endcode
-Here, the timing of the `solve()` function is spilt up in 3 parts: setting
+Here, the timing of the `solve()` function is split up into three parts: setting
 up the multigrid preconditioner, execution of a single multigrid V-cycle, and
 the CG solver. The V-cycle that is timed is unnecessary for the overall solve
 and is only meant to give an insight into the different costs for AMG and GMG.
 Also it should be noted that when using the AMG solver, ``Workload imbalance''
-is not included in the output since the hierarchy of coarse meshes are not
+is not included in the output since the hierarchy of coarse meshes is not
 required.
 
-In addition to the AMG and GMG solvers in this tutorial, included will be timings
-from a 3rd matrix-free (MF) GMG solver on the same problem (see possible extensions
-for a discussion on what is required for the matrix-free solver). We will refer to
-the GMG solver in tutorial as the matrix-based (MB) GMG solver.
+All results in this section are gathered on Intel Xeon Platinum 8280 (Cascade
+Lake) nodes, each with 56 cores and 192GB of memory, supporting AVX-512 instructions
+that allow for vectorization over 8 doubles (vectorization is used only in the
+matrix-free computations). The code is compiled using gcc 7.1.0 with intel-mpi 17.0.3.
+Trilinos 12.10.1 is used for the matrix-based GMG/AMG computations.
+
+The following table gives weak scaling timings for this program on up to 256M DoFs
+and 7168 processors. Here, $\mathbb{E}$ is the partition efficiency from the
+introduction (also equal to 1.0/workload imbalance), ``Setup'' is a combination
+of setup, setup multigrid, assemble, and assemble multigrid from the timing blocks,
+and ``Prec'' is the preconditioner setup. Ideally all times would stay constant
+over each problem size for the individual solvers, but since the partition
+efficiency decreases from 0.371 to 0.161 from smallest to largest problem size,
+we expect to see an approximately $0.371/0.161 \approx 2.3$ times increase in
+timings for GMG.
 
-The following table gives the timings for setup, assembly, and solve for GMG and AMG
-on up to 256M DoFs and 7168 processors.
 <table align="center" class="doxtable">
 <tr>
+  <th colspan="4"></th>
   <th></th>
-  <th>Procs</th>
-  <th>Cycle</th>
-  <th>DoFs</th>
-  <th>Imbalance</th>
+  <th colspan="4">MF-GMG</th>
   <th></th>
-  <th>Setup</th>
-  <th>Setup GMG</th>
-  <th>Assemble</th>
-  <th>Assemble GMG</th>
-  <th>Solve</th>
-</tr>
-<tr>
-  <th>MF-GMG</th>
-  <td>112</th>
-  <td>13</th>
-  <td>4M</th>
-  <td>0.37</th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
-</tr>
-<tr>
+  <th colspan="4">MB-GMG</th>
   <th></th>
-  <td>448</th>
-  <td>15</th>
-  <td>16M</th>
-  <td>0.29</th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
+  <th colspan="4">AMG</th>
 </tr>
 <tr>
+  <th align="right">Procs</th>
+  <th align="right">Cycle</th>
+  <th align="right">DoFs</th>
+  <th align="right">$\mathbb{E}$</th>
   <th></th>
-  <td>1792</th>
-  <td>17</th>
-  <td>65M</th>
-  <td>0.22</th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
-</tr>
-<tr>
+  <th align="right">Setup</th>
+  <th align="right">Prec</th>
+  <th align="right">Solve</th>
+  <th align="right">Total</th>
   <th></th>
-  <td>7168</th>
-  <td>19</th>
-  <td>256M</th>
-  <td>0.16</th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
-</tr>
-<tr>
-  <th>MB-GMG</th>
-  <td>112</th>
-  <td>13</th>
-  <td>4M</th>
-  <td>0.37</th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
-</tr>
-<tr>
+  <th align="right">Setup</th>
+  <th align="right">Prec</th>
+  <th align="right">Solve</th>
+  <th align="right">Total</th>
   <th></th>
-  <td>448</th>
-  <td>15</th>
-  <td>16M</th>
-  <td>0.29</th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
+  <th align="right">Setup</th>
+  <th align="right">Prec</th>
+  <th align="right">Solve</th>
+  <th align="right">Total</th>
 </tr>
 <tr>
-  <th></th>
-  <td>1792</th>
-  <td>17</th>
-  <td>65M</th>
-  <td>0.22</th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
+  <td align="right">112</th>
+  <td align="right">13</th>
+  <td align="right">4M</th>
+  <td align="right">0.37</th>
+  <td></th>
+  <td align="right">0.742</th>
+  <td align="right">0.393</th>
+  <td align="right">0.200</th>
+  <td align="right">1.335</th>
+  <td></th>
+  <td align="right">1.714</th>
+  <td align="right">2.934</th>
+  <td align="right">0.716</th>
+  <td align="right">5.364</th>
+  <td></th>
+  <td align="right">1.544</th>
+  <td align="right">0.456</th>
+  <td align="right">1.150</th>
+  <td align="right">3.150</th>
 </tr>
 <tr>
-  <th></th>
-  <td>7168</th>
-  <td>19</th>
-  <td>256M</th>
-  <td>0.16</th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
+  <td align="right">448</th>
+  <td align="right">15</th>
+  <td align="right">16M</th>
+  <td align="right">0.29</th>
+  <td></th>
+  <td align="right">0.884</th>
+  <td align="right">0.535</th>
+  <td align="right">0.253</th>
+  <td align="right">1.672</th>
+  <td></th>
+  <td align="right">1.927</th>
+  <td align="right">3.776</th>
+  <td align="right">1.190</th>
+  <td align="right">6.893</th>
+  <td></th>
+  <td align="right">1.544</th>
+  <td align="right">0.456</th>
+  <td align="right">1.150</th>
+  <td align="right">3.150</th>
 </tr>
 <tr>
-  <th>AMG</th>
-  <td>112</th>
-  <td>13</th>
-  <td>4M</th>
-  <td>-</th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
+  <td align="right">1792</th>
+  <td align="right">17</th>
+  <td align="right">65M</th>
+  <td align="right">0.22</th>
+  <td></th>
+  <td align="right">1.122</th>
+  <td align="right">0.686</th>
+  <td align="right">0.309</th>
+  <td align="right">2.117</th>
+  <td></th>
+  <td align="right">2.171</th>
+  <td align="right">4.862</th>
+  <td align="right">1.660</th>
+  <td align="right">8.693</th>
+  <td></th>
+  <td align="right">1.654</th>
+  <td align="right">0.546</th>
+  <td align="right">1.460</th>
+  <td align="right">3.660</th>
 </tr>
 <tr>
-  <th></th>
-  <td>448</th>
-  <td>15</th>
-  <td>16M</th>
-  <td>-</th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
-</tr>
-<tr>
-  <th></th>
-  <td>1792</th>
-  <td>17</th>
-  <td>65M</th>
-  <td>-</th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
-</tr>
-<tr>
-  <th></th>
-  <td>7168</th>
-  <td>19</th>
-  <td>256M</th>
-  <td>-</th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
-  <td></th>
+  <td align="right">7168</th>
+  <td align="right">19</th>
+  <td align="right">256M</th>
+  <td align="right">0.16</th>
+  <td></th>
+  <td align="right">1.214</th>
+  <td align="right">0.893</th>
+  <td align="right">0.521</th>
+  <td align="right">2.628</th>
+  <td></th>
+  <td align="right">2.386</th>
+  <td align="right">7.260</th>
+  <td align="right">2.560</th>
+  <td align="right">12.206</th>
+  <td></th>
+  <td align="right">1.844</th>
+  <td align="right">1.010</th>
+  <td align="right">1.890</th>
+  <td align="right">4.744</th>
 </tr>
 </table>
 
-The following figure gives the strong scaling for each method for cycle 16 (32M DoFs)
-and 19 (256M DoFs) on between 56 to 28672 processors.
-<img src="https://www.dealii.org/images/steps/developer/step-50-strong-scaling.png" alt="">
-
-
-<h3> Possible extensions </h3>
-
-<h4>Add matrix-free GMG preconditioner</h4>
-The results above include timings from a matrix-free GMG preconditioner
-which is not currently a part of this tutorial. See step-37 for an example
-of such a preconditioner for the Laplace equation.
-
-It should be noted that the MatrixFree class is only compatible with the
-dealii::LinearAlgebra::distributed::Vector class, while this tutorial uses either
-PETSc or Trilinos vectors. It may be of use to define functions which copy between
-two types of vectors, for example, for Trilinos vectors one could use the following:
-@code
-namespace ChangeVectorTypes
-{
-  void import(TrilinosWrappers::MPI::Vector &                       out,
-              const dealii::LinearAlgebra::ReadWriteVector<double> &rwv,
-              const VectorOperation::values                         operation)
-  {
-    Assert(out.size() == rwv.size(),
-           ExcMessage(
-             "Both vectors need to have the same size for import() to work!"));
+The following figure gives the strong scaling for each method for cycle 16
+(32M DoFs) and 19 (256M DoFs) on 56 to 28672 processors. While the
+matrix-based GMG solver and AMG scale similarly and have a similar time to
+solution, the matrix-free GMG solver scales much better and solves the finer
+problem in roughly the same time as the AMG solver needs for the coarser mesh,
+which has only an eighth of the number of unknowns.
 
-    Assert(out.locally_owned_elements() == rwv.get_stored_elements(),
-           ExcNotImplemented());
+<img width="400px" src="https://www.dealii.org/images/steps/developer/step-50-strong-scaling.png" alt="">
 
-    if (operation == VectorOperation::insert)
-      {
-        for (const auto idx : out.locally_owned_elements())
-          out[idx] = rwv[idx];
-      }
-    else if (operation == VectorOperation::add)
-      {
-        for (const auto idx : out.locally_owned_elements())
-          out[idx] += rwv[idx];
-      }
-    else
-      AssertThrow(false, ExcNotImplemented());
 
-    out.compress(operation);
-  }
+<h3> Possible extensions </h3>
 
+We currently don't have suggestions for possible extensions.
 
-  void copy(TrilinosWrappers::MPI::Vector &                           out,
-            const dealii::LinearAlgebra::distributed::Vector<double> &in)
-  {
-    dealii::LinearAlgebra::ReadWriteVector<double> rwv(
-      out.locally_owned_elements());
-    rwv.import(in, VectorOperation::insert);
-    // This import function doesn't exist until after dealii 9.0
-    // Implemented above
-    import(out, rwv, VectorOperation::insert);
-  }
 
-  void copy(dealii::LinearAlgebra::distributed::Vector<double> &out,
-            const TrilinosWrappers::MPI::Vector &               in)
-  {
-    dealii::LinearAlgebra::ReadWriteVector<double> rwv;
-    rwv.reinit(in);
-    out.import(rwv, VectorOperation::insert);
-  }
-}
-@endcode
 
index 10bf160960fb158b7e19ccd1fbe9acf4aac09a46..66f71cba5232565f0798a2c342e48328445bc685 100644 (file)
@@ -17,7 +17,7 @@
  * Author: Thomas C. Clevenger, Clemson University
  *         Timo Heister, Clemson University
  *         Guido Kanschat, Heidelberg University
- *         Martin Kronbichler, TU Munich
+ *         Martin Kronbichler, Technical University of Munich
  */
 
 #include <deal.II/base/conditional_ostream.h>
@@ -64,8 +64,8 @@
 #include <memory>
 
 
-// uncomment the following #define if you have PETSc and Trilinos installed
-// and you prefer using Trilinos in this example:
+// Comment out the following \#define if you have PETSc and Trilinos installed
+// and you prefer using PETSc in this example:
 #define FORCE_USE_OF_TRILINOS
 
 namespace LA
@@ -85,11 +85,6 @@ namespace LA
 using namespace dealii;
 
 
-
-#ifdef USE_PETSC_LA
-// No ChangeVectorTypes::copy() for PETSc vector types.
-// Vector::import() needs to be implemented.
-#else
 /**
  * Matrix-free operators must use deal.II-defined vectors; the rest of the
  * code is based on Trilinos vectors.
@@ -97,25 +92,37 @@ using namespace dealii;
 namespace ChangeVectorTypes
 {
   template <typename number>
-  void copy(TrilinosWrappers::MPI::Vector &out,
+  void copy(LA::MPI::Vector &                                         out,
             const dealii::LinearAlgebra::distributed::Vector<number> &in)
   {
     dealii::LinearAlgebra::ReadWriteVector<double> rwv(
       out.locally_owned_elements());
     rwv.import(in, VectorOperation::insert);
+#ifdef USE_PETSC_LA
+    AssertThrow(false,
+                ExcMessage("CopyVectorTypes::copy() not implemented for "
+                           "PETSc vector types."));
+#else
     out.import(rwv, VectorOperation::insert);
+#endif
   }
 
   template <typename number>
   void copy(dealii::LinearAlgebra::distributed::Vector<number> &out,
-            const TrilinosWrappers::MPI::Vector &in)
+            const LA::MPI::Vector &                             in)
   {
     dealii::LinearAlgebra::ReadWriteVector<double> rwv;
+#ifdef USE_PETSC_LA
+    (void)in;
+    AssertThrow(false,
+                ExcMessage("CopyVectorTypes::copy() not implemented for "
+                           "PETSc vector types."));
+#else
     rwv.reinit(in);
+#endif
     out.import(rwv, VectorOperation::insert);
   }
 } // namespace ChangeVectorTypes
-#endif
 
 
 
@@ -128,6 +135,14 @@ public:
   {
     return 1.0;
   }
+
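+  // Called by the matrix-free framework with one quadrature point per
+  // SIMD lane, hence the vectorized point and return types.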
+  template <typename number>
+  VectorizedArray<number>
+  value(const Point<dim, VectorizedArray<number>> & /*p*/,
+        const unsigned int /*component*/ = 0) const
+  {
+    return VectorizedArray<number>(1.0);
+  }
 };
 
 
@@ -136,12 +151,19 @@ template <int dim>
 class Coefficient : public Function<dim>
 {
 public:
-  virtual double value(const Point<dim> & p,
-                       const unsigned int component = 0) const override;
+  virtual double value(const Point<dim> &p,
+                       const unsigned int /*component*/ = 0) const override;
 
   template <typename number>
   VectorizedArray<number> value(const Point<dim, VectorizedArray<number>> &p,
-                                const unsigned int component = 0) const;
+                                const unsigned int /*component*/ = 0) const;
+
+  template <typename number>
+  number average_value(const std::vector<Point<dim, number>> &points) const;
+
+  template <typename number>
+  std::shared_ptr<Table<2, VectorizedArray<number>>> create_coefficient_table(
+    const MatrixFree<dim, number, VectorizedArray<number>> &mf_storage) const;
 };
 
 
@@ -183,196 +205,49 @@ Coefficient<dim>::value(const Point<dim, VectorizedArray<number>> &p,
 }
 
 
-
-void average(std::vector<double> &values)
+template <int dim>
+template <typename number>
+number Coefficient<dim>::average_value(
+  const std::vector<Point<dim, number>> &points) const
 {
-  double sum = 0.0;
-  for (unsigned int i = 0; i < values.size(); ++i)
-    sum += values[i];
-  sum /= values.size();
+  number average(0);
+  for (unsigned int i = 0; i < points.size(); ++i)
+    average += value(points[i]);
+  average /= points.size();
 
-  for (unsigned int i = 0; i < values.size(); ++i)
-    values[i] = sum;
+  return average;
 }
 
 
 
-/**
- * Matrix-free Laplace operator
- */
-template <int dim, int fe_degree, typename number>
-class LaplaceOperator
-  : public MatrixFreeOperators::Base<dim,
-                                     LinearAlgebra::distributed::Vector<number>>
-{
-public:
-  LaplaceOperator();
-
-  void clear() override;
-
-  void evaluate_coefficient(const Coefficient<dim> &coefficient_function);
-  Table<1, VectorizedArray<number>> get_coefficient_table();
-
-  virtual void compute_diagonal() override;
-
-private:
-  virtual void apply_add(
-    LinearAlgebra::distributed::Vector<number> &      dst,
-    const LinearAlgebra::distributed::Vector<number> &src) const override;
-
-  void
-  local_apply(const MatrixFree<dim, number> &                   data,
-              LinearAlgebra::distributed::Vector<number> &      dst,
-              const LinearAlgebra::distributed::Vector<number> &src,
-              const std::pair<unsigned int, unsigned int> &cell_range) const;
-
-  void local_compute_diagonal(
-    const MatrixFree<dim, number> &              data,
-    LinearAlgebra::distributed::Vector<number> & dst,
-    const unsigned int &                         dummy,
-    const std::pair<unsigned int, unsigned int> &cell_range) const;
-
-  Table<1, VectorizedArray<number>> coefficient;
-};
-
-
-template <int dim, int fe_degree, typename number>
-LaplaceOperator<dim, fe_degree, number>::LaplaceOperator()
-  : MatrixFreeOperators::Base<dim, LinearAlgebra::distributed::Vector<number>>()
-{}
-
-
-template <int dim, int fe_degree, typename number>
-void LaplaceOperator<dim, fe_degree, number>::clear()
+template <int dim>
+template <typename number>
+std::shared_ptr<Table<2, VectorizedArray<number>>>
+Coefficient<dim>::create_coefficient_table(
+  const MatrixFree<dim, number, VectorizedArray<number>> &mf_storage) const
 {
-  coefficient.reinit(TableIndices<1>(0));
-  MatrixFreeOperators::Base<dim, LinearAlgebra::distributed::Vector<number>>::
-    clear();
-}
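+  // Pre-compute one averaged coefficient value per batch of cells; the
+  // resulting table has one row per cell batch and a single column.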
+  std::shared_ptr<Table<2, VectorizedArray<number>>> coefficient_table;
+  coefficient_table = std::make_shared<Table<2, VectorizedArray<number>>>();
 
+  FEEvaluation<dim, -1, 0, 1, number> fe_eval(mf_storage);
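+  // (The fe_degree template argument of -1 makes FEEvaluation read the
+  // polynomial degree at run time from mf_storage instead of fixing it
+  // at compile time.)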
 
-template <int dim, int fe_degree, typename number>
-void LaplaceOperator<dim, fe_degree, number>::evaluate_coefficient(
-  const Coefficient<dim> &coefficient_function)
-{
-  const unsigned int n_cells = this->data->n_macro_cells();
-  FEEvaluation<dim, fe_degree, fe_degree + 1, 1, number> phi(*this->data);
+  const unsigned int n_cells    = mf_storage.n_macro_cells();
+  const unsigned int n_q_points = fe_eval.n_q_points;
 
-  coefficient.reinit(TableIndices<1>(n_cells));
+  coefficient_table->reinit(n_cells, 1);
   for (unsigned int cell = 0; cell < n_cells; ++cell)
     {
-      phi.reinit(cell);
-
-      VectorizedArray<number> averaged_value(0);
-      for (unsigned int q = 0; q < phi.n_q_points; ++q)
-        averaged_value += coefficient_function.value(phi.quadrature_point(q));
-      averaged_value /= phi.n_q_points;
-
-      coefficient(cell) = averaged_value;
-    }
-}
-
-
-template <int dim, int fe_degree, typename number>
-Table<1, VectorizedArray<number>>
-LaplaceOperator<dim, fe_degree, number>::get_coefficient_table()
-{
-  return coefficient;
-}
-
-
-template <int dim, int fe_degree, typename number>
-void LaplaceOperator<dim, fe_degree, number>::local_apply(
-  const MatrixFree<dim, number> &                   data,
-  LinearAlgebra::distributed::Vector<number> &      dst,
-  const LinearAlgebra::distributed::Vector<number> &src,
-  const std::pair<unsigned int, unsigned int> &     cell_range) const
-{
-  FEEvaluation<dim, fe_degree, fe_degree + 1, 1, number> phi(data);
-
-  for (unsigned int cell = cell_range.first; cell < cell_range.second; ++cell)
-    {
-      AssertDimension(coefficient.size(0), data.n_macro_cells());
-
-      phi.reinit(cell);
-      phi.read_dof_values(src);
-      phi.evaluate(false, true);
-      for (unsigned int q = 0; q < phi.n_q_points; ++q)
-        phi.submit_gradient(coefficient(cell) * phi.get_gradient(q), q);
-      phi.integrate(false, true);
-      phi.distribute_local_to_global(dst);
-    }
-}
-
-
-template <int dim, int fe_degree, typename number>
-void LaplaceOperator<dim, fe_degree, number>::apply_add(
-  LinearAlgebra::distributed::Vector<number> &      dst,
-  const LinearAlgebra::distributed::Vector<number> &src) const
-{
-  this->data->cell_loop(&LaplaceOperator::local_apply, this, dst, src);
-}
+      fe_eval.reinit(cell);
 
+      std::vector<Point<dim, VectorizedArray<number>>> points(n_q_points);
+      for (unsigned int q = 0; q < n_q_points; ++q)
+        points[q] = fe_eval.quadrature_point(q);
+      VectorizedArray<number> averaged_value = average_value(points);
 
-template <int dim, int fe_degree, typename number>
-void LaplaceOperator<dim, fe_degree, number>::compute_diagonal()
-{
-  this->inverse_diagonal_entries.reset(
-    new DiagonalMatrix<LinearAlgebra::distributed::Vector<number>>());
-  LinearAlgebra::distributed::Vector<number> &inverse_diagonal =
-    this->inverse_diagonal_entries->get_vector();
-  this->data->initialize_dof_vector(inverse_diagonal);
-  unsigned int dummy = 0;
-  this->data->cell_loop(&LaplaceOperator::local_compute_diagonal,
-                        this,
-                        inverse_diagonal,
-                        dummy);
-
-  this->set_constrained_entries_to_one(inverse_diagonal);
-
-  for (unsigned int i = 0; i < inverse_diagonal.local_size(); ++i)
-    {
-      Assert(inverse_diagonal.local_element(i) > 0.,
-             ExcMessage("No diagonal entry in a positive definite operator "
-                        "should be zero"));
-      inverse_diagonal.local_element(i) =
-        1. / inverse_diagonal.local_element(i);
+      (*coefficient_table)(cell, 0) = averaged_value;
     }
-}
-
-
-template <int dim, int fe_degree, typename number>
-void LaplaceOperator<dim, fe_degree, number>::local_compute_diagonal(
-  const MatrixFree<dim, number> &             data,
-  LinearAlgebra::distributed::Vector<number> &dst,
-  const unsigned int &,
-  const std::pair<unsigned int, unsigned int> &cell_range) const
-{
-  FEEvaluation<dim, fe_degree, fe_degree + 1, 1, number> phi(data);
-
-  AlignedVector<VectorizedArray<number>> diagonal(phi.dofs_per_cell);
-
-  for (unsigned int cell = cell_range.first; cell < cell_range.second; ++cell)
-    {
-      AssertDimension(coefficient.size(0), data.n_macro_cells());
 
-      phi.reinit(cell);
-      for (unsigned int i = 0; i < phi.dofs_per_cell; ++i)
-        {
-          for (unsigned int j = 0; j < phi.dofs_per_cell; ++j)
-            phi.submit_dof_value(VectorizedArray<number>(), j);
-          phi.submit_dof_value(make_vectorized_array<number>(1.), i);
-
-          phi.evaluate(false, true);
-          for (unsigned int q = 0; q < phi.n_q_points; ++q)
-            phi.submit_gradient(coefficient(cell) * phi.get_gradient(q), q);
-          phi.integrate(false, true);
-          diagonal[i] = phi.get_dof_value(i);
-        }
-      for (unsigned int i = 0; i < phi.dofs_per_cell; ++i)
-        phi.submit_dof_value(diagonal[i], i);
-      phi.distribute_local_to_global(dst);
-    }
+  return coefficient_table;
 }
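+
+// The table created here is handed to the matrix-free Laplace operators via
+// their set_coefficient() function, as done in setup_system() and
+// setup_multigrid() below.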
 
 
@@ -395,7 +270,7 @@ struct Settings
   bool         output;
 };
 
-template <int dim>
+template <int dim, int degree>
 class LaplaceProblem
 {
   using MatrixType         = LA::MPI::SparseMatrix;
@@ -403,8 +278,19 @@ class LaplaceProblem
   using PreconditionAMG    = LA::MPI::PreconditionAMG;
   using PreconditionJacobi = LA::MPI::PreconditionJacobi;
 
-  using MatrixFreeLevelMatrix  = LaplaceOperator<dim, 2, float>;
-  using MatrixFreeActiveMatrix = LaplaceOperator<dim, 2, double>;
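+  // deal.II's built-in matrix-free Laplace operator; the template
+  // arguments are the dimension, the polynomial degree, the number of 1D
+  // quadrature points, the number of solution components, and the vector
+  // type (float for the level matrices, double on the active mesh).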
+  using MatrixFreeLevelMatrix = MatrixFreeOperators::LaplaceOperator<
+    dim,
+    degree,
+    degree + 1,
+    1,
+    LinearAlgebra::distributed::Vector<float>>;
+  using MatrixFreeActiveMatrix = MatrixFreeOperators::LaplaceOperator<
+    dim,
+    degree,
+    degree + 1,
+    1,
+    LinearAlgebra::distributed::Vector<double>>;
+
   using MatrixFreeLevelVector  = LinearAlgebra::distributed::Vector<float>;
   using MatrixFreeActiveVector = LinearAlgebra::distributed::Vector<double>;
 
@@ -417,7 +303,7 @@ private:
   void setup_multigrid();
   void assemble_system();
   void assemble_multigrid();
-  void assemble_rhs_for_matrix_free();
+  void assemble_rhs();
   void solve();
   void estimate();
   void refine_grid();
@@ -454,8 +340,8 @@ private:
 };
 
 
-template <int dim>
-LaplaceProblem<dim>::LaplaceProblem(const Settings &settings)
+template <int dim, int degree>
+LaplaceProblem<dim, degree>::LaplaceProblem(const Settings &settings)
   : settings(settings)
   , mpi_communicator(MPI_COMM_WORLD)
   , pcout(std::cout, (Utilities::MPI::this_mpi_process(mpi_communicator) == 0))
@@ -466,7 +352,7 @@ LaplaceProblem<dim>::LaplaceProblem(const Settings &settings)
                     parallel::distributed::Triangulation<
                       dim>::construct_multigrid_hierarchy)
   , mapping()
-  , fe(2)
+  , fe(degree)
   , dof_handler(triangulation)
   , computing_timer(pcout, TimerOutput::never, TimerOutput::wall_times)
 {
@@ -540,8 +426,8 @@ bool Settings::try_parse(const std::string &prm_filename)
 }
 
 
-template <int dim>
-void LaplaceProblem<dim>::setup_system()
+template <int dim, int degree>
+void LaplaceProblem<dim, degree>::setup_system()
 {
   TimerOutput::Scope timing(computing_timer, "Setup");
 
@@ -559,8 +445,7 @@ void LaplaceProblem<dim>::setup_system()
     mapping, dof_handler, 0, Functions::ZeroFunction<dim>(), constraints);
   constraints.close();
 
-
-  if (settings.solver = Settings::gmg_mf)
+  if (settings.solver == Settings::gmg_mf)
     {
       typename MatrixFree<dim, double>::AdditionalData additional_data;
       additional_data.tasks_parallel_scheme =
@@ -571,12 +456,16 @@ void LaplaceProblem<dim>::setup_system()
         new MatrixFree<dim, double>());
       mf_storage->reinit(dof_handler,
                          constraints,
-                         QGauss<1>(fe.degree + 1),
+                         QGauss<1>(degree + 1),
                          additional_data);
+
       mf_system_matrix.initialize(mf_storage);
-      mf_system_matrix.evaluate_coefficient(Coefficient<dim>());
+
+      const Coefficient<dim> coefficient;
+      mf_system_matrix.set_coefficient(
+        coefficient.create_coefficient_table(*mf_storage));
     }
-  else
+  else /*gmg_mb or amg*/
     {
 #ifdef USE_PETSC_LA
       DynamicSparsityPattern dsp(locally_relevant_set);
@@ -604,8 +493,9 @@ void LaplaceProblem<dim>::setup_system()
 }
 
 
-template <int dim>
-void LaplaceProblem<dim>::setup_multigrid()
+
+template <int dim, int degree>
+void LaplaceProblem<dim, degree>::setup_multigrid()
 {
   TimerOutput::Scope timing(computing_timer, "Setup multigrid");
 
@@ -618,7 +508,7 @@ void LaplaceProblem<dim>::setup_multigrid()
   mg_constrained_dofs.make_zero_boundary_constraints(dof_handler, bset);
 
   const unsigned int n_levels = triangulation.n_global_levels();
-  if (settings.solver = Settings::gmg_mf)
+  if (settings.solver == Settings::gmg_mf)
     {
       mf_mg_matrix.resize(0, n_levels - 1);
 
@@ -644,18 +534,21 @@ void LaplaceProblem<dim>::setup_multigrid()
             new MatrixFree<dim, float>());
           mf_storage_level->reinit(dof_handler,
                                    level_constraints,
-                                   QGauss<1>(fe.degree + 1),
+                                   QGauss<1>(degree + 1),
                                    additional_data);
 
           mf_mg_matrix[level].initialize(mf_storage_level,
                                          mg_constrained_dofs,
                                          level);
 
-          mf_mg_matrix[level].evaluate_coefficient(Coefficient<dim>());
+          const Coefficient<dim> coefficient;
+          mf_mg_matrix[level].set_coefficient(
+            coefficient.create_coefficient_table(*mf_storage_level));
+
           mf_mg_matrix[level].compute_diagonal();
         }
     }
-  else
+  else /*gmg_mb*/
     {
       mg_matrix.resize(0, n_levels - 1);
       mg_matrix.clear_elements();
@@ -736,12 +629,12 @@ void LaplaceProblem<dim>::setup_multigrid()
 }
 
 
-template <int dim>
-void LaplaceProblem<dim>::assemble_system()
+template <int dim, int degree>
+void LaplaceProblem<dim, degree>::assemble_system()
 {
   TimerOutput::Scope timing(computing_timer, "Assemble");
 
-  const QGauss<dim> quadrature_formula(fe.degree + 1);
+  const QGauss<dim> quadrature_formula(degree + 1);
 
   FEValues<dim> fe_values(fe,
                           quadrature_formula,
@@ -757,7 +650,6 @@ void LaplaceProblem<dim>::assemble_system()
   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
 
   const Coefficient<dim> coefficient;
-  std::vector<double>    coefficient_values(n_q_points);
   RightHandSide<dim>     rhs;
   std::vector<double>    rhs_values(n_q_points);
 
@@ -769,11 +661,8 @@ void LaplaceProblem<dim>::assemble_system()
 
         fe_values.reinit(cell);
 
-        coefficient.value_list(fe_values.get_quadrature_points(),
-                               coefficient_values);
-        average(coefficient_values);
-        const double coefficient_value = coefficient_values[0];
-
+        const double coefficient_value =
+          coefficient.average_value(fe_values.get_quadrature_points());
         rhs.value_list(fe_values.get_quadrature_points(), rhs_values);
 
         for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
@@ -803,12 +692,12 @@ void LaplaceProblem<dim>::assemble_system()
 }
 
 
-template <int dim>
-void LaplaceProblem<dim>::assemble_multigrid()
+template <int dim, int degree>
+void LaplaceProblem<dim, degree>::assemble_multigrid()
 {
   TimerOutput::Scope timing(computing_timer, "Assemble multigrid");
 
-  QGauss<dim> quadrature_formula(1 + fe.degree);
+  QGauss<dim> quadrature_formula(degree + 1);
 
   FEValues<dim> fe_values(fe,
                           quadrature_formula,
@@ -823,7 +712,6 @@ void LaplaceProblem<dim>::assemble_multigrid()
   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
 
   const Coefficient<dim> coefficient;
-  std::vector<double>    coefficient_values(n_q_points);
 
   std::vector<AffineConstraints<double>> boundary_constraints(
     triangulation.n_global_levels());
@@ -846,10 +734,8 @@ void LaplaceProblem<dim>::assemble_multigrid()
         cell_matrix = 0;
         fe_values.reinit(cell);
 
-        coefficient.value_list(fe_values.get_quadrature_points(),
-                               coefficient_values);
-        average(coefficient_values);
-        const double coefficient_value = coefficient_values[0];
+        const double coefficient_value =
+          coefficient.average_value(fe_values.get_quadrature_points());
 
         for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
           for (unsigned int i = 0; i < dofs_per_cell; ++i)
@@ -881,10 +767,10 @@ void LaplaceProblem<dim>::assemble_multigrid()
 }
 
 
-template <int dim>
-void LaplaceProblem<dim>::assemble_rhs_for_matrix_free()
+template <int dim, int degree>
+void LaplaceProblem<dim, degree>::assemble_rhs()
 {
-  TimerOutput::Scope timing(computing_timer, "Assemble right hand side");
+  TimerOutput::Scope timing(computing_timer, "Assemble right-hand side");
 
   MatrixFreeActiveVector solution_copy;
   MatrixFreeActiveVector right_hand_side_copy;
@@ -895,8 +781,8 @@ void LaplaceProblem<dim>::assemble_rhs_for_matrix_free()
   constraints.distribute(solution_copy);
   solution_copy.update_ghost_values();
   right_hand_side_copy = 0;
-  const Table<1, VectorizedArray<double>> coefficient_table =
-    mf_system_matrix.get_coefficient_table();
+  const Table<2, VectorizedArray<double>> &coefficient =
+    *(mf_system_matrix.get_coefficient());
 
   RightHandSide<dim> right_hand_side_function;
 
@@ -914,40 +800,24 @@ void LaplaceProblem<dim>::assemble_rhs_for_matrix_free()
         {
           // Submit gradient
           phi.submit_gradient(-1.0 *
-                                (coefficient_table(cell) * phi.get_gradient(q)),
+                                (coefficient(cell, 0) * phi.get_gradient(q)),
                               q);
 
           // Submit RHS value
-          VectorizedArray<double> rhs_value =
-            make_vectorized_array<double>(1.0);
-          for (unsigned int i = 0; i < VectorizedArray<double>::size(); ++i)
-            {
-              Point<dim> p;
-              for (unsigned int d = 0; d < dim; ++d)
-                p(d) = phi.quadrature_point(q)(d)[i];
-
-              rhs_value[i] = right_hand_side_function.value(p);
-            }
-          phi.submit_value(rhs_value, q);
+          phi.submit_value(
+            right_hand_side_function.value(phi.quadrature_point(q)), q);
         }
 
-      phi.integrate(true, true);
-      phi.distribute_local_to_global(right_hand_side_copy);
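+      // integrate_scatter() fuses integrate() and
+      // distribute_local_to_global() into a single operation.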
+      phi.integrate_scatter(true, true, right_hand_side_copy);
     }
 
   right_hand_side_copy.compress(VectorOperation::add);
-#ifdef USE_PETSC_LA
-  AssertThrow(false,
-              ExcMessage("CopyVectorTypes::copy() not implemented for "
-                         "PETSc vector types."));
-#else
   ChangeVectorTypes::copy(right_hand_side, right_hand_side_copy);
-#endif
 }
 
 
-template <int dim>
-void LaplaceProblem<dim>::solve()
+template <int dim, int degree>
+void LaplaceProblem<dim, degree>::solve()
 {
   TimerOutput::Scope timing(computing_timer, "Solve");
 
@@ -1007,14 +877,8 @@ void LaplaceProblem<dim>::solve()
       mf_system_matrix.initialize_dof_vector(solution_copy);
       mf_system_matrix.initialize_dof_vector(right_hand_side_copy);
 
-#ifdef USE_PETSC_LA
-      AssertThrow(false,
-                  ExcMessage("CopyVectorTypes::copy() not implemented for "
-                             "PETSc vector types."));
-#else
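+      // Copy the Trilinos/PETSc vectors into the deal.II vectors used by
+      // the matrix-free operator.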
       ChangeVectorTypes::copy(solution_copy, solution);
       ChangeVectorTypes::copy(right_hand_side_copy, right_hand_side);
-#endif
       computing_timer.leave_subsection("Solve: Preconditioner setup");
 
       // Timing 1 vcycle
@@ -1035,13 +899,7 @@ void LaplaceProblem<dim>::solve()
       }
 
       solution_copy.update_ghost_values();
-#ifdef USE_PETSC_LA
-      AssertThrow(false,
-                  ExcMessage("CopyVectorTypes::copy() not implemented for "
-                             "PETSc vector types."));
-#else
       ChangeVectorTypes::copy(solution, solution_copy);
-#endif
       constraints.distribute(solution);
     }
   else if (settings.solver == Settings::gmg_mb)
@@ -1104,7 +962,7 @@ void LaplaceProblem<dim>::solve()
 
       constraints.distribute(solution);
     }
-  else
+  else /*amg*/
     {
       computing_timer.enter_subsection("Solve: Preconditioner setup");
 
@@ -1114,10 +972,10 @@ void LaplaceProblem<dim>::solve()
 #ifdef USE_PETSC_LA
       Amg_data.symmetric_operator = true;
 #else
-      Amg_data.elliptic = true;
-      Amg_data.smoother_type = "Jacobi";
+      Amg_data.elliptic              = true;
+      Amg_data.smoother_type         = "Jacobi";
       Amg_data.higher_order_elements = true;
-      Amg_data.smoother_sweeps = settings.smoother_steps;
+      Amg_data.smoother_sweeps       = settings.smoother_steps;
       Amg_data.aggregation_threshold = 0.02;
 #endif
 
@@ -1201,8 +1059,8 @@ struct CopyData
 };
 
 
-template <int dim>
-void LaplaceProblem<dim>::estimate()
+template <int dim, int degree>
+void LaplaceProblem<dim, degree>::estimate()
 {
   TimerOutput::Scope timing(computing_timer, "Estimate");
 
@@ -1218,11 +1076,10 @@ void LaplaceProblem<dim>::estimate()
 
   using Iterator = typename DoFHandler<dim>::active_cell_iterator;
 
+  // assembler for cell residual $h^2 \| f + \epsilon \triangle u \|_K^2$
   auto cell_worker = [&](const Iterator &  cell,
                          ScratchData<dim> &scratch_data,
                          CopyData &        copy_data) {
-    /*assemble cell residual $h^2 \| f + \epsilon \triangle u \|_K^2$*/
-
     FEValues<dim> &fe_values = scratch_data.fe_values;
     fe_values.reinit(cell);
 
@@ -1246,6 +1103,8 @@ void LaplaceProblem<dim>::estimate()
     copy_data.value = std::sqrt(value);
   };
 
+  // assembler for face term $\sum_F h_F \| [ \epsilon \nabla u \cdot n ]
+  // \|_F^2$
   auto face_worker = [&](const Iterator &    cell,
                          const unsigned int &f,
                          const unsigned int &sf,
@@ -1254,8 +1113,6 @@ void LaplaceProblem<dim>::estimate()
                          const unsigned int &nsf,
                          ScratchData<dim> &  scratch_data,
                          CopyData &          copy_data) {
-    /* face term $\sum_F h_F \| [ \epsilon \nabla u \cdot n ] \|_F^2$*/
-
     FEInterfaceValues<dim> &fe_interface_values =
       scratch_data.fe_interface_values;
     fe_interface_values.reinit(cell, f, sf, ncell, nf, nsf);
@@ -1305,7 +1162,7 @@ void LaplaceProblem<dim>::estimate()
         estimate_vector[cdf.cell_indices[j]] += cdf.values[j];
   };
 
-  const unsigned int n_gauss_points = dof_handler.get_fe().degree + 1;
+  const unsigned int n_gauss_points = degree + 1;
   ScratchData<dim>   scratch_data(mapping,
                                 fe,
                                 n_gauss_points,
@@ -1330,8 +1187,8 @@ void LaplaceProblem<dim>::estimate()
 
 
 
-template <int dim>
-void LaplaceProblem<dim>::refine_grid()
+template <int dim, int degree>
+void LaplaceProblem<dim, degree>::refine_grid()
 {
   TimerOutput::Scope timing(computing_timer, "Refine grid");
 
@@ -1344,8 +1201,8 @@ void LaplaceProblem<dim>::refine_grid()
 
 
 
-template <int dim>
-void LaplaceProblem<dim>::output_results(const unsigned int cycle)
+template <int dim, int degree>
+void LaplaceProblem<dim, degree>::output_results(const unsigned int cycle)
 {
   TimerOutput::Scope timing(computing_timer, "Output results");
 
@@ -1380,8 +1237,8 @@ void LaplaceProblem<dim>::output_results(const unsigned int cycle)
 }
 
 
-template <int dim>
-void LaplaceProblem<dim>::run()
+template <int dim, int degree>
+void LaplaceProblem<dim, degree>::run()
 {
   for (unsigned int cycle = 0; cycle < settings.n_steps; ++cycle)
     {
@@ -1391,19 +1248,22 @@ void LaplaceProblem<dim>::run()
 
       pcout << "   Number of active cells:       "
             << triangulation.n_global_active_cells();
-      if (settings.solver != Settings::amg)
+      if (settings.solver == Settings::gmg_mf ||
+          settings.solver == Settings::gmg_mb)
         pcout << " (" << triangulation.n_global_levels() << " global levels)"
               << std::endl
-              << "   Workload imbalance:           "
-              << MGTools::workload_imbalance(triangulation);
+              << "   Partition efficiency:         "
+              << 1.0 / MGTools::workload_imbalance(triangulation);
       pcout << std::endl;
 
       setup_system();
-      if (settings.solver != Settings::amg)
+      if (settings.solver == Settings::gmg_mf ||
+          settings.solver == Settings::gmg_mb)
         setup_multigrid();
 
       pcout << "   Number of degrees of freedom: " << dof_handler.n_dofs();
-      if (settings.solver != Settings::amg)
+      if (settings.solver == Settings::gmg_mf ||
+          settings.solver == Settings::gmg_mb)
         {
           pcout << " (by level: ";
           for (unsigned int level = 0; level < triangulation.n_global_levels();
@@ -1415,8 +1275,8 @@ void LaplaceProblem<dim>::run()
       pcout << std::endl;
 
       if (settings.solver == Settings::gmg_mf)
-        assemble_rhs_for_matrix_free();
-      else
+        assemble_rhs();
+      else /*gmg_mb or amg*/
         {
           assemble_system();
           if (settings.solver == Settings::gmg_mb)
@@ -1448,12 +1308,12 @@ int main(int argc, char *argv[])
     {
       if (settings.dimension == 2)
         {
-          LaplaceProblem<2> test(settings);
+          LaplaceProblem<2, 2> test(settings);
           test.run();
         }
       else if (settings.dimension == 3)
         {
-          LaplaceProblem<3> test(settings);
+          LaplaceProblem<3, 2> test(settings);
           test.run();
         }
     }
