From 23137e280b83f3609c697fdda28b6abc437804a7 Mon Sep 17 00:00:00 2001 From: tcclevenger Date: Thu, 23 Apr 2020 17:53:42 -0400 Subject: [PATCH] Add results --- doc/doxygen/tutorial/tutorial.h.in | 2 +- examples/step-50/doc/builds-on | 2 +- examples/step-50/doc/intro.dox | 72 +++-- examples/step-50/doc/results.dox | 386 ++++++++++--------------- examples/step-50/step-50.cc | 440 ++++++++++------------------- 5 files changed, 340 insertions(+), 562 deletions(-) diff --git a/doc/doxygen/tutorial/tutorial.h.in b/doc/doxygen/tutorial/tutorial.h.in index de48ed344d..a087a46422 100644 --- a/doc/doxygen/tutorial/tutorial.h.in +++ b/doc/doxygen/tutorial/tutorial.h.in @@ -467,7 +467,7 @@ * * step-50 * Geometric multigrid on adaptive meshes distributed in parallel. - *
Keywords: Multigrid, MGLevelObject, MGConstrainedDoFs, IndexSet, MGTools, PreconditionMG, FEInterfaceValues, MeshWorker::mesh_loop() + *
Keywords: Multigrid, MGLevelObject, MGConstrainedDoFs, IndexSet, MGTools, PreconditionMG, MatrixFree, FEInterfaceValues, MeshWorker::mesh_loop() * * * diff --git a/examples/step-50/doc/builds-on b/examples/step-50/doc/builds-on index 79df0eea06..d18176116d 100644 --- a/examples/step-50/doc/builds-on +++ b/examples/step-50/doc/builds-on @@ -1 +1 @@ -step-16 step-40 +step-16 step-37 step-40 diff --git a/examples/step-50/doc/intro.dox b/examples/step-50/doc/intro.dox index eaffbcbd42..a383912a9b 100644 --- a/examples/step-50/doc/intro.dox +++ b/examples/step-50/doc/intro.dox @@ -19,12 +19,11 @@ libraries is described in the READMEIntroduction -This example shows the usage of the multilevel functions in deal.II on distributed meshes -and gives a comparison between geometric and algebraic multigrid methods. The algebraic -multigrid (AMG) preconditioner is the same used in step-40, and the geometric multigrid -(GMG) preconditioner is based on the one used in step-16. Here we discuss the -necessary changes needed for parallel computations. - +This example shows the usage of the multilevel functions in deal.II on distributed +meshes and gives a comparison between geometric and algebraic multigrid methods. +The algebraic multigrid (AMG) preconditioner is the same used in step-40. Two geometric +multigrid (GMG) preconditioners are considered: a matrix-based version similar to that +in step-16 (but for parallel computations) and a matrix-free version discussed in step-37.

The testcase

@@ -32,30 +31,31 @@ We consider the variable-coefficient Laplacian weak formulation @f{align*} (\epsilon \nabla u, \nabla v) = (f,v) \quad \forall v \in V_h @f} -on the domain $\Omega = [-1,1]^\text{dim} \setminus [0,1]^\text{dim}$ (an L-shaped domain for 2D and a Fichera corner for 3D) with -$\epsilon = 1$ if $\min(x,y,z)>-\frac{1}{2}$ and $\epsilon = 100$ otherwise. The -boundary conditions are $u=0$ on the whole boundary and the right-hand side is $f=1$. -We use continuous Q2 elements to discretize $V_h$ and use a residual-based, cell-wise a -posteriori error estimator $e(K) = e_{\text{cell}}(K) + e_{\text{face}}(K)$ from -_CITE EST PAPER_ with +on the domain $\Omega = [-1,1]^\text{dim} \setminus [0,1]^\text{dim}$ (an L-shaped domain +for 2D and a Fichera corner for 3D) with $\epsilon = 1$ if $\min(x,y,z)>-\frac{1}{2}$ and +$\epsilon = 100$ otherwise. The boundary conditions are $u=0$ on the whole boundary and +the right-hand side is $f=1$. We use continuous Q2 elements to discretize $V_h$ and use a +residual-based, cell-wise a posteriori error estimator +$e(K) = e_{\text{cell}}(K) + e_{\text{face}}(K)$ from _CITE EST PAPER_ with @f{align*} e_{\text{cell}}(K) = h^2 \| f + \epsilon \triangle u \|_K^2, \qquad e_{\text{face}}(K) = \sum_F h_F \| [ \epsilon \nabla u \cdot n ] \|_F^2. @f} -The following figure visualizes the solution and refinement for 2D - -and for 3D, the solution(left) and a slice for $x$ close to the -center of the domain showing the adaptively refined mesh (right) are depicted here +The following figure visualizes the solution and refinement for 2D: + +In 3D, the solution looks similar (see below). On the left you can see the solution and on the right we show a slice for $x$ close to the +center of the domain showing the adaptively refined mesh.
[figure: 2D solution with its adaptively refined mesh (left/right); 3D solution and a slice through the refined mesh]
+Both in 2D and 3D you can see the adaptive refinement picking up the corner singularity and the inner singularity where the coefficient jumps, while the interface along the line that separates the two coefficient values is (correctly) not refined, since it is already adequately resolved.
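
For reference, the jump coefficient described above can be written down in a few lines. The following is a minimal sketch using a hypothetical free function `coefficient_value()`; the tutorial program itself wraps the same logic in its `Coefficient` class (including a vectorized overload used by the matrix-free operator):

@code
#include <deal.II/base/point.h>
using namespace dealii;

// Minimal sketch of the discontinuous coefficient: epsilon = 1 where
// min(x,y,z) > -1/2 and epsilon = 100 otherwise.
template <int dim>
double coefficient_value(const Point<dim> &p)
{
  for (unsigned int d = 0; d < dim; ++d)
    if (p[d] < -0.5)
      return 100.0;
  return 1.0;
}
@endcode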

Workload imbalance

@@ -72,21 +72,24 @@ of the active mesh, and the right image gives the multilevel hierarchy of cells. colors and numbers represent the different processors. The circular nodes in the tree are the non-active cells which are distributed using the ``first-child rule''. - + -Included among the output to screen in this example is a value ``Workload imbalance'' -given by the function MGTools::workload_imbalance(). This value, which will be denoted +Included among the output to screen in this example is a value ``Partition efficiency'' +given by 1/MGTools::workload_imbalance(). This value, which will be denoted by $\mathbb{E}$, quantifies the overhead produced by not having a perfect work balance on each level of the multigrid hierarchy (as is evident from the example above). -For defining $\mathbb{E}$, let $N_{\ell}$ be the number of cells on level $\ell$ -(both active and non-active cells) and $N_{\ell,p}$ of the subset owned by processor +For defining $\mathbb{E}$, it is important to note that, as we are using local smoothing +to define the multigrid hierarchy (see the @ref mg_paper "multigrid paper" for a description of +local smoothing), the refinement level of a cell corresponds to that cell's multigrid +level. Now, let $N_{\ell}$ be the number of cells on level $\ell$ +(both active and non-active cells) and $N_{\ell,p}$ be the subset owned by process $p$. Assuming that the workload for any one processor is proportional to the number of cells owned by that processor, the optimal workload per processor is given by @f{align*} -W_{\text{opt}}=\frac1{n_{p}}\sum_{\ell}\sum_{p}N_{\ell,p}=\frac1{n_{p}}\sum_{\ell} N_{\ell}. +W_{\text{opt}} = \sum_{\ell}\frac1{n_{p}}\sum_{p}N_{\ell,p}=\frac1{n_{p}}\sum_{\ell} N_{\ell}. @f} -Next, assuming a synchronization of work on each level (i.e., on each level of a vcycle, +Next, assuming a synchronization of work on each level (i.e., on each level of a V-cycle, work must be completed by all processors before moving on to the next level), the limiting effort on each level is given by @f{align*} @@ -109,14 +112,25 @@ W &= \sum_\ell W_\ell = 1 + 2 + 3 = 6 \\ \mathbb{E} &= \frac{W_{\text{opt}}}{W} = \frac12. @f} +The value MGTools::workload_imbalance()$= 1/\mathbb{E}$ then represents the factor increase +in timings we expect for GMG methods (vmults, assembly, etc.) due to the imbalance of the +mesh partition. _CITE MG PAPER_ contains a full discussion of the partition efficiency model -and the effect the imbalance has on the GMG vcycle timing. In summary, the value -of $\mathbb{E}$ is highly dependent on the type a mesh refinement used and has -optimal value $\mathbb{E} = 1$ for globally refined meshes. Typically for adaptively +and the effect the imbalance has on the GMG V-cycle timing. In summary, the value +of $\mathbb{E}$ is highly dependent on the degree of local mesh refinement used and has +an optimal value $\mathbb{E} \approx 1$ for globally refined meshes. Typically for adaptively refined meshes, the number of processors used to distribute a single mesh has a negative impact on $\mathbb{E}$ but only up to a leveling off point, where the imbalance remains relatively constant for an increasing number of processors, and further refinement has very little impact on $\mathbb{E}$. Finally, $1/\mathbb{E}$ was shown to give an accurate representation of the slowdown in parallel scaling expected for the timing of -a vcycle. +a V-cycle. 
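
To make the connection to the code concrete, the following minimal sketch shows how the quantity reported by the program can be queried. It assumes a parallel::distributed::Triangulation that was created with the construct_multigrid_hierarchy setting (as is done in this tutorial) and an MPI-aware output stream `pcout`:

@code
// Minimal sketch: query the level-hierarchy imbalance and report the
// partition efficiency E. Requires <deal.II/multigrid/mg_tools.h>.
const double imbalance = MGTools::workload_imbalance(triangulation); // = 1/E >= 1
pcout << "Partition efficiency: " << 1.0 / imbalance << std::endl;
@endcode

This mirrors the output statement in the run() function of the program further below.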
+ +It should be noted that there is potential for some asynchronous work between multigrid +levels, specifically with purely nearest neighbor MPI communication, and an adaptive mesh +could be constructed such that the efficiency model would far overestimate the V-cycle slowdown +due to the asynchronous work ``covering up'' the imbalance (which assumes synchronization over levels). +However, for most realistic adaptive meshes the expectation is that this asynchronous work will +only cover up a very small portion of the imbalance and the efficiency model will describe the +slowdown very well. diff --git a/examples/step-50/doc/results.dox b/examples/step-50/doc/results.dox index edbe4f8c48..06caf6bbec 100644 --- a/examples/step-50/doc/results.dox +++ b/examples/step-50/doc/results.dox @@ -7,22 +7,23 @@ Cycle 0: Workload imbalance: 1.14286 Number of degrees of freedom: 665 (by level: 117, 665) Number of CG iterations: 10 + Wrote solution_00.pvtu +---------------------------------------------+------------+------------+ -| Total wallclock time elapsed since start | 0.0536s | | +| Total wallclock time elapsed since start | 0.0457s | | | | | | | Section | no. calls | wall time | % of total | +---------------------------------+-----------+------------+------------+ -| Assemble | 1 | 0.0026s | 4.8% | -| Assemble multigrid | 1 | 0.00303s | 5.6% | -| Estimate | 1 | 0.0273s | 51% | -| Setup | 1 | 0.00477s | 8.9% | -| Setup multigrid | 1 | 0.00539s | 10% | -| Solve | 1 | 0.00801s | 15% | -| Solve: 1 GMG vcycle | 1 | 0.000655s | 1.2% | -| Solve: CG | 1 | 0.00472s | 8.8% | -| Solve: GMG preconditioner setup | 1 | 0.00232s | 4.3% | +| Assemble right hand side | 1 | 0.000241s | 0.53% | +| Estimate | 1 | 0.0288s | 63% | +| Output results | 1 | 0.00219s | 4.8% | +| Setup | 1 | 0.00264s | 5.8% | +| Setup multigrid | 1 | 0.00261s | 5.7% | +| Solve | 1 | 0.00355s | 7.8% | +| Solve: 1 multigrid vcycle | 1 | 0.000315s | 0.69% | +| Solve: CG | 1 | 0.00186s | 4.1% | +| Solve: Preconditioner setup | 1 | 0.000968s | 2.1% | +---------------------------------+-----------+------------+------------+ Cycle 1: @@ -30,23 +31,24 @@ Cycle 1: Workload imbalance: 1.17483 Number of degrees of freedom: 1672 (by level: 117, 665, 1100) Number of CG iterations: 11 + Wrote solution_01.pvtu +---------------------------------------------+------------+------------+ -| Total wallclock time elapsed since start | 0.0861s | | +| Total wallclock time elapsed since start | 0.0433s | | | | | | | Section | no. calls | wall time | % of total | +---------------------------------+-----------+------------+------------+ -| Assemble | 1 | 0.00578s | 6.7% | -| Assemble multigrid | 1 | 0.00745s | 8.7% | -| Estimate | 1 | 0.0281s | 33% | -| Refine grid | 1 | 0.00992s | 12% | -| Setup | 1 | 0.00878s | 10% | -| Setup multigrid | 1 | 0.0115s | 13% | -| Solve | 1 | 0.0144s | 17% | -| Solve: 1 GMG vcycle | 1 | 0.000868s | 1% | -| Solve: CG | 1 | 0.00879s | 10% | -| Solve: GMG preconditioner setup | 1 | 0.00414s | 4.8% | +| Assemble right hand side | 1 | 0.000286s | 0.66% | +| Estimate | 1 | 0.0272s | 63% | +| Output results | 1 | 0.00333s | 7.7% | +| Refine grid | 1 | 0.00196s | 4.5% | +| Setup | 1 | 0.0023s | 5.3% | +| Setup multigrid | 1 | 0.00262s | 6% | +| Solve | 1 | 0.00549s | 13% | +| Solve: 1 multigrid vcycle | 1 | 0.000343s | 0.79% | +| Solve: CG | 1 | 0.00293s | 6.8% | +| Solve: Preconditioner setup | 1 | 0.00174s | 4% | +---------------------------------+-----------+------------+------------+ Cycle 2: @@ -54,258 +56,160 @@ Cycle 2: . . 
@endcode
-Here, the timing of the `solve()` function is spilt up in 3 parts: setting
+Here, the timing of the `solve()` function is split up in 3 parts: setting
 up the multigrid preconditioner, execution of a single multigrid vcycle, and
 the CG solver. The vcycle that is timed is unnecessary for the overall solve
 and is only meant to give an insight into the different costs for AMG and GMG.
 Also it should be noted that when using the AMG solver, ``Workload imbalance''
-is not included in the output since the hierarchy of coarse meshes are not
+is not included in the output since the hierarchy of coarse meshes is not
 required.
-In addition to the AMG and GMG solvers in this tutorial, included will be timings
-from a 3rd matrix-free (MF) GMG solver on the same problem (see possible extensions
-for a discussion on what is required for the matrix-free solver). We will refer to
-the GMG solver in tutorial as the matrix-based (MB) GMG solver.
+All results in this section are gathered on Intel Xeon Platinum 8280 (Cascade
+Lake) nodes which have 56 cores and 192GB of memory per node and support AVX-512
+instructions, allowing for vectorization over 8 doubles (vectorization is used
+only in the matrix-free computations). The code is compiled using gcc 7.1.0 with
+intel-mpi 17.0.3. Trilinos 12.10.1 is used for the matrix-based GMG/AMG computations.
+
+The following table gives weak scaling timings for this program on up to 256M DoFs
+and 7168 processors. Here, $\mathbb{E}$ is the partition efficiency from the
+introduction (also equal to 1.0/workload imbalance), ``Setup'' is a combination
+of setup, setup multigrid, assemble, and assemble multigrid from the timing blocks,
+and ``Prec'' is the preconditioner setup. Ideally all times would stay constant
+over each problem size for the individual solvers, but since the partition
+efficiency decreases from 0.371 to 0.161 from the smallest to the largest problem
+size, we expect to see an approximately $0.371/0.161 \approx 2.3$ times increase
+in timings for GMG.
-The following table gives the timings for setup, assembly, and solve for GMG and AMG
-on up to 256M DoFs and 7168 processors.
+<table align="center" class="doxtable">
+<tr>
+  <th colspan="4"></th>
+  <th colspan="4">MF-GMG</th>
+  <th colspan="4">MB-GMG</th>
+  <th colspan="4">AMG</th>
+</tr>
+<tr>
+  <th>Procs</th><th>Cycle</th><th>DoFs</th><th>$\mathbb{E}$</th>
+  <th>Setup</th><th>Prec</th><th>Solve</th><th>Total</th>
+  <th>Setup</th><th>Prec</th><th>Solve</th><th>Total</th>
+  <th>Setup</th><th>Prec</th><th>Solve</th><th>Total</th>
+</tr>
+<tr>
+  <td>112</td><td>13</td><td>4M</td><td>0.37</td>
+  <td>0.742</td><td>0.393</td><td>0.200</td><td>1.335</td>
+  <td>1.714</td><td>2.934</td><td>0.716</td><td>5.364</td>
+  <td>1.544</td><td>0.456</td><td>1.150</td><td>3.150</td>
+</tr>
+<tr>
+  <td>448</td><td>15</td><td>16M</td><td>0.29</td>
+  <td>0.884</td><td>0.535</td><td>0.253</td><td>1.672</td>
+  <td>1.927</td><td>3.776</td><td>1.190</td><td>6.893</td>
+  <td>1.544</td><td>0.456</td><td>1.150</td><td>3.150</td>
+</tr>
+<tr>
+  <td>1792</td><td>17</td><td>65M</td><td>0.22</td>
+  <td>1.122</td><td>0.686</td><td>0.309</td><td>2.117</td>
+  <td>2.171</td><td>4.862</td><td>1.660</td><td>8.693</td>
+  <td>1.654</td><td>0.546</td><td>1.460</td><td>3.660</td>
+</tr>
+<tr>
+  <td>7168</td><td>19</td><td>256M</td><td>0.16</td>
+  <td>1.214</td><td>0.893</td><td>0.521</td><td>2.628</td>
+  <td>2.386</td><td>7.260</td><td>2.560</td><td>12.206</td>
+  <td>1.844</td><td>1.010</td><td>1.890</td><td>4.744</td>
+</tr>
+</table>
-The following figure gives the strong scaling for each method for cycle 16 (32M DoFs) -and 19 (256M DoFs) on between 56 to 28672 processors. - - - -

Possible extensions


Add matrix-free GMG preconditioner

-The results above include timings from a matrix-free GMG preconditioner -which is not currently a part of this tutorial. See step-37 for an example -of such a preconditioner for the Laplace equation. - -It should be noted that the MatrixFree class is only compatible with the -dealii::LinearAlgebra::distributed::Vector class, while this tutorial uses either -PETSc or Trilinos vectors. It may be of use to define functions which copy between -two types of vectors, for example, for Trilinos vectors one could use the following: -@code -namespace ChangeVectorTypes -{ - void import(TrilinosWrappers::MPI::Vector & out, - const dealii::LinearAlgebra::ReadWriteVector &rwv, - const VectorOperation::values operation) - { - Assert(out.size() == rwv.size(), - ExcMessage( - "Both vectors need to have the same size for import() to work!")); +The following figure gives the strong scaling for each method for cycle 16 +(32M DoFs) and 19 (256M DoFs) on between 56 to 28672 processors. While the +matrix-based GMG solver and AMG scale similarly and have a similar time to +solution, the matrix-free GMG solver scales much better and solves the finer +problem in roughly the same time as the AMG solver for the coarser mesh with +only an eighth of the number of unknowns. - Assert(out.locally_owned_elements() == rwv.get_stored_elements(), - ExcNotImplemented()); + - if (operation == VectorOperation::insert) - { - for (const auto idx : out.locally_owned_elements()) - out[idx] = rwv[idx]; - } - else if (operation == VectorOperation::add) - { - for (const auto idx : out.locally_owned_elements()) - out[idx] += rwv[idx]; - } - else - AssertThrow(false, ExcNotImplemented()); - out.compress(operation); - } +

Possible extensions

+We currently don't have suggestions for possible extensions. - void copy(TrilinosWrappers::MPI::Vector & out, - const dealii::LinearAlgebra::distributed::Vector &in) - { - dealii::LinearAlgebra::ReadWriteVector rwv( - out.locally_owned_elements()); - rwv.import(in, VectorOperation::insert); - // This import function doesn't exist until after dealii 9.0 - // Implemented above - import(out, rwv, VectorOperation::insert); - } - void copy(dealii::LinearAlgebra::distributed::Vector &out, - const TrilinosWrappers::MPI::Vector & in) - { - dealii::LinearAlgebra::ReadWriteVector rwv; - rwv.reinit(in); - out.import(rwv, VectorOperation::insert); - } -} -@endcode diff --git a/examples/step-50/step-50.cc b/examples/step-50/step-50.cc index 10bf160960..66f71cba52 100644 --- a/examples/step-50/step-50.cc +++ b/examples/step-50/step-50.cc @@ -17,7 +17,7 @@ * Author: Thomas C. Clevenger, Clemson University * Timo Heister, Clemson University * Guido Kanschat, Heidelberg University - * Martin Kronbichler, TU Munich + * Martin Kronbichler, Technical University of Munich */ #include @@ -64,8 +64,8 @@ #include -// uncomment the following #define if you have PETSc and Trilinos installed -// and you prefer using Trilinos in this example: +// Comment the following \#define if you have PETSc and Trilinos installed +// and you prefer using PETSc in this example: #define FORCE_USE_OF_TRILINOS namespace LA @@ -85,11 +85,6 @@ namespace LA using namespace dealii; - -#ifdef USE_PETSC_LA -// No ChangeVectorTypes::copy() for PETSc vector types. -// Vector::import() needs to be implemented. -#else /** * Matrix-free operators must use deal.II defined vectors, rest of the code is * based on Trilinos vectors. @@ -97,25 +92,37 @@ using namespace dealii; namespace ChangeVectorTypes { template - void copy(TrilinosWrappers::MPI::Vector &out, + void copy(LA::MPI::Vector & out, const dealii::LinearAlgebra::distributed::Vector &in) { dealii::LinearAlgebra::ReadWriteVector rwv( out.locally_owned_elements()); rwv.import(in, VectorOperation::insert); +#ifdef USE_PETSC_LA + AssertThrow(false, + ExcMessage("CopyVectorTypes::copy() not implemented for " + "PETSc vector types.")); +#else out.import(rwv, VectorOperation::insert); +#endif } template void copy(dealii::LinearAlgebra::distributed::Vector &out, - const TrilinosWrappers::MPI::Vector &in) + const LA::MPI::Vector & in) { dealii::LinearAlgebra::ReadWriteVector rwv; +#ifdef USE_PETSC_LA + (void)in; + AssertThrow(false, + ExcMessage("CopyVectorTypes::copy() not implemented for " + "PETSc vector types.")); +#else rwv.reinit(in); +#endif out.import(rwv, VectorOperation::insert); } } // namespace ChangeVectorTypes -#endif @@ -128,6 +135,14 @@ public: { return 1.0; } + + template + VectorizedArray + value(const Point> & /*p*/, + const unsigned int /*component*/ = 0) const + { + return VectorizedArray(1.0); + } }; @@ -136,12 +151,19 @@ template class Coefficient : public Function { public: - virtual double value(const Point & p, - const unsigned int component = 0) const override; + virtual double value(const Point &p, + const unsigned int /*component*/ = 0) const override; template VectorizedArray value(const Point> &p, - const unsigned int component = 0) const; + const unsigned int /*component*/ = 0) const; + + template + number average_value(const std::vector> &points) const; + + template + std::shared_ptr>> create_coefficient_table( + const MatrixFree> &mf_storage) const; }; @@ -183,196 +205,49 @@ Coefficient::value(const Point> &p, } - -void average(std::vector &values) +template 
+template +number Coefficient::average_value( + const std::vector> &points) const { - double sum = 0.0; - for (unsigned int i = 0; i < values.size(); ++i) - sum += values[i]; - sum /= values.size(); + number average(0); + for (unsigned int i = 0; i < points.size(); ++i) + average += value(points[i]); + average /= points.size(); - for (unsigned int i = 0; i < values.size(); ++i) - values[i] = sum; + return average; } -/** - * Matrix-free Laplace operator - */ -template -class LaplaceOperator - : public MatrixFreeOperators::Base> -{ -public: - LaplaceOperator(); - - void clear() override; - - void evaluate_coefficient(const Coefficient &coefficient_function); - Table<1, VectorizedArray> get_coefficient_table(); - - virtual void compute_diagonal() override; - -private: - virtual void apply_add( - LinearAlgebra::distributed::Vector & dst, - const LinearAlgebra::distributed::Vector &src) const override; - - void - local_apply(const MatrixFree & data, - LinearAlgebra::distributed::Vector & dst, - const LinearAlgebra::distributed::Vector &src, - const std::pair &cell_range) const; - - void local_compute_diagonal( - const MatrixFree & data, - LinearAlgebra::distributed::Vector & dst, - const unsigned int & dummy, - const std::pair &cell_range) const; - - Table<1, VectorizedArray> coefficient; -}; - - -template -LaplaceOperator::LaplaceOperator() - : MatrixFreeOperators::Base>() -{} - - -template -void LaplaceOperator::clear() +template +template +std::shared_ptr>> +Coefficient::create_coefficient_table( + const MatrixFree> &mf_storage) const { - coefficient.reinit(TableIndices<1>(0)); - MatrixFreeOperators::Base>:: - clear(); -} + std::shared_ptr>> coefficient_table; + coefficient_table = std::make_shared>>(); + FEEvaluation fe_eval(mf_storage); -template -void LaplaceOperator::evaluate_coefficient( - const Coefficient &coefficient_function) -{ - const unsigned int n_cells = this->data->n_macro_cells(); - FEEvaluation phi(*this->data); + const unsigned int n_cells = mf_storage.n_macro_cells(); + const unsigned int n_q_points = fe_eval.n_q_points; - coefficient.reinit(TableIndices<1>(n_cells)); + coefficient_table->reinit(n_cells, 1); for (unsigned int cell = 0; cell < n_cells; ++cell) { - phi.reinit(cell); - - VectorizedArray averaged_value(0); - for (unsigned int q = 0; q < phi.n_q_points; ++q) - averaged_value += coefficient_function.value(phi.quadrature_point(q)); - averaged_value /= phi.n_q_points; - - coefficient(cell) = averaged_value; - } -} - - -template -Table<1, VectorizedArray> -LaplaceOperator::get_coefficient_table() -{ - return coefficient; -} - - -template -void LaplaceOperator::local_apply( - const MatrixFree & data, - LinearAlgebra::distributed::Vector & dst, - const LinearAlgebra::distributed::Vector &src, - const std::pair & cell_range) const -{ - FEEvaluation phi(data); - - for (unsigned int cell = cell_range.first; cell < cell_range.second; ++cell) - { - AssertDimension(coefficient.size(0), data.n_macro_cells()); - - phi.reinit(cell); - phi.read_dof_values(src); - phi.evaluate(false, true); - for (unsigned int q = 0; q < phi.n_q_points; ++q) - phi.submit_gradient(coefficient(cell) * phi.get_gradient(q), q); - phi.integrate(false, true); - phi.distribute_local_to_global(dst); - } -} - - -template -void LaplaceOperator::apply_add( - LinearAlgebra::distributed::Vector & dst, - const LinearAlgebra::distributed::Vector &src) const -{ - this->data->cell_loop(&LaplaceOperator::local_apply, this, dst, src); -} + fe_eval.reinit(cell); + std::vector>> points(n_q_points); + for (unsigned 
int q = 0; q < n_q_points; ++q) + points[q] = fe_eval.quadrature_point(q); + VectorizedArray averaged_value = average_value(points); -template -void LaplaceOperator::compute_diagonal() -{ - this->inverse_diagonal_entries.reset( - new DiagonalMatrix>()); - LinearAlgebra::distributed::Vector &inverse_diagonal = - this->inverse_diagonal_entries->get_vector(); - this->data->initialize_dof_vector(inverse_diagonal); - unsigned int dummy = 0; - this->data->cell_loop(&LaplaceOperator::local_compute_diagonal, - this, - inverse_diagonal, - dummy); - - this->set_constrained_entries_to_one(inverse_diagonal); - - for (unsigned int i = 0; i < inverse_diagonal.local_size(); ++i) - { - Assert(inverse_diagonal.local_element(i) > 0., - ExcMessage("No diagonal entry in a positive definite operator " - "should be zero")); - inverse_diagonal.local_element(i) = - 1. / inverse_diagonal.local_element(i); + (*coefficient_table)(cell, 0) = averaged_value; } -} - - -template -void LaplaceOperator::local_compute_diagonal( - const MatrixFree & data, - LinearAlgebra::distributed::Vector &dst, - const unsigned int &, - const std::pair &cell_range) const -{ - FEEvaluation phi(data); - - AlignedVector> diagonal(phi.dofs_per_cell); - - for (unsigned int cell = cell_range.first; cell < cell_range.second; ++cell) - { - AssertDimension(coefficient.size(0), data.n_macro_cells()); - phi.reinit(cell); - for (unsigned int i = 0; i < phi.dofs_per_cell; ++i) - { - for (unsigned int j = 0; j < phi.dofs_per_cell; ++j) - phi.submit_dof_value(VectorizedArray(), j); - phi.submit_dof_value(make_vectorized_array(1.), i); - - phi.evaluate(false, true); - for (unsigned int q = 0; q < phi.n_q_points; ++q) - phi.submit_gradient(coefficient(cell) * phi.get_gradient(q), q); - phi.integrate(false, true); - diagonal[i] = phi.get_dof_value(i); - } - for (unsigned int i = 0; i < phi.dofs_per_cell; ++i) - phi.submit_dof_value(diagonal[i], i); - phi.distribute_local_to_global(dst); - } + return coefficient_table; } @@ -395,7 +270,7 @@ struct Settings bool output; }; -template +template class LaplaceProblem { using MatrixType = LA::MPI::SparseMatrix; @@ -403,8 +278,19 @@ class LaplaceProblem using PreconditionAMG = LA::MPI::PreconditionAMG; using PreconditionJacobi = LA::MPI::PreconditionJacobi; - using MatrixFreeLevelMatrix = LaplaceOperator; - using MatrixFreeActiveMatrix = LaplaceOperator; + using MatrixFreeLevelMatrix = MatrixFreeOperators::LaplaceOperator< + dim, + degree, + degree + 1, + 1, + LinearAlgebra::distributed::Vector>; + using MatrixFreeActiveMatrix = MatrixFreeOperators::LaplaceOperator< + dim, + degree, + degree + 1, + 1, + LinearAlgebra::distributed::Vector>; + using MatrixFreeLevelVector = LinearAlgebra::distributed::Vector; using MatrixFreeActiveVector = LinearAlgebra::distributed::Vector; @@ -417,7 +303,7 @@ private: void setup_multigrid(); void assemble_system(); void assemble_multigrid(); - void assemble_rhs_for_matrix_free(); + void assemble_rhs(); void solve(); void estimate(); void refine_grid(); @@ -454,8 +340,8 @@ private: }; -template -LaplaceProblem::LaplaceProblem(const Settings &settings) +template +LaplaceProblem::LaplaceProblem(const Settings &settings) : settings(settings) , mpi_communicator(MPI_COMM_WORLD) , pcout(std::cout, (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)) @@ -466,7 +352,7 @@ LaplaceProblem::LaplaceProblem(const Settings &settings) parallel::distributed::Triangulation< dim>::construct_multigrid_hierarchy) , mapping() - , fe(2) + , fe(degree) , dof_handler(triangulation) , 
computing_timer(pcout, TimerOutput::never, TimerOutput::wall_times) { @@ -540,8 +426,8 @@ bool Settings::try_parse(const std::string &prm_filename) } -template -void LaplaceProblem::setup_system() +template +void LaplaceProblem::setup_system() { TimerOutput::Scope timing(computing_timer, "Setup"); @@ -559,8 +445,7 @@ void LaplaceProblem::setup_system() mapping, dof_handler, 0, Functions::ZeroFunction(), constraints); constraints.close(); - - if (settings.solver = Settings::gmg_mf) + if (settings.solver == Settings::gmg_mf) { typename MatrixFree::AdditionalData additional_data; additional_data.tasks_parallel_scheme = @@ -571,12 +456,16 @@ void LaplaceProblem::setup_system() new MatrixFree()); mf_storage->reinit(dof_handler, constraints, - QGauss<1>(fe.degree + 1), + QGauss<1>(degree + 1), additional_data); + mf_system_matrix.initialize(mf_storage); - mf_system_matrix.evaluate_coefficient(Coefficient()); + + const Coefficient coefficient; + mf_system_matrix.set_coefficient( + coefficient.create_coefficient_table(*mf_storage)); } - else + else /*gmg_mb or amg*/ { #ifdef USE_PETSC_LA DynamicSparsityPattern dsp(locally_relevant_set); @@ -604,8 +493,9 @@ void LaplaceProblem::setup_system() } -template -void LaplaceProblem::setup_multigrid() + +template +void LaplaceProblem::setup_multigrid() { TimerOutput::Scope timing(computing_timer, "Setup multigrid"); @@ -618,7 +508,7 @@ void LaplaceProblem::setup_multigrid() mg_constrained_dofs.make_zero_boundary_constraints(dof_handler, bset); const unsigned int n_levels = triangulation.n_global_levels(); - if (settings.solver = Settings::gmg_mf) + if (settings.solver == Settings::gmg_mf) { mf_mg_matrix.resize(0, n_levels - 1); @@ -644,18 +534,21 @@ void LaplaceProblem::setup_multigrid() new MatrixFree()); mf_storage_level->reinit(dof_handler, level_constraints, - QGauss<1>(fe.degree + 1), + QGauss<1>(degree + 1), additional_data); mf_mg_matrix[level].initialize(mf_storage_level, mg_constrained_dofs, level); - mf_mg_matrix[level].evaluate_coefficient(Coefficient()); + const Coefficient coefficient; + mf_mg_matrix[level].set_coefficient( + coefficient.create_coefficient_table(*mf_storage_level)); + mf_mg_matrix[level].compute_diagonal(); } } - else + else /*gmg_mb*/ { mg_matrix.resize(0, n_levels - 1); mg_matrix.clear_elements(); @@ -736,12 +629,12 @@ void LaplaceProblem::setup_multigrid() } -template -void LaplaceProblem::assemble_system() +template +void LaplaceProblem::assemble_system() { TimerOutput::Scope timing(computing_timer, "Assemble"); - const QGauss quadrature_formula(fe.degree + 1); + const QGauss quadrature_formula(degree + 1); FEValues fe_values(fe, quadrature_formula, @@ -757,7 +650,6 @@ void LaplaceProblem::assemble_system() std::vector local_dof_indices(dofs_per_cell); const Coefficient coefficient; - std::vector coefficient_values(n_q_points); RightHandSide rhs; std::vector rhs_values(n_q_points); @@ -769,11 +661,8 @@ void LaplaceProblem::assemble_system() fe_values.reinit(cell); - coefficient.value_list(fe_values.get_quadrature_points(), - coefficient_values); - average(coefficient_values); - const double coefficient_value = coefficient_values[0]; - + const double coefficient_value = + coefficient.average_value(fe_values.get_quadrature_points()); rhs.value_list(fe_values.get_quadrature_points(), rhs_values); for (unsigned int q_point = 0; q_point < n_q_points; ++q_point) @@ -803,12 +692,12 @@ void LaplaceProblem::assemble_system() } -template -void LaplaceProblem::assemble_multigrid() +template +void LaplaceProblem::assemble_multigrid() 
{ TimerOutput::Scope timing(computing_timer, "Assemble multigrid"); - QGauss quadrature_formula(1 + fe.degree); + QGauss quadrature_formula(degree + 1); FEValues fe_values(fe, quadrature_formula, @@ -823,7 +712,6 @@ void LaplaceProblem::assemble_multigrid() std::vector local_dof_indices(dofs_per_cell); const Coefficient coefficient; - std::vector coefficient_values(n_q_points); std::vector> boundary_constraints( triangulation.n_global_levels()); @@ -846,10 +734,8 @@ void LaplaceProblem::assemble_multigrid() cell_matrix = 0; fe_values.reinit(cell); - coefficient.value_list(fe_values.get_quadrature_points(), - coefficient_values); - average(coefficient_values); - const double coefficient_value = coefficient_values[0]; + const double coefficient_value = + coefficient.average_value(fe_values.get_quadrature_points()); for (unsigned int q_point = 0; q_point < n_q_points; ++q_point) for (unsigned int i = 0; i < dofs_per_cell; ++i) @@ -881,10 +767,10 @@ void LaplaceProblem::assemble_multigrid() } -template -void LaplaceProblem::assemble_rhs_for_matrix_free() +template +void LaplaceProblem::assemble_rhs() { - TimerOutput::Scope timing(computing_timer, "Assemble right hand side"); + TimerOutput::Scope timing(computing_timer, "Assemble right-hand side"); MatrixFreeActiveVector solution_copy; MatrixFreeActiveVector right_hand_side_copy; @@ -895,8 +781,8 @@ void LaplaceProblem::assemble_rhs_for_matrix_free() constraints.distribute(solution_copy); solution_copy.update_ghost_values(); right_hand_side_copy = 0; - const Table<1, VectorizedArray> coefficient_table = - mf_system_matrix.get_coefficient_table(); + const Table<2, VectorizedArray> &coefficient = + *(mf_system_matrix.get_coefficient()); RightHandSide right_hand_side_function; @@ -914,40 +800,24 @@ void LaplaceProblem::assemble_rhs_for_matrix_free() { // Submit gradient phi.submit_gradient(-1.0 * - (coefficient_table(cell) * phi.get_gradient(q)), + (coefficient(cell, 0) * phi.get_gradient(q)), q); // Submit RHS value - VectorizedArray rhs_value = - make_vectorized_array(1.0); - for (unsigned int i = 0; i < VectorizedArray::size(); ++i) - { - Point p; - for (unsigned int d = 0; d < dim; ++d) - p(d) = phi.quadrature_point(q)(d)[i]; - - rhs_value[i] = right_hand_side_function.value(p); - } - phi.submit_value(rhs_value, q); + phi.submit_value( + right_hand_side_function.value(phi.quadrature_point(q)), q); } - phi.integrate(true, true); - phi.distribute_local_to_global(right_hand_side_copy); + phi.integrate_scatter(true, true, right_hand_side_copy); } right_hand_side_copy.compress(VectorOperation::add); -#ifdef USE_PETSC_LA - AssertThrow(false, - ExcMessage("CopyVectorTypes::copy() not implemented for " - "PETSc vector types.")); -#else ChangeVectorTypes::copy(right_hand_side, right_hand_side_copy); -#endif } -template -void LaplaceProblem::solve() +template +void LaplaceProblem::solve() { TimerOutput::Scope timing(computing_timer, "Solve"); @@ -1007,14 +877,8 @@ void LaplaceProblem::solve() mf_system_matrix.initialize_dof_vector(solution_copy); mf_system_matrix.initialize_dof_vector(right_hand_side_copy); -#ifdef USE_PETSC_LA - AssertThrow(false, - ExcMessage("CopyVectorTypes::copy() not implemented for " - "PETSc vector types.")); -#else ChangeVectorTypes::copy(solution_copy, solution); ChangeVectorTypes::copy(right_hand_side_copy, right_hand_side); -#endif computing_timer.leave_subsection("Solve: Preconditioner setup"); // Timing 1 vcycle @@ -1035,13 +899,7 @@ void LaplaceProblem::solve() } solution_copy.update_ghost_values(); -#ifdef USE_PETSC_LA - 
AssertThrow(false, - ExcMessage("CopyVectorTypes::copy() not implemented for " - "PETSc vector types.")); -#else ChangeVectorTypes::copy(solution, solution_copy); -#endif constraints.distribute(solution); } else if (settings.solver == Settings::gmg_mb) @@ -1104,7 +962,7 @@ void LaplaceProblem::solve() constraints.distribute(solution); } - else + else /*amg*/ { computing_timer.enter_subsection("Solve: Preconditioner setup"); @@ -1114,10 +972,10 @@ void LaplaceProblem::solve() #ifdef USE_PETSC_LA Amg_data.symmetric_operator = true; #else - Amg_data.elliptic = true; - Amg_data.smoother_type = "Jacobi"; + Amg_data.elliptic = true; + Amg_data.smoother_type = "Jacobi"; Amg_data.higher_order_elements = true; - Amg_data.smoother_sweeps = settings.smoother_steps; + Amg_data.smoother_sweeps = settings.smoother_steps; Amg_data.aggregation_threshold = 0.02; #endif @@ -1201,8 +1059,8 @@ struct CopyData }; -template -void LaplaceProblem::estimate() +template +void LaplaceProblem::estimate() { TimerOutput::Scope timing(computing_timer, "Estimate"); @@ -1218,11 +1076,10 @@ void LaplaceProblem::estimate() using Iterator = typename DoFHandler::active_cell_iterator; + // assembler for cell residual $h^2 \| f + \epsilon \triangle u \|_K^2$ auto cell_worker = [&](const Iterator & cell, ScratchData &scratch_data, CopyData & copy_data) { - /*assemble cell residual $h^2 \| f + \epsilon \triangle u \|_K^2$*/ - FEValues &fe_values = scratch_data.fe_values; fe_values.reinit(cell); @@ -1246,6 +1103,8 @@ void LaplaceProblem::estimate() copy_data.value = std::sqrt(value); }; + // assembler for face term $\sum_F h_F \| [ \epsilon \nabla u \cdot n ] + // \|_F^2$ auto face_worker = [&](const Iterator & cell, const unsigned int &f, const unsigned int &sf, @@ -1254,8 +1113,6 @@ void LaplaceProblem::estimate() const unsigned int &nsf, ScratchData & scratch_data, CopyData & copy_data) { - /* face term $\sum_F h_F \| [ \epsilon \nabla u \cdot n ] \|_F^2$*/ - FEInterfaceValues &fe_interface_values = scratch_data.fe_interface_values; fe_interface_values.reinit(cell, f, sf, ncell, nf, nsf); @@ -1305,7 +1162,7 @@ void LaplaceProblem::estimate() estimate_vector[cdf.cell_indices[j]] += cdf.values[j]; }; - const unsigned int n_gauss_points = dof_handler.get_fe().degree + 1; + const unsigned int n_gauss_points = degree + 1; ScratchData scratch_data(mapping, fe, n_gauss_points, @@ -1330,8 +1187,8 @@ void LaplaceProblem::estimate() -template -void LaplaceProblem::refine_grid() +template +void LaplaceProblem::refine_grid() { TimerOutput::Scope timing(computing_timer, "Refine grid"); @@ -1344,8 +1201,8 @@ void LaplaceProblem::refine_grid() -template -void LaplaceProblem::output_results(const unsigned int cycle) +template +void LaplaceProblem::output_results(const unsigned int cycle) { TimerOutput::Scope timing(computing_timer, "Output results"); @@ -1380,8 +1237,8 @@ void LaplaceProblem::output_results(const unsigned int cycle) } -template -void LaplaceProblem::run() +template +void LaplaceProblem::run() { for (unsigned int cycle = 0; cycle < settings.n_steps; ++cycle) { @@ -1391,19 +1248,22 @@ void LaplaceProblem::run() pcout << " Number of active cells: " << triangulation.n_global_active_cells(); - if (settings.solver != Settings::amg) + if (settings.solver == Settings::gmg_mf || + settings.solver == Settings::gmg_mb) pcout << " (" << triangulation.n_global_levels() << " global levels)" << std::endl - << " Workload imbalance: " - << MGTools::workload_imbalance(triangulation); + << " Partition efficiency: " + << 1.0 / 
MGTools::workload_imbalance(triangulation); pcout << std::endl; setup_system(); - if (settings.solver != Settings::amg) + if (settings.solver == Settings::gmg_mf || + settings.solver == Settings::gmg_mb) setup_multigrid(); pcout << " Number of degrees of freedom: " << dof_handler.n_dofs(); - if (settings.solver != Settings::amg) + if (settings.solver == Settings::gmg_mf || + settings.solver == Settings::gmg_mb) { pcout << " (by level: "; for (unsigned int level = 0; level < triangulation.n_global_levels(); @@ -1415,8 +1275,8 @@ void LaplaceProblem::run() pcout << std::endl; if (settings.solver == Settings::gmg_mf) - assemble_rhs_for_matrix_free(); - else + assemble_rhs(); + else /*gmg_mb or amg*/ { assemble_system(); if (settings.solver == Settings::gmg_mb) @@ -1448,12 +1308,12 @@ int main(int argc, char *argv[]) { if (settings.dimension == 2) { - LaplaceProblem<2> test(settings); + LaplaceProblem<2, 2> test(settings); test.run(); } else if (settings.dimension == 3) { - LaplaceProblem<3> test(settings); + LaplaceProblem<3, 2> test(settings); test.run(); } } -- 2.39.5