From 11cd762e009815d3c543ff768a8ced10a45cc412 Mon Sep 17 00:00:00 2001 From: Timo Heister Date: Fri, 17 Jun 2016 13:32:05 -0700 Subject: [PATCH] add step-55 --- examples/step-55/CMakeLists.txt | 55 ++ examples/step-55/doc/builds-on | 1 + examples/step-55/doc/intro.dox | 167 ++++++ examples/step-55/doc/kind | 1 + examples/step-55/doc/results.dox | 237 +++++++++ examples/step-55/doc/tooltip | 1 + examples/step-55/reference.py | 37 ++ examples/step-55/step-55.cc | 874 +++++++++++++++++++++++++++++++ 8 files changed, 1373 insertions(+) create mode 100644 examples/step-55/CMakeLists.txt create mode 100644 examples/step-55/doc/builds-on create mode 100644 examples/step-55/doc/intro.dox create mode 100644 examples/step-55/doc/kind create mode 100644 examples/step-55/doc/results.dox create mode 100644 examples/step-55/doc/tooltip create mode 100644 examples/step-55/reference.py create mode 100644 examples/step-55/step-55.cc diff --git a/examples/step-55/CMakeLists.txt b/examples/step-55/CMakeLists.txt new file mode 100644 index 0000000000..17744c4f4e --- /dev/null +++ b/examples/step-55/CMakeLists.txt @@ -0,0 +1,55 @@ +## +# CMake script +## + +# Set the name of the project and target: +SET(TARGET "step-55") + +# Declare all source files the target consists of. Here, this is only +# the one step-X.cc file, but as you expand your project you may wish +# to add other source files as well. If your project becomes much larger, +# you may want to either replace the following statement by something like +# FILE(GLOB_RECURSE TARGET_SRC "source/*.cc") +# FILE(GLOB_RECURSE TARGET_INC "include/*.h") +# SET(TARGET_SRC ${TARGET_SRC} ${TARGET_INC}) +# or switch altogether to the large project CMakeLists.txt file discussed +# in the "CMake in user projects" page accessible from the "User info" +# page of the documentation. +SET(TARGET_SRC + ${TARGET}.cc + ) + +# Usually, you will not need to modify anything beyond this point... + +CMAKE_MINIMUM_REQUIRED(VERSION 2.8.8) + +FIND_PACKAGE(deal.II 8.5.0 QUIET + HINTS ${deal.II_DIR} ${DEAL_II_DIR} ../ ../../ $ENV{DEAL_II_DIR} + ) +IF(NOT ${deal.II_FOUND}) + MESSAGE(FATAL_ERROR "\n" + "*** Could not locate a (sufficiently recent) version of deal.II. ***\n\n" + "You may want to either pass a flag -DDEAL_II_DIR=/path/to/deal.II to cmake\n" + "or set an environment variable \"DEAL_II_DIR\" that contains this path." + ) +ENDIF() + +# +# Are all dependencies fulfilled? +# +IF(NOT (DEAL_II_WITH_PETSC OR DEAL_II_WITH_TRILINOS) OR NOT DEAL_II_WITH_P4EST) + MESSAGE(FATAL_ERROR " +Error! The deal.II library found at ${DEAL_II_PATH} was not configured with + DEAL_II_WITH_PETSC = ON + DEAL_II_WITH_P4EST = ON +or + DEAL_II_WITH_TRILINOS = ON + DEAL_II_WITH_P4EST = ON +One or both of these combinations are OFF in your installation but at least one is required for this tutorial step." + ) +ENDIF() + +DEAL_II_INITIALIZE_CACHED_VARIABLES() +SET(CLEAN_UP_FILES *.log *.gmv *.gnuplot *.gpl *.eps *.pov *.vtk *.ucd *.d2 *.vtu *.pvtu) +PROJECT(${TARGET}) +DEAL_II_INVOKE_AUTOPILOT() diff --git a/examples/step-55/doc/builds-on b/examples/step-55/doc/builds-on new file mode 100644 index 0000000000..d641cd82dd --- /dev/null +++ b/examples/step-55/doc/builds-on @@ -0,0 +1 @@ +step-40 step-22 diff --git a/examples/step-55/doc/intro.dox b/examples/step-55/doc/intro.dox new file mode 100644 index 0000000000..d8e463ce63 --- /dev/null +++ b/examples/step-55/doc/intro.dox @@ -0,0 +1,167 @@ +
+ +This program was contributed by Timo Heister. Special thanks to Sander +Rhebergen for the inspiration to finally write this tutorial. + +This material is based upon work partially supported by National Science +Foundation grant DMS1522191 and the Computational Infrastructure in +Geodynamics initiative (CIG), through the National Science Foundation under +Award No. EAR-0949446 and The University of California-Davis. + +The authors would like to thank the Isaac Newton Institute for +Mathematical Sciences, Cambridge, for support and hospitality during +the programme Melt in the Mantle where work on this tutorial was +undertaken. This work was supported by EPSRC grant no EP/K032208/1. + + + +@note As a prerequisite of this program, you need to have PETSc or Trilinos +and the p4est library installed. The installation of deal.II together with +these additional libraries is described in the README file. + + +

<h1>Introduction</h1>


Building on step-40, this tutorial shows how to solve linear PDEs with several
components in parallel using MPI with PETSc or Trilinos for the linear
algebra. For this, we return to the Stokes equations as discussed in
step-22. The motivation for writing this tutorial is to provide an
intermediate step (pun intended) between step-40 (parallel Laplace) and
step-32 (parallel coupled Stokes with Boussinesq for a time-dependent
problem).

The learning outcomes for this tutorial are:

- You are able to solve PDEs with several variables in parallel and can
  apply this to different problems.

- You understand the concept of optimal preconditioners and are able to check
  this for a particular problem.

- You are able to construct manufactured solutions using the free computer
  algebra system SymPy (https://sympy.org).

- You can implement various other tasks for parallel programs: error
  computation, writing graphical output, etc.

- You can visualize vector fields, streamlines, and contours of vector
  quantities.

We are solving for a velocity $\textbf{u}$ and pressure $p$ that satisfy the
Stokes equations, which read
@f{eqnarray*}
  - \triangle \textbf{u} + \nabla p &=& \textbf{f}, \\
  -\textrm{div}\; \textbf{u} &=& 0.
@f}
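
The program below actually includes a constant viscosity $\nu$ (set to $0.1$
in the code) in the first equation. As a compact summary of what the assembly
routine further down implements (a sketch rather than a full derivation), the
discrete problem with the $Q_2 \times Q_1$ (Taylor-Hood) element pair used in
the code reads: find $(\textbf{u}_h, p_h)$ such that
@f{eqnarray*}
  \nu (\nabla \textbf{u}_h, \nabla \textbf{v}_h)
  - (p_h, \textrm{div}\; \textbf{v}_h)
  - (q_h, \textrm{div}\; \textbf{u}_h)
  &=& (\textbf{f}, \textbf{v}_h)
@f}
for all discrete test functions $(\textbf{v}_h, q_h)$. Both pressure terms
carry a minus sign, so the resulting matrix is symmetric (but indefinite),
which is what will allow us to use MINRES as the outer solver below.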

<h3>Optimal preconditioners</h3>


Make sure that you read (even better: try) what is described in "Block Schur
complement preconditioner" in the "Possible Extensions" section of step-22.
As described there, we are going to solve the block system using a Krylov
method and a block preconditioner.

Our goal here is to construct a very simple (maybe the simplest?) optimal
preconditioner for the linear system. A preconditioner is called "optimal" or
"of optimal complexity" if the number of iterations of the preconditioned
system is independent of the mesh size $h$. You can extend that definition to
also require independence of the number of processors used (we will discuss
that in the results section), the computational domain, the mesh quality, the
test case itself, the polynomial degree of the finite element space, and more.

Why is a constant number of iterations considered to be "optimal"? Assume the
discretized PDE gives a linear system with $N$ unknowns. Because the matrix
coming from the FEM discretization is sparse, a matrix-vector product can be
done in $O(N)$ time. Applying the preconditioner can also cost at best $O(N)$
(which is achievable, for example, with multigrid methods). If the number of
iterations required to solve the linear system is independent of $h$ (and
therefore of $N$), the total cost of solving the system will be $O(N)$. It is
not possible to beat this complexity, because even looking at all the entries
of the right-hand side already takes $O(N)$ time.

The preconditioner described here is even simpler than the one described in
step-22 and will typically require more iterations and, consequently, more
time to solve. When considering preconditioners, optimality is not the only
important metric. But an optimal yet expensive preconditioner is typically
more desirable than a cheaper, non-optimal one, because, as the mesh size
becomes smaller and the linear problems become bigger, the former will
eventually beat the latter.
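
A practical way to check optimality for a particular problem is to record the
number of outer Krylov iterations on every refinement cycle and to see whether
it stays (roughly) constant as the mesh is refined; this is exactly how the
iteration counts shown in the results section are obtained. The following
condensed excerpt from the solve() function of the program below shows where
that number comes from:

@code
// Let the solver control report how many MINRES iterations were needed.
// If the preconditioner is optimal, this count stays (nearly) constant
// while the mesh is refined.
SolverControl solver_control (system_matrix.m(),
                              1e-10*system_rhs.l2_norm());
SolverMinRes<LA::MPI::BlockVector> solver (solver_control);
solver.solve (system_matrix, distributed_solution, system_rhs,
              preconditioner);
pcout << "   Solved in " << solver_control.last_step()
      << " iterations." << std::endl;
@endcode

Tabulating solver_control.last_step() over the refinement cycles (and over
different processor counts) then gives tables like the ones shown in the
results section.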

<h3>The solver and preconditioner</h3>


We precondition the linear system
@f{eqnarray*}
  \left(\begin{array}{cc}
    A & B^T \\ B & 0
  \end{array}\right)
  \left(\begin{array}{c}
    U \\ P
  \end{array}\right)
  =
  \left(\begin{array}{c}
    F \\ 0
  \end{array}\right),
@f}

with the block diagonal preconditioner
@f{eqnarray*}
  P^{-1}
  =
  \left(\begin{array}{cc}
    A & 0 \\ 0 & S
  \end{array}\right)^{-1}
  =
  \left(\begin{array}{cc}
    A^{-1} & 0 \\ 0 & S^{-1}
  \end{array}\right),
@f}
where $S=-BA^{-1} B^T$ is the Schur complement.

With this choice of $P$, assuming that we handle $A^{-1}$ and $S^{-1}$ exactly
(which is an "idealized" situation), the preconditioned linear system has
three distinct eigenvalues independent of $h$ and is therefore "optimal". See
section 6.2.1 (especially p. 292) in "Finite Elements and Fast Iterative
Solvers: with Applications in Incompressible Fluid Dynamics" by Elman,
Silvester, and Wathen (Oxford University Press (UK), 2005). For comparison,
using the ideal version of the upper block-triangular preconditioner of
step-22 (also used in step-56) would result in all eigenvalues being equal to
one.

We will use approximations of the inverse operations in $P^{-1}$ that are
(nearly) independent of $h$. In this situation, one can again show that the
eigenvalues are independent of $h$. For the Krylov method we choose MINRES,
which is attractive for the analysis (the iteration count is proven to be
independent of $h$; see the rest of chapter 6.2.1 in the book mentioned
above), great from a computational standpoint (simpler and cheaper than
GMRES, for example), and applicable (matrix and preconditioner are
symmetric).

To approximate the action of $S^{-1}$ we use a CG solve with the mass matrix
in the pressure space. Note that the mass matrix is spectrally equivalent to
$S$, so we can expect the number of CG iterations to be independent of $h$,
even with a simple preconditioner like ILU.

For the approximation of the velocity block $A$ we will perform a single AMG
V-cycle. In practice this choice is not exactly independent of $h$, which can
explain the slight increase in iteration numbers observed later: the coarsest
level is solved exactly, and the number of levels and the size of the
coarsest matrix are not predictable.

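In code, applying $P^{-1}$ amounts to two independent block operations. The
following condensed excerpt from the LinearSolvers::BlockDiagonalPreconditioner
class implemented in the program below shows this; here preconditioner_A will
be a single AMG V-cycle on the velocity block, and preconditioner_S will be
the InverseMatrix helper that runs a CG solve with the pressure mass matrix:

@code
// Applying P^{-1} means applying an approximation of A^{-1} to the
// velocity block and an approximation of S^{-1} to the pressure block,
// independently of each other.
template <class PreconditionerA, class PreconditionerS>
void BlockDiagonalPreconditioner<PreconditionerA,PreconditionerS>::
vmult (LA::MPI::BlockVector       &dst,
       const LA::MPI::BlockVector &src) const
{
  preconditioner_A.vmult (dst.block(0), src.block(0)); // one AMG V-cycle
  preconditioner_S.vmult (dst.block(1), src.block(1)); // CG with mass matrix
}
@endcode

Because the two blocks are handled independently, the quality of $P^{-1}$ is
determined entirely by how well the AMG V-cycle approximates $A^{-1}$ and how
well the mass-matrix solve approximates $S^{-1}$.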

<h3>The testcase</h3>


We will construct a manufactured solution based on the Kovasznay problem
("Laminar flow behind a two-dimensional grid" by Kovasznay, 1948). You can
also check out http://www.cfm.brown.edu/crunch/nektar/Thesis.Html/node60.html
for a description of the test case.

We have to cheat here, though, because we are not solving the non-linear
Navier-Stokes equations, but the linear Stokes system without the convective
term. Therefore, to recreate the exact same solution, we use the method of
manufactured solutions with the solution of the Kovasznay problem. This
effectively moves the convective term into the right-hand side $\textbf{f}$.

The right-hand side is computed using the script "reference.py", and we use
the exact solution for the boundary conditions and the error computation.
diff --git a/examples/step-55/doc/kind b/examples/step-55/doc/kind
new file mode 100644
index 0000000000..e62f4e7222
--- /dev/null
+++ b/examples/step-55/doc/kind
@@ -0,0 +1 @@
fluids
diff --git a/examples/step-55/doc/results.dox b/examples/step-55/doc/results.dox
new file mode 100644
index 0000000000..be6be9e4e6
--- /dev/null
+++ b/examples/step-55/doc/results.dox
@@ -0,0 +1,237 @@

<h1>Results</h1>


As expected from the discussion above, the number of iterations is independent
of the number of processors and only very slightly dependent on $h$:

<table>
<tr><th colspan="2">PETSc</th><th colspan="8">number of processors</th></tr>
<tr><th>cycle</th><th>dofs</th><th>1</th><th>2</th><th>4</th><th>8</th><th>16</th><th>32</th><th>64</th><th>128</th></tr>
<tr><td>0</td><td>659</td><td>49</td><td>49</td><td>49</td><td>51</td><td>51</td><td>51</td><td>49</td><td>49</td></tr>
<tr><td>1</td><td>2467</td><td>52</td><td>52</td><td>52</td><td>52</td><td>52</td><td>54</td><td>54</td><td>53</td></tr>
<tr><td>2</td><td>9539</td><td>56</td><td>56</td><td>56</td><td>54</td><td>56</td><td>56</td><td>54</td><td>56</td></tr>
<tr><td>3</td><td>37507</td><td>57</td><td>57</td><td>57</td><td>57</td><td>57</td><td>56</td><td>57</td><td>56</td></tr>
<tr><td>4</td><td>148739</td><td>58</td><td>59</td><td>57</td><td>59</td><td>57</td><td>57</td><td>57</td><td>57</td></tr>
<tr><td>5</td><td>592387</td><td>60</td><td>60</td><td>59</td><td>59</td><td>59</td><td>59</td><td>59</td><td>59</td></tr>
<tr><td>6</td><td>2364419</td><td>62</td><td>62</td><td>61</td><td>61</td><td>61</td><td>61</td><td>61</td><td>61</td></tr>
</table>

<table>
<tr><th colspan="2">Trilinos</th><th colspan="8">number of processors</th></tr>
<tr><th>cycle</th><th>dofs</th><th>1</th><th>2</th><th>4</th><th>8</th><th>16</th><th>32</th><th>64</th><th>128</th></tr>
<tr><td>0</td><td>659</td><td>37</td><td>37</td><td>37</td><td>37</td><td>37</td><td>37</td><td>37</td><td>37</td></tr>
<tr><td>1</td><td>2467</td><td>92</td><td>89</td><td>89</td><td>82</td><td>86</td><td>81</td><td>78</td><td>78</td></tr>
<tr><td>2</td><td>9539</td><td>102</td><td>99</td><td>96</td><td>95</td><td>95</td><td>88</td><td>83</td><td>95</td></tr>
<tr><td>3</td><td>37507</td><td>107</td><td>105</td><td>104</td><td>99</td><td>100</td><td>96</td><td>96</td><td>90</td></tr>
<tr><td>4</td><td>148739</td><td>112</td><td>112</td><td>111</td><td>111</td><td>127</td><td>126</td><td>115</td><td>117</td></tr>
<tr><td>5</td><td>592387</td><td>116</td><td>115</td><td>114</td><td>112</td><td>118</td><td>120</td><td>131</td><td>130</td></tr>
<tr><td>6</td><td>2364419</td><td>130</td><td>126</td><td>120</td><td>120</td><td>121</td><td>122</td><td>121</td><td>123</td></tr>
</table>

While the PETSc results show a constant number of iterations, the iteration
counts increase when using Trilinos. This is likely caused by the different
settings used for the AMG preconditioner. For performance reasons we do not
allow coarsening below a couple thousand unknowns. As the coarse solver is an
exact solve (we are using LU by default), a change in the number of levels
will influence the quality of a V-cycle. Therefore, a V-cycle is closer to an
exact solver for smaller problem sizes.


<h3>Possibilities for extensions</h3>

+ +

<h4>Investigate Trilinos iterations</h4>

+ +Play with the smoothers, smoothing steps, and other properties for the +Trilinos AMG to achieve an optimal preconditioner. + +
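
A natural starting point is the block of AMG settings that is already present,
commented out, in the solve() function of the program (Trilinos case only).
The following sketch re-enables those settings; the concrete values are
illustrative starting points, not tuned recommendations:

@code
// Experiment with the AMG setup for the velocity block; all members used
// here appear in the commented-out block in solve().
LA::MPI::PreconditionAMG::AdditionalData data;
data.elliptic              = true;  // the velocity block is elliptic
data.higher_order_elements = true;  // we use Q2 elements for the velocity
data.n_cycles              = 1;     // one V-cycle per application
data.smoother_sweeps       = 2;     // try values between 1 and 5
data.smoother_overlap      = 1;

// Handing the constant modes of the velocity space to the AMG usually
// improves the coarsening for vector-valued problems:
std::vector<std::vector<bool> > constant_modes;
FEValuesExtractors::Vector velocity_components(0);
DoFTools::extract_constant_modes (dof_handler,
                                  fe.component_mask(velocity_components),
                                  constant_modes);
data.constant_modes = constant_modes;

prec_A.initialize (system_matrix.block(0,0), data);
@endcode

Other members of LA::MPI::PreconditionAMG::AdditionalData (for example the
choice of smoother, depending on your deal.II version) are worth exploring as
well. Whether a combination is actually optimal has to be verified
experimentally, by re-running the refinement cycles and checking that the
iteration counts in the table above become constant.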

<h4>Solve the Oseen problem instead of the Stokes system</h4>


This change requires using GMRES or BiCGStab as the outer solver, because the
system is no longer symmetric.

You can prescribe the exact flow solution as $b$ in the convective term
$b \cdot \nabla u$. This should give the same solution as the original
problem, if you set the right-hand side to zero.

diff --git a/examples/step-55/doc/tooltip b/examples/step-55/doc/tooltip
new file mode 100644
index 0000000000..9c3d5ddd6d
--- /dev/null
+++ b/examples/step-55/doc/tooltip
@@ -0,0 +1 @@
Solving the Stokes problem in parallel.
diff --git a/examples/step-55/reference.py b/examples/step-55/reference.py
new file mode 100644
index 0000000000..67ade52483
--- /dev/null
+++ b/examples/step-55/reference.py
@@ -0,0 +1,37 @@
from sympy import *
from sympy.printing import print_ccode
from sympy.physics.vector import ReferenceFrame, gradient, divergence
from sympy.vector import CoordSysCartesian

R = ReferenceFrame('R');
x = R[0]; y = R[1];

a=-0.5; b=1.5;
visc=1e-1;
lambda_=(1/(2*visc)-sqrt(1/(4*visc**2)+4*pi**2));
print(" visc=%f" % visc)

u=[0,0]
u[0]=1-exp(lambda_*x)*cos(2*pi*y);
u[1]=lambda_/(2*pi)*exp(lambda_*x)*sin(2*pi*y);
p=(exp(3*lambda_)-exp(-lambda_))/(8*lambda_)-exp(2*lambda_*x)/2;
p=p - integrate(p, (x,a,b));

grad_p = gradient(p, R).to_matrix(R)
f0 = -divergence(visc*gradient(u[0], R), R) + grad_p[0];
f1 = -divergence(visc*gradient(u[1], R), R) + grad_p[1];
f2 = divergence(u[0]*R.x + u[1]*R.y, R);

print("\n * RHS:")
print(ccode(f0, assign_to = "values[0]"));
print(ccode(f1, assign_to = "values[1]"));
print(ccode(f2, assign_to = "values[2]"));


print("\n * ExactSolution:")
print(ccode(u[0], assign_to = "values[0]"));
print(ccode(u[1], assign_to = "values[1]"));
print(ccode(p, assign_to = "values[2]"));

print("")
print("pressure mean:", N(integrate(p,(x,a,b))))
diff --git a/examples/step-55/step-55.cc b/examples/step-55/step-55.cc
new file mode 100644
index 0000000000..ad539c6c3b
--- /dev/null
+++ b/examples/step-55/step-55.cc
@@ -0,0 +1,874 @@
/* ---------------------------------------------------------------------
 *
 * Copyright (C) 2016 by the deal.II authors
 *
 * This file is part of the deal.II library.
 *
 * The deal.II library is free software; you can use it, redistribute
 * it, and/or modify it under the terms of the GNU Lesser General
 * Public License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 * The full text of the license can be found in the file LICENSE at
 * the top level of the deal.II distribution.
+ * + * --------------------------------------------------------------------- + + * + * Author: Timo Heister, Clemson University, 2016 + */ + +#include +#include +#include + +// The following chunk out code is identical to step-40 and allows +// switching between PETSc and Trilinos: + +#include + +//#define FORCE_USE_OF_TRILINOS + +namespace LA +{ +#if defined(DEAL_II_WITH_PETSC) && !(defined(DEAL_II_WITH_TRILINOS) && defined(FORCE_USE_OF_TRILINOS)) + using namespace dealii::LinearAlgebraPETSc; +# define USE_PETSC_LA +#elif defined(DEAL_II_WITH_TRILINOS) + using namespace dealii::LinearAlgebraTrilinos; +#else +# error DEAL_II_WITH_PETSC or DEAL_II_WITH_TRILINOS required +#endif +} + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace Step56 +{ + using namespace dealii; + + // @sect3{Linear solvers and preconditioners} + + // We need a few helper classes to represent our solver strategy + // described in the introduction. + + namespace LinearSolvers + { + // This class exposes the action of applying the inverse of a giving + // matrix via the function InverseMatrix::vmult(). Internally, the + // inverse is not formed explicitly. Instead, a linear solver with CG + // is performed. This class extends the InverseMatrix class in step-20 + // with an option to specify a preconditioner, and to allow for different + // vector types + // in the vmult function. + template + class InverseMatrix : public Subscriptor + { + public: + InverseMatrix (const Matrix &m, + const Preconditioner &preconditioner); + + template + void vmult (VectorType &dst, + const VectorType &src) const; + + private: + const SmartPointer matrix; + const Preconditioner &preconditioner; + }; + + + template + InverseMatrix:: + InverseMatrix (const Matrix &m, + const Preconditioner &preconditioner) + : + matrix (&m), + preconditioner (preconditioner) + {} + + + + template + template + void + InverseMatrix:: + vmult (VectorType &dst, + const VectorType &src) const + { + SolverControl solver_control (src.size(), 1e-8*src.l2_norm()); + SolverCG cg (solver_control); + dst = 0; + + try + { + cg.solve (*matrix, dst, src, preconditioner); + } + catch (std::exception &e) + { + Assert (false, ExcMessage(e.what())); + } + } + + + // The class A template class for a simple block diagonal preconditioner + // for 2x2 matrices. 
+ template + class BlockDiagonalPreconditioner : public Subscriptor + { + public: + BlockDiagonalPreconditioner ( + const PreconditionerA &preconditioner_A, + const PreconditionerS &preconditioner_S); + + void vmult (LA::MPI::BlockVector &dst, + const LA::MPI::BlockVector &src) const; + + private: + const PreconditionerA &preconditioner_A; + const PreconditionerS &preconditioner_S; + }; + + template + BlockDiagonalPreconditioner:: + BlockDiagonalPreconditioner ( + const PreconditionerA &preconditioner_A, + const PreconditionerS &preconditioner_S) + : + preconditioner_A (preconditioner_A), + preconditioner_S (preconditioner_S) + {} + + + template + void + BlockDiagonalPreconditioner:: + vmult (LA::MPI::BlockVector &dst, + const LA::MPI::BlockVector &src) const + { + preconditioner_A.vmult (dst.block(0), src.block(0)); + preconditioner_S.vmult(dst.block(1), src.block(1)); + } + + } + + // @sect3{Problem setup} + + // The following classes represent the right hand side and the exact + // solution for the test problem. + + template + class RightHandSide : public Function + { + public: + RightHandSide () : Function(dim+1) {} + + virtual void vector_value (const Point &p, + Vector &value) const; + + }; + + + template + void + RightHandSide::vector_value (const Point &p, + Vector &values) const + { + const double R_x = p[0]; + const double R_y = p[1]; + + values[0] = -1.0L/2.0L*(-2*sqrt(25.0 + 4*pow(M_PI, 2)) + 10.0)*exp(R_x*(-2*sqrt(25.0 + 4*pow(M_PI, 2)) + 10.0)) - 0.4*pow(M_PI, 2)*exp(R_x*(-sqrt(25.0 + 4*pow(M_PI, 2)) + 5.0))*cos(2*R_y*M_PI) + 0.1*pow(-sqrt(25.0 + 4*pow(M_PI, 2)) + 5.0, 2)*exp(R_x*(-sqrt(25.0 + 4*pow(M_PI, 2)) + 5.0))*cos(2*R_y*M_PI); + values[1] = 0.2*M_PI*(-sqrt(25.0 + 4*pow(M_PI, 2)) + 5.0)*exp(R_x*(-sqrt(25.0 + 4*pow(M_PI, 2)) + 5.0))*sin(2*R_y*M_PI) - 0.05*pow(-sqrt(25.0 + 4*pow(M_PI, 2)) + 5.0, 3)*exp(R_x*(-sqrt(25.0 + 4*pow(M_PI, 2)) + 5.0))*sin(2*R_y*M_PI)/M_PI; + values[2] = 0; + } + + + template + class ExactSolution : public Function + { + public: + ExactSolution () : Function(dim+1) {} + + virtual void vector_value (const Point &p, + Vector &value) const; + }; + + template + void + ExactSolution::vector_value (const Point &p, + Vector &values) const + { + const double R_x = p[0]; + const double R_y = p[1]; + + values[0] = -exp(R_x*(-sqrt(25.0 + 4*pow(M_PI, 2)) + 5.0))*cos(2*R_y*M_PI) + 1; + values[1] = (1.0L/2.0L)*(-sqrt(25.0 + 4*pow(M_PI, 2)) + 5.0)*exp(R_x*(-sqrt(25.0 + 4*pow(M_PI, 2)) + 5.0))*sin(2*R_y*M_PI)/M_PI; + values[2] = -1.0L/2.0L*exp(R_x*(-2*sqrt(25.0 + 4*pow(M_PI, 2)) + 10.0)) - 2.0*(-6538034.74494422 + 0.0134758939981709*exp(4*sqrt(25.0 + 4*pow(M_PI, 2))))/(-80.0*exp(3*sqrt(25.0 + 4*pow(M_PI, 2))) + 16.0*sqrt(25.0 + 4*pow(M_PI, 2))*exp(3*sqrt(25.0 + 4*pow(M_PI, 2)))) - 1634508.68623606*exp(-3.0*sqrt(25.0 + 4*pow(M_PI, 2)))/(-10.0 + 2.0*sqrt(25.0 + 4*pow(M_PI, 2))) + (-0.00673794699908547*exp(sqrt(25.0 + 4*pow(M_PI, 2))) + 3269017.37247211*exp(-3*sqrt(25.0 + 4*pow(M_PI, 2))))/(-8*sqrt(25.0 + 4*pow(M_PI, 2)) + 40.0) + 0.00336897349954273*exp(1.0*sqrt(25.0 + 4*pow(M_PI, 2)))/(-10.0 + 2.0*sqrt(25.0 + 4*pow(M_PI, 2))); + } + + + + // @sect3{The main program} + // + // The main class is very similar to step-40, except that matrices and + // vectors are now block versions, and we store a std::vector + // for owned and relevant DoFs instead of a single IndexSet. We have + // exactly two IndexSets, one for all velocity unknowns and one for all + // pressure unknowns. 
+ template + class StokesProblem + { + public: + StokesProblem (unsigned int velocity_degree); + + void run (); + + private: + void make_grid (); + void setup_system (); + void assemble_system (); + void solve (); + void refine_grid (); + void output_results (const unsigned int cycle) const; + + unsigned int velocity_degree; + double viscosity; + MPI_Comm mpi_communicator; + + FESystem fe; + parallel::distributed::Triangulation triangulation; + DoFHandler dof_handler; + + std::vector owned_partitioning; + std::vector relevant_partitioning; + + ConstraintMatrix constraints; + + LA::MPI::BlockSparseMatrix system_matrix; + LA::MPI::BlockSparseMatrix preconditioner_matrix; + LA::MPI::BlockVector locally_relevant_solution; + LA::MPI::BlockVector system_rhs; + + ConditionalOStream pcout; + TimerOutput computing_timer; + }; + + + + + template + StokesProblem::StokesProblem (unsigned int velocity_degree) + : + velocity_degree (velocity_degree), + viscosity (0.1), + mpi_communicator (MPI_COMM_WORLD), + fe (FE_Q(velocity_degree), dim, + FE_Q(velocity_degree-1), 1), + triangulation (mpi_communicator, + typename Triangulation::MeshSmoothing + (Triangulation::smoothing_on_refinement | + Triangulation::smoothing_on_coarsening)), + dof_handler (triangulation), + pcout (std::cout, + (Utilities::MPI::this_mpi_process(mpi_communicator) + == 0)), + computing_timer (mpi_communicator, + pcout, + TimerOutput::summary, + TimerOutput::wall_times) + {} + + + // The Kovasnay flow is defined on the domain [-0.5, 1.5]^2, which we + // create by passing the min and max values to GridGenerator::hyper_cube. + template + void StokesProblem::make_grid() + { + GridGenerator::hyper_cube (triangulation, -0.5, 1.5); + triangulation.refine_global (3); + } + + // @sect3{System Setup} + // + // The construction of the block matrices and vectors is new compared to + // step-40 and is different compared to serial codes like step-22, because + // we need to supply the set of rows that belong to our processor. + template + void StokesProblem::setup_system () + { + TimerOutput::Scope t(computing_timer, "setup"); + + dof_handler.distribute_dofs (fe); + + // Put all dim velocities into block 0 and the pressure into block 1, + // then reorder the unknowns by block. Finally count how many unknowns + // we have per block. + std::vector stokes_sub_blocks (dim+1,0); + stokes_sub_blocks[dim] = 1; + DoFRenumbering::component_wise (dof_handler, stokes_sub_blocks); + + std::vector dofs_per_block (2); + DoFTools::count_dofs_per_block (dof_handler, dofs_per_block, + stokes_sub_blocks); + + const unsigned int n_u = dofs_per_block[0], + n_p = dofs_per_block[1]; + + pcout << " Number of degrees of freedom: " + << dof_handler.n_dofs() + << " (" << n_u << '+' << n_p << ')' + << std::endl; + + // We split up the IndexSet for locally owned and locally relevant DoFs + // into two IndexSets based on how we want to create the block matrices + // and vectors. + owned_partitioning.resize(2); + owned_partitioning[0] = dof_handler.locally_owned_dofs ().get_view(0, n_u); + owned_partitioning[1] = dof_handler.locally_owned_dofs ().get_view(n_u, n_u+n_p); + + IndexSet locally_relevant_dofs; + DoFTools::extract_locally_relevant_dofs (dof_handler, + locally_relevant_dofs); + relevant_partitioning.resize(2); + relevant_partitioning[0] = locally_relevant_dofs.get_view(0, n_u); + relevant_partitioning[1] = locally_relevant_dofs.get_view(n_u, n_u+n_p); + + // Setting up the constraints for boundary conditions and hanging nodes + // is identical to step-40. 
Rven though we don't have any hanging nodes + // because we only perform global refinement, it is still a good idea + // to put this function call in, in case adaptive refinement gets + // introduced later. + { + constraints.reinit (locally_relevant_dofs); + + FEValuesExtractors::Vector velocities(0); + DoFTools::make_hanging_node_constraints (dof_handler, + constraints); + VectorTools::interpolate_boundary_values (dof_handler, + 0, + ExactSolution(), + constraints, + fe.component_mask(velocities)); + constraints.close (); + } + + // Now we create the system matrix based on a BlockDynamicSparsityPattern. + // We know that we won't have coupling between different velocity + // components (because we use the laplace and not the deformation tensor) + // and no coupling between pressure with its test functions, so we use + // a Table to communicate this coupling information to + // DoFTools::make_sparsity_pattern. + { + system_matrix.clear (); + + Table<2,DoFTools::Coupling> coupling (dim+1, dim+1); + for (unsigned int c=0; c coupling (dim+1, dim+1); + for (unsigned int c=0; c will create a ghosted + // vector. + locally_relevant_solution.reinit (owned_partitioning, relevant_partitioning, mpi_communicator); + system_rhs.reinit (owned_partitioning, mpi_communicator); + } + + + + + // @sect3{Assembly} + // + // This function assembles the system matrix, the preconditioner matrix, + // and the right hand side. The code is pretty standard. + template + void StokesProblem::assemble_system () + { + TimerOutput::Scope t(computing_timer, "assembly"); + + system_matrix = 0; + preconditioner_matrix = 0; + system_rhs = 0; + + const QGauss quadrature_formula(velocity_degree+1); + + FEValues fe_values (fe, quadrature_formula, + update_values | update_gradients | + update_quadrature_points | + update_JxW_values); + + const unsigned int dofs_per_cell = fe.dofs_per_cell; + const unsigned int n_q_points = quadrature_formula.size(); + + FullMatrix cell_matrix (dofs_per_cell, dofs_per_cell); + FullMatrix cell_matrix2 (dofs_per_cell, dofs_per_cell); + Vector cell_rhs (dofs_per_cell); + + const RightHandSide right_hand_side; + std::vector > rhs_values (n_q_points, + Vector(dim+1)); + + std::vector > grad_phi_u (dofs_per_cell); + std::vector div_phi_u (dofs_per_cell); + std::vector phi_p (dofs_per_cell); + + std::vector local_dof_indices (dofs_per_cell); + const FEValuesExtractors::Vector velocities (0); + const FEValuesExtractors::Scalar pressure (dim); + + typename DoFHandler::active_cell_iterator + cell = dof_handler.begin_active(), + endc = dof_handler.end(); + for (; cell!=endc; ++cell) + if (cell->is_locally_owned()) + { + cell_matrix = 0; + cell_matrix2 = 0; + cell_rhs = 0; + + fe_values.reinit (cell); + right_hand_side.vector_value_list(fe_values.get_quadrature_points(), + rhs_values); + for (unsigned int q=0; qget_dof_indices (local_dof_indices); + constraints.distribute_local_to_global (cell_matrix, + cell_rhs, + local_dof_indices, + system_matrix, + system_rhs); + + constraints.distribute_local_to_global (cell_matrix2, + local_dof_indices, + preconditioner_matrix); + + } + + system_matrix.compress (VectorOperation::add); + preconditioner_matrix.compress (VectorOperation::add); + system_rhs.compress (VectorOperation::add); + } + + + + // @sect3{Solving} + // + // This function solves the linear system with MINRES with a block diagonal + // preconditioner and AMG for the two diagonal blocks as described in the + // introduction. 
The preconditioner applies a v cycle to the 0,0 block + // and a CG with the mass matrix for the 1,1 block (the Schur complement). + template + void StokesProblem::solve () + { + TimerOutput::Scope t(computing_timer, "solve"); + + LA::MPI::PreconditionAMG prec_A; + { + LA::MPI::PreconditionAMG::AdditionalData data; + +#ifdef USE_PETSC_LA + data.symmetric_operator = true; +#else +// data.n_cycles = 1; +// data.higher_order_elements = true; +// data.elliptic = true; +// data.smoother_sweeps = 5; +// data.smoother_overlap = 1; + +// std::vector > constant_modes; +// FEValuesExtractors::Vector velocity_components(0); +// DoFTools::extract_constant_modes (dof_handler, +// fe.component_mask(velocity_components), +// constant_modes); +// data.constant_modes = constant_modes; +#endif + prec_A.initialize(system_matrix.block(0,0), data); + } + + LA::MPI::PreconditionAMG prec_S; + { + LA::MPI::PreconditionAMG::AdditionalData data; + +#ifdef USE_PETSC_LA + data.symmetric_operator = true; +#else +#endif + prec_S.initialize(preconditioner_matrix.block(1,1), data); + } + + // The InverseMatrix is used to solve for the mass matrix: + typedef LinearSolvers::InverseMatrix mp_inverse_t; + const mp_inverse_t + mp_inverse (preconditioner_matrix.block(1,1), prec_S); + + // This constructs the block preconditioner based on the preconditioners + // for the individual blocks defined above. + const LinearSolvers::BlockDiagonalPreconditioner + preconditioner (prec_A, mp_inverse); + + // With that, we can finally set up the linear solver and solve the system: + SolverControl solver_control (system_matrix.m(), + 1e-10*system_rhs.l2_norm()); + + SolverMinRes solver (solver_control); + + LA::MPI::BlockVector + distributed_solution (owned_partitioning, mpi_communicator); + + constraints.set_zero (distributed_solution); + + solver.solve(system_matrix, distributed_solution, system_rhs, preconditioner); + + pcout << " Solved in " << solver_control.last_step() + << " iterations." << std::endl; + + constraints.distribute (distributed_solution); + + // Like in step-56, we subtract the mean pressure to allow error + // computations against our reference solution, which has a mean value + // of zero. + locally_relevant_solution = distributed_solution; + const double mean_pressure = VectorTools::compute_mean_value (dof_handler, + QGauss(velocity_degree+2), + locally_relevant_solution, + dim); + distributed_solution.block(1).add(-mean_pressure); + locally_relevant_solution.block(1) = distributed_solution.block(1); + } + + + + // @sect3{The rest} + // + // The remainder of the code that deals with mesh refinement, output, and + // the main loop is pretty standard. 
+ template + void StokesProblem::refine_grid () + { + TimerOutput::Scope t(computing_timer, "refine"); + + if (true) + { + triangulation.refine_global(); + } + else + { + Vector estimated_error_per_cell (triangulation.n_active_cells()); + + FEValuesExtractors::Vector velocities(0); + KellyErrorEstimator::estimate (dof_handler, + QGauss(3), + typename FunctionMap::type(), + locally_relevant_solution, + estimated_error_per_cell, + fe.component_mask(velocities)); + parallel::distributed::GridRefinement:: + refine_and_coarsen_fixed_number (triangulation, + estimated_error_per_cell, + 0.3, 0.0); + triangulation.execute_coarsening_and_refinement (); + } + } + + + + + template + void StokesProblem::output_results (const unsigned int cycle) const + { + { + const ComponentSelectFunction pressure_mask (dim, dim+1); + const ComponentSelectFunction velocity_mask(std::make_pair(0, dim), dim+1); + + Vector cellwise_errors (triangulation.n_active_cells()); + QGauss quadrature (velocity_degree+2); + + VectorTools::integrate_difference (dof_handler, + locally_relevant_solution, + ExactSolution(), + cellwise_errors, + quadrature, + VectorTools::L2_norm, + &velocity_mask); + + const double error_u_l2 + = VectorTools::compute_global_error(triangulation, cellwise_errors, VectorTools::L2_norm); + + VectorTools::integrate_difference (dof_handler, + locally_relevant_solution, + ExactSolution(), + cellwise_errors, + quadrature, + VectorTools::L2_norm, + &pressure_mask); + + const double error_p_l2 + = VectorTools::compute_global_error(triangulation, cellwise_errors, VectorTools::L2_norm); + + pcout << "error: u_0: "<< error_u_l2 + << " p_0: " << error_p_l2 + << std::endl; + } + + + std::vector solution_names (dim, "velocity"); + solution_names.push_back ("pressure"); + std::vector + data_component_interpretation + (dim, DataComponentInterpretation::component_is_part_of_vector); + data_component_interpretation + .push_back (DataComponentInterpretation::component_is_scalar); + + DataOut data_out; + data_out.attach_dof_handler (dof_handler); + data_out.add_data_vector (locally_relevant_solution, + solution_names, + DataOut::type_dof_data, + data_component_interpretation); + + LA::MPI::BlockVector interpolated; + interpolated.reinit(owned_partitioning, MPI_COMM_WORLD); + VectorTools::interpolate(dof_handler, ExactSolution(), interpolated); + + LA::MPI::BlockVector interpolated_relevant(owned_partitioning, relevant_partitioning, MPI_COMM_WORLD); + interpolated_relevant = interpolated; + { + std::vector solution_names (dim, "ref_u"); + solution_names.push_back ("ref_p"); + data_out.add_data_vector (interpolated_relevant, solution_names, + DataOut::type_dof_data, + data_component_interpretation); + } + + + Vector subdomain (triangulation.n_active_cells()); + for (unsigned int i=0; i filenames; + for (unsigned int i=0; + i + void StokesProblem::run () + { +#ifdef USE_PETSC_LA + pcout << "Running using PETSc." << std::endl; +#else + pcout << "Running using Trilinos." << std::endl; +#endif + const unsigned int n_cycles = 5; + for (unsigned int cycle=0; cycle problem (2); + problem.run (); + } + catch (std::exception &exc) + { + std::cerr << std::endl << std::endl + << "----------------------------------------------------" + << std::endl; + std::cerr << "Exception on processing: " << std::endl + << exc.what() << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; + + return 1; + } + catch (...) 
+ { + std::cerr << std::endl << std::endl + << "----------------------------------------------------" + << std::endl; + std::cerr << "Unknown exception!" << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; + return 1; + } + + return 0; +} -- 2.39.5