From 53c6089b161d5204343d225e4483496ffd17c054 Mon Sep 17 00:00:00 2001 From: bangerth Date: Thu, 22 Sep 2011 03:56:27 +0000 Subject: [PATCH] Indent by astyle. May undo that again later. git-svn-id: https://svn.dealii.org/trunk@24361 0785d39b-7218-0410-832d-ea1e28bc413d --- deal.II/examples/step-32/step-32.cc | 4477 +++++++++++++-------------- 1 file changed, 2238 insertions(+), 2239 deletions(-) diff --git a/deal.II/examples/step-32/step-32.cc b/deal.II/examples/step-32/step-32.cc index 6cb568d79b..4e47918a41 100644 --- a/deal.II/examples/step-32/step-32.cc +++ b/deal.II/examples/step-32/step-32.cc @@ -7,7 +7,7 @@ /* $Id$ */ /* Author: Martin Kronbichler, Uppsala University, Wolfgang Bangerth, Texas A&M University, - Timo Heister, University of Goettingen, 2008-2011 */ + Timo Heister, University of Goettingen, 2008-2011 */ /* */ /* Copyright (C) 2008, 2009, 2010, 2011 by the deal.II authors */ /* */ @@ -16,12 +16,12 @@ /* to the file deal.II/doc/license.html for the text and */ /* further information on this license. */ - // @sect3{Include files} +// @sect3{Include files} - // We include the functionality - // of these well-known deal.II - // library files and some C++ - // header files. +// We include the functionality +// of these well-known deal.II +// library files and some C++ +// header files. #include #include #include @@ -70,10 +70,10 @@ #include #include - // This is the only include file that is new: - // We use an IndexSet to describe the - // %parallel partitioning of vectors and - // matrices. +// This is the only include file that is new: +// We use an IndexSet to describe the +// %parallel partitioning of vectors and +// matrices. #include #include @@ -91,13 +91,13 @@ using namespace dealii; - // @sect3{Equation data} +// @sect3{Equation data} - // In the following namespace, we define the - // various pieces of equation data. All of - // these are exhaustively discussed in the - // description of the testcase in the - // introduction: +// In the following namespace, we define the +// various pieces of equation data. All of +// these are exhaustively discussed in the +// description of the testcase in the +// introduction: namespace EquationData { const double eta = 1e21; /* Pa s */ @@ -118,22 +118,22 @@ namespace EquationData const double year_in_seconds = 60*60*24*365.2425; //TODO: document in intro.dox - // scale not by R1-R0, but by a - // typical length scale, say 10km, - // of variation ("plume - // diameter"). this choice also - // roughly equilibrates the sizes - // of the velocity and pressure - // components of the solution - // vectors + // scale not by R1-R0, but by a + // typical length scale, say 10km, + // of variation ("plume + // diameter"). 
this choice also + // roughly equilibrates the sizes + // of the velocity and pressure + // components of the solution + // vectors const double pressure_scaling = eta / 10000; double density (const double temperature) { return (reference_density * - (1 - expansion_coefficient * (temperature - - reference_temperature))); + (1 - expansion_coefficient * (temperature - + reference_temperature))); } @@ -153,19 +153,19 @@ namespace EquationData template double adiabatic_pressure (const Point &p) { - // the static, adiabatic pressure - // satisfies - // dP/dr = -g rho - - // assuming a constant density, - // we can integrate the pressure - // equation in depth to get that - // the adiabatic pressure equals - // $P(r) = rho_0 \int_r^{R_1} g(r) dr$ - // - // using the model for the - // gravity vector above, this - // yields the following formula: + // the static, adiabatic pressure + // satisfies + // dP/dr = -g rho + + // assuming a constant density, + // we can integrate the pressure + // equation in depth to get that + // the adiabatic pressure equals + // $P(r) = rho_0 \int_r^{R_1} g(r) dr$ + // + // using the model for the + // gravity vector above, this + // yields the following formula: const double r = p.norm(); return reference_density * (1./2 * 1.245e-6 * (R1*R1 - r*r) - 7.714e13 * (1./R1 - 1./r)); } @@ -174,12 +174,12 @@ namespace EquationData template double adiabatic_temperature (const Point &p) { - // the static, adiabatic - // temperature satisfies - // $dT/dr = -T alpha/c_P g$ + // the static, adiabatic + // temperature satisfies + // $dT/dr = -T alpha/c_P g$ - // let's assume constant gravity, - // then we get by integration + // let's assume constant gravity, + // then we get by integration const double r = p.norm(); return T1 * std::exp(-expansion_coefficient * 9.81 / specific_heat * (r-R1)); @@ -194,10 +194,10 @@ namespace EquationData TemperatureInitialValues () : Function(1) {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; virtual void vector_value (const Point &p, - Vector &value) const; + Vector &value) const; }; @@ -205,29 +205,29 @@ namespace EquationData template double TemperatureInitialValues::value (const Point &p, - const unsigned int) const + const unsigned int) const { const double r = p.norm(); const double h = R1-R0; - // s = fraction of the way from - // the inner to the outer - // boundary; 0<=s<=1 + // s = fraction of the way from + // the inner to the outer + // boundary; 0<=s<=1 const double s = (r-R0)/h; -/* now compute an angular variation of the linear temperature field by - stretching the variable s appropriately. note that the following - formula leaves the end points s=0 and s=1 fixed, but stretches the - region in between depending on the angle phi=atan2(x,y). + /* now compute an angular variation of the linear temperature field by + stretching the variable s appropriately. note that the following + formula leaves the end points s=0 and s=1 fixed, but stretches the + region in between depending on the angle phi=atan2(x,y). 
- For a plot, see - http://www.wolframalpha.com/input/?i=plot+%28%282*sqrt%28x^2%2By^2%29-1%29%2B0.2*%282*sqrt%28x^2%2By^2%29-1%29*%281-%282*sqrt%28x^2%2By^2%29-1%29%29*sin%286*atan2%28x%2Cy%29%29%29%2C+x%3D-1+to+1%2C+y%3D-1+to+1 -*/ + For a plot, see + http://www.wolframalpha.com/input/?i=plot+%28%282*sqrt%28x^2%2By^2%29-1%29%2B0.2*%282*sqrt%28x^2%2By^2%29-1%29*%281-%282*sqrt%28x^2%2By^2%29-1%29%29*sin%286*atan2%28x%2Cy%29%29%29%2C+x%3D-1+to+1%2C+y%3D-1+to+1 + */ const double scale = (dim==3)?std::max(0.0,cos(3.14159*abs(p(2)/R1))):1.0; const double phi = std::atan2(p(0),p(1)); const double s_mod = s - + - 0.2 * s * (1-s) * std::sin(6*phi) * scale; + + + 0.2 * s * (1-s) * std::sin(6*phi) * scale; return T0*(1.0-s_mod) + T1*s_mod; } @@ -236,7 +236,7 @@ namespace EquationData template void TemperatureInitialValues::vector_value (const Point &p, - Vector &values) const + Vector &values) const { for (unsigned int c=0; cn_components; ++c) values(c) = TemperatureInitialValues::value (p, c); @@ -245,31 +245,31 @@ namespace EquationData - // @sect3{Linear solvers and preconditioners} - - // In comparison to step-31, we did one - // change in the linear algebra of the - // problem: We exchange the - // InverseMatrix that - // previously held the approximation of the - // Schur complement by a preconditioner - // only (we will choose ILU in the - // application code below), as discussed in - // the introduction. This trick we already - // did for the velocity block - the idea of - // this is that the solver iterations on - // the block system will eventually also - // make the approximation for the Schur - // complement good. If the preconditioner - // we're using is good enough, there will - // be no increase in the outer iteration - // count compared to using converged solves - // for the inverse matrices of velocity and - // Schur complement. All we need to do for - // implementing that change is to give the - // respective variable in the - // BlockSchurPreconditioner class another - // name. +// @sect3{Linear solvers and preconditioners} + +// In comparison to step-31, we did one +// change in the linear algebra of the +// problem: We exchange the +// InverseMatrix that +// previously held the approximation of the +// Schur complement by a preconditioner +// only (we will choose ILU in the +// application code below), as discussed in +// the introduction. This trick we already +// did for the velocity block - the idea of +// this is that the solver iterations on +// the block system will eventually also +// make the approximation for the Schur +// complement good. If the preconditioner +// we're using is good enough, there will +// be no increase in the outer iteration +// count compared to using converged solves +// for the inverse matrices of velocity and +// Schur complement. All we need to do for +// implementing that change is to give the +// respective variable in the +// BlockSchurPreconditioner class another +// name. 
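As a brief summary of what the RightPrecond class below does (using the notation $A$, $B$ and $S$ for the velocity block, the divergence block and the Schur complement as in the introduction; the tildes mark the approximate inverses actually used, so this is only an illustrative restatement of the vmult() that follows):

\[
  P = \begin{pmatrix} A & B^T \\ 0 & -S \end{pmatrix},
  \qquad
  P^{-1} \begin{pmatrix} f \\ g \end{pmatrix}
  =
  \begin{pmatrix} \tilde A^{-1}\,(f - B^T p) \\ p \end{pmatrix},
  \qquad
  p = -\tilde S^{-1} g .
\]

In vmult(), $\tilde S^{-1}$ is realized by a CG solve with the $(1,1)$ block of the preconditioner matrix (a pressure mass matrix in this program), and $\tilde A^{-1}$ either by an inner CG solve with the $(0,0)$ block of the Stokes matrix or by a single application of its preconditioner, depending on the do_solve_A flag.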
namespace LinearSolvers { template @@ -277,57 +277,57 @@ namespace LinearSolvers { public: RightPrecond ( - const TrilinosWrappers::BlockSparseMatrix &S, - const TrilinosWrappers::BlockSparseMatrix &Spre, - const PreconditionerMp &Mppreconditioner, - const PreconditionerA &Apreconditioner, - const bool do_solve_A_in = true) - : - stokes_matrix (&S), - stokes_preconditioner_matrix (&Spre), - mp_preconditioner (Mppreconditioner), - a_preconditioner (Apreconditioner), - do_solve_A (do_solve_A_in) - {} - - void solve_S(TrilinosWrappers::MPI::Vector &dst, - const TrilinosWrappers::MPI::Vector &src) const - { - SolverControl cn(5000, 1e-5); - - TrilinosWrappers::SolverCG solver(cn); - - solver.solve(stokes_preconditioner_matrix->block(1,1), - dst, src, - mp_preconditioner); - - dst*=-1.0; - } + const TrilinosWrappers::BlockSparseMatrix &S, + const TrilinosWrappers::BlockSparseMatrix &Spre, + const PreconditionerMp &Mppreconditioner, + const PreconditionerA &Apreconditioner, + const bool do_solve_A_in = true) + : + stokes_matrix (&S), + stokes_preconditioner_matrix (&Spre), + mp_preconditioner (Mppreconditioner), + a_preconditioner (Apreconditioner), + do_solve_A (do_solve_A_in) + {} + + void solve_S(TrilinosWrappers::MPI::Vector &dst, + const TrilinosWrappers::MPI::Vector &src) const + { + SolverControl cn(5000, 1e-5); + + TrilinosWrappers::SolverCG solver(cn); + + solver.solve(stokes_preconditioner_matrix->block(1,1), + dst, src, + mp_preconditioner); + + dst*=-1.0; + } void solve_A(TrilinosWrappers::MPI::Vector &dst, - const TrilinosWrappers::MPI::Vector &src) const - { - SolverControl cn(5000, src.l2_norm()*1e-2); - TrilinosWrappers::SolverCG solver(cn); - solver.solve(stokes_matrix->block(0,0), dst, src, a_preconditioner); - } + const TrilinosWrappers::MPI::Vector &src) const + { + SolverControl cn(5000, src.l2_norm()*1e-2); + TrilinosWrappers::SolverCG solver(cn); + solver.solve(stokes_matrix->block(0,0), dst, src, a_preconditioner); + } void vmult (TrilinosWrappers::MPI::BlockVector &dst, - const TrilinosWrappers::MPI::BlockVector &src) const - { - TrilinosWrappers::MPI::Vector utmp(src.block(0)); + const TrilinosWrappers::MPI::BlockVector &src) const + { + TrilinosWrappers::MPI::Vector utmp(src.block(0)); - solve_S(dst.block(1), src.block(1)); + solve_S(dst.block(1), src.block(1)); - stokes_matrix->block(0,1).vmult(utmp, dst.block(1)); //B^T - utmp*=-1.0; - utmp.add(src.block(0)); + stokes_matrix->block(0,1).vmult(utmp, dst.block(1)); //B^T + utmp*=-1.0; + utmp.add(src.block(0)); - if (do_solve_A == true) - solve_A(dst.block(0), utmp); - else - a_preconditioner.vmult (dst.block(0), utmp); - } + if (do_solve_A == true) + solve_A(dst.block(0), utmp); + else + a_preconditioner.vmult (dst.block(0), utmp); + } private: const SmartPointer stokes_matrix; @@ -340,68 +340,68 @@ namespace LinearSolvers - // @sect3{Definition of assembly data structures} - // - // As described in the introduction, we will - // use the WorkStream mechanism discussed in - // the @ref threads module to parallelize - // operations among the processors of a - // single machine. The WorkStream class - // requires that data is passed around in two - // kinds of data structures, one for scratch - // data and one to pass data from the - // assembly function to the function that - // copies local contributions into global - // objects. 
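To make this division of labor concrete before the actual data structures follow, here is a minimal, self-contained sketch of the WorkStream pattern applied to a plain mass matrix. It is not part of this patch: the names MassScratch, MassCopy and assemble_mass_matrix are invented for the illustration, and it assumes a current deal.II (lambdas and AffineConstraints), whereas the code in this file uses std_cxx1x::bind and ConstraintMatrix for the same purpose.

#include <deal.II/base/work_stream.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/full_matrix.h>

using namespace dealii;

template <int dim>
struct MassScratch
{
  MassScratch (const FiniteElement<dim> &fe,
               const Quadrature<dim>    &quadrature)
    : fe_values (fe, quadrature, update_values | update_JxW_values)
  {}

  // FEValues objects cannot be copied, so the copy constructor
  // rebuilds one with the same characteristics:
  MassScratch (const MassScratch &scratch)
    : fe_values (scratch.fe_values.get_fe(),
                 scratch.fe_values.get_quadrature(),
                 scratch.fe_values.get_update_flags())
  {}

  FEValues<dim> fe_values;
};

struct MassCopy
{
  MassCopy (const unsigned int dofs_per_cell)
    : local_matrix (dofs_per_cell, dofs_per_cell),
      local_dof_indices (dofs_per_cell)
  {}

  FullMatrix<double>                   local_matrix;
  std::vector<types::global_dof_index> local_dof_indices;
};

template <int dim, typename MatrixType>
void assemble_mass_matrix (const DoFHandler<dim>           &dof_handler,
                           const Quadrature<dim>           &quadrature,
                           const AffineConstraints<double> &constraints,
                           MatrixType                      &mass_matrix)
{
  // the worker does all cell-local work and may run on many threads at once
  auto worker = [](const typename DoFHandler<dim>::active_cell_iterator &cell,
                   MassScratch<dim> &scratch,
                   MassCopy         &copy)
  {
    scratch.fe_values.reinit (cell);
    copy.local_matrix = 0;
    cell->get_dof_indices (copy.local_dof_indices);

    for (unsigned int q=0; q<scratch.fe_values.n_quadrature_points; ++q)
      for (unsigned int i=0; i<scratch.fe_values.dofs_per_cell; ++i)
        for (unsigned int j=0; j<scratch.fe_values.dofs_per_cell; ++j)
          copy.local_matrix(i,j) += (scratch.fe_values.shape_value (i, q) *
                                     scratch.fe_values.shape_value (j, q) *
                                     scratch.fe_values.JxW (q));
  };

  // the copier is the only function that touches the global object;
  // WorkStream guarantees it is executed by one thread at a time
  auto copier = [&](const MassCopy &copy)
  {
    constraints.distribute_local_to_global (copy.local_matrix,
                                            copy.local_dof_indices,
                                            mass_matrix);
  };

  WorkStream::run (dof_handler.begin_active (),
                   dof_handler.end (),
                   worker,
                   copier,
                   MassScratch<dim> (dof_handler.get_fe (), quadrature),
                   MassCopy (dof_handler.get_fe ().dofs_per_cell));
}

All per-cell temporaries live in the scratch object so that the expensive FEValues construction happens once per thread rather than once per cell, which is exactly the performance argument made above for the Scratch arrays in this program.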
- // - // The following namespace (and the two - // sub-namespaces) contains a collection of - // data structures that serve this purpose, - // one pair for each of the four operations - // discussed in the introduction that we will - // want to parallelize. Each - // assembly routine gets two sets of data: a - // Scratch array that collects all the - // classes and arrays that are used for the - // calculation of the cell contribution, and - // a CopyData array that keeps local matrices - // and vectors which will be written into the - // global matrix. Whereas CopyData is a - // container for the final data that is - // written into the global matrices and - // vector (and, thus, absolutely necessary), - // the Scratch arrays are merely there for - // performance reasons — it would be - // much more expensive to set up a FEValues - // object on each cell, than creating it only - // once and updating some derivative data. - // - // Using the program in step-31, we have - // four assembly routines. One for the - // preconditioner matrix of the Stokes - // system, one for the Stokes matrix and - // right hand side, one for the - // temperature matrices and one for the - // right hand side of the temperature - // equation. We organize the scratch - // arrays and a CopyData arrays for each - // of those four assembly components - // using a struct - // environment. - // - // Regarding the Scratch array, each - // struct is equipped with a constructor - // that create an FEValues object for a - // @ref FiniteElement "finite element", a - // @ref Quadrature "quadrature formula" - // and some - // @ref UpdateFlags "update flags". - // Moreover, we manually - // implement a copy constructor (since - // the FEValues class is not copyable by - // itself), and provide some additional - // vector fields that are used to improve - // performance of assembly. +// @sect3{Definition of assembly data structures} +// +// As described in the introduction, we will +// use the WorkStream mechanism discussed in +// the @ref threads module to parallelize +// operations among the processors of a +// single machine. The WorkStream class +// requires that data is passed around in two +// kinds of data structures, one for scratch +// data and one to pass data from the +// assembly function to the function that +// copies local contributions into global +// objects. +// +// The following namespace (and the two +// sub-namespaces) contains a collection of +// data structures that serve this purpose, +// one pair for each of the four operations +// discussed in the introduction that we will +// want to parallelize. Each +// assembly routine gets two sets of data: a +// Scratch array that collects all the +// classes and arrays that are used for the +// calculation of the cell contribution, and +// a CopyData array that keeps local matrices +// and vectors which will be written into the +// global matrix. Whereas CopyData is a +// container for the final data that is +// written into the global matrices and +// vector (and, thus, absolutely necessary), +// the Scratch arrays are merely there for +// performance reasons — it would be +// much more expensive to set up a FEValues +// object on each cell, than creating it only +// once and updating some derivative data. +// +// Using the program in step-31, we have +// four assembly routines. 
One for the +// preconditioner matrix of the Stokes +// system, one for the Stokes matrix and +// right hand side, one for the +// temperature matrices and one for the +// right hand side of the temperature +// equation. We organize the scratch +// arrays and a CopyData arrays for each +// of those four assembly components +// using a struct +// environment. +// +// Regarding the Scratch array, each +// struct is equipped with a constructor +// that create an FEValues object for a +// @ref FiniteElement "finite element", a +// @ref Quadrature "quadrature formula" +// and some +// @ref UpdateFlags "update flags". +// Moreover, we manually +// implement a copy constructor (since +// the FEValues class is not copyable by +// itself), and provide some additional +// vector fields that are used to improve +// performance of assembly. namespace Assembly { namespace Scratch @@ -409,29 +409,29 @@ namespace Assembly template struct StokesPreconditioner { - StokesPreconditioner (const FiniteElement &stokes_fe, - const Quadrature &stokes_quadrature, - const Mapping &mapping, - const UpdateFlags update_flags); - StokesPreconditioner (const StokesPreconditioner &data); + StokesPreconditioner (const FiniteElement &stokes_fe, + const Quadrature &stokes_quadrature, + const Mapping &mapping, + const UpdateFlags update_flags); + StokesPreconditioner (const StokesPreconditioner &data); - FEValues stokes_fe_values; + FEValues stokes_fe_values; - std::vector > grads_phi_u; - std::vector phi_p; + std::vector > grads_phi_u; + std::vector phi_p; }; template StokesPreconditioner:: StokesPreconditioner (const FiniteElement &stokes_fe, - const Quadrature &stokes_quadrature, - const Mapping &mapping, - const UpdateFlags update_flags) - : - stokes_fe_values (mapping, stokes_fe, stokes_quadrature, - update_flags), - grads_phi_u (stokes_fe.dofs_per_cell), - phi_p (stokes_fe.dofs_per_cell) + const Quadrature &stokes_quadrature, + const Mapping &mapping, + const UpdateFlags update_flags) + : + stokes_fe_values (mapping, stokes_fe, stokes_quadrature, + update_flags), + grads_phi_u (stokes_fe.dofs_per_cell), + phi_p (stokes_fe.dofs_per_cell) {} @@ -439,88 +439,88 @@ namespace Assembly template StokesPreconditioner:: StokesPreconditioner (const StokesPreconditioner &scratch) - : - stokes_fe_values (scratch.stokes_fe_values.get_mapping(), - scratch.stokes_fe_values.get_fe(), - scratch.stokes_fe_values.get_quadrature(), - scratch.stokes_fe_values.get_update_flags()), - grads_phi_u (scratch.grads_phi_u), - phi_p (scratch.phi_p) + : + stokes_fe_values (scratch.stokes_fe_values.get_mapping(), + scratch.stokes_fe_values.get_fe(), + scratch.stokes_fe_values.get_quadrature(), + scratch.stokes_fe_values.get_update_flags()), + grads_phi_u (scratch.grads_phi_u), + phi_p (scratch.phi_p) {} - // Observe that we derive the - // StokesSystem scratch array from the - // StokesPreconditioner array. We do this - // because all the objects that are - // necessary for the assembly of the - // preconditioner are also needed for the - // actual matrix system and right hand - // side, plus some extra data. This makes - // the program more compact. Note also - // that the assembly of the Stokes system - // and the temperature right hand side - // further down requires data from - // temperature and velocity, - // respectively, so we actually need two - // FEValues objects for those two cases. + // Observe that we derive the + // StokesSystem scratch array from the + // StokesPreconditioner array. 
We do this + // because all the objects that are + // necessary for the assembly of the + // preconditioner are also needed for the + // actual matrix system and right hand + // side, plus some extra data. This makes + // the program more compact. Note also + // that the assembly of the Stokes system + // and the temperature right hand side + // further down requires data from + // temperature and velocity, + // respectively, so we actually need two + // FEValues objects for those two cases. template struct StokesSystem : public StokesPreconditioner { - StokesSystem (const FiniteElement &stokes_fe, - const Mapping &mapping, - const Quadrature &stokes_quadrature, - const UpdateFlags stokes_update_flags, - const FiniteElement &temperature_fe, - const UpdateFlags temperature_update_flags); + StokesSystem (const FiniteElement &stokes_fe, + const Mapping &mapping, + const Quadrature &stokes_quadrature, + const UpdateFlags stokes_update_flags, + const FiniteElement &temperature_fe, + const UpdateFlags temperature_update_flags); - StokesSystem (const StokesSystem &data); + StokesSystem (const StokesSystem &data); - FEValues temperature_fe_values; + FEValues temperature_fe_values; - std::vector > phi_u; - std::vector > grads_phi_u; - std::vector div_phi_u; + std::vector > phi_u; + std::vector > grads_phi_u; + std::vector div_phi_u; - std::vector old_temperature_values; + std::vector old_temperature_values; }; template StokesSystem:: StokesSystem (const FiniteElement &stokes_fe, - const Mapping &mapping, - const Quadrature &stokes_quadrature, - const UpdateFlags stokes_update_flags, - const FiniteElement &temperature_fe, - const UpdateFlags temperature_update_flags) - : - StokesPreconditioner (stokes_fe, stokes_quadrature, - mapping, - stokes_update_flags), - temperature_fe_values (mapping, temperature_fe, stokes_quadrature, - temperature_update_flags), - phi_u (stokes_fe.dofs_per_cell), - grads_phi_u (stokes_fe.dofs_per_cell), - div_phi_u (stokes_fe.dofs_per_cell), - old_temperature_values (stokes_quadrature.size()) + const Mapping &mapping, + const Quadrature &stokes_quadrature, + const UpdateFlags stokes_update_flags, + const FiniteElement &temperature_fe, + const UpdateFlags temperature_update_flags) + : + StokesPreconditioner (stokes_fe, stokes_quadrature, + mapping, + stokes_update_flags), + temperature_fe_values (mapping, temperature_fe, stokes_quadrature, + temperature_update_flags), + phi_u (stokes_fe.dofs_per_cell), + grads_phi_u (stokes_fe.dofs_per_cell), + div_phi_u (stokes_fe.dofs_per_cell), + old_temperature_values (stokes_quadrature.size()) {} template StokesSystem:: StokesSystem (const StokesSystem &scratch) - : - StokesPreconditioner (scratch), - temperature_fe_values (scratch.temperature_fe_values.get_mapping(), - scratch.temperature_fe_values.get_fe(), - scratch.temperature_fe_values.get_quadrature(), - scratch.temperature_fe_values.get_update_flags()), - phi_u (scratch.phi_u), - grads_phi_u (scratch.grads_phi_u), - div_phi_u (scratch.div_phi_u), - old_temperature_values (scratch.old_temperature_values) + : + StokesPreconditioner (scratch), + temperature_fe_values (scratch.temperature_fe_values.get_mapping(), + scratch.temperature_fe_values.get_fe(), + scratch.temperature_fe_values.get_quadrature(), + scratch.temperature_fe_values.get_update_flags()), + phi_u (scratch.phi_u), + grads_phi_u (scratch.grads_phi_u), + div_phi_u (scratch.div_phi_u), + old_temperature_values (scratch.old_temperature_values) {} @@ -528,164 +528,164 @@ namespace Assembly template struct TemperatureMatrix 
{ - TemperatureMatrix (const FiniteElement &temperature_fe, - const Mapping &mapping, - const Quadrature &temperature_quadrature); - TemperatureMatrix (const TemperatureMatrix &data); + TemperatureMatrix (const FiniteElement &temperature_fe, + const Mapping &mapping, + const Quadrature &temperature_quadrature); + TemperatureMatrix (const TemperatureMatrix &data); - FEValues temperature_fe_values; + FEValues temperature_fe_values; - std::vector phi_T; - std::vector > grad_phi_T; + std::vector phi_T; + std::vector > grad_phi_T; }; template TemperatureMatrix:: TemperatureMatrix (const FiniteElement &temperature_fe, - const Mapping &mapping, - const Quadrature &temperature_quadrature) - : - temperature_fe_values (mapping, - temperature_fe, temperature_quadrature, - update_values | update_gradients | - update_JxW_values), - phi_T (temperature_fe.dofs_per_cell), - grad_phi_T (temperature_fe.dofs_per_cell) + const Mapping &mapping, + const Quadrature &temperature_quadrature) + : + temperature_fe_values (mapping, + temperature_fe, temperature_quadrature, + update_values | update_gradients | + update_JxW_values), + phi_T (temperature_fe.dofs_per_cell), + grad_phi_T (temperature_fe.dofs_per_cell) {} template TemperatureMatrix:: TemperatureMatrix (const TemperatureMatrix &scratch) - : - temperature_fe_values (scratch.temperature_fe_values.get_mapping(), - scratch.temperature_fe_values.get_fe(), - scratch.temperature_fe_values.get_quadrature(), - scratch.temperature_fe_values.get_update_flags()), - phi_T (scratch.phi_T), - grad_phi_T (scratch.grad_phi_T) + : + temperature_fe_values (scratch.temperature_fe_values.get_mapping(), + scratch.temperature_fe_values.get_fe(), + scratch.temperature_fe_values.get_quadrature(), + scratch.temperature_fe_values.get_update_flags()), + phi_T (scratch.phi_T), + grad_phi_T (scratch.grad_phi_T) {} template struct TemperatureRHS { - TemperatureRHS (const FiniteElement &temperature_fe, - const FiniteElement &stokes_fe, - const Mapping &mapping, - const Quadrature &quadrature); - TemperatureRHS (const TemperatureRHS &data); - - FEValues temperature_fe_values; - FEValues stokes_fe_values; - - std::vector phi_T; - std::vector > grad_phi_T; - - std::vector > old_velocity_values; - std::vector > old_old_velocity_values; - - std::vector > old_strain_rates; - std::vector > old_old_strain_rates; - - std::vector old_temperature_values; - std::vector old_old_temperature_values; - std::vector > old_temperature_grads; - std::vector > old_old_temperature_grads; - std::vector old_temperature_laplacians; - std::vector old_old_temperature_laplacians; + TemperatureRHS (const FiniteElement &temperature_fe, + const FiniteElement &stokes_fe, + const Mapping &mapping, + const Quadrature &quadrature); + TemperatureRHS (const TemperatureRHS &data); + + FEValues temperature_fe_values; + FEValues stokes_fe_values; + + std::vector phi_T; + std::vector > grad_phi_T; + + std::vector > old_velocity_values; + std::vector > old_old_velocity_values; + + std::vector > old_strain_rates; + std::vector > old_old_strain_rates; + + std::vector old_temperature_values; + std::vector old_old_temperature_values; + std::vector > old_temperature_grads; + std::vector > old_old_temperature_grads; + std::vector old_temperature_laplacians; + std::vector old_old_temperature_laplacians; }; template TemperatureRHS:: TemperatureRHS (const FiniteElement &temperature_fe, - const FiniteElement &stokes_fe, - const Mapping &mapping, - const Quadrature &quadrature) - : - temperature_fe_values (mapping, - temperature_fe, 
quadrature, - update_values | - update_gradients | - update_hessians | - update_quadrature_points | - update_JxW_values), - stokes_fe_values (mapping, - stokes_fe, quadrature, - update_values | update_gradients), - phi_T (temperature_fe.dofs_per_cell), - grad_phi_T (temperature_fe.dofs_per_cell), - - old_velocity_values (quadrature.size()), - old_old_velocity_values (quadrature.size()), - old_strain_rates (quadrature.size()), - old_old_strain_rates (quadrature.size()), - - old_temperature_values (quadrature.size()), - old_old_temperature_values(quadrature.size()), - old_temperature_grads(quadrature.size()), - old_old_temperature_grads(quadrature.size()), - old_temperature_laplacians(quadrature.size()), - old_old_temperature_laplacians(quadrature.size()) + const FiniteElement &stokes_fe, + const Mapping &mapping, + const Quadrature &quadrature) + : + temperature_fe_values (mapping, + temperature_fe, quadrature, + update_values | + update_gradients | + update_hessians | + update_quadrature_points | + update_JxW_values), + stokes_fe_values (mapping, + stokes_fe, quadrature, + update_values | update_gradients), + phi_T (temperature_fe.dofs_per_cell), + grad_phi_T (temperature_fe.dofs_per_cell), + + old_velocity_values (quadrature.size()), + old_old_velocity_values (quadrature.size()), + old_strain_rates (quadrature.size()), + old_old_strain_rates (quadrature.size()), + + old_temperature_values (quadrature.size()), + old_old_temperature_values(quadrature.size()), + old_temperature_grads(quadrature.size()), + old_old_temperature_grads(quadrature.size()), + old_temperature_laplacians(quadrature.size()), + old_old_temperature_laplacians(quadrature.size()) {} template TemperatureRHS:: TemperatureRHS (const TemperatureRHS &scratch) - : - temperature_fe_values (scratch.temperature_fe_values.get_mapping(), - scratch.temperature_fe_values.get_fe(), - scratch.temperature_fe_values.get_quadrature(), - scratch.temperature_fe_values.get_update_flags()), - stokes_fe_values (scratch.stokes_fe_values.get_mapping(), - scratch.stokes_fe_values.get_fe(), - scratch.stokes_fe_values.get_quadrature(), - scratch.stokes_fe_values.get_update_flags()), - phi_T (scratch.phi_T), - grad_phi_T (scratch.grad_phi_T), - - old_velocity_values (scratch.old_velocity_values), - old_old_velocity_values (scratch.old_old_velocity_values), - old_strain_rates (scratch.old_strain_rates), - old_old_strain_rates (scratch.old_old_strain_rates), - - old_temperature_values (scratch.old_temperature_values), - old_old_temperature_values (scratch.old_old_temperature_values), - old_temperature_grads (scratch.old_temperature_grads), - old_old_temperature_grads (scratch.old_old_temperature_grads), - old_temperature_laplacians (scratch.old_temperature_laplacians), - old_old_temperature_laplacians (scratch.old_old_temperature_laplacians) + : + temperature_fe_values (scratch.temperature_fe_values.get_mapping(), + scratch.temperature_fe_values.get_fe(), + scratch.temperature_fe_values.get_quadrature(), + scratch.temperature_fe_values.get_update_flags()), + stokes_fe_values (scratch.stokes_fe_values.get_mapping(), + scratch.stokes_fe_values.get_fe(), + scratch.stokes_fe_values.get_quadrature(), + scratch.stokes_fe_values.get_update_flags()), + phi_T (scratch.phi_T), + grad_phi_T (scratch.grad_phi_T), + + old_velocity_values (scratch.old_velocity_values), + old_old_velocity_values (scratch.old_old_velocity_values), + old_strain_rates (scratch.old_strain_rates), + old_old_strain_rates (scratch.old_old_strain_rates), + + old_temperature_values 
(scratch.old_temperature_values), + old_old_temperature_values (scratch.old_old_temperature_values), + old_temperature_grads (scratch.old_temperature_grads), + old_old_temperature_grads (scratch.old_old_temperature_grads), + old_temperature_laplacians (scratch.old_temperature_laplacians), + old_old_temperature_laplacians (scratch.old_old_temperature_laplacians) {} } - // The CopyData arrays are similar to the - // Scratch arrays. They provide a - // constructor, a copy operation, and - // some arrays for local matrix, local - // vectors and the relation between local - // and global degrees of freedom (a.k.a. - // local_dof_indices). + // The CopyData arrays are similar to the + // Scratch arrays. They provide a + // constructor, a copy operation, and + // some arrays for local matrix, local + // vectors and the relation between local + // and global degrees of freedom (a.k.a. + // local_dof_indices). namespace CopyData { template struct StokesPreconditioner { - StokesPreconditioner (const FiniteElement &stokes_fe); - StokesPreconditioner (const StokesPreconditioner &data); + StokesPreconditioner (const FiniteElement &stokes_fe); + StokesPreconditioner (const StokesPreconditioner &data); - FullMatrix local_matrix; - std::vector local_dof_indices; + FullMatrix local_matrix; + std::vector local_dof_indices; }; template StokesPreconditioner:: StokesPreconditioner (const FiniteElement &stokes_fe) - : - local_matrix (stokes_fe.dofs_per_cell, - stokes_fe.dofs_per_cell), - local_dof_indices (stokes_fe.dofs_per_cell) + : + local_matrix (stokes_fe.dofs_per_cell, + stokes_fe.dofs_per_cell), + local_dof_indices (stokes_fe.dofs_per_cell) {} @@ -693,9 +693,9 @@ namespace Assembly template StokesPreconditioner:: StokesPreconditioner (const StokesPreconditioner &data) - : - local_matrix (data.local_matrix), - local_dof_indices (data.local_dof_indices) + : + local_matrix (data.local_matrix), + local_dof_indices (data.local_dof_indices) {} @@ -703,28 +703,28 @@ namespace Assembly template struct StokesSystem : public StokesPreconditioner { - StokesSystem (const FiniteElement &stokes_fe); - StokesSystem (const StokesSystem &data); + StokesSystem (const FiniteElement &stokes_fe); + StokesSystem (const StokesSystem &data); - Vector local_rhs; + Vector local_rhs; }; template StokesSystem:: StokesSystem (const FiniteElement &stokes_fe) - : - StokesPreconditioner (stokes_fe), - local_rhs (stokes_fe.dofs_per_cell) + : + StokesPreconditioner (stokes_fe), + local_rhs (stokes_fe.dofs_per_cell) {} template StokesSystem:: StokesSystem (const StokesSystem &data) - : - StokesPreconditioner (data), - local_rhs (data.local_rhs) + : + StokesPreconditioner (data), + local_rhs (data.local_rhs) {} @@ -732,161 +732,161 @@ namespace Assembly template struct TemperatureMatrix { - TemperatureMatrix (const FiniteElement &temperature_fe); - TemperatureMatrix (const TemperatureMatrix &data); + TemperatureMatrix (const FiniteElement &temperature_fe); + TemperatureMatrix (const TemperatureMatrix &data); - FullMatrix local_mass_matrix; - FullMatrix local_stiffness_matrix; - std::vector local_dof_indices; + FullMatrix local_mass_matrix; + FullMatrix local_stiffness_matrix; + std::vector local_dof_indices; }; template TemperatureMatrix:: TemperatureMatrix (const FiniteElement &temperature_fe) - : - local_mass_matrix (temperature_fe.dofs_per_cell, - temperature_fe.dofs_per_cell), - local_stiffness_matrix (temperature_fe.dofs_per_cell, - temperature_fe.dofs_per_cell), - local_dof_indices (temperature_fe.dofs_per_cell) + : + 
local_mass_matrix (temperature_fe.dofs_per_cell, + temperature_fe.dofs_per_cell), + local_stiffness_matrix (temperature_fe.dofs_per_cell, + temperature_fe.dofs_per_cell), + local_dof_indices (temperature_fe.dofs_per_cell) {} template TemperatureMatrix:: TemperatureMatrix (const TemperatureMatrix &data) - : - local_mass_matrix (data.local_mass_matrix), - local_stiffness_matrix (data.local_stiffness_matrix), - local_dof_indices (data.local_dof_indices) + : + local_mass_matrix (data.local_mass_matrix), + local_stiffness_matrix (data.local_stiffness_matrix), + local_dof_indices (data.local_dof_indices) {} template struct TemperatureRHS { - TemperatureRHS (const FiniteElement &temperature_fe); - TemperatureRHS (const TemperatureRHS &data); + TemperatureRHS (const FiniteElement &temperature_fe); + TemperatureRHS (const TemperatureRHS &data); - Vector local_rhs; - std::vector local_dof_indices; - FullMatrix matrix_for_bc; + Vector local_rhs; + std::vector local_dof_indices; + FullMatrix matrix_for_bc; }; template TemperatureRHS:: TemperatureRHS (const FiniteElement &temperature_fe) - : - local_rhs (temperature_fe.dofs_per_cell), - local_dof_indices (temperature_fe.dofs_per_cell), - matrix_for_bc (temperature_fe.dofs_per_cell, - temperature_fe.dofs_per_cell) + : + local_rhs (temperature_fe.dofs_per_cell), + local_dof_indices (temperature_fe.dofs_per_cell), + matrix_for_bc (temperature_fe.dofs_per_cell, + temperature_fe.dofs_per_cell) {} template TemperatureRHS:: TemperatureRHS (const TemperatureRHS &data) - : - local_rhs (data.local_rhs), - local_dof_indices (data.local_dof_indices), - matrix_for_bc (data.matrix_for_bc) + : + local_rhs (data.local_rhs), + local_dof_indices (data.local_dof_indices), + matrix_for_bc (data.matrix_for_bc) {} } } - // @sect3{The BoussinesqFlowProblem class template} - // - // This is the declaration of the main - // class. It is very similar to - // step-31. Following the @ref - // MTWorkStream "task-based parallelization" - // paradigm, we split all the - // assembly routines into two parts: a - // first part that can do all the - // calculations on a certain cell without - // taking care of other threads, and a - // second part (which is writing the - // local data into the global matrices - // and vectors) which can be entered by - // only one thread at a time. In order to - // implement that, we provide functions - // for each of those two steps for all - // the four assembly routines that we use - // in this program. - // - // The pcout (for %parallel - // std::cout) object is used - // to simplify writing output: each MPI - // process can use this to generate output as - // usual, but since each of these processes - // will produce the same output it will just - // be replicated many times over; with the - // ConditionalOStream class, only the output - // generated by one task will actually be - // printed to screen, whereas the output by - // all the other threads will simply be - // forgotten. - // - // In a bit of naming confusion, you will - // notice below that some of the variables - // from namespace TrilinosWrappers are - // taken from namespace - // TrilinosWrappers::MPI (such as the right - // hand side vectors) whereas others are - // not (such as the various matrices). For - // the matrices, we happen to use the same - // class names for %parallel and sequential - // data structures, i.e. all matrices will - // actually be considered %parallel - // below. 
On the other hand, for vectors, - // only those from namespace - // TrilinosWrappers::MPI are actually - // distributed. In particular, we will - // frequently have to query velocities and - // temperatures at arbitrary quadrature - // points; consequently, rather than - // "localizing" a vector whenever we need a - // localized vector, we solve linear - // systems in %parallel but then immediately - // localize the solution for further - // processing. The various - // *_solution vectors are - // therefore filled immediately after - // solving their respective linear system - // in %parallel. - // - // The only other new data member is - // computing_timer. Its class - // type, TimerOutput, can be used to - // conveniently account for compute time - // spent in certain "sections" of the code - // that are repeatedly entered. For - // example, we will enter (and leave) - // sections for Stokes matrix assembly and - // would like to accumulate the run time - // spent in this section over all time - // steps. At the end of the program, the - // destructor of the TimerOutput class will - // automatically produce a nice summary of - // the times spent in all the sections. For - // this output, one can choose whether wall - // clock or CPU times are to be printed, as - // well as whether we want to produce - // output every time we leave a section -- - // which would be quite a lot of additional - // output -- or just in the end of the - // program (this choice is made in the - // from this variable in the results - // section of this tutorial program. +// @sect3{The BoussinesqFlowProblem class template} +// +// This is the declaration of the main +// class. It is very similar to +// step-31. Following the @ref +// MTWorkStream "task-based parallelization" +// paradigm, we split all the +// assembly routines into two parts: a +// first part that can do all the +// calculations on a certain cell without +// taking care of other threads, and a +// second part (which is writing the +// local data into the global matrices +// and vectors) which can be entered by +// only one thread at a time. In order to +// implement that, we provide functions +// for each of those two steps for all +// the four assembly routines that we use +// in this program. +// +// The pcout (for %parallel +// std::cout) object is used +// to simplify writing output: each MPI +// process can use this to generate output as +// usual, but since each of these processes +// will produce the same output it will just +// be replicated many times over; with the +// ConditionalOStream class, only the output +// generated by one task will actually be +// printed to screen, whereas the output by +// all the other threads will simply be +// forgotten. +// +// In a bit of naming confusion, you will +// notice below that some of the variables +// from namespace TrilinosWrappers are +// taken from namespace +// TrilinosWrappers::MPI (such as the right +// hand side vectors) whereas others are +// not (such as the various matrices). For +// the matrices, we happen to use the same +// class names for %parallel and sequential +// data structures, i.e. all matrices will +// actually be considered %parallel +// below. On the other hand, for vectors, +// only those from namespace +// TrilinosWrappers::MPI are actually +// distributed. 
In particular, we will +// frequently have to query velocities and +// temperatures at arbitrary quadrature +// points; consequently, rather than +// "localizing" a vector whenever we need a +// localized vector, we solve linear +// systems in %parallel but then immediately +// localize the solution for further +// processing. The various +// *_solution vectors are +// therefore filled immediately after +// solving their respective linear system +// in %parallel. +// +// The only other new data member is +// computing_timer. Its class +// type, TimerOutput, can be used to +// conveniently account for compute time +// spent in certain "sections" of the code +// that are repeatedly entered. For +// example, we will enter (and leave) +// sections for Stokes matrix assembly and +// would like to accumulate the run time +// spent in this section over all time +// steps. At the end of the program, the +// destructor of the TimerOutput class will +// automatically produce a nice summary of +// the times spent in all the sections. For +// this output, one can choose whether wall +// clock or CPU times are to be printed, as +// well as whether we want to produce +// output every time we leave a section -- +// which would be quite a lot of additional +// output -- or just in the end of the +// program (this choice is made in the +// from this variable in the results +// section of this tutorial program. template class BoussinesqFlowProblem { public: struct Parameters; - BoussinesqFlowProblem (Parameters & parameters); + BoussinesqFlowProblem (Parameters ¶meters); void run (); private: @@ -907,51 +907,51 @@ class BoussinesqFlowProblem double compute_viscosity(const std::vector &old_temperature, - const std::vector &old_old_temperature, - const std::vector > &old_temperature_grads, - const std::vector > &old_old_temperature_grads, - const std::vector &old_temperature_laplacians, - const std::vector &old_old_temperature_laplacians, - const std::vector > &old_velocity_values, - const std::vector > &old_old_velocity_values, - const std::vector > &old_strain_rates, - const std::vector > &old_old_strain_rates, - const double global_u_infty, - const double global_T_variation, - const double average_temperature, - const double global_entropy_variatiion, - const double cell_diameter) const; + const std::vector &old_old_temperature, + const std::vector > &old_temperature_grads, + const std::vector > &old_old_temperature_grads, + const std::vector &old_temperature_laplacians, + const std::vector &old_old_temperature_laplacians, + const std::vector > &old_velocity_values, + const std::vector > &old_old_velocity_values, + const std::vector > &old_strain_rates, + const std::vector > &old_old_strain_rates, + const double global_u_infty, + const double global_T_variation, + const double average_temperature, + const double global_entropy_variatiion, + const double cell_diameter) const; public: struct Parameters { - Parameters (const std::string & parameter_filename); + Parameters (const std::string ¶meter_filename); - static void declare_parameters (ParameterHandler &prm); - void parse_parameters (ParameterHandler &prm); + static void declare_parameters (ParameterHandler &prm); + void parse_parameters (ParameterHandler &prm); - double end_time; + double end_time; - unsigned int initial_global_refinement; - unsigned int initial_adaptive_refinement; + unsigned int initial_global_refinement; + unsigned int initial_adaptive_refinement; - bool generate_graphical_output; - unsigned int graphical_output_interval; + bool 
generate_graphical_output; + unsigned int graphical_output_interval; - unsigned int adaptive_refinement_interval; + unsigned int adaptive_refinement_interval; - double stabilization_alpha; - double stabilization_c_R; - double stabilization_beta; + double stabilization_alpha; + double stabilization_c_R; + double stabilization_beta; - unsigned int stokes_velocity_degree; - bool use_locally_conservative_discretization; + unsigned int stokes_velocity_degree; + bool use_locally_conservative_discretization; - unsigned int temperature_degree; + unsigned int temperature_degree; }; private: - Parameters & parameters; + Parameters ¶meters; ConditionalOStream pcout; parallel::distributed::Triangulation triangulation; @@ -1007,8 +1007,8 @@ class BoussinesqFlowProblem void local_assemble_stokes_preconditioner (const typename DoFHandler::active_cell_iterator &cell, - Assembly::Scratch::StokesPreconditioner &scratch, - Assembly::CopyData::StokesPreconditioner &data); + Assembly::Scratch::StokesPreconditioner &scratch, + Assembly::CopyData::StokesPreconditioner &data); void copy_local_to_global_stokes_preconditioner (const Assembly::CopyData::StokesPreconditioner &data); @@ -1016,8 +1016,8 @@ class BoussinesqFlowProblem void local_assemble_stokes_system (const typename DoFHandler::active_cell_iterator &cell, - Assembly::Scratch::StokesSystem &scratch, - Assembly::CopyData::StokesSystem &data); + Assembly::Scratch::StokesSystem &scratch, + Assembly::CopyData::StokesSystem &data); void copy_local_to_global_stokes_system (const Assembly::CopyData::StokesSystem &data); @@ -1025,8 +1025,8 @@ class BoussinesqFlowProblem void local_assemble_temperature_matrix (const typename DoFHandler::active_cell_iterator &cell, - Assembly::Scratch::TemperatureMatrix &scratch, - Assembly::CopyData::TemperatureMatrix &data); + Assembly::Scratch::TemperatureMatrix &scratch, + Assembly::CopyData::TemperatureMatrix &data); void copy_local_to_global_temperature_matrix (const Assembly::CopyData::TemperatureMatrix &data); @@ -1035,11 +1035,11 @@ class BoussinesqFlowProblem void local_assemble_temperature_rhs (const std::pair global_T_range, - const double global_max_velocity, - const double global_entropy_variation, - const typename DoFHandler::active_cell_iterator &cell, - Assembly::Scratch::TemperatureRHS &scratch, - Assembly::CopyData::TemperatureRHS &data); + const double global_max_velocity, + const double global_entropy_variation, + const typename DoFHandler::active_cell_iterator &cell, + Assembly::Scratch::TemperatureRHS &scratch, + Assembly::CopyData::TemperatureRHS &data); void copy_local_to_global_temperature_rhs (const Assembly::CopyData::TemperatureRHS &data); @@ -1048,50 +1048,50 @@ class BoussinesqFlowProblem }; - // @sect3{BoussinesqFlowProblem class implementation} +// @sect3{BoussinesqFlowProblem class implementation} - // @sect4{BoussinesqFlowProblem::Parameters} +// @sect4{BoussinesqFlowProblem::Parameters} template -BoussinesqFlowProblem::Parameters::Parameters (const std::string & parameter_filename) - : - end_time (1e8), - initial_global_refinement (2), - initial_adaptive_refinement (2), - adaptive_refinement_interval (10), - stabilization_alpha (2), - stabilization_c_R (0.11), - stabilization_beta (0.078), - stokes_velocity_degree (2), - use_locally_conservative_discretization (true), - temperature_degree (2) +BoussinesqFlowProblem::Parameters::Parameters (const std::string ¶meter_filename) + : + end_time (1e8), + initial_global_refinement (2), + initial_adaptive_refinement (2), + adaptive_refinement_interval 
(10), + stabilization_alpha (2), + stabilization_c_R (0.11), + stabilization_beta (0.078), + stokes_velocity_degree (2), + use_locally_conservative_discretization (true), + temperature_degree (2) { - ParameterHandler prm; - BoussinesqFlowProblem::Parameters::declare_parameters (prm); + ParameterHandler prm; + BoussinesqFlowProblem::Parameters::declare_parameters (prm); - std::ifstream parameter_file (parameter_filename.c_str()); + std::ifstream parameter_file (parameter_filename.c_str()); - if (!parameter_file) - { - parameter_file.close (); + if (!parameter_file) + { + parameter_file.close (); - std::ostringstream message; - message << "Input parameter file <" - << parameter_filename << "> not found. Creating a" - << std::endl - << "template file of the same name." - << std::endl; + std::ostringstream message; + message << "Input parameter file <" + << parameter_filename << "> not found. Creating a" + << std::endl + << "template file of the same name." + << std::endl; - std::ofstream parameter_out (parameter_filename.c_str()); - prm.print_parameters (parameter_out, - ParameterHandler::Text); + std::ofstream parameter_out (parameter_filename.c_str()); + prm.print_parameters (parameter_out, + ParameterHandler::Text); - AssertThrow (false, ExcMessage (message.str().c_str())); - } + AssertThrow (false, ExcMessage (message.str().c_str())); + } - const bool success = prm.read_input (parameter_file); - AssertThrow (success, ExcMessage ("Invalid input parameter file.")); + const bool success = prm.read_input (parameter_file); + AssertThrow (success, ExcMessage ("Invalid input parameter file.")); - parse_parameters (prm); + parse_parameters (prm); } @@ -1101,64 +1101,64 @@ BoussinesqFlowProblem::Parameters:: declare_parameters (ParameterHandler &prm) { prm.declare_entry ("End time", "1e8", - Patterns::Double (0), - "The end time of the simulation in years."); + Patterns::Double (0), + "The end time of the simulation in years."); prm.declare_entry ("Initial global refinement", "2", - Patterns::Integer (0), - "The number of global refinement steps performed on " - "the initial coarse mesh, before the problem is first " - "solved there."); + Patterns::Integer (0), + "The number of global refinement steps performed on " + "the initial coarse mesh, before the problem is first " + "solved there."); prm.declare_entry ("Initial adaptive refinement", "2", - Patterns::Integer (0), - "The number of adaptive refinement steps performed after " - "initial global refinement."); + Patterns::Integer (0), + "The number of adaptive refinement steps performed after " + "initial global refinement."); prm.declare_entry ("Time steps between mesh refinement", "10", - Patterns::Integer (1), - "The number of time steps after which the mesh is to be " - "adapted based on computed error indicators."); + Patterns::Integer (1), + "The number of time steps after which the mesh is to be " + "adapted based on computed error indicators."); prm.declare_entry ("Generate graphical output", "false", - Patterns::Bool (), - "Whether graphical output is to be generated or not. " - "You may not want to get graphical output if the number " - "of processors is large."); + Patterns::Bool (), + "Whether graphical output is to be generated or not. 
" + "You may not want to get graphical output if the number " + "of processors is large."); prm.declare_entry ("Time steps between graphical output", "50", - Patterns::Integer (1), - "The number of time steps between each generation of " - "graphical output files."); + Patterns::Integer (1), + "The number of time steps between each generation of " + "graphical output files."); prm.enter_subsection ("Stabilization parameters"); { prm.declare_entry ("alpha", "2", - Patterns::Double (1, 2), - "The exponent in the entropy viscosity stabilization."); + Patterns::Double (1, 2), + "The exponent in the entropy viscosity stabilization."); prm.declare_entry ("c_R", "0.11", - Patterns::Double (0), - "The c_R factor in the entropy viscosity " - "stabilization."); + Patterns::Double (0), + "The c_R factor in the entropy viscosity " + "stabilization."); prm.declare_entry ("beta", "0.078", - Patterns::Double (0), - "The beta factor in the artificial viscosity " - "stabilization. An appropriate value for 2d is 0.052 " - "and 0.078 for 3d."); + Patterns::Double (0), + "The beta factor in the artificial viscosity " + "stabilization. An appropriate value for 2d is 0.052 " + "and 0.078 for 3d."); } prm.leave_subsection (); prm.enter_subsection ("Discretization"); { prm.declare_entry ("Stokes velocity polynomial degree", "2", - Patterns::Integer (1), - "The polynomial degree to use for the velocity variables " - "in the Stokes system."); + Patterns::Integer (1), + "The polynomial degree to use for the velocity variables " + "in the Stokes system."); prm.declare_entry ("Temperature polynomial degree", "2", - Patterns::Integer (1), - "The polynomial degree to use for the temperature variable."); + Patterns::Integer (1), + "The polynomial degree to use for the temperature variable."); prm.declare_entry ("Use locally conservative discretization", "true", - Patterns::Bool (), - "Whether to use a Stokes discretization that is locally " - "conservative at the expense of a larger number of degrees " - "of freedom, or to go with a cheaper discretization " - "that does not locally conserve mass (although it is " - "globally conservative."); + Patterns::Bool (), + "Whether to use a Stokes discretization that is locally " + "conservative at the expense of a larger number of degrees " + "of freedom, or to go with a cheaper discretization " + "that does not locally conserve mass (although it is " + "globally conservative."); } prm.leave_subsection (); } @@ -1200,148 +1200,148 @@ parse_parameters (ParameterHandler &prm) - // @sect4{BoussinesqFlowProblem::BoussinesqFlowProblem} - // - // The constructor of the problem is very - // similar to the constructor in - // step-31. What is different is the - // %parallel communication: Trilinos uses a - // message passing interface (MPI) for data - // distribution. When entering the - // BoussinesqFlowProblem class, we have to - // decide how the parallization is to be - // done. We choose a rather simple strategy - // and let all processors that are running - // the program work together, specified by - // the communicator - // comm_world(). Next, we - // create some modified output stream as we - // already did in step-18. In MPI, all the - // processors run the same program - // individually (they simply operate on - // different chunks of data and exchange - // some part of that data from time to - // time). Next, we need to initialize the - // pcout object in order to - // print the user information only on one - // processor. 
The implementation of this - // idea is to check the process number when - // pcout gets a true argument, - // and it uses the std::cout - // stream for output. If we are one - // processor five, for instance, then we - // will give a false argument - // to pcout, which means that - // the output of that processor will not be - // printed anywhere. - // - // Finally, we enter the preferred options - // for the TimerOutput object to its - // constructor. We restrict the output to - // the pcout stream (processor - // 0), and then we specify that we want to - // get a summary table in the end of the - // program which shows us wallclock times - // (as opposed to CPU times). +// @sect4{BoussinesqFlowProblem::BoussinesqFlowProblem} +// +// The constructor of the problem is very +// similar to the constructor in +// step-31. What is different is the +// %parallel communication: Trilinos uses a +// message passing interface (MPI) for data +// distribution. When entering the +// BoussinesqFlowProblem class, we have to +// decide how the parallization is to be +// done. We choose a rather simple strategy +// and let all processors that are running +// the program work together, specified by +// the communicator +// comm_world(). Next, we +// create some modified output stream as we +// already did in step-18. In MPI, all the +// processors run the same program +// individually (they simply operate on +// different chunks of data and exchange +// some part of that data from time to +// time). Next, we need to initialize the +// pcout object in order to +// print the user information only on one +// processor. The implementation of this +// idea is to check the process number when +// pcout gets a true argument, +// and it uses the std::cout +// stream for output. If we are one +// processor five, for instance, then we +// will give a false argument +// to pcout, which means that +// the output of that processor will not be +// printed anywhere. +// +// Finally, we enter the preferred options +// for the TimerOutput object to its +// constructor. We restrict the output to +// the pcout stream (processor +// 0), and then we specify that we want to +// get a summary table in the end of the +// program which shows us wallclock times +// (as opposed to CPU times). template -BoussinesqFlowProblem::BoussinesqFlowProblem (Parameters & parameters_) - : - parameters (parameters_), - pcout (std::cout, - (Utilities::System:: - get_this_mpi_process(MPI_COMM_WORLD) - == 0)), - - triangulation (MPI_COMM_WORLD, - typename Triangulation::MeshSmoothing - (Triangulation::smoothing_on_refinement | - Triangulation::smoothing_on_coarsening)), - - mapping (4), - - stokes_fe (FE_Q(parameters.stokes_velocity_degree), - dim, - (parameters.use_locally_conservative_discretization - ? 
- static_cast &> - (FE_DGP(parameters.stokes_velocity_degree-1)) - : - static_cast &> - (FE_Q(parameters.stokes_velocity_degree-1))), - 1), - - stokes_dof_handler (triangulation), - - temperature_fe (parameters.temperature_degree), - temperature_dof_handler (triangulation), - - time_step (0), - old_time_step (0), - timestep_number (0), - rebuild_stokes_matrix (true), - rebuild_stokes_preconditioner (true), - rebuild_temperature_matrices (true), - rebuild_temperature_preconditioner (true), - - computing_timer (pcout, TimerOutput::summary, - TimerOutput::wall_times) +BoussinesqFlowProblem::BoussinesqFlowProblem (Parameters ¶meters_) + : + parameters (parameters_), + pcout (std::cout, + (Utilities::System:: + get_this_mpi_process(MPI_COMM_WORLD) + == 0)), + + triangulation (MPI_COMM_WORLD, + typename Triangulation::MeshSmoothing + (Triangulation::smoothing_on_refinement | + Triangulation::smoothing_on_coarsening)), + + mapping (4), + + stokes_fe (FE_Q(parameters.stokes_velocity_degree), + dim, + (parameters.use_locally_conservative_discretization + ? + static_cast &> + (FE_DGP(parameters.stokes_velocity_degree-1)) + : + static_cast &> + (FE_Q(parameters.stokes_velocity_degree-1))), + 1), + + stokes_dof_handler (triangulation), + + temperature_fe (parameters.temperature_degree), + temperature_dof_handler (triangulation), + + time_step (0), + old_time_step (0), + timestep_number (0), + rebuild_stokes_matrix (true), + rebuild_stokes_preconditioner (true), + rebuild_temperature_matrices (true), + rebuild_temperature_preconditioner (true), + + computing_timer (pcout, TimerOutput::summary, + TimerOutput::wall_times) {} - // @sect4{The BoussinesqFlowProblem helper functions} - // - // Except two small details, this - // function is the very same as in - // step-31. The first detail is - // actually common to all functions - // that implement loop over all cells - // in the triangulation: When - // operating in %parallel, each - // processor only works on a chunk of - // cells. This chunk of cells is - // identified via a so-called - // subdomain_id, as we - // also did in step-18. All we need - // to change is hence to perform the - // cell-related operations only on - // the process with the correct - // ID. The second difference is the - // way we calculate the maximum - // value. Before, we could simply - // have a double - // variable that we checked against - // on each quadrature point for each - // cell. Now, we have to be a bit - // more careful since each processor - // only operates on a subset of - // cells. What we do is to first let - // each processor calculate the - // maximum among its cells, and then - // do a global communication - // operation called - // MaxAll that searches - // for the maximum value among all - // the maximum values of the - // individual processors. MPI - // provides such a call, but it's - // even simpler to use the respective - // function of the MPI - // communicator object since that - // will do the right thing even if we - // work without MPI and on a single - // machine only. The call to - // MaxAll needs three - // arguments, namely the local - // maximum (input), a field for the - // global maximum (output), and an - // integer value one that says that - // we only work on one double. +// @sect4{The BoussinesqFlowProblem helper functions} +// +// Except two small details, this +// function is the very same as in +// step-31. 
The first detail is +// actually common to all functions +// that implement loop over all cells +// in the triangulation: When +// operating in %parallel, each +// processor only works on a chunk of +// cells. This chunk of cells is +// identified via a so-called +// subdomain_id, as we +// also did in step-18. All we need +// to change is hence to perform the +// cell-related operations only on +// the process with the correct +// ID. The second difference is the +// way we calculate the maximum +// value. Before, we could simply +// have a double +// variable that we checked against +// on each quadrature point for each +// cell. Now, we have to be a bit +// more careful since each processor +// only operates on a subset of +// cells. What we do is to first let +// each processor calculate the +// maximum among its cells, and then +// do a global communication +// operation called +// MaxAll that searches +// for the maximum value among all +// the maximum values of the +// individual processors. MPI +// provides such a call, but it's +// even simpler to use the respective +// function of the MPI +// communicator object since that +// will do the right thing even if we +// work without MPI and on a single +// machine only. The call to +// MaxAll needs three +// arguments, namely the local +// maximum (input), a field for the +// global maximum (output), and an +// integer value one that says that +// we only work on one double. template double BoussinesqFlowProblem::get_maximal_velocity () const { const QIterated quadrature_formula (QTrapez<1>(), - parameters.stokes_velocity_degree); + parameters.stokes_velocity_degree); const unsigned int n_q_points = quadrature_formula.size(); FEValues fe_values (mapping, stokes_fe, quadrature_formula, update_values); @@ -1352,25 +1352,25 @@ double BoussinesqFlowProblem::get_maximal_velocity () const double max_local_velocity = 0; typename DoFHandler::active_cell_iterator - cell = stokes_dof_handler.begin_active(), - endc = stokes_dof_handler.end(); + cell = stokes_dof_handler.begin_active(), + endc = stokes_dof_handler.end(); for (; cell!=endc; ++cell) if (cell->subdomain_id() == - Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)) + Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)) { - fe_values.reinit (cell); - fe_values[velocities].get_function_values (stokes_solution, - velocity_values); + fe_values.reinit (cell); + fe_values[velocities].get_function_values (stokes_solution, + velocity_values); - for (unsigned int q=0; q::get_maximal_velocity () const - // Similar function to before, but we now - // compute the cfl number, i.e., maximal - // velocity on a cell divided by the cell - // diameter +// Similar function to before, but we now +// compute the cfl number, i.e., maximal +// velocity on a cell divided by the cell +// diameter template double BoussinesqFlowProblem::get_cfl_number () const { const QIterated quadrature_formula (QTrapez<1>(), - parameters.stokes_velocity_degree); + parameters.stokes_velocity_degree); const unsigned int n_q_points = quadrature_formula.size(); FEValues fe_values (mapping, stokes_fe, quadrature_formula, update_values); @@ -1399,28 +1399,28 @@ double BoussinesqFlowProblem::get_cfl_number () const double max_local_cfl = 0; typename DoFHandler::active_cell_iterator - cell = stokes_dof_handler.begin_active(), - endc = stokes_dof_handler.end(); + cell = stokes_dof_handler.begin_active(), + endc = stokes_dof_handler.end(); for (; cell!=endc; ++cell) if (cell->subdomain_id() == - 
Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)) + Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)) { - fe_values.reinit (cell); - fe_values[velocities].get_function_values (stokes_solution, - velocity_values); - - double max_local_velocity = 1e-10; - for (unsigned int q=0; qdiameter()); + fe_values.reinit (cell); + fe_values[velocities].get_function_values (stokes_solution, + velocity_values); + + double max_local_velocity = 1e-10; + for (unsigned int q=0; qdiameter()); } double max_cfl_number = 0.; #ifdef DEAL_II_COMPILER_SUPPORTS_MPI MPI_Allreduce (&max_local_cfl, &max_cfl_number, 1, MPI_DOUBLE, - MPI_MAX, MPI_COMM_WORLD); + MPI_MAX, MPI_COMM_WORLD); #else max_cfl_number = max_local_cfl; #endif @@ -1434,13 +1434,13 @@ template double BoussinesqFlowProblem::get_entropy_variation (const double average_temperature) const { - // only do this if we really need entropy - // variation + // only do this if we really need entropy + // variation if (parameters.stabilization_alpha != 2) return 1.; - // record maximal entropy on Gauss quadrature - // points + // record maximal entropy on Gauss quadrature + // points const QGauss quadrature_formula (parameters.temperature_degree+1); const unsigned int n_q_points = quadrature_formula.size(); @@ -1450,42 +1450,42 @@ BoussinesqFlowProblem::get_entropy_variation (const double average_temperat std::vector old_old_temperature_values(n_q_points); double min_entropy = std::numeric_limits::max(), - max_entropy = -std::numeric_limits::max(), - area = 0, - entropy_integrated = 0; + max_entropy = -std::numeric_limits::max(), + area = 0, + entropy_integrated = 0; typename DoFHandler::active_cell_iterator - cell = temperature_dof_handler.begin_active(), - endc = temperature_dof_handler.end(); + cell = temperature_dof_handler.begin_active(), + endc = temperature_dof_handler.end(); for (; cell!=endc; ++cell) if (cell->subdomain_id() == - Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)) + Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)) { - fe_values.reinit (cell); - fe_values.get_function_values (old_temperature_solution, - old_temperature_values); - fe_values.get_function_values (old_old_temperature_solution, - old_old_temperature_values); - for (unsigned int q=0; q::get_entropy_variation (const double average_temperat local_for_max[1] = max_entropy; #ifdef DEAL_II_COMPILER_SUPPORTS_MPI MPI_Allreduce (&local_for_sum[0], &global_for_sum[0], 2, MPI_DOUBLE, - MPI_SUM, MPI_COMM_WORLD); + MPI_SUM, MPI_COMM_WORLD); MPI_Allreduce (&local_for_max[0], &global_for_max[0], 2, MPI_DOUBLE, - MPI_MAX, MPI_COMM_WORLD); + MPI_MAX, MPI_COMM_WORLD); #else global_for_sum[0] = local_for_sum[0]; global_for_sum[1] = local_for_sum[1]; @@ -1505,27 +1505,27 @@ BoussinesqFlowProblem::get_entropy_variation (const double average_temperat #endif const double average_entropy = global_for_sum[0] / global_for_sum[1]; const double entropy_diff = std::max(global_for_max[1] - average_entropy, - average_entropy - (-global_for_max[0])); + average_entropy - (-global_for_max[0])); return entropy_diff; } - // Again, this is only a slightly - // modified version of the respective - // function in step-31. What is new is - // that each processor works on its - // partition of cells, and gets a minimum - // and maximum temperature on that - // partition. Two global communication - // steps synchronize the data among the - // processors. +// Again, this is only a slightly +// modified version of the respective +// function in step-31. 
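The MaxAll-style reduction used by get_maximal_velocity() and get_cfl_number() above can be condensed into a small helper. A minimal sketch, assuming MPI has already been initialized; the name global_maximum is not part of step-32:

#include <deal.II/base/config.h>   // provides DEAL_II_COMPILER_SUPPORTS_MPI
#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
#  include <mpi.h>
#endif

// Each process passes in the maximum over the cells it owns; the return
// value is the maximum over all processes and is available on every one of
// them. In a build without MPI the local maximum already is the global one.
double global_maximum (double local_max)
{
  double global_max = local_max;
#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
  MPI_Allreduce (&local_max, &global_max, 1, MPI_DOUBLE,
                 MPI_MAX, MPI_COMM_WORLD);
#endif
  return global_max;
}

get_cfl_number() above does exactly this, with max_local_cfl as the input.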
What is new is +// that each processor works on its +// partition of cells, and gets a minimum +// and maximum temperature on that +// partition. Two global communication +// steps synchronize the data among the +// processors. template std::pair BoussinesqFlowProblem::get_extrapolated_temperature_range () const { const QIterated quadrature_formula (QTrapez<1>(), - parameters.temperature_degree); + parameters.temperature_degree); const unsigned int n_q_points = quadrature_formula.size(); FEValues fe_values (mapping, temperature_fe, quadrature_formula, @@ -1533,74 +1533,74 @@ BoussinesqFlowProblem::get_extrapolated_temperature_range () const std::vector old_temperature_values(n_q_points); std::vector old_old_temperature_values(n_q_points); - // This presets the minimum with a bigger - // and the maximum with a smaller number - // than one that is going to appear. Will - // be overwritten in the cell loop or in - // the communication step at the - // latest. + // This presets the minimum with a bigger + // and the maximum with a smaller number + // than one that is going to appear. Will + // be overwritten in the cell loop or in + // the communication step at the + // latest. double min_local_temperature = std::numeric_limits::max(), - max_local_temperature = -std::numeric_limits::max(); + max_local_temperature = -std::numeric_limits::max(); if (timestep_number != 0) { typename DoFHandler::active_cell_iterator - cell = temperature_dof_handler.begin_active(), - endc = temperature_dof_handler.end(); + cell = temperature_dof_handler.begin_active(), + endc = temperature_dof_handler.end(); for (; cell!=endc; ++cell) - if (cell->subdomain_id() == - Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)) - { - fe_values.reinit (cell); - fe_values.get_function_values (old_temperature_solution, - old_temperature_values); - fe_values.get_function_values (old_old_temperature_solution, - old_old_temperature_values); - - for (unsigned int q=0; qsubdomain_id() == + Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)) + { + fe_values.reinit (cell); + fe_values.get_function_values (old_temperature_solution, + old_temperature_values); + fe_values.get_function_values (old_old_temperature_solution, + old_old_temperature_values); + + for (unsigned int q=0; q::active_cell_iterator - cell = temperature_dof_handler.begin_active(), - endc = temperature_dof_handler.end(); + cell = temperature_dof_handler.begin_active(), + endc = temperature_dof_handler.end(); for (; cell!=endc; ++cell) - if (cell->subdomain_id() == - Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)) - { - fe_values.reinit (cell); - fe_values.get_function_values (old_temperature_solution, - old_temperature_values); - - for (unsigned int q=0; qsubdomain_id() == + Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)) + { + fe_values.reinit (cell); + fe_values.get_function_values (old_temperature_solution, + old_temperature_values); + + for (unsigned int q=0; q::get_extrapolated_temperature_range () const - // The function that calculates the - // viscosity is purely local, so this is - // the same code as in step-31. +// The function that calculates the +// viscosity is purely local, so this is +// the same code as in step-31. 
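Before turning to compute_viscosity(): the sign-flip trick that get_entropy_variation() above uses, so that a single MPI_MAX reduction delivers both a global minimum and a global maximum, can be isolated as follows. A sketch only; the function name is made up:

#include <deal.II/base/config.h>
#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
#  include <mpi.h>
#endif
#include <utility>

// Sending -local_min through an MPI_MAX reduction yields -(global minimum),
// so one Allreduce on two doubles gives both extrema at once.
std::pair<double,double>
global_min_and_max (const double local_min,
                    const double local_max)
{
  double local_data[2]  = { -local_min, local_max };
  double global_data[2] = { local_data[0], local_data[1] };
#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
  MPI_Allreduce (&local_data[0], &global_data[0], 2, MPI_DOUBLE,
                 MPI_MAX, MPI_COMM_WORLD);
#endif
  return std::make_pair (-global_data[0], global_data[1]);
}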
template double BoussinesqFlowProblem:: compute_viscosity (const std::vector &old_temperature, - const std::vector &old_old_temperature, - const std::vector > &old_temperature_grads, - const std::vector > &old_old_temperature_grads, - const std::vector &old_temperature_laplacians, - const std::vector &old_old_temperature_laplacians, - const std::vector > &old_velocity_values, - const std::vector > &old_old_velocity_values, - const std::vector > &old_strain_rates, - const std::vector > &old_old_strain_rates, - const double global_u_infty, - const double global_T_variation, - const double average_temperature, - const double global_entropy_variation, - const double cell_diameter) const + const std::vector &old_old_temperature, + const std::vector > &old_temperature_grads, + const std::vector > &old_old_temperature_grads, + const std::vector &old_temperature_laplacians, + const std::vector &old_old_temperature_laplacians, + const std::vector > &old_velocity_values, + const std::vector > &old_old_velocity_values, + const std::vector > &old_strain_rates, + const std::vector > &old_old_strain_rates, + const double global_u_infty, + const double global_T_variation, + const double average_temperature, + const double global_entropy_variation, + const double cell_diameter) const { if (global_u_infty == 0) return 5e-3 * cell_diameter; @@ -1644,37 +1644,37 @@ compute_viscosity (const std::vector &old_temperature, for (unsigned int q=0; q < n_q_points; ++q) { const Tensor<1,dim> u = (old_velocity_values[q] + - old_old_velocity_values[q]) / 2; + old_old_velocity_values[q]) / 2; const SymmetricTensor<2,dim> strain_rate = (old_strain_rates[q] + - old_old_strain_rates[q]) / 2; + old_old_strain_rates[q]) / 2; const double T = (old_temperature[q] + old_old_temperature[q]) / 2; const double dT_dt = (old_temperature[q] - old_old_temperature[q]) - / old_time_step; + / old_time_step; const double u_grad_T = u * (old_temperature_grads[q] + - old_old_temperature_grads[q]) / 2; + old_old_temperature_grads[q]) / 2; const double kappa_Delta_T = EquationData::kappa - * (old_temperature_laplacians[q] + - old_old_temperature_laplacians[q]) / 2; + * (old_temperature_laplacians[q] + + old_old_temperature_laplacians[q]) / 2; const double gamma - = ((EquationData::radiogenic_heating * EquationData::density(T) - + - 2 * EquationData::eta * strain_rate * strain_rate) / - (EquationData::density(T) * EquationData::specific_heat)); + = ((EquationData::radiogenic_heating * EquationData::density(T) + + + 2 * EquationData::eta * strain_rate * strain_rate) / + (EquationData::density(T) * EquationData::specific_heat)); double residual - = std::abs(dT_dt + u_grad_T - kappa_Delta_T - gamma); + = std::abs(dT_dt + u_grad_T - kappa_Delta_T - gamma); if (parameters.stabilization_alpha == 2) - residual *= std::abs(T - average_temperature); + residual *= std::abs(T - average_temperature); max_residual = std::max (residual, max_residual); max_velocity = std::max (std::sqrt (u*u), max_velocity); } const double max_viscosity = (parameters.stabilization_beta * - max_velocity * cell_diameter); + max_velocity * cell_diameter); if (timestep_number == 0) return max_viscosity; else @@ -1683,15 +1683,15 @@ compute_viscosity (const std::vector &old_temperature, double entropy_viscosity; if (parameters.stabilization_alpha == 2) - entropy_viscosity = (parameters.stabilization_c_R * - cell_diameter * cell_diameter * - max_residual / - global_entropy_variation); + entropy_viscosity = (parameters.stabilization_c_R * + cell_diameter * cell_diameter * + 
max_residual / + global_entropy_variation); else - entropy_viscosity = (parameters.stabilization_c_R * - cell_diameter * global_Omega_diameter * - max_velocity * max_residual / - (global_u_infty * global_T_variation)); + entropy_viscosity = (parameters.stabilization_c_R * + cell_diameter * global_Omega_diameter * + max_velocity * max_residual / + (global_u_infty * global_T_variation)); return std::min (max_viscosity, entropy_viscosity); } @@ -1699,90 +1699,90 @@ compute_viscosity (const std::vector &old_temperature, - // This function is new compared to - // step-31. What is does is to re-implement - // the library function - // VectorTools::project() for - // an MPI-based parallelization, a function - // we used for generating an initial vector - // for temperature based on some initial - // function. The library function only - // works with shared memory but doesn't - // know how to utilize multiple machines - // coupled through MPI to compute the - // projected solution. If run with - // more than one MPI process, this would - // mean that each processor projects the - // whole field, which is clearly not very - // efficient. The details of a - // project() function are not - // very difficult. All we do is to use a - // mass matrix and put the evaluation of - // the initial value function on the right - // hand side. The mass matrix for - // temperature we can simply generate using - // the respective assembly function, so all - // we need to do here is to create the - // right hand side and do a CG solve. The - // assembly function does a loop over all - // cells and evaluates the function in the - // EquationData namespace, and - // does this only on cells pertaining to - // the respective processor. The - // implementation of this assembly differs - // from the assembly we do for the - // principal assembly functions further - // down (which include thread-based - // parallelization with the WorkStream - // concept). Here we chose to keep things - // simple (keeping in mind that this function - // is also only called at the beginning of - // the program, not every time step), and - // generating that right hand - // side is cheap anyway so we won't even - // notice that this part is not parallized - // by threads. - // - // Regarding the implementation of - // inhomogeneous Dirichlet boundary - // conditions: Since we use the temperature - // ConstraintMatrix, we can apply the - // boundary conditions directly when - // building the respective matrix and right - // hand side. In this case, the boundary - // conditions are inhomogeneous, which - // makes this procedure somewhat - // tricky. Remember that we get the matrix - // from some other function. However, the - // correct imposition of boundary - // conditions needs the matrix data we work - // on plus the right hand side - // simultaneously, since the right hand - // side is created by Gaussian elimination - // on the matrix rows. In order to not - // introduce the matrix assembly at this - // place, but still having the matrix data - // available, we choose to create a dummy - // matrix matrix_for_bc that - // we only fill with data when we need it - // for imposing boundary conditions. These - // positions are exactly those where we - // have an inhomogeneous entry in the - // ConstraintMatrix. There are only a few - // such positions (on the boundary dofs), - // so it is still much cheaper to use this - // function than to create the full matrix - // here. 
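In formulas, what compute_viscosity() above returns for the default case stabilization_alpha == 2, after the first time step and for a nonzero global velocity, is

\[
  \nu_K \;=\; \min\!\left(\beta \, h_K \max_q \|\mathbf{u}_q\|,\;
              \frac{c_R \, h_K^2 \, \max_q \big(|R_q|\,|T_q - T_{\mathrm{avg}}|\big)}
                   {\mathrm{var}(E)}\right),
  \qquad
  R_q \;=\; \frac{T_q^{\,n-1} - T_q^{\,n-2}}{\Delta t_{\mathrm{old}}}
            + \mathbf{u}_q\cdot\nabla T_q
            - \kappa\,\Delta T_q - \gamma_q ,
\]

where h_K is the cell diameter, beta and c_R are the parameters stabilization_beta and stabilization_c_R, var(E) is the global entropy variation returned by get_entropy_variation(), gamma is the heating term, and all quadrature point quantities are averages of the two previous time steps. On the very first time step only the first argument of the min is returned; for a vanishing global velocity the function falls back to 5e-3 * h_K; and for stabilization_alpha != 2 the second argument uses the global velocity and temperature variation instead, as in the else branch above.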
To implement this, we ask the - // constraint matrix whether the dof under - // consideration is inhomogeneously - // constraint. In that case, we generate - // the respective matrix column that we - // need for creating the correct right hand - // side. Note that this (manually - // generated) matrix entry needs to be - // exactly the entry that we would fill the - // matrix with — otherwise, this will - // not work. +// This function is new compared to +// step-31. What is does is to re-implement +// the library function +// VectorTools::project() for +// an MPI-based parallelization, a function +// we used for generating an initial vector +// for temperature based on some initial +// function. The library function only +// works with shared memory but doesn't +// know how to utilize multiple machines +// coupled through MPI to compute the +// projected solution. If run with +// more than one MPI process, this would +// mean that each processor projects the +// whole field, which is clearly not very +// efficient. The details of a +// project() function are not +// very difficult. All we do is to use a +// mass matrix and put the evaluation of +// the initial value function on the right +// hand side. The mass matrix for +// temperature we can simply generate using +// the respective assembly function, so all +// we need to do here is to create the +// right hand side and do a CG solve. The +// assembly function does a loop over all +// cells and evaluates the function in the +// EquationData namespace, and +// does this only on cells pertaining to +// the respective processor. The +// implementation of this assembly differs +// from the assembly we do for the +// principal assembly functions further +// down (which include thread-based +// parallelization with the WorkStream +// concept). Here we chose to keep things +// simple (keeping in mind that this function +// is also only called at the beginning of +// the program, not every time step), and +// generating that right hand +// side is cheap anyway so we won't even +// notice that this part is not parallized +// by threads. +// +// Regarding the implementation of +// inhomogeneous Dirichlet boundary +// conditions: Since we use the temperature +// ConstraintMatrix, we can apply the +// boundary conditions directly when +// building the respective matrix and right +// hand side. In this case, the boundary +// conditions are inhomogeneous, which +// makes this procedure somewhat +// tricky. Remember that we get the matrix +// from some other function. However, the +// correct imposition of boundary +// conditions needs the matrix data we work +// on plus the right hand side +// simultaneously, since the right hand +// side is created by Gaussian elimination +// on the matrix rows. In order to not +// introduce the matrix assembly at this +// place, but still having the matrix data +// available, we choose to create a dummy +// matrix matrix_for_bc that +// we only fill with data when we need it +// for imposing boundary conditions. These +// positions are exactly those where we +// have an inhomogeneous entry in the +// ConstraintMatrix. There are only a few +// such positions (on the boundary dofs), +// so it is still much cheaper to use this +// function than to create the full matrix +// here. To implement this, we ask the +// constraint matrix whether the dof under +// consideration is inhomogeneously +// constraint. 
In that case, we generate +// the respective matrix column that we +// need for creating the correct right hand +// side. Note that this (manually +// generated) matrix entry needs to be +// exactly the entry that we would fill the +// matrix with — otherwise, this will +// not work. template void BoussinesqFlowProblem::project_temperature_field () { @@ -1790,58 +1790,58 @@ void BoussinesqFlowProblem::project_temperature_field () QGauss quadrature(parameters.temperature_degree+2); UpdateFlags update_flags = UpdateFlags(update_values | - update_quadrature_points | - update_JxW_values); + update_quadrature_points | + update_JxW_values); FEValues fe_values (mapping, temperature_fe, quadrature, update_flags); const unsigned int dofs_per_cell = fe_values.dofs_per_cell, - n_q_points = fe_values.n_quadrature_points; + n_q_points = fe_values.n_quadrature_points; std::vector local_dof_indices (dofs_per_cell); Vector cell_vector (dofs_per_cell); FullMatrix matrix_for_bc (dofs_per_cell, dofs_per_cell); typename DoFHandler::active_cell_iterator - cell = temperature_dof_handler.begin_active(), - endc = temperature_dof_handler.end(); + cell = temperature_dof_handler.begin_active(), + endc = temperature_dof_handler.end(); std::vector rhs_values(n_q_points); TrilinosWrappers::MPI::Vector - rhs (temperature_mass_matrix.row_partitioner()), - solution (temperature_mass_matrix.row_partitioner()); + rhs (temperature_mass_matrix.row_partitioner()), + solution (temperature_mass_matrix.row_partitioner()); for (; cell!=endc; ++cell) if (cell->subdomain_id() == - Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)) + Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)) { - cell->get_dof_indices (local_dof_indices); - fe_values.reinit(cell); - - EquationData::TemperatureInitialValues().value_list - (fe_values.get_quadrature_points(), rhs_values); - - cell_vector = 0; - matrix_for_bc = 0; - for (unsigned int point=0; pointget_dof_indices (local_dof_indices); + fe_values.reinit(cell); + + EquationData::TemperatureInitialValues().value_list + (fe_values.get_quadrature_points(), rhs_values); + + cell_vector = 0; + matrix_for_bc = 0; + for (unsigned int point=0; point::project_temperature_field () // old_temperature_solution = solution; old_temperature_solution.reinit(solution, false, true); - // this is good enough: no need to set - // current temperature since we need this - // field only for computing the next stokes - // system, which depends on the temperature - // of the *previous* time step + // this is good enough: no need to set + // current temperature since we need this + // field only for computing the next stokes + // system, which depends on the temperature + // of the *previous* time step } - // @sect4{The BoussinesqFlowProblem setup functions} - - // The following three functions set - // up the Stokes matrix, the matrix - // used for the Stokes - // preconditioner, and the - // temperature matrix. The code is - // mostly the same as in step-31, but - // it has been broken out into three - // functions of their own for - // simplicity, but also so that they - // can easily be run in %parallel on - // multiple threads (unless we are - // running with MPI, in which case - // this is not possible, as explained - // in the introduction). - // - // The main functional difference - // between the code here and that in - // step-31 is that the matrices we - // want to set up are distributed - // across multiple processors. 
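Stripped of the MPI and Trilinos specifics, the idea behind project_temperature_field() above fits into a few lines. The following serial sketch assumes purely homogeneous constraints (the program additionally assembles the dummy matrix_for_bc and hands it to distribute_local_to_global so that inhomogeneous boundary values are treated correctly, as explained above); the name poor_mans_project and its argument list are made up for the sketch:

#include <deal.II/base/function.h>
#include <deal.II/base/quadrature_lib.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_accessor.h>
#include <deal.II/fe/fe.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/fe/mapping.h>
#include <deal.II/lac/constraint_matrix.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/solver_control.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/precondition.h>
#include <vector>

using namespace dealii;

template <int dim>
void poor_mans_project (const DoFHandler<dim>      &dof_handler,
                        const Mapping<dim>         &mapping,
                        const ConstraintMatrix     &constraints,
                        const SparseMatrix<double> &mass_matrix,
                        const Function<dim>        &initial_values,
                        Vector<double>             &projection)
{
  const FiniteElement<dim> &fe = dof_handler.get_fe();
  QGauss<dim>   quadrature (fe.degree+2);
  FEValues<dim> fe_values (mapping, fe, quadrature,
                           update_values |
                           update_quadrature_points |
                           update_JxW_values);

  Vector<double> rhs (dof_handler.n_dofs());
  Vector<double> cell_rhs (fe.dofs_per_cell);
  std::vector<unsigned int> local_dof_indices (fe.dofs_per_cell);
  std::vector<double> rhs_values (quadrature.size());

  // right hand side b_i = (phi_i, T_0), assembled with the same constraints
  // that will later make the solution conforming
  typename DoFHandler<dim>::active_cell_iterator
    cell = dof_handler.begin_active(),
    endc = dof_handler.end();
  for (; cell!=endc; ++cell)
    {
      fe_values.reinit (cell);
      initial_values.value_list (fe_values.get_quadrature_points(),
                                 rhs_values);

      cell_rhs = 0;
      for (unsigned int q=0; q<quadrature.size(); ++q)
        for (unsigned int i=0; i<fe.dofs_per_cell; ++i)
          cell_rhs(i) += (rhs_values[q] *
                          fe_values.shape_value (i,q) *
                          fe_values.JxW (q));

      cell->get_dof_indices (local_dof_indices);
      constraints.distribute_local_to_global (cell_rhs,
                                              local_dof_indices,
                                              rhs);
    }

  // solve M x = b with CG and distribute the constraints onto the result
  projection.reinit (dof_handler.n_dofs());
  SolverControl solver_control (rhs.size(), 1e-12*rhs.l2_norm());
  SolverCG<>    cg (solver_control);
  PreconditionJacobi<SparseMatrix<double> > preconditioner;
  preconditioner.initialize (mass_matrix);
  cg.solve (mass_matrix, projection, rhs, preconditioner);
  constraints.distribute (projection);
}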
Since - // we still want to build up the - // sparsity pattern first for - // efficiency reasons, we could - // continue to build the - // entire sparsity pattern as - // a - // BlockCompressedSimpleSparsityPattern, - // as we did in step-31. However, - // that would be inefficient: every - // processor would build the same - // sparsity pattern, but only - // initialize a small part of the - // matrix using it. - // - // Rather, we use an object of type - // TrilinosWrappers::BlockSparsityPattern, - // which is (obviously) a wrapper - // around a sparsity pattern object - // provided by Trilinos. The - // advantage is that the Trilinos - // sparsity pattern class can - // communicate across multiple - // processors: if this processor - // fills in all the nonzero entries - // that result from the cells it - // owns, and every other processor - // does so as well, then at the end - // after some MPI communication - // initiated by the - // compress() call, we - // will have the globally assembled - // sparsity pattern available with - // which the global matrix can be - // initialized. - // - // The only other change we need to - // make is to tell the - // DoFTools::make_sparsity_pattern - // function that it is only supposed - // to work on a subset of cells, - // namely the ones whose - // subdomain_id equals - // the number of the current - // processor, and to ignore all other - // cells. - // - // This strategy is replicated across - // all three of the following - // functions. - // - // Note that Trilinos matrices store the - // information contained in the sparsity - // patterns, so we can safely release the - // sp variable once the matrix - // has been given the sparsity structure. +// @sect4{The BoussinesqFlowProblem setup functions} + +// The following three functions set +// up the Stokes matrix, the matrix +// used for the Stokes +// preconditioner, and the +// temperature matrix. The code is +// mostly the same as in step-31, but +// it has been broken out into three +// functions of their own for +// simplicity, but also so that they +// can easily be run in %parallel on +// multiple threads (unless we are +// running with MPI, in which case +// this is not possible, as explained +// in the introduction). +// +// The main functional difference +// between the code here and that in +// step-31 is that the matrices we +// want to set up are distributed +// across multiple processors. Since +// we still want to build up the +// sparsity pattern first for +// efficiency reasons, we could +// continue to build the +// entire sparsity pattern as +// a +// BlockCompressedSimpleSparsityPattern, +// as we did in step-31. However, +// that would be inefficient: every +// processor would build the same +// sparsity pattern, but only +// initialize a small part of the +// matrix using it. +// +// Rather, we use an object of type +// TrilinosWrappers::BlockSparsityPattern, +// which is (obviously) a wrapper +// around a sparsity pattern object +// provided by Trilinos. The +// advantage is that the Trilinos +// sparsity pattern class can +// communicate across multiple +// processors: if this processor +// fills in all the nonzero entries +// that result from the cells it +// owns, and every other processor +// does so as well, then at the end +// after some MPI communication +// initiated by the +// compress() call, we +// will have the globally assembled +// sparsity pattern available with +// which the global matrix can be +// initialized. 
+// +// The only other change we need to +// make is to tell the +// DoFTools::make_sparsity_pattern +// function that it is only supposed +// to work on a subset of cells, +// namely the ones whose +// subdomain_id equals +// the number of the current +// processor, and to ignore all other +// cells. +// +// This strategy is replicated across +// all three of the following +// functions. +// +// Note that Trilinos matrices store the +// information contained in the sparsity +// patterns, so we can safely release the +// sp variable once the matrix +// has been given the sparsity structure. template void BoussinesqFlowProblem:: - setup_stokes_matrix (const std::vector &stokes_partitioning) +setup_stokes_matrix (const std::vector &stokes_partitioning) { stokes_matrix.clear (); TrilinosWrappers::BlockSparsityPattern sp (stokes_partitioning, - MPI_COMM_WORLD); + MPI_COMM_WORLD); Table<2,DoFTools::Coupling> coupling (dim+1, dim+1); for (unsigned int c=0; c:: template void BoussinesqFlowProblem:: - setup_stokes_preconditioner (const std::vector &stokes_partitioning) +setup_stokes_preconditioner (const std::vector &stokes_partitioning) { Amg_preconditioner.reset (); Mp_preconditioner.reset (); @@ -1985,21 +1985,21 @@ void BoussinesqFlowProblem:: stokes_preconditioner_matrix.clear (); TrilinosWrappers::BlockSparsityPattern sp (stokes_partitioning, - MPI_COMM_WORLD); + MPI_COMM_WORLD); Table<2,DoFTools::Coupling> coupling (dim+1, dim+1); for (unsigned int c=0; c:: template void BoussinesqFlowProblem:: - setup_temperature_matrices (const IndexSet &temperature_partitioner) +setup_temperature_matrices (const IndexSet &temperature_partitioner) { T_preconditioner.reset (); temperature_mass_matrix.clear (); @@ -2016,11 +2016,11 @@ void BoussinesqFlowProblem:: temperature_matrix.clear (); TrilinosWrappers::SparsityPattern sp (temperature_partitioner, - MPI_COMM_WORLD); + MPI_COMM_WORLD); DoFTools::make_sparsity_pattern (temperature_dof_handler, sp, - temperature_constraints, false, - Utilities::System:: - get_this_mpi_process(MPI_COMM_WORLD)); + temperature_constraints, false, + Utilities::System:: + get_this_mpi_process(MPI_COMM_WORLD)); sp.compress(); temperature_matrix.reinit (sp); @@ -2030,84 +2030,84 @@ void BoussinesqFlowProblem:: - // The remainder of the setup function - // (after splitting out the three functions - // above) mostly has to deal with the - // things we need to do for parallelization - // across processors. In particular, at the - // top it calls - // GridTools::partition_triangulation to - // subdivide all cells into subdomains of - // roughly equal size and roughly minimal - // interface length (using METIS). We then - // distribute degrees of freedom for Stokes - // and temperature DoFHandler objects, and - // re-sort them in such a way that all - // degrees of freedom associated with - // subdomain zero come before all those - // associated with subdomain one, etc. For - // the Stokes part, this entails, however, - // that velocities and pressures become - // intermixed, but this is trivially solved - // by sorting again by blocks; it is worth - // noting that this latter operation leaves - // the relative ordering of all velocities - // and pressures alone, i.e. within the - // velocity block we will still have all - // those associated with subdomain zero - // before all velocities associated with - // subdomain one, etc. 
This is important - // since we store each of the blocks of - // this matrix distributed across all - // processors and want this to be done in - // such a way that each processor stores - // that part of the matrix that is roughly - // equal to the degrees of freedom located - // on those cells that it will actually - // work on. Note how we set boundary - // conditions on the temperature by using - // the ConstraintMatrix object. - // - // After this, we have to set up the - // various partitioners (of type - // IndexSet, see the - // introduction) that describe which parts - // of each matrix or vector will be stored - // where, then call the functions that - // actually set up the matrices - // (concurrently if not using MPI - // but sequentially otherwise, as explained - // in the introduction), and at the end also - // resize the various vectors we keep - // around in this program. We given those - // vectors the correct size using the - // aforementioned Epetra_Map. Most of the - // vectors are actually localized, i.e., - // they store all dofs in the problem on - // each processor. In that case, the only - // information that is used is the global - // size. This is different for the two - // right hand side vectors, which are - // distributed ones, see also the class - // declaration. - // - // Note how this function enters and leaves - // a timed section so that we can get a - // time report at the end of the - // program. Note also the use of the - // pcout variable: to every - // process it looks like we can write to - // screen, but only the output of the first - // processor actually ends up somewhere. We - // could of course have achieved the same - // effect by writing to - // std::cout but would then - // have had to guard every access to that - // stream by something like if - // (Utilities:: System:: - // get_this_mpi_process - // (MPI_COMM_WORLD) == 0), - // hardly a pretty solution. +// The remainder of the setup function +// (after splitting out the three functions +// above) mostly has to deal with the +// things we need to do for parallelization +// across processors. In particular, at the +// top it calls +// GridTools::partition_triangulation to +// subdivide all cells into subdomains of +// roughly equal size and roughly minimal +// interface length (using METIS). We then +// distribute degrees of freedom for Stokes +// and temperature DoFHandler objects, and +// re-sort them in such a way that all +// degrees of freedom associated with +// subdomain zero come before all those +// associated with subdomain one, etc. For +// the Stokes part, this entails, however, +// that velocities and pressures become +// intermixed, but this is trivially solved +// by sorting again by blocks; it is worth +// noting that this latter operation leaves +// the relative ordering of all velocities +// and pressures alone, i.e. within the +// velocity block we will still have all +// those associated with subdomain zero +// before all velocities associated with +// subdomain one, etc. This is important +// since we store each of the blocks of +// this matrix distributed across all +// processors and want this to be done in +// such a way that each processor stores +// that part of the matrix that is roughly +// equal to the degrees of freedom located +// on those cells that it will actually +// work on. Note how we set boundary +// conditions on the temperature by using +// the ConstraintMatrix object. 
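The partitioning and renumbering steps described in this comment happen at the top of setup_dofs(), outside the hunks contained in this patch; the following is therefore only a sketch of that recipe, not the program's own code. The argument sub_blocks plays the role of the stokes_sub_blocks member (the component-to-block mapping with dim velocities followed by one pressure):

#include <deal.II/base/utilities.h>
#include <deal.II/grid/tria.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_renumbering.h>
#include <deal.II/fe/fe.h>
#include <mpi.h>
#include <vector>

using namespace dealii;

template <int dim>
void partition_and_renumber (Triangulation<dim>              &triangulation,
                             DoFHandler<dim>                 &dof_handler,
                             const FiniteElement<dim>        &fe,
                             const std::vector<unsigned int> &sub_blocks)
{
  const unsigned int n_processes
    = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);

  // assign every active cell to one of n_processes subdomains of roughly
  // equal size and small interface length (this is where METIS is used)
  GridTools::partition_triangulation (n_processes, triangulation);

  dof_handler.distribute_dofs (fe);

  // group the dofs by subdomain first, then sort by vector component so that
  // all velocities precede all pressures; the second step keeps the relative,
  // subdomain-wise ordering within each block intact
  DoFRenumbering::subdomain_wise (dof_handler);
  DoFRenumbering::component_wise (dof_handler, sub_blocks);
}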
+// +// After this, we have to set up the +// various partitioners (of type +// IndexSet, see the +// introduction) that describe which parts +// of each matrix or vector will be stored +// where, then call the functions that +// actually set up the matrices +// (concurrently if not using MPI +// but sequentially otherwise, as explained +// in the introduction), and at the end also +// resize the various vectors we keep +// around in this program. We given those +// vectors the correct size using the +// aforementioned Epetra_Map. Most of the +// vectors are actually localized, i.e., +// they store all dofs in the problem on +// each processor. In that case, the only +// information that is used is the global +// size. This is different for the two +// right hand side vectors, which are +// distributed ones, see also the class +// declaration. +// +// Note how this function enters and leaves +// a timed section so that we can get a +// time report at the end of the +// program. Note also the use of the +// pcout variable: to every +// process it looks like we can write to +// screen, but only the output of the first +// processor actually ends up somewhere. We +// could of course have achieved the same +// effect by writing to +// std::cout but would then +// have had to guard every access to that +// stream by something like if +// (Utilities:: System:: +// get_this_mpi_process +// (MPI_COMM_WORLD) == 0), +// hardly a pretty solution. template void BoussinesqFlowProblem::setup_dofs () { @@ -2122,28 +2122,28 @@ void BoussinesqFlowProblem::setup_dofs () std::vector stokes_dofs_per_block (2); DoFTools::count_dofs_per_block (stokes_dof_handler, stokes_dofs_per_block, - stokes_sub_blocks); + stokes_sub_blocks); const unsigned int n_u = stokes_dofs_per_block[0], n_p = stokes_dofs_per_block[1], - n_T = temperature_dof_handler.n_dofs(); + n_T = temperature_dof_handler.n_dofs(); - // print dof numbers with 1000s - // separator since they are frequently - // large + // print dof numbers with 1000s + // separator since they are frequently + // large std::locale s = pcout.get_stream().getloc(); pcout.get_stream().imbue(std::locale("")); pcout << "Number of active cells: " - << triangulation.n_global_active_cells() - << " (on " - << triangulation.n_levels() - << " levels)" - << std::endl - << "Number of degrees of freedom: " - << n_u + n_p + n_T - << " (" << n_u << '+' << n_p << '+'<< n_T <<')' - << std::endl - << std::endl; + << triangulation.n_global_active_cells() + << " (on " + << triangulation.n_levels() + << " levels)" + << std::endl + << "Number of degrees of freedom: " + << n_u + n_p + n_T + << " (" << n_u << '+' << n_p << '+'<< n_T <<')' + << std::endl + << std::endl; pcout.get_stream().imbue(s); @@ -2159,13 +2159,13 @@ void BoussinesqFlowProblem::setup_dofs () stokes_partitioning.push_back(stokes_index_set.get_view(n_u,n_u+n_p)); DoFTools::extract_locally_relevant_dofs (stokes_dof_handler, - stokes_relevant_set); + stokes_relevant_set); stokes_relevant_partitioning.push_back(stokes_relevant_set.get_view(0,n_u)); stokes_relevant_partitioning.push_back(stokes_relevant_set.get_view(n_u,n_u+n_p)); temperature_partitioning = temperature_dof_handler.locally_owned_dofs(); DoFTools::extract_locally_relevant_dofs (temperature_dof_handler, - temperature_relevant_partitioning); + temperature_relevant_partitioning); } { @@ -2173,26 +2173,26 @@ void BoussinesqFlowProblem::setup_dofs () stokes_constraints.clear (); // IndexSet stokes_la; // DoFTools::extract_locally_active_dofs (stokes_dof_handler, -// 
stokes_la); +// stokes_la); stokes_constraints.reinit(stokes_relevant_set); DoFTools::make_hanging_node_constraints (stokes_dof_handler, - stokes_constraints); + stokes_constraints); std::vector velocity_mask (dim+1, true); velocity_mask[dim] = false; VectorTools::interpolate_boundary_values (stokes_dof_handler, - 0, - ZeroFunction(dim+1), - stokes_constraints, - velocity_mask); + 0, + ZeroFunction(dim+1), + stokes_constraints, + velocity_mask); std::set no_normal_flux_boundaries; no_normal_flux_boundaries.insert (1); VectorTools::compute_no_normal_flux_constraints (stokes_dof_handler, 0, - no_normal_flux_boundaries, - stokes_constraints, - mapping); + no_normal_flux_boundaries, + stokes_constraints, + mapping); stokes_constraints.close (); } { @@ -2200,15 +2200,15 @@ void BoussinesqFlowProblem::setup_dofs () temperature_constraints.reinit(temperature_relevant_partitioning);//temp_locally_active); DoFTools::make_hanging_node_constraints (temperature_dof_handler, - temperature_constraints); + temperature_constraints); VectorTools::interpolate_boundary_values (temperature_dof_handler, - 0, - EquationData::TemperatureInitialValues(), - temperature_constraints); + 0, + EquationData::TemperatureInitialValues(), + temperature_constraints); VectorTools::interpolate_boundary_values (temperature_dof_handler, - 1, - EquationData::TemperatureInitialValues(), - temperature_constraints); + 1, + EquationData::TemperatureInitialValues(), + temperature_constraints); temperature_constraints.close (); } @@ -2216,14 +2216,14 @@ void BoussinesqFlowProblem::setup_dofs () { Threads::TaskGroup<> tasks; tasks += Threads::new_task (&BoussinesqFlowProblem::setup_stokes_matrix, - *this, - stokes_partitioning); + *this, + stokes_partitioning); tasks += Threads::new_task (&BoussinesqFlowProblem::setup_stokes_preconditioner, - *this, - stokes_partitioning); + *this, + stokes_partitioning); tasks += Threads::new_task (&BoussinesqFlowProblem::setup_temperature_matrices, - *this, - temperature_partitioning); + *this, + temperature_partitioning); tasks.join_all (); } else @@ -2252,63 +2252,63 @@ void BoussinesqFlowProblem::setup_dofs () - // @sect4{The BoussinesqFlowProblem assembly functions} - // - // Following the discussion in the - // introduction and in the @ref threads - // module, we split the assembly functions - // into different parts: - // - //
    - //
  • The local calculations of matrices - // and right hand sides, given a certain cell - // as input (these functions are named - // local_assemble_* below). The - // resulting function is, in other words, - // essentially the body of the loop over all - // cells in step-31. Note, however, that - // these functions store the result from the - // local calculations in variables of classes - // from the CopyData namespace. - // - //
  • These objects are then given to the - // second step which writes the local data - // into the global data structures (these - // functions are named - // copy_local_to_global_* - // below). These functions are pretty - // trivial. - // - //
  • These two subfunctions are then used - // in the respective assembly routine (called - // assemble_* below), where a - // WorkStream object is set up and runs over - // all the cells that belong to the - // processor's subdomain. - //
- - // @sect5{Stokes preconditioner assembly} - // - // Let us start with the functions that - // builds the Stokes preconditioner. The - // first two of these are pretty trivial, - // given the discussion above. Note in - // particular that the main point in using - // the scratch data object is that we want to - // avoid allocating any objects on the free - // space each time we visit a new cell. As a - // consequence, the assembly function below - // only has automatic local variables, and - // everything else is accessed through the - // scratch data object, which is allocated - // only once before we start the loop over - // all cells: +// @sect4{The BoussinesqFlowProblem assembly functions} +// +// Following the discussion in the +// introduction and in the @ref threads +// module, we split the assembly functions +// into different parts: +// +//
    +//
  • The local calculations of matrices +// and right hand sides, given a certain cell +// as input (these functions are named +// local_assemble_* below). The +// resulting function is, in other words, +// essentially the body of the loop over all +// cells in step-31. Note, however, that +// these functions store the result from the +// local calculations in variables of classes +// from the CopyData namespace. +// +//
  • These objects are then given to the +// second step which writes the local data +// into the global data structures (these +// functions are named +// copy_local_to_global_* +// below). These functions are pretty +// trivial. +// +//
  • These two subfunctions are then used +// in the respective assembly routine (called +// assemble_* below), where a +// WorkStream object is set up and runs over +// all the cells that belong to the +// processor's subdomain. +//
+ +// @sect5{Stokes preconditioner assembly} +// +// Let us start with the functions that +// builds the Stokes preconditioner. The +// first two of these are pretty trivial, +// given the discussion above. Note in +// particular that the main point in using +// the scratch data object is that we want to +// avoid allocating any objects on the free +// space each time we visit a new cell. As a +// consequence, the assembly function below +// only has automatic local variables, and +// everything else is accessed through the +// scratch data object, which is allocated +// only once before we start the loop over +// all cells: template void BoussinesqFlowProblem:: local_assemble_stokes_preconditioner (const typename DoFHandler::active_cell_iterator &cell, - Assembly::Scratch::StokesPreconditioner &scratch, - Assembly::CopyData::StokesPreconditioner &data) + Assembly::Scratch::StokesPreconditioner &scratch, + Assembly::CopyData::StokesPreconditioner &data) { const unsigned int dofs_per_cell = stokes_fe.dofs_per_cell; const unsigned int n_q_points = scratch.stokes_fe_values.n_quadrature_points; @@ -2324,25 +2324,25 @@ local_assemble_stokes_preconditioner (const typename DoFHandler::active_cel for (unsigned int q=0; q:: copy_local_to_global_stokes_preconditioner (const Assembly::CopyData::StokesPreconditioner &data) { stokes_constraints.distribute_local_to_global (data.local_matrix, - data.local_dof_indices, - stokes_preconditioner_matrix); + data.local_dof_indices, + stokes_preconditioner_matrix); } - // When we create the WorkStream, we modify - // the start and end iterator into a - // so-called SubdomainFilter - // that tells the individual processes which - // cells to work on. This is exactly the case - // discussed in the introduction. Note how we - // use the construct - // std_cxx1x::bind to create a - // function object that is compatible with - // the WorkStream class. It uses placeholders - // _1, std_cxx1x::_2, _3 for the local - // assembly function that specify cell, - // scratch data, and copy data, as well as - // the placeholder _1 for the - // copy function that expects the data to be - // written into the global matrix. On the - // other hand, the implicit zeroth argument - // of member functions (namely the - // this pointer of the object on - // which that member function is to operate - // on) is bound to the - // this pointer of the current - // function. The WorkStream class, as a - // consequence, does not need to know - // anything about the object these functions - // work on. - // - // When the - // WorkStream is executed, it will create - // several local assembly routines of the - // first kind for several cells and let - // some available processors work on - // them. The function that needs to be - // synchronized, i.e., the write operation - // into the global matrix, however, is - // executed by only one thread at a time in - // the prescribed order. Of course, this - // only holds for the parallelization on a - // single MPI process. Different MPI - // processes will have their own WorkStream - // objects and do that work completely - // independently. In a distributed - // calculation, some data will accumulate - // at degrees of freedom that are not owned - // by the respective processor. It would be - // inefficient to send data around every - // time we encounter such a dof. 
What - // happens instead is that the Trilinos - // sparse matrix will keep that data and - // send it to the owner at the end of - // assembly, by calling the - // compress() command. +// When we create the WorkStream, we modify +// the start and end iterator into a +// so-called SubdomainFilter +// that tells the individual processes which +// cells to work on. This is exactly the case +// discussed in the introduction. Note how we +// use the construct +// std_cxx1x::bind to create a +// function object that is compatible with +// the WorkStream class. It uses placeholders +// _1, std_cxx1x::_2, _3 for the local +// assembly function that specify cell, +// scratch data, and copy data, as well as +// the placeholder _1 for the +// copy function that expects the data to be +// written into the global matrix. On the +// other hand, the implicit zeroth argument +// of member functions (namely the +// this pointer of the object on +// which that member function is to operate +// on) is bound to the +// this pointer of the current +// function. The WorkStream class, as a +// consequence, does not need to know +// anything about the object these functions +// work on. +// +// When the +// WorkStream is executed, it will create +// several local assembly routines of the +// first kind for several cells and let +// some available processors work on +// them. The function that needs to be +// synchronized, i.e., the write operation +// into the global matrix, however, is +// executed by only one thread at a time in +// the prescribed order. Of course, this +// only holds for the parallelization on a +// single MPI process. Different MPI +// processes will have their own WorkStream +// objects and do that work completely +// independently. In a distributed +// calculation, some data will accumulate +// at degrees of freedom that are not owned +// by the respective processor. It would be +// inefficient to send data around every +// time we encounter such a dof. What +// happens instead is that the Trilinos +// sparse matrix will keep that data and +// send it to the owner at the end of +// assembly, by calling the +// compress() command. 
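Before the actual assemble_stokes_preconditioner() below, here is a stripped-down sketch of the WorkStream pattern just described, with all finite element details removed. The class Example, its data members and the two callbacks are hypothetical stand-ins for BoussinesqFlowProblem, the Assembly::Scratch and Assembly::CopyData classes, and the local_assemble_*/copy_local_to_global_* pairs:

#include <deal.II/base/work_stream.h>
#include <deal.II/base/utilities.h>
#include <deal.II/base/std_cxx1x/bind.h>
#include <deal.II/grid/filtered_iterator.h>
#include <deal.II/dofs/dof_handler.h>
#include <mpi.h>

using namespace dealii;

template <int dim>
class Example
{
public:
  Example (DoFHandler<dim> &dh) : dof_handler (dh), accumulated (0) {}
  void run_work_stream ();

private:
  struct ScratchData { /* per-thread temporaries, e.g. FEValues objects */ };
  struct CopyData    { double local_contribution; };

  // purely local work; several cells may be processed by different threads
  void local_work (const typename DoFHandler<dim>::active_cell_iterator &cell,
                   ScratchData &scratch,
                   CopyData    &data)
  {
    data.local_contribution = cell->diameter ();
  }

  // writes into global data; executed by one thread at a time, in order
  void copy_to_global (const CopyData &data)
  {
    accumulated += data.local_contribution;
  }

  DoFHandler<dim> &dof_handler;
  double           accumulated;
};


template <int dim>
void Example<dim>::run_work_stream ()
{
  typedef FilteredIterator<typename DoFHandler<dim>::active_cell_iterator>
    SubdomainFilter;

  const unsigned int my_id
    = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);

  // loop only over the cells of this processor's subdomain; _1, _2, _3 are
  // the cell, scratch and copy-data arguments, and 'this' binds the member
  // functions to the current object
  WorkStream::run (SubdomainFilter (IteratorFilters::SubdomainEqualTo (my_id),
                                    dof_handler.begin_active ()),
                   SubdomainFilter (IteratorFilters::SubdomainEqualTo (my_id),
                                    dof_handler.end ()),
                   std_cxx1x::bind (&Example<dim>::local_work,
                                    this,
                                    std_cxx1x::_1,
                                    std_cxx1x::_2,
                                    std_cxx1x::_3),
                   std_cxx1x::bind (&Example<dim>::copy_to_global,
                                    this,
                                    std_cxx1x::_1),
                   ScratchData (),
                   CopyData ());
}

Each MPI process runs its own WorkStream over the cells of its subdomain like this; contributions that end up in rows owned by other processes are shipped to their owners by the compress() call discussed above.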
template void BoussinesqFlowProblem::assemble_stokes_preconditioner () @@ -2421,49 +2421,49 @@ BoussinesqFlowProblem::assemble_stokes_preconditioner () const QGauss quadrature_formula(parameters.stokes_velocity_degree+1); typedef - FilteredIterator::active_cell_iterator> - SubdomainFilter; + FilteredIterator::active_cell_iterator> + SubdomainFilter; WorkStream:: - run (SubdomainFilter (IteratorFilters::SubdomainEqualTo - (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)), - stokes_dof_handler.begin_active()), - SubdomainFilter (IteratorFilters::SubdomainEqualTo - (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)), - stokes_dof_handler.end()), - std_cxx1x::bind (&BoussinesqFlowProblem:: - local_assemble_stokes_preconditioner, - this, - std_cxx1x::_1, - std_cxx1x::_2, - std_cxx1x::_3), - std_cxx1x::bind (&BoussinesqFlowProblem:: - copy_local_to_global_stokes_preconditioner, - this, - std_cxx1x::_1), - Assembly::Scratch:: - StokesPreconditioner (stokes_fe, quadrature_formula, - mapping, - update_JxW_values | - update_values | - update_gradients), - Assembly::CopyData:: - StokesPreconditioner (stokes_fe)); + run (SubdomainFilter (IteratorFilters::SubdomainEqualTo + (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)), + stokes_dof_handler.begin_active()), + SubdomainFilter (IteratorFilters::SubdomainEqualTo + (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)), + stokes_dof_handler.end()), + std_cxx1x::bind (&BoussinesqFlowProblem:: + local_assemble_stokes_preconditioner, + this, + std_cxx1x::_1, + std_cxx1x::_2, + std_cxx1x::_3), + std_cxx1x::bind (&BoussinesqFlowProblem:: + copy_local_to_global_stokes_preconditioner, + this, + std_cxx1x::_1), + Assembly::Scratch:: + StokesPreconditioner (stokes_fe, quadrature_formula, + mapping, + update_JxW_values | + update_values | + update_gradients), + Assembly::CopyData:: + StokesPreconditioner (stokes_fe)); stokes_preconditioner_matrix.compress(); } - // The final function in this block initiates - // assemble of the Stokes preconditioner - // matrix and then builds the Stokes - // preconditioner. It is mostly the same as - // in the serial case. The only difference to - // step-31 is that we use an ILU - // preconditioner for the pressure mass - // matrix instead of IC, as discussed in the - // introduction. +// The final function in this block initiates +// assemble of the Stokes preconditioner +// matrix and then builds the Stokes +// preconditioner. It is mostly the same as +// in the serial case. The only difference to +// step-31 is that we use an ILU +// preconditioner for the pressure mass +// matrix instead of IC, as discussed in the +// introduction. 
template void BoussinesqFlowProblem::build_stokes_preconditioner () @@ -2480,12 +2480,12 @@ BoussinesqFlowProblem::build_stokes_preconditioner () std::vector velocity_components (dim+1,true); velocity_components[dim] = false; DoFTools::extract_constant_modes (stokes_dof_handler, velocity_components, - constant_modes); + constant_modes); Mp_preconditioner = std_cxx1x::shared_ptr - (new TrilinosWrappers::PreconditionILU()); + (new TrilinosWrappers::PreconditionILU()); Amg_preconditioner = std_cxx1x::shared_ptr - (new TrilinosWrappers::PreconditionAMG()); + (new TrilinosWrappers::PreconditionAMG()); TrilinosWrappers::PreconditionAMG::AdditionalData Amg_data; Amg_data.constant_modes = constant_modes; @@ -2496,41 +2496,41 @@ BoussinesqFlowProblem::build_stokes_preconditioner () Mp_preconditioner->initialize (stokes_preconditioner_matrix.block(1,1)); Amg_preconditioner->initialize (stokes_preconditioner_matrix.block(0,0), - Amg_data); + Amg_data); rebuild_stokes_preconditioner = false; - pcout << std::endl; - computing_timer.exit_section(); + pcout << std::endl; + computing_timer.exit_section(); } - // @sect5{Stokes system assembly} - - // The next three functions implement the - // assembly of the Stokes system, again - // split up into a part performing local - // calculations, one for writing the local - // data into the global matrix and vector, - // and one for actually running the loop - // over all cells with the help of the - // WorkStream class. Note that the assembly - // of the Stokes matrix needs only to be - // done in case we have changed the - // mesh. Otherwise, just the - // (temperature-dependent) right hand side - // needs to be calculated here. Since we - // are working with distributed matrices - // and vectors, we have to call the - // respective compress() - // functions in the end of the assembly in - // order to send non-local data to the - // owner process. +// @sect5{Stokes system assembly} + +// The next three functions implement the +// assembly of the Stokes system, again +// split up into a part performing local +// calculations, one for writing the local +// data into the global matrix and vector, +// and one for actually running the loop +// over all cells with the help of the +// WorkStream class. Note that the assembly +// of the Stokes matrix needs only to be +// done in case we have changed the +// mesh. Otherwise, just the +// (temperature-dependent) right hand side +// needs to be calculated here. Since we +// are working with distributed matrices +// and vectors, we have to call the +// respective compress() +// functions in the end of the assembly in +// order to send non-local data to the +// owner process. 
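The control flow just described, reassemble the matrix only after mesh changes, rebuild the temperature-dependent right hand side every time step, and compress the distributed objects at the end, can be summarized in a short sketch. The callback, the flag handling and the zeroing of the objects are assumptions made for the sketch, not code taken from step-32:

#include <deal.II/lac/trilinos_block_sparse_matrix.h>
#include <deal.II/lac/trilinos_block_vector.h>

using namespace dealii;

template <typename AssembleLocally>
void assemble_and_compress (TrilinosWrappers::BlockSparseMatrix &matrix,
                            TrilinosWrappers::MPI::BlockVector  &rhs,
                            bool                                &rebuild_matrix,
                            const AssembleLocally               &assemble_locally)
{
  if (rebuild_matrix)
    matrix = 0;            // matrix entries are about to be rewritten
  rhs = 0;

  assemble_locally ();     // the WorkStream loop adds this process' contributions

  matrix.compress ();      // ship matrix entries to the processes that own them
  rhs.compress (Add);      // accumulate right hand side entries on their owners

  rebuild_matrix = false;  // until the next mesh change
}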
template void BoussinesqFlowProblem:: local_assemble_stokes_system (const typename DoFHandler::active_cell_iterator &cell, - Assembly::Scratch::StokesSystem &scratch, - Assembly::CopyData::StokesSystem &data) + Assembly::Scratch::StokesSystem &scratch, + Assembly::CopyData::StokesSystem &data) { const unsigned int dofs_per_cell = scratch.stokes_fe_values.get_fe().dofs_per_cell; const unsigned int n_q_points = scratch.stokes_fe_values.n_quadrature_points; @@ -2541,10 +2541,10 @@ local_assemble_stokes_system (const typename DoFHandler::active_cell_iterat scratch.stokes_fe_values.reinit (cell); typename DoFHandler::active_cell_iterator - temperature_cell (&triangulation, - cell->level(), - cell->index(), - &temperature_dof_handler); + temperature_cell (&triangulation, + cell->level(), + cell->index(), + &temperature_dof_handler); scratch.temperature_fe_values.reinit (temperature_cell); if (rebuild_stokes_matrix) @@ -2552,43 +2552,43 @@ local_assemble_stokes_system (const typename DoFHandler::active_cell_iterat data.local_rhs = 0; scratch.temperature_fe_values.get_function_values (old_temperature_solution, - scratch.old_temperature_values); + scratch.old_temperature_values); for (unsigned int q=0; q - gravity = EquationData::gravity_vector (scratch.stokes_fe_values - .quadrature_point(q)); + gravity = EquationData::gravity_vector (scratch.stokes_fe_values + .quadrature_point(q)); for (unsigned int i=0; iget_dof_indices (data.local_dof_indices); @@ -2603,14 +2603,14 @@ copy_local_to_global_stokes_system (const Assembly::CopyData::StokesSystem { if (rebuild_stokes_matrix == true) stokes_constraints.distribute_local_to_global (data.local_matrix, - data.local_rhs, - data.local_dof_indices, - stokes_matrix, - stokes_rhs); + data.local_rhs, + data.local_dof_indices, + stokes_matrix, + stokes_rhs); else stokes_constraints.distribute_local_to_global (data.local_rhs, - data.local_dof_indices, - stokes_rhs); + data.local_dof_indices, + stokes_rhs); } @@ -2628,40 +2628,40 @@ void BoussinesqFlowProblem::assemble_stokes_system () const QGauss quadrature_formula(parameters.stokes_velocity_degree+1); typedef - FilteredIterator::active_cell_iterator> - SubdomainFilter; + FilteredIterator::active_cell_iterator> + SubdomainFilter; WorkStream:: - run (SubdomainFilter (IteratorFilters::SubdomainEqualTo - (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)), - stokes_dof_handler.begin_active()), - SubdomainFilter (IteratorFilters::SubdomainEqualTo - (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)), - stokes_dof_handler.end()), - std_cxx1x::bind (&BoussinesqFlowProblem:: - local_assemble_stokes_system, - this, - std_cxx1x::_1, - std_cxx1x::_2, - std_cxx1x::_3), - std_cxx1x::bind (&BoussinesqFlowProblem:: - copy_local_to_global_stokes_system, - this, - std_cxx1x::_1), - Assembly::Scratch:: - StokesSystem (stokes_fe, mapping, quadrature_formula, - (update_values | - update_quadrature_points | - update_JxW_values | - (rebuild_stokes_matrix == true - ? 
- update_gradients - : - UpdateFlags(0))), - temperature_fe, - update_values), - Assembly::CopyData:: - StokesSystem (stokes_fe)); + run (SubdomainFilter (IteratorFilters::SubdomainEqualTo + (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)), + stokes_dof_handler.begin_active()), + SubdomainFilter (IteratorFilters::SubdomainEqualTo + (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)), + stokes_dof_handler.end()), + std_cxx1x::bind (&BoussinesqFlowProblem:: + local_assemble_stokes_system, + this, + std_cxx1x::_1, + std_cxx1x::_2, + std_cxx1x::_3), + std_cxx1x::bind (&BoussinesqFlowProblem:: + copy_local_to_global_stokes_system, + this, + std_cxx1x::_1), + Assembly::Scratch:: + StokesSystem (stokes_fe, mapping, quadrature_formula, + (update_values | + update_quadrature_points | + update_JxW_values | + (rebuild_stokes_matrix == true + ? + update_gradients + : + UpdateFlags(0))), + temperature_fe, + update_values), + Assembly::CopyData:: + StokesSystem (stokes_fe)); stokes_matrix.compress(); stokes_rhs.compress(Add); @@ -2673,26 +2673,26 @@ void BoussinesqFlowProblem::assemble_stokes_system () } - // @sect5{Temperature matrix assembly} - - // The task to be performed by the next three - // functions is to calculate a mass matrix - // and a Laplace matrix on the temperature - // system. These will be combined in order to - // yield the semi-implicit time stepping - // matrix that consists of the mass matrix - // plus a time step weight times the Laplace - // matrix. This function is again essentially - // the body of the loop over all cells from - // step-31. - // - // The two following functions perform - // similar services as the ones above. +// @sect5{Temperature matrix assembly} + +// The task to be performed by the next three +// functions is to calculate a mass matrix +// and a Laplace matrix on the temperature +// system. These will be combined in order to +// yield the semi-implicit time stepping +// matrix that consists of the mass matrix +// plus a time step weight times the Laplace +// matrix. This function is again essentially +// the body of the loop over all cells from +// step-31. +// +// The two following functions perform +// similar services as the ones above. 
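Concretely, the local contributions that local_assemble_temperature_matrix() accumulates below are those of the standard mass and (diffusivity-weighted) stiffness matrices of the temperature space,
  $M_{ij} = \int_\Omega \varphi_i \, \varphi_j \, dx$,   $A_{ij} = \int_\Omega \kappa \, \nabla\varphi_i \cdot \nabla\varphi_j \, dx$,
where the $\varphi_i$ are the temperature shape functions and $\kappa$ is the constant thermal diffusivity from EquationData; both are evaluated cell by cell with the quadrature formula chosen in assemble_temperature_matrix(). How the two matrices are combined with the BDF-2 time step weights is shown in assemble_temperature_system() further down.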
template void BoussinesqFlowProblem:: local_assemble_temperature_matrix (const typename DoFHandler::active_cell_iterator &cell, - Assembly::Scratch::TemperatureMatrix &scratch, - Assembly::CopyData::TemperatureMatrix &data) + Assembly::Scratch::TemperatureMatrix &scratch, + Assembly::CopyData::TemperatureMatrix &data) { const unsigned int dofs_per_cell = scratch.temperature_fe_values.get_fe().dofs_per_cell; const unsigned int n_q_points = scratch.temperature_fe_values.n_quadrature_points; @@ -2706,23 +2706,23 @@ local_assemble_temperature_matrix (const typename DoFHandler::active_cell_i for (unsigned int q=0; q:: copy_local_to_global_temperature_matrix (const Assembly::CopyData::TemperatureMatrix &data) { temperature_constraints.distribute_local_to_global (data.local_mass_matrix, - data.local_dof_indices, - temperature_mass_matrix); + data.local_dof_indices, + temperature_mass_matrix); temperature_constraints.distribute_local_to_global (data.local_stiffness_matrix, - data.local_dof_indices, - temperature_stiffness_matrix); + data.local_dof_indices, + temperature_stiffness_matrix); } @@ -2755,30 +2755,30 @@ void BoussinesqFlowProblem::assemble_temperature_matrix () const QGauss quadrature_formula(parameters.temperature_degree+2); typedef - FilteredIterator::active_cell_iterator> - SubdomainFilter; + FilteredIterator::active_cell_iterator> + SubdomainFilter; WorkStream:: - run (SubdomainFilter (IteratorFilters::SubdomainEqualTo - (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)), - temperature_dof_handler.begin_active()), - SubdomainFilter (IteratorFilters::SubdomainEqualTo - (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)), - temperature_dof_handler.end()), - std_cxx1x::bind (&BoussinesqFlowProblem:: - local_assemble_temperature_matrix, - this, - std_cxx1x::_1, - std_cxx1x::_2, - std_cxx1x::_3), - std_cxx1x::bind (&BoussinesqFlowProblem:: - copy_local_to_global_temperature_matrix, - this, - std_cxx1x::_1), - Assembly::Scratch:: - TemperatureMatrix (temperature_fe, mapping, quadrature_formula), - Assembly::CopyData:: - TemperatureMatrix (temperature_fe)); + run (SubdomainFilter (IteratorFilters::SubdomainEqualTo + (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)), + temperature_dof_handler.begin_active()), + SubdomainFilter (IteratorFilters::SubdomainEqualTo + (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)), + temperature_dof_handler.end()), + std_cxx1x::bind (&BoussinesqFlowProblem:: + local_assemble_temperature_matrix, + this, + std_cxx1x::_1, + std_cxx1x::_2, + std_cxx1x::_3), + std_cxx1x::bind (&BoussinesqFlowProblem:: + copy_local_to_global_temperature_matrix, + this, + std_cxx1x::_1), + Assembly::Scratch:: + TemperatureMatrix (temperature_fe, mapping, quadrature_formula), + Assembly::CopyData:: + TemperatureMatrix (temperature_fe)); temperature_mass_matrix.compress(); temperature_stiffness_matrix.compress(); @@ -2790,38 +2790,38 @@ void BoussinesqFlowProblem::assemble_temperature_matrix () } - // @sect5{Temperature right hand side assembly} - - // This is the last assembly function. It - // calculates the right hand side of the - // temperature system, which includes the - // convection and the stabilization - // terms. It includes a lot of evaluations - // of old solutions at the quadrature - // points (which are necessary for - // calculating the artificial viscosity of - // stabilization), but is otherwise similar - // to the other assembly functions. 
Notice, - // once again, how we resolve the dilemma - // of having inhomogeneous boundary - // conditions, but just making a right hand - // side at this point (compare the comments - // for the project function): We create - // some matrix columns with exactly the - // values that would be entered for the - // temperature stiffness matrix, in case we - // have inhomogeneously constrained - // dofs. That will account for the correct - // balance of the right hand side vector - // with the matrix system of temperature. +// @sect5{Temperature right hand side assembly} + +// This is the last assembly function. It +// calculates the right hand side of the +// temperature system, which includes the +// convection and the stabilization +// terms. It includes a lot of evaluations +// of old solutions at the quadrature +// points (which are necessary for +// calculating the artificial viscosity of +// stabilization), but is otherwise similar +// to the other assembly functions. Notice, +// once again, how we resolve the dilemma +// of having inhomogeneous boundary +// conditions, but just making a right hand +// side at this point (compare the comments +// for the project function): We create +// some matrix columns with exactly the +// values that would be entered for the +// temperature stiffness matrix, in case we +// have inhomogeneously constrained +// dofs. That will account for the correct +// balance of the right hand side vector +// with the matrix system of temperature. template void BoussinesqFlowProblem:: local_assemble_temperature_rhs (const std::pair global_T_range, - const double global_max_velocity, - const double global_entropy_variation, - const typename DoFHandler::active_cell_iterator &cell, - Assembly::Scratch::TemperatureRHS &scratch, - Assembly::CopyData::TemperatureRHS &data) + const double global_max_velocity, + const double global_entropy_variation, + const typename DoFHandler::active_cell_iterator &cell, + Assembly::Scratch::TemperatureRHS &scratch, + Assembly::CopyData::TemperatureRHS &data) { const bool use_bdf2_scheme = (timestep_number != 0); @@ -2837,133 +2837,133 @@ local_assemble_temperature_rhs (const std::pair global_T_range, scratch.temperature_fe_values.reinit (cell); typename DoFHandler::active_cell_iterator - stokes_cell (&triangulation, - cell->level(), - cell->index(), - &stokes_dof_handler); + stokes_cell (&triangulation, + cell->level(), + cell->index(), + &stokes_dof_handler); scratch.stokes_fe_values.reinit (stokes_cell); scratch.temperature_fe_values.get_function_values (old_temperature_solution, - scratch.old_temperature_values); + scratch.old_temperature_values); scratch.temperature_fe_values.get_function_values (old_old_temperature_solution, - scratch.old_old_temperature_values); + scratch.old_old_temperature_values); scratch.temperature_fe_values.get_function_gradients (old_temperature_solution, - scratch.old_temperature_grads); + scratch.old_temperature_grads); scratch.temperature_fe_values.get_function_gradients (old_old_temperature_solution, - scratch.old_old_temperature_grads); + scratch.old_old_temperature_grads); scratch.temperature_fe_values.get_function_laplacians (old_temperature_solution, - scratch.old_temperature_laplacians); + scratch.old_temperature_laplacians); scratch.temperature_fe_values.get_function_laplacians (old_old_temperature_solution, - scratch.old_old_temperature_laplacians); + scratch.old_old_temperature_laplacians); scratch.stokes_fe_values[velocities].get_function_values (stokes_solution, - scratch.old_velocity_values); 
+ scratch.old_velocity_values); scratch.stokes_fe_values[velocities].get_function_values (old_stokes_solution, - scratch.old_old_velocity_values); + scratch.old_old_velocity_values); scratch.stokes_fe_values[velocities].get_function_symmetric_gradients (stokes_solution, - scratch.old_strain_rates); + scratch.old_strain_rates); scratch.stokes_fe_values[velocities].get_function_symmetric_gradients (old_stokes_solution, - scratch.old_old_strain_rates); + scratch.old_old_strain_rates); const double nu = compute_viscosity (scratch.old_temperature_values, - scratch.old_old_temperature_values, - scratch.old_temperature_grads, - scratch.old_old_temperature_grads, - scratch.old_temperature_laplacians, - scratch.old_old_temperature_laplacians, - scratch.old_velocity_values, - scratch.old_old_velocity_values, - scratch.old_strain_rates, - scratch.old_old_strain_rates, - global_max_velocity, - global_T_range.second - global_T_range.first, - 0.5 * (global_T_range.second + global_T_range.first), - global_entropy_variation, - cell->diameter()); + scratch.old_old_temperature_values, + scratch.old_temperature_grads, + scratch.old_old_temperature_grads, + scratch.old_temperature_laplacians, + scratch.old_old_temperature_laplacians, + scratch.old_velocity_values, + scratch.old_old_velocity_values, + scratch.old_strain_rates, + scratch.old_old_strain_rates, + global_max_velocity, + global_T_range.second - global_T_range.first, + 0.5 * (global_T_range.second + global_T_range.first), + global_entropy_variation, + cell->diameter()); for (unsigned int q=0; q ext_grad_T - = (use_bdf2_scheme ? - (scratch.old_temperature_grads[q] * - (1+time_step/old_time_step) - - - scratch.old_old_temperature_grads[q] * - time_step / old_time_step) - : - scratch.old_temperature_grads[q]); + = (use_bdf2_scheme ? + (scratch.old_temperature_grads[q] * + (1+time_step/old_time_step) + - + scratch.old_old_temperature_grads[q] * + time_step / old_time_step) + : + scratch.old_temperature_grads[q]); const Tensor<1,dim> extrapolated_u - = (use_bdf2_scheme ? - (scratch.old_velocity_values[q] * (1+time_step/old_time_step) - - scratch.old_old_velocity_values[q] * time_step/old_time_step) - : - scratch.old_velocity_values[q]); + = (use_bdf2_scheme ? + (scratch.old_velocity_values[q] * (1+time_step/old_time_step) - + scratch.old_old_velocity_values[q] * time_step/old_time_step) + : + scratch.old_velocity_values[q]); const SymmetricTensor<2,dim> extrapolated_strain_rate - = (use_bdf2_scheme ? - (scratch.old_strain_rates[q] * (1+time_step/old_time_step) - - scratch.old_old_strain_rates[q] * time_step/old_time_step) - : - scratch.old_strain_rates[q]); + = (use_bdf2_scheme ? + (scratch.old_strain_rates[q] * (1+time_step/old_time_step) - + scratch.old_old_strain_rates[q] * time_step/old_time_step) + : + scratch.old_strain_rates[q]); const double gamma - = ((EquationData::radiogenic_heating * EquationData::density(old_Ts) //?????? why old_Ts? - + - 2 * EquationData::eta * extrapolated_strain_rate * extrapolated_strain_rate) / - (EquationData::density(old_Ts) * EquationData::specific_heat)); + = ((EquationData::radiogenic_heating * EquationData::density(old_Ts) //?????? why old_Ts? 
+ + + 2 * EquationData::eta * extrapolated_strain_rate * extrapolated_strain_rate) / + (EquationData::density(old_Ts) * EquationData::specific_heat)); for (unsigned int i=0; i:: copy_local_to_global_temperature_rhs (const Assembly::CopyData::TemperatureRHS &data) { temperature_constraints.distribute_local_to_global (data.local_rhs, - data.local_dof_indices, - temperature_rhs, - data.matrix_for_bc); + data.local_dof_indices, + temperature_rhs, + data.matrix_for_bc); } - // In the function that runs the WorkStream - // for actually calculating the right hand - // side, we also generate the final - // matrix. As mentioned above, it is a sum - // of the mass matrix and the Laplace - // matrix, times some time step - // weight. This weight is specified by the - // BDF-2 time integration scheme, see the - // introduction in step-31. What is new in - // this tutorial program (in addition to - // the use of MPI parallelization and the - // WorkStream class), is that we now - // precompute the temperature - // preconditioner as well. The reason is - // that the setup of the IC preconditioner - // takes a noticable time compared to the - // solver because we usually only need - // between 10 and 20 iterations for solving - // the temperature system. Hence, it is - // more efficient to precompute the - // preconditioner, even though the matrix - // entries may slightly change because the - // time step might change. This is not - // too big a problem because we remesh every - // fifth time step (and regenerate the - // preconditioner then). +// In the function that runs the WorkStream +// for actually calculating the right hand +// side, we also generate the final +// matrix. As mentioned above, it is a sum +// of the mass matrix and the Laplace +// matrix, times some time step +// weight. This weight is specified by the +// BDF-2 time integration scheme, see the +// introduction in step-31. What is new in +// this tutorial program (in addition to +// the use of MPI parallelization and the +// WorkStream class), is that we now +// precompute the temperature +// preconditioner as well. The reason is +// that the setup of the IC preconditioner +// takes a noticable time compared to the +// solver because we usually only need +// between 10 and 20 iterations for solving +// the temperature system. Hence, it is +// more efficient to precompute the +// preconditioner, even though the matrix +// entries may slightly change because the +// time step might change. This is not +// too big a problem because we remesh every +// fifth time step (and regenerate the +// preconditioner then). 
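For reference, the time step weight referred to above comes from the variable-step BDF-2 approximation of the time derivative derived in step-31's introduction (stated here under the assumption that the scheme is carried over unchanged; $k_n$ and $k_{n-1}$ denote time_step and old_time_step):
  $\partial_t T|_{t_n} \approx \frac{1}{k_n} \left( \frac{2k_n+k_{n-1}}{k_n+k_{n-1}} T^n - \frac{k_n+k_{n-1}}{k_{n-1}} T^{n-1} + \frac{k_n^2}{k_{n-1}(k_n+k_{n-1})} T^{n-2} \right)$.
Multiplying by $k_n$ and treating the diffusion term implicitly yields exactly the matrix assembled below,
  $\frac{2k_n+k_{n-1}}{k_n+k_{n-1}} M + k_n A$,
while the old temperatures enter the right hand side with the remaining two weights. Quantities that are only needed at time $t_n$ itself (the velocity, strain rate and temperature gradient in the advection and stabilization terms) are instead extrapolated there with the factors $(1+k_n/k_{n-1})$ and $-k_n/k_{n-1}$, as can be seen in local_assemble_temperature_rhs() above; in the very first time step, where no $k_{n-1}$ exists, the use_bdf2_scheme flag falls back to the corresponding first-order formulas.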
template void BoussinesqFlowProblem::assemble_temperature_system (const double maximal_velocity) { @@ -3016,7 +3016,7 @@ void BoussinesqFlowProblem::assemble_temperature_system (const double maxim { temperature_matrix.copy_from (temperature_mass_matrix); temperature_matrix *= (2*time_step + old_time_step) / - (time_step + old_time_step); + (time_step + old_time_step); temperature_matrix.add (time_step, temperature_stiffness_matrix); } else @@ -3029,7 +3029,7 @@ void BoussinesqFlowProblem::assemble_temperature_system (const double maxim if (rebuild_temperature_preconditioner == true) { T_preconditioner = std_cxx1x::shared_ptr - (new TrilinosWrappers::PreconditionIC()); + (new TrilinosWrappers::PreconditionIC()); T_preconditioner->initialize (temperature_matrix); rebuild_temperature_preconditioner = false; } @@ -3038,48 +3038,48 @@ void BoussinesqFlowProblem::assemble_temperature_system (const double maxim const QGauss quadrature_formula(parameters.temperature_degree+2); const std::pair - global_T_range = get_extrapolated_temperature_range(); - - // use midpoint between maximum and minimum - // temperature for definition of average - // temperature in entropy viscosity. Could - // also use the integral average, but the - // results are not very sensitive to this - // choice. + global_T_range = get_extrapolated_temperature_range(); + + // use midpoint between maximum and minimum + // temperature for definition of average + // temperature in entropy viscosity. Could + // also use the integral average, but the + // results are not very sensitive to this + // choice. const double average_temperature = 0.5 * (global_T_range.first + - global_T_range.second); + global_T_range.second); const double global_entropy_variation = get_entropy_variation (average_temperature); typedef - FilteredIterator::active_cell_iterator> - SubdomainFilter; + FilteredIterator::active_cell_iterator> + SubdomainFilter; WorkStream:: - run (SubdomainFilter (IteratorFilters::SubdomainEqualTo - (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)), - temperature_dof_handler.begin_active()), - SubdomainFilter (IteratorFilters::SubdomainEqualTo - (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)), - temperature_dof_handler.end()), - std_cxx1x::bind (&BoussinesqFlowProblem:: - local_assemble_temperature_rhs, - this, - global_T_range, - maximal_velocity, - global_entropy_variation, - std_cxx1x::_1, - std_cxx1x::_2, - std_cxx1x::_3), - std_cxx1x::bind (&BoussinesqFlowProblem:: - copy_local_to_global_temperature_rhs, - this, - std_cxx1x::_1), - Assembly::Scratch:: - TemperatureRHS (temperature_fe, stokes_fe, mapping, - quadrature_formula), - Assembly::CopyData:: - TemperatureRHS (temperature_fe)); + run (SubdomainFilter (IteratorFilters::SubdomainEqualTo + (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)), + temperature_dof_handler.begin_active()), + SubdomainFilter (IteratorFilters::SubdomainEqualTo + (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)), + temperature_dof_handler.end()), + std_cxx1x::bind (&BoussinesqFlowProblem:: + local_assemble_temperature_rhs, + this, + global_T_range, + maximal_velocity, + global_entropy_variation, + std_cxx1x::_1, + std_cxx1x::_2, + std_cxx1x::_3), + std_cxx1x::bind (&BoussinesqFlowProblem:: + copy_local_to_global_temperature_rhs, + this, + std_cxx1x::_1), + Assembly::Scratch:: + TemperatureRHS (temperature_fe, stokes_fe, mapping, + quadrature_formula), + Assembly::CopyData:: + TemperatureRHS (temperature_fe)); temperature_rhs.compress(Add); } @@ -3087,52 +3087,52 @@ 
void BoussinesqFlowProblem::assemble_temperature_system (const double maxim - // @sect4{BoussinesqFlowProblem::solve} - - // This function solves the linear systems - // in each time step of the Boussinesq - // problem. First, we - // work on the Stokes system and then on - // the temperature system. In essence, it - // does the same things as the respective - // function in step-31. However, there are - // a few things that we need to pay some - // attention to. The first thing is, as - // mentioned in the introduction, the way - // we store our solution: we keep the full - // vector with all degrees of freedom on - // each MPI node. When we enter a solver - // which is supposed to perform - // matrix-vector products with a - // distributed matrix, this is not the - // appropriate form, though. There, we will - // want to have the solution vector to be - // distributed in the same way as the - // matrix. So what we do first (after - // initializing the Schur-complement based - // preconditioner) is to generate a - // distributed vector called - // distributed_stokes_solution - // and put only the locally owned dofs into - // that, which is neatly done by the - // operator= of the Trilinos - // vector. Next, we need to set the - // pressure values at hanging nodes to - // zero. This we also did in step-31 in - // order not to disturb the Schur - // complement by some vector entries that - // actually are irrelevant during the solve - // stage. As a difference to step-31, here - // we do it only for the locally owned - // pressure dofs. After solving for the - // Stokes solution, each processor copies - // distributed solution back into the solution - // vector for which every element is locally - // owned. - // - // Apart from these two changes, everything - // is the same as in step-31, so we don't - // need to further comment on it. +// @sect4{BoussinesqFlowProblem::solve} + +// This function solves the linear systems +// in each time step of the Boussinesq +// problem. First, we +// work on the Stokes system and then on +// the temperature system. In essence, it +// does the same things as the respective +// function in step-31. However, there are +// a few things that we need to pay some +// attention to. The first thing is, as +// mentioned in the introduction, the way +// we store our solution: we keep the full +// vector with all degrees of freedom on +// each MPI node. When we enter a solver +// which is supposed to perform +// matrix-vector products with a +// distributed matrix, this is not the +// appropriate form, though. There, we will +// want to have the solution vector to be +// distributed in the same way as the +// matrix. So what we do first (after +// initializing the Schur-complement based +// preconditioner) is to generate a +// distributed vector called +// distributed_stokes_solution +// and put only the locally owned dofs into +// that, which is neatly done by the +// operator= of the Trilinos +// vector. Next, we need to set the +// pressure values at hanging nodes to +// zero. This we also did in step-31 in +// order not to disturb the Schur +// complement by some vector entries that +// actually are irrelevant during the solve +// stage. As a difference to step-31, here +// we do it only for the locally owned +// pressure dofs. After solving for the +// Stokes solution, each processor copies +// distributed solution back into the solution +// vector for which every element is locally +// owned. 
+// +// Apart from these two changes, everything +// is the same as in step-31, so we don't +// need to further comment on it. template void BoussinesqFlowProblem::solve () { @@ -3142,26 +3142,26 @@ void BoussinesqFlowProblem::solve () pcout << " Solving Stokes system... " << std::flush; TrilinosWrappers::MPI::BlockVector - distributed_stokes_solution (stokes_rhs); + distributed_stokes_solution (stokes_rhs); // distributed_stokes_solution = stokes_solution; distributed_stokes_solution.block(0).reinit(stokes_solution.block(0),false,true); distributed_stokes_solution.block(1).reinit(stokes_solution.block(1),false,true); const unsigned int - start = (distributed_stokes_solution.block(0).size() + - distributed_stokes_solution.block(1).local_range().first), - end = (distributed_stokes_solution.block(0).size() + - distributed_stokes_solution.block(1).local_range().second); + start = (distributed_stokes_solution.block(0).size() + + distributed_stokes_solution.block(1).local_range().first), + end = (distributed_stokes_solution.block(0).size() + + distributed_stokes_solution.block(1).local_range().second); for (unsigned int i=start; i mem; - // step 1: try if the simple and fast solver - // succeeds in 30 steps or less. + // step 1: try if the simple and fast solver + // succeeds in 30 steps or less. unsigned int n_iterations = 0; double reduction = 0; const double solver_tolerance = 1e-7 * stokes_rhs.l2_norm(); @@ -3169,72 +3169,72 @@ void BoussinesqFlowProblem::solve () try { - const LinearSolvers::RightPrecond - preconditioner (stokes_matrix, stokes_preconditioner_matrix, - *Mp_preconditioner, *Amg_preconditioner, - false); - - SolverFGMRES - solver(solver_control, mem, - SolverFGMRES:: - AdditionalData(30, true)); - solver.solve(stokes_matrix, distributed_stokes_solution, stokes_rhs, - preconditioner); - - n_iterations = solver_control.last_step(); - reduction = solver_control.last_value()/solver_control.initial_value(); + const LinearSolvers::RightPrecond + preconditioner (stokes_matrix, stokes_preconditioner_matrix, + *Mp_preconditioner, *Amg_preconditioner, + false); + + SolverFGMRES + solver(solver_control, mem, + SolverFGMRES:: + AdditionalData(30, true)); + solver.solve(stokes_matrix, distributed_stokes_solution, stokes_rhs, + preconditioner); + + n_iterations = solver_control.last_step(); + reduction = solver_control.last_value()/solver_control.initial_value(); } - // step 2: take the stronger solver in case - // the simple solver failed + // step 2: take the stronger solver in case + // the simple solver failed catch (SolverControl::NoConvergence) { - const LinearSolvers::RightPrecond - preconditioner (stokes_matrix, stokes_preconditioner_matrix, - *Mp_preconditioner, *Amg_preconditioner, - true); - - SolverControl solver_control_refined (stokes_matrix.m(), solver_tolerance); - SolverFGMRES - solver(solver_control_refined, mem, - SolverFGMRES:: - AdditionalData(50, true)); - solver.solve(stokes_matrix, distributed_stokes_solution, stokes_rhs, - preconditioner); - - n_iterations = (solver_control.last_step() + - solver_control_refined.last_step()); - reduction = (solver_control_refined.last_value()/ - std::max(solver_control.initial_value(), - solver_control_refined.initial_value())); + const LinearSolvers::RightPrecond + preconditioner (stokes_matrix, stokes_preconditioner_matrix, + *Mp_preconditioner, *Amg_preconditioner, + true); + + SolverControl solver_control_refined (stokes_matrix.m(), solver_tolerance); + SolverFGMRES + solver(solver_control_refined, mem, + SolverFGMRES:: + 
AdditionalData(50, true)); + solver.solve(stokes_matrix, distributed_stokes_solution, stokes_rhs, + preconditioner); + + n_iterations = (solver_control.last_step() + + solver_control_refined.last_step()); + reduction = (solver_control_refined.last_value()/ + std::max(solver_control.initial_value(), + solver_control_refined.initial_value())); } stokes_constraints.distribute (distributed_stokes_solution); stokes_solution.block(0).reinit(distributed_stokes_solution.block(0), - false, true); + false, true); stokes_solution.block(1).reinit(distributed_stokes_solution.block(1), - false, true); + false, true); pcout << n_iterations << " iterations." << " Reduced residual by " << reduction - << std::endl; + << std::endl; TrilinosWrappers::MPI::Vector tmp; tmp.reinit (stokes_rhs.block(1)); pcout << " Relative divergence residual: " - << stokes_matrix.block(1,0).residual (tmp, - distributed_stokes_solution.block(0), - stokes_rhs.block(1)) - / - distributed_stokes_solution.block(0).l2_norm() / EquationData::pressure_scaling - << std::endl; + << stokes_matrix.block(1,0).residual (tmp, + distributed_stokes_solution.block(0), + stokes_rhs.block(1)) + / + distributed_stokes_solution.block(0).l2_norm() / EquationData::pressure_scaling + << std::endl; pcout << " Relative vector sizes: " - << distributed_stokes_solution.block(0).linfty_norm() << ' ' - << distributed_stokes_solution.block(1).linfty_norm() << std::endl; + << distributed_stokes_solution.block(0).linfty_norm() << ' ' + << distributed_stokes_solution.block(1).linfty_norm() << std::endl; } computing_timer.exit_section(); @@ -3245,23 +3245,23 @@ void BoussinesqFlowProblem::solve () old_time_step = time_step; const double cfl_number = get_cfl_number(); - // we found out that we need - // approximately a quarter the time step - // size in 3d + // we found out that we need + // approximately a quarter the time step + // size in 3d double scaling = (dim==3)?0.25:1.0; time_step = (scaling/(2.1*dim*std::sqrt(1.*dim)) / - (parameters.temperature_degree * - cfl_number)); + (parameters.temperature_degree * + cfl_number)); const double maximal_velocity = get_maximal_velocity(); pcout << " Maximal velocity: " - << maximal_velocity * EquationData::year_in_seconds * 100 - << " cm/year" - << std::endl; + << maximal_velocity *EquationData::year_in_seconds * 100 + << " cm/year" + << std::endl; pcout << " " << "Time step: " - << time_step/EquationData::year_in_seconds - << " years" - << std::endl; + << time_step/EquationData::year_in_seconds + << " years" + << std::endl; temperature_solution = old_temperature_solution; assemble_temperature_system (maximal_velocity); @@ -3271,70 +3271,70 @@ void BoussinesqFlowProblem::solve () computing_timer.enter_section (" Solve temperature system"); { SolverControl solver_control (temperature_matrix.m(), - 1e-12*temperature_rhs.l2_norm()); + 1e-12*temperature_rhs.l2_norm()); SolverCG cg (solver_control); TrilinosWrappers::MPI::Vector - distributed_temperature_solution (temperature_rhs); + distributed_temperature_solution (temperature_rhs); // distributed_temperature_solution = temperature_solution; distributed_temperature_solution.reinit(temperature_solution, false, true); cg.solve (temperature_matrix, distributed_temperature_solution, - temperature_rhs, *T_preconditioner); + temperature_rhs, *T_preconditioner); temperature_constraints.distribute (distributed_temperature_solution); // temperature_solution = distributed_temperature_solution; temperature_solution.reinit(distributed_temperature_solution, false, true); pcout << " " 
- << solver_control.last_step() - << " CG iterations for temperature" << std::endl; + << solver_control.last_step() + << " CG iterations for temperature" << std::endl; computing_timer.exit_section(); - // extract temperature range + // extract temperature range std::vector temperature (2), global_temperature (2); temperature[0] = std::numeric_limits::max(), - temperature[1] = -std::numeric_limits::max(); + temperature[1] = -std::numeric_limits::max(); for (unsigned int i=0; i (temperature[0], - distributed_temperature_solution.trilinos_vector()[0][i]); - temperature[1] = std::max (temperature[1], - distributed_temperature_solution.trilinos_vector()[0][i]); + temperature[0] = std::min (temperature[0], + distributed_temperature_solution.trilinos_vector()[0][i]); + temperature[1] = std::max (temperature[1], + distributed_temperature_solution.trilinos_vector()[0][i]); } #ifdef DEAL_II_COMPILER_SUPPORTS_MPI temperature[0] *= -1.0; MPI_Allreduce (&temperature[0], &global_temperature[0], - 2, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); + 2, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); global_temperature[0] *= -1.0; #else global_temperature = local_temperature; #endif pcout << " Temperature range: " - << global_temperature[0] << ' ' << global_temperature[1] - << std::endl; + << global_temperature[0] << ' ' << global_temperature[1] + << std::endl; } } - // @sect4{BoussinesqFlowProblem::output_results} +// @sect4{BoussinesqFlowProblem::output_results} template class BoussinesqFlowProblem::Postprocessor : public DataPostprocessor { public: Postprocessor (const unsigned int partition, - const double minimal_pressure); + const double minimal_pressure); virtual void compute_derived_quantities_vector (const std::vector > &uh, - const std::vector > > &duh, - const std::vector > > &dduh, - const std::vector > &normals, - const std::vector > &evaluation_points, - std::vector > &computed_quantities) const; + const std::vector > > &duh, + const std::vector > > &dduh, + const std::vector > &normals, + const std::vector > &evaluation_points, + std::vector > &computed_quantities) const; virtual std::vector get_names () const; @@ -3355,10 +3355,10 @@ class BoussinesqFlowProblem::Postprocessor : public DataPostprocessor template BoussinesqFlowProblem::Postprocessor:: Postprocessor (const unsigned int partition, - const double minimal_pressure) - : - partition (partition), - minimal_pressure (minimal_pressure) + const double minimal_pressure) + : + partition (partition), + minimal_pressure (minimal_pressure) {} @@ -3391,8 +3391,8 @@ BoussinesqFlowProblem::Postprocessor:: get_data_component_interpretation () const { std::vector - interpretation (dim, - DataComponentInterpretation::component_is_part_of_vector); + interpretation (dim, + DataComponentInterpretation::component_is_part_of_vector); interpretation.push_back (DataComponentInterpretation::component_is_scalar); interpretation.push_back (DataComponentInterpretation::component_is_scalar); @@ -3417,11 +3417,11 @@ template void BoussinesqFlowProblem::Postprocessor:: compute_derived_quantities_vector (const std::vector > &uh, - const std::vector > > &duh, - const std::vector > > &/*dduh*/, - const std::vector > &/*normals*/, - const std::vector > &evaluation_points, - std::vector > &computed_quantities) const + const std::vector > > &duh, + const std::vector > > &/*dduh*/, + const std::vector > &/*normals*/, + const std::vector > &evaluation_points, + std::vector > &computed_quantities) const { const unsigned int n_quadrature_points = uh.size(); Assert (duh.size() == 
n_quadrature_points, ExcInternalError()); @@ -3431,111 +3431,111 @@ compute_derived_quantities_vector (const std::vector > for (unsigned int q=0; q grad_u; for (unsigned int d=0; d strain_rate = symmetrize (grad_u); computed_quantities[q](dim+2) = 2 * EquationData::eta * - strain_rate * strain_rate; + strain_rate * strain_rate; computed_quantities[q](dim+3) = partition; computed_quantities[q](dim+4) = pressure - - EquationData::adiabatic_pressure (evaluation_points[q]); + EquationData::adiabatic_pressure (evaluation_points[q]); computed_quantities[q](dim+5) = temperature - - EquationData::adiabatic_temperature (evaluation_points[q]); + EquationData::adiabatic_temperature (evaluation_points[q]); } } - // This function does mostly what the - // corresponding one did in to - // step-31, in particular merging - // data from the two DoFHandler - // objects (for the Stokes and the - // temperature parts of the problem) - // into one is the same. There are - // three minor changes: we make sure - // that only a single processor - // actually does some work here; take - // care of scaling variables in a - // useful way; and in addition to the - // Stokes and temperature parts in - // the joint_fe finite - // element, we also add a piecewise - // constant field that denotes the - // subdomain id a cell corresponds - // to. This allows us to visualize - // the partitioning of the domain. As - // a consequence, we also have to - // change the assertion about the - // number of degrees of freedom in - // the joint DoFHandler object (which - // is now equal to the number of - // Stokes degrees of freedom plus the - // temperature degrees of freedom - // plus the number of active cells as - // that is the number of partition - // variables we want to add), and - // adjust the number of elements in - // the arrays we use to name the - // components of the joint solution - // vector and to identify which of - // these components are scalars or - // parts of dim-dimensional vectors. - // - // As for scaling: as mentioned in - // the introduction, to keep the - // Stokes equations properly scaled - // and symmetric, we introduced a new - // pressure $\hat p = - // \frac{L}{\eta}p$. What we really - // wanted, however, was the original - // pressure $p$, so while copying - // data from the Stokes DoFHandler - // into the joint one, we undo this - // scaling. While we're at it messing - // with the results of the - // simulation, we do two more things: - // First, the pressure is only - // defined up to a constant. To make - // it more easily comparable, we - // compute the minimal value of the - // pressure computed and shift all - // values up by that amount -- in - // essence making all pressure - // variables positive or - // zero. Secondly, let's also take - // care of the awkward units we use - // for the velocity: it is computed - // in SI units of meters per second, - // which of course is a very small - // number in the earth mantle. We - // therefore rescale things into - // centimeters per year, the unit - // commonly used in geophysics. +// This function does mostly what the +// corresponding one did in to +// step-31, in particular merging +// data from the two DoFHandler +// objects (for the Stokes and the +// temperature parts of the problem) +// into one is the same. 
There are +// three minor changes: we make sure +// that only a single processor +// actually does some work here; take +// care of scaling variables in a +// useful way; and in addition to the +// Stokes and temperature parts in +// the joint_fe finite +// element, we also add a piecewise +// constant field that denotes the +// subdomain id a cell corresponds +// to. This allows us to visualize +// the partitioning of the domain. As +// a consequence, we also have to +// change the assertion about the +// number of degrees of freedom in +// the joint DoFHandler object (which +// is now equal to the number of +// Stokes degrees of freedom plus the +// temperature degrees of freedom +// plus the number of active cells as +// that is the number of partition +// variables we want to add), and +// adjust the number of elements in +// the arrays we use to name the +// components of the joint solution +// vector and to identify which of +// these components are scalars or +// parts of dim-dimensional vectors. +// +// As for scaling: as mentioned in +// the introduction, to keep the +// Stokes equations properly scaled +// and symmetric, we introduced a new +// pressure $\hat p = +// \frac{L}{\eta}p$. What we really +// wanted, however, was the original +// pressure $p$, so while copying +// data from the Stokes DoFHandler +// into the joint one, we undo this +// scaling. While we're at it messing +// with the results of the +// simulation, we do two more things: +// First, the pressure is only +// defined up to a constant. To make +// it more easily comparable, we +// compute the minimal value of the +// pressure computed and shift all +// values up by that amount -- in +// essence making all pressure +// variables positive or +// zero. Secondly, let's also take +// care of the awkward units we use +// for the velocity: it is computed +// in SI units of meters per second, +// which of course is a very small +// number in the earth mantle. We +// therefore rescale things into +// centimeters per year, the unit +// commonly used in geophysics. 
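The three cosmetic transformations just described (undoing the pressure rescaling, shifting the pressure so that its minimum becomes zero, and converting velocities from SI units into cm/year) amount to nothing more than the following self-contained sketch; the numerical values are placeholders standing in for EquationData::pressure_scaling, EquationData::year_in_seconds, and the minimal pressure that the program obtains from stokes_solution.block(1).minimal_value():

#include <algorithm>
#include <vector>

int main ()
{
  const double pressure_scaling = 1.0;     // placeholder for EquationData::pressure_scaling = eta/L
  const double year_in_seconds  = 3.15e7;  // placeholder for EquationData::year_in_seconds

  std::vector<double> p_hat = { -3.0, 0.5, 2.0 };   // scaled pressure \hat p = (L/eta) p
  std::vector<double> u_si  = { 1e-9, 3e-10 };      // velocity magnitudes in m/s

  // undo the scaling \hat p = (L/eta) p, i.e. p = pressure_scaling * \hat p
  for (double &p : p_hat)
    p *= pressure_scaling;

  // shift so that the smallest pressure value becomes zero
  const double p_min = *std::min_element (p_hat.begin(), p_hat.end());
  for (double &p : p_hat)
    p -= p_min;

  // convert m/s into the geophysically more convenient cm/year
  for (double &u : u_si)
    u *= year_in_seconds * 100;
}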
template void BoussinesqFlowProblem::output_results () { computing_timer.enter_section ("Postprocessing"); - //calculate l2 norm of divergence and - //norm of gradient + //calculate l2 norm of divergence and + //norm of gradient { double my_cells_error[2] = {0, 0}; QGauss<1> q_base(parameters.stokes_velocity_degree); @@ -3543,7 +3543,7 @@ void BoussinesqFlowProblem::output_results () const unsigned int n_q_points = err_quadrature.size(); FEValues fe_values (mapping, stokes_fe, err_quadrature, - update_JxW_values | update_gradients); + update_JxW_values | update_gradients); const unsigned int dofs_per_cell = fe_values.get_fe().dofs_per_cell; const FEValuesExtractors::Vector velocities (0); @@ -3553,32 +3553,32 @@ void BoussinesqFlowProblem::output_results () std::vector > local_grad (n_q_points); typename DoFHandler::active_cell_iterator - cell = stokes_dof_handler.begin_active(), - endc = stokes_dof_handler.end(); + cell = stokes_dof_handler.begin_active(), + endc = stokes_dof_handler.end(); for (; cell!=endc; ++cell) if (cell->subdomain_id() == - Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)) - { - fe_values.reinit (cell); - cell->get_dof_indices(local_dof_indices); - - fe_values[velocities].get_function_divergences (stokes_solution, - local_div); - fe_values[velocities].get_function_gradients (stokes_solution, - local_grad); - - double cell_error = 0.0; - for (unsigned int q = 0; q < n_q_points; ++q) - { - my_cells_error[0] += local_div[q] * local_div[q] * fe_values.JxW(q); - my_cells_error[1] += scalar_product(local_grad[q], local_grad[q]) * fe_values.JxW(q); - } - } + Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)) + { + fe_values.reinit (cell); + cell->get_dof_indices(local_dof_indices); + + fe_values[velocities].get_function_divergences (stokes_solution, + local_div); + fe_values[velocities].get_function_gradients (stokes_solution, + local_grad); + + double cell_error = 0.0; + for (unsigned int q = 0; q < n_q_points; ++q) + { + my_cells_error[0] += local_div[q] * local_div[q] * fe_values.JxW(q); + my_cells_error[1] += scalar_product(local_grad[q], local_grad[q]) * fe_values.JxW(q); + } + } double div_error[2] = {0,0}; #ifdef DEAL_II_COMPILER_SUPPORTS_MPI MPI_Allreduce (&my_cells_error, &div_error, 2, MPI_DOUBLE, - MPI_SUM, MPI_COMM_WORLD); + MPI_SUM, MPI_COMM_WORLD); #else div_error[0] = my_cells_error[0]; div_error[1] = my_cells_error[1]; @@ -3596,8 +3596,8 @@ void BoussinesqFlowProblem::output_results () DoFHandler joint_dof_handler (triangulation); joint_dof_handler.distribute_dofs (joint_fe); Assert (joint_dof_handler.n_dofs() == - stokes_dof_handler.n_dofs() + temperature_dof_handler.n_dofs(), - ExcInternalError()); + stokes_dof_handler.n_dofs() + temperature_dof_handler.n_dofs(), + ExcInternalError()); TrilinosWrappers::MPI::Vector joint_solution; joint_solution.reinit (joint_dof_handler.locally_owned_dofs(), MPI_COMM_WORLD); @@ -3608,43 +3608,43 @@ void BoussinesqFlowProblem::output_results () std::vector local_temperature_dof_indices (temperature_fe.dofs_per_cell); typename DoFHandler::active_cell_iterator - joint_cell = joint_dof_handler.begin_active(), - joint_endc = joint_dof_handler.end(), - stokes_cell = stokes_dof_handler.begin_active(), - temperature_cell = temperature_dof_handler.begin_active(); + joint_cell = joint_dof_handler.begin_active(), + joint_endc = joint_dof_handler.end(), + stokes_cell = stokes_dof_handler.begin_active(), + temperature_cell = temperature_dof_handler.begin_active(); for (; joint_cell!=joint_endc; - ++joint_cell, 
++stokes_cell, ++temperature_cell) + ++joint_cell, ++stokes_cell, ++temperature_cell) if (joint_cell->is_locally_owned()) - { - joint_cell->get_dof_indices (local_joint_dof_indices); - stokes_cell->get_dof_indices (local_stokes_dof_indices); - temperature_cell->get_dof_indices (local_temperature_dof_indices); - - for (unsigned int i=0; iget_dof_indices (local_joint_dof_indices); + stokes_cell->get_dof_indices (local_stokes_dof_indices); + temperature_cell->get_dof_indices (local_temperature_dof_indices); + + for (unsigned int i=0; i::output_results () locally_relevant_joint_solution = joint_solution; Postprocessor postprocessor (Utilities::System:: - get_this_mpi_process(MPI_COMM_WORLD), - stokes_solution.block(1).minimal_value()); + get_this_mpi_process(MPI_COMM_WORLD), + stokes_solution.block(1).minimal_value()); DataOut data_out; data_out.attach_dof_handler (joint_dof_handler); @@ -3665,11 +3665,11 @@ void BoussinesqFlowProblem::output_results () static int out_index=0; const std::string filename = ("solution-" + - Utilities::int_to_string (out_index, 5) + - "." + - Utilities::int_to_string - (triangulation.locally_owned_subdomain(), 4) + - ".vtu"); + Utilities::int_to_string (out_index, 5) + + "." + + Utilities::int_to_string + (triangulation.locally_owned_subdomain(), 4) + + ".vtu"); std::ofstream output (filename.c_str()); data_out.write_vtu (output); @@ -3677,22 +3677,22 @@ void BoussinesqFlowProblem::output_results () { std::vector filenames; for (unsigned int i=0; i::output_results () - // @sect4{BoussinesqFlowProblem::refine_mesh} - - // This function isn't really new - // either. Since the - // setup_dofs function - // that we call in the middle has its - // own timer section, we split timing - // this function into two - // sections. It will also allow us to - // easily identify which of the two - // is more expensive. - // - // One thing of note, however, is - // that we don't want to compute all - // error indicators on all cells, of - // course. Rather, it would be nice - // if each processor could only - // compute the error indicators for - // those cells it actually - // owns. However, in order for mesh - // refinement to proceed in the same - // way on all processors, all - // processors would have to exchange - // their refinement indicators. We do - // so in two steps: first, we call - // the KellyErrorEstimator::estimate - // function with an argument (usually - // defaulted, but explicitly given - // here) thatindicates the subdomain - // id of all those cells that we want - // to work on; note that this means - // that we also have to specify - // values for all those default - // arguments that lie before the one - // we want to give. - // - // Secondly, we need to exchange the - // data. To do this, we could add up - // the refinement indicators from all - // processors, since they all only - // worked on a disjoint subset of the - // elements of the vector that holds - // these indicators. We could set up - // a distributed Trilinos vector for - // this, but that appears - // unnecessarily complicated because - // we would have to specify a - // partition of this vector, and none - // appears immediately - // obvious. Rather, we want to use - // the Trilinos communicator class to - // this for us, taking the local - // indicators as a collection of - // floating point values rather than - // a linear algebra - // vector. 
Unfortunately, the - // Trilinos communicator class - // doesn't appear to have function - // that wraps around the MPI add - // function; it has one that computes - // the maximum of a bunch of values, - // though, which in our case is - // equally good -- maybe even better, - // in case two processors should - // compute values for the same cell - // (which they shouldn't of course, - // unless we have made a mistake in - // specifying the arguments to the - // estimate function below). There is - // little snag again, however, that - // makes this a bit awkward: the - // Trilinos communicator class can - // take the maximum over all - // processors for each element of a - // vector, but only if the vector - // contains doubles. The vector - // returned by the - // KellyErrorEstimator::estimate - // function, on the other hand, has - // floats as its data type. An ugly, - // if workable way, is therefore to - // compute the indicators as floats, - // convert the vector to doubles, and - // form the maximum of that. - // - // At the end of this chain of - // events, every processors has the - // complete set of refinement - // indicators, and the rest of the - // function proceeds as before. +// @sect4{BoussinesqFlowProblem::refine_mesh} + +// This function isn't really new +// either. Since the +// setup_dofs function +// that we call in the middle has its +// own timer section, we split timing +// this function into two +// sections. It will also allow us to +// easily identify which of the two +// is more expensive. +// +// One thing of note, however, is +// that we don't want to compute all +// error indicators on all cells, of +// course. Rather, it would be nice +// if each processor could only +// compute the error indicators for +// those cells it actually +// owns. However, in order for mesh +// refinement to proceed in the same +// way on all processors, all +// processors would have to exchange +// their refinement indicators. We do +// so in two steps: first, we call +// the KellyErrorEstimator::estimate +// function with an argument (usually +// defaulted, but explicitly given +// here) thatindicates the subdomain +// id of all those cells that we want +// to work on; note that this means +// that we also have to specify +// values for all those default +// arguments that lie before the one +// we want to give. +// +// Secondly, we need to exchange the +// data. To do this, we could add up +// the refinement indicators from all +// processors, since they all only +// worked on a disjoint subset of the +// elements of the vector that holds +// these indicators. We could set up +// a distributed Trilinos vector for +// this, but that appears +// unnecessarily complicated because +// we would have to specify a +// partition of this vector, and none +// appears immediately +// obvious. Rather, we want to use +// the Trilinos communicator class to +// this for us, taking the local +// indicators as a collection of +// floating point values rather than +// a linear algebra +// vector. Unfortunately, the +// Trilinos communicator class +// doesn't appear to have function +// that wraps around the MPI add +// function; it has one that computes +// the maximum of a bunch of values, +// though, which in our case is +// equally good -- maybe even better, +// in case two processors should +// compute values for the same cell +// (which they shouldn't of course, +// unless we have made a mistake in +// specifying the arguments to the +// estimate function below). 
There is +// little snag again, however, that +// makes this a bit awkward: the +// Trilinos communicator class can +// take the maximum over all +// processors for each element of a +// vector, but only if the vector +// contains doubles. The vector +// returned by the +// KellyErrorEstimator::estimate +// function, on the other hand, has +// floats as its data type. An ugly, +// if workable way, is therefore to +// compute the indicators as floats, +// convert the vector to doubles, and +// form the maximum of that. +// +// At the end of this chain of +// events, every processors has the +// complete set of refinement +// indicators, and the rest of the +// function proceeds as before. template void BoussinesqFlowProblem::refine_mesh (const unsigned int max_grid_level) { @@ -3800,47 +3800,47 @@ void BoussinesqFlowProblem::refine_mesh (const unsigned int max_grid_level) Vector estimated_error_per_cell (triangulation.n_active_cells()); KellyErrorEstimator::estimate (temperature_dof_handler, - QGauss(parameters.temperature_degree+1), - typename FunctionMap::type(), - temperature_solution, - estimated_error_per_cell, - std::vector(), - 0, - 0, - triangulation.locally_owned_subdomain()); + QGauss(parameters.temperature_degree+1), + typename FunctionMap::type(), + temperature_solution, + estimated_error_per_cell, + std::vector(), + 0, + 0, + triangulation.locally_owned_subdomain()); parallel::distributed::GridRefinement:: - refine_and_coarsen_fixed_fraction (triangulation, - estimated_error_per_cell, - 0.3, 0.1); - - // for (typename Triangulation::active_cell_iterator - // cell = triangulation.begin_active(); - // cell != triangulation.end(); ++cell) - // if (cell->is_locally_owned()) - // if ((cell->center()[1] > 0) - // && - // (cell->center()[2] > 0)) - // cell->set_refine_flag(); - - // limit maximum refinement level + refine_and_coarsen_fixed_fraction (triangulation, + estimated_error_per_cell, + 0.3, 0.1); + + // for (typename Triangulation::active_cell_iterator + // cell = triangulation.begin_active(); + // cell != triangulation.end(); ++cell) + // if (cell->is_locally_owned()) + // if ((cell->center()[1] > 0) + // && + // (cell->center()[2] > 0)) + // cell->set_refine_flag(); + + // limit maximum refinement level if (triangulation.n_levels() > max_grid_level) for (typename Triangulation::active_cell_iterator - cell = triangulation.begin_active(max_grid_level); - cell != triangulation.end(); ++cell) + cell = triangulation.begin_active(max_grid_level); + cell != triangulation.end(); ++cell) cell->clear_refine_flag (); - std::vector x_temperature (2); + std::vector x_temperature (2); x_temperature[0] = &temperature_solution; x_temperature[1] = &old_temperature_solution; - std::vector x_stokes (2); + std::vector x_stokes (2); x_stokes[0] = &stokes_solution; x_stokes[1] = &old_stokes_solution; parallel::distributed::SolutionTransfer - temperature_trans(temperature_dof_handler); + temperature_trans(temperature_dof_handler); parallel::distributed::SolutionTransfer - stokes_trans(stokes_dof_handler); + stokes_trans(stokes_dof_handler); triangulation.prepare_coarsening_and_refinement(); temperature_trans.prepare_for_coarsening_and_refinement(x_temperature); @@ -3855,11 +3855,11 @@ void BoussinesqFlowProblem::refine_mesh (const unsigned int max_grid_level) { TrilinosWrappers::MPI::Vector - distributed_temp1 (temperature_rhs); + distributed_temp1 (temperature_rhs); TrilinosWrappers::MPI::Vector - distributed_temp2 (temperature_rhs); + distributed_temp2 (temperature_rhs); - std::vector tmp (2); + 
std::vector tmp (2); tmp[0] = &(distributed_temp1); tmp[1] = &(distributed_temp2); temperature_trans.interpolate(tmp); @@ -3872,10 +3872,10 @@ void BoussinesqFlowProblem::refine_mesh (const unsigned int max_grid_level) { TrilinosWrappers::MPI::BlockVector - distributed_stokes (stokes_rhs); + distributed_stokes (stokes_rhs); TrilinosWrappers::MPI::BlockVector - old_distributed_stokes (stokes_rhs); - std::vector stokes_tmp (2); + old_distributed_stokes (stokes_rhs); + std::vector stokes_tmp (2); stokes_tmp[0] = &(distributed_stokes); stokes_tmp[1] = &(old_distributed_stokes); @@ -3892,26 +3892,26 @@ void BoussinesqFlowProblem::refine_mesh (const unsigned int max_grid_level) - // @sect4{BoussinesqFlowProblem::run} +// @sect4{BoussinesqFlowProblem::run} - // This is the final function in this - // class. It actually runs the program. It - // is, once more, very similar to - // step-31. The only thing that really - // changed is that we use the - // project_temperature_field() - // function instead of the library function - // VectorTools::project, the - // rest is as before. +// This is the final function in this +// class. It actually runs the program. It +// is, once more, very similar to +// step-31. The only thing that really +// changed is that we use the +// project_temperature_field() +// function instead of the library function +// VectorTools::project, the +// rest is as before. template void BoussinesqFlowProblem::run () { GridGenerator::hyper_shell (triangulation, - Point(), - EquationData::R0, - EquationData::R1, - (dim==3) ? 96 : 12, - true); + Point(), + EquationData::R0, + EquationData::R1, + (dim==3) ? 96 : 12, + true); static HyperShellBoundary boundary; triangulation.set_boundary (0, boundary); triangulation.set_boundary (1, boundary); @@ -3924,7 +3924,7 @@ void BoussinesqFlowProblem::run () unsigned int pre_refinement_step = 0; - start_time_iteration: +start_time_iteration: project_temperature_field (); @@ -3936,9 +3936,9 @@ void BoussinesqFlowProblem::run () do { pcout << "Timestep " << timestep_number - << ": t=" << time/EquationData::year_in_seconds - << " years" - << std::endl; + << ": t=" << time/EquationData::year_in_seconds + << " years" + << std::endl; assemble_stokes_system (); build_stokes_preconditioner (); @@ -3949,62 +3949,61 @@ void BoussinesqFlowProblem::run () pcout << std::endl; if ((timestep_number == 0) && - (pre_refinement_step < parameters.initial_adaptive_refinement)) - { - refine_mesh (parameters.initial_global_refinement + - parameters.initial_adaptive_refinement); - ++pre_refinement_step; - goto start_time_iteration; - } - else - if ((timestep_number > 0) - && - (timestep_number % parameters.adaptive_refinement_interval == 0)) - refine_mesh (parameters.initial_global_refinement + - parameters.initial_adaptive_refinement); + (pre_refinement_step < parameters.initial_adaptive_refinement)) + { + refine_mesh (parameters.initial_global_refinement + + parameters.initial_adaptive_refinement); + ++pre_refinement_step; + goto start_time_iteration; + } + else if ((timestep_number > 0) + && + (timestep_number % parameters.adaptive_refinement_interval == 0)) + refine_mesh (parameters.initial_global_refinement + + parameters.initial_adaptive_refinement); if ((parameters.generate_graphical_output == true) - && - (timestep_number % parameters.graphical_output_interval == 0)) - output_results (); + && + (timestep_number % parameters.graphical_output_interval == 0)) + output_results (); time += time_step; ++timestep_number; - // if we are at the end of - // time, 
stop now + // if we are at the end of + // time, stop now if (time > parameters.end_time * EquationData::year_in_seconds) - break; + break; - // otherwise prepare for the - // next time step + // otherwise prepare for the + // next time step TrilinosWrappers::MPI::BlockVector old_old_stokes_solution; old_old_stokes_solution = old_stokes_solution; old_stokes_solution = stokes_solution; old_old_temperature_solution = old_temperature_solution; old_temperature_solution = temperature_solution; if (old_time_step > 0) - { - stokes_solution.sadd (1.+time_step/old_time_step, -time_step/old_time_step, - old_old_stokes_solution); - temperature_solution.sadd (1.+time_step/old_time_step, - -time_step/old_time_step, - old_old_temperature_solution); - } - - // every 100 time steps output - // a summary of the current - // timing information + { + stokes_solution.sadd (1.+time_step/old_time_step, -time_step/old_time_step, + old_old_stokes_solution); + temperature_solution.sadd (1.+time_step/old_time_step, + -time_step/old_time_step, + old_old_temperature_solution); + } + + // every 100 time steps output + // a summary of the current + // timing information if (timestep_number % 100 == 0) - computing_timer.print_summary (); + computing_timer.print_summary (); } while (true); - // if we are generating graphical - // output, do so also for the last - // time step unless we had just - // done so before we left the - // do-while loop + // if we are generating graphical + // output, do so also for the last + // time step unless we had just + // done so before we left the + // do-while loop if ((parameters.generate_graphical_output == true) && !((timestep_number-1) % parameters.graphical_output_interval == 0)) @@ -4013,9 +4012,9 @@ void BoussinesqFlowProblem::run () - // @sect3{The main function} +// @sect3{The main function} - // This is copied verbatim from step-31: +// This is copied verbatim from step-31: int main (int argc, char *argv[]) { Utilities::System::MPI_InitFinalize mpi_initialization(argc, argv); @@ -4026,9 +4025,9 @@ int main (int argc, char *argv[]) std::string parameter_filename; if (argc>=2) - parameter_filename = argv[1]; + parameter_filename = argv[1]; else - parameter_filename = "step-32.prm"; + parameter_filename = "step-32.prm"; const int dim = 3; BoussinesqFlowProblem::Parameters parameters(parameter_filename); -- 2.39.5