From f7d502856ce25b4c249e37080ba2c3046b5e7cfc Mon Sep 17 00:00:00 2001 From: bangerth Date: Fri, 9 Sep 2011 03:17:50 +0000 Subject: [PATCH] Move things local to each program into a local namespace. git-svn-id: https://svn.dealii.org/trunk@24291 0785d39b-7218-0410-832d-ea1e28bc413d --- deal.II/examples/step-15/step-15.cc | 2510 +++++++++--------- deal.II/examples/step-16/step-16.cc | 1748 ++++++------- deal.II/examples/step-17/step-17.cc | 2220 ++++++++-------- deal.II/examples/step-18/step-18.cc | 2619 ++++++++++--------- deal.II/examples/step-19/step-19.cc | 1207 ++++----- deal.II/examples/step-20/step-20.cc | 2142 +++++++-------- deal.II/examples/step-21/step-21.cc | 2304 ++++++++-------- deal.II/examples/step-22/step-22.cc | 2356 ++++++++--------- deal.II/examples/step-23/step-23.cc | 1247 ++++----- deal.II/examples/step-24/step-24.cc | 1101 ++++---- deal.II/examples/step-25/step-25.cc | 1610 ++++++------ deal.II/examples/step-26/step-26.cc | 1091 ++++---- deal.II/examples/step-27/step-27.cc | 1812 ++++++------- deal.II/examples/step-28/step-28.cc | 3764 ++++++++++++++------------- deal.II/examples/step-29/step-29.cc | 2578 +++++++++--------- 15 files changed, 15196 insertions(+), 15113 deletions(-) diff --git a/deal.II/examples/step-15/step-15.cc b/deal.II/examples/step-15/step-15.cc index e002c4a8c1..13237fe333 100644 --- a/deal.II/examples/step-15/step-15.cc +++ b/deal.II/examples/step-15/step-15.cc @@ -3,7 +3,7 @@ /* $Id$ */ /* */ -/* Copyright (C) 2002, 2003, 2004, 2006, 2007, 2008, 2009, 2010 by the deal.II authors */ +/* Copyright (C) 2002, 2003, 2004, 2006, 2007, 2008, 2009, 2010, 2011 by the deal.II authors */ /* */ /* This file is subject to QPL and may not be distributed */ /* without copyright and license information. Please refer */ @@ -51,1304 +51,1307 @@ // The last step is as in all // previous programs: -using namespace dealii; - - // The first thing we have here is a helper - // function that computes an even power $|v|^n$ - // of a vector $v$, by evaluating - // $(v\cdot v)^{n/2}$. We need this in the - // computations below where we do not want to - // dwell on the fact that the gradient of the - // solution is actually a scalar in the 1d - // situation we consider in this program (in - // 1d, the gradient is a vector with a single - // element, which is easily extracted). Small - // tricks like this make it significantly - // simpler to later extend a program so that - // it also runs in higher space dimensions. - // - // While the implementation of the function - // is obvious, note the assertion at the - // beginning of the function body, which - // makes sure that the exponent is indeed an - // even number (here, we use that n/2 is - // computed in integer arithmetic, i.e. any - // remainder of the division is - // lost). ExcMessage is a pre-defined - // exception class that takes a string - // argument explaining what goes wrong. It is - // a simpler way to declare exceptions than - // the ones shown in step-9 and step-13/14 - // where we explicitly declared exception - // classes. However, by using a generic - // exception class, we lose the ability to - // attach additional information at run-time - // to the exception message, such as the - // value of the variable n. By following - // the way explained in above example - // programs, adding this feature is simple, - // though. 
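// As an aside, the even-power trick described above is easy to try out in
// isolation. The following is a self-contained sketch in plain C++ (it uses
// std::array in place of Tensor<1,dim>, and the name gradient_power_sketch
// is made up for illustration):

#include <array>
#include <cassert>
#include <iostream>

double gradient_power_sketch (const std::array<double,2> &v,
                              const unsigned int n)
{
  // The exponent must be even, since we multiply up (v.v)^(n/2):
  assert ((n/2)*2 == n);

  double v_dot_v = 0;
  for (const double component : v)
    v_dot_v += component*component;

  double p = 1;
  for (unsigned int k=0; k<n/2; ++k)
    p *= v_dot_v;
  return p;
}

int main ()
{
  const std::array<double,2> v = {{3., 4.}};          // |v| = 5
  std::cout << gradient_power_sketch (v, 4)           // prints 625 = 5^4
            << std::endl;
}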
-template -inline -double gradient_power (const Tensor<1,dim> &v, - const unsigned int n) +namespace Step15 { - Assert ((n/2)*2 == n, ExcMessage ("Value of 'n' must be even")); - double p = 1; - for (unsigned int k=0; kn/2 is + // computed in integer arithmetic, i.e. any + // remainder of the division is + // lost). ExcMessage is a pre-defined + // exception class that takes a string + // argument explaining what goes wrong. It is + // a simpler way to declare exceptions than + // the ones shown in step-9 and step-13/14 + // where we explicitly declared exception + // classes. However, by using a generic + // exception class, we lose the ability to + // attach additional information at run-time + // to the exception message, such as the + // value of the variable n. By following + // the way explained in above example + // programs, adding this feature is simple, + // though. + template + inline + double gradient_power (const Tensor<1,dim> &v, + const unsigned int n) + { + Assert ((n/2)*2 == n, ExcMessage ("Value of 'n' must be even")); + double p = 1; + for (unsigned int k=0; k -{ - public: - InitializationValues () : Function<1>() {} - - virtual double value (const Point<1> &p, - const unsigned int component = 0) const; -}; - - - - // So here comes the function that implements - // the function object. The base value is - // $x^{1/3}$, while random is a random - // number between -1 and 1 (note that - // rand() returns a random integer value - // between zero and RAND_MAX; to convert - // it to a floating point value between 0 and - // 2, we have to divide by RAND_MAX and - // multiply by two -- note that the first - // multiplication has to happen in floating - // point arithmetic, so that the division is - // done in non-truncating floating point mode - // as well; the final step is then to shift - // the interval [0,2] to [-1,1]). - // - // In a second step, we add the base value - // and a random value in [-0.1,0.1] together - // and return it, unless it is less than - // zero, in which case we take zero. -double InitializationValues::value (const Point<1> &p, - const unsigned int) const -{ - const double base = std::pow(p(0), 1./3.); - const double random = 2.*rand()/RAND_MAX-1; - return std::max (base+.1*random, 0.); -} + // Secondly, we declare a class that defines + // our initial values for the nonlinear + // iteration. It is a function object, + // i.e. it has a member operator that returns + // for a given point the value of the + // function. The value we return is a random + // perturbation of the $x^{1/3}$ function + // which we know is the optimal solution in a + // larger function space. To make things a + // little simpler on the optimizer, we return + // zero if the proposed random value is + // negative. + // + // Note that this class works strictly only + // for 1d. If the program is to be extended + // to higher space dimensions, so has to be + // this class. + class InitializationValues : public Function<1> + { + public: + InitializationValues () : Function<1>() {} + + virtual double value (const Point<1> &p, + const unsigned int component = 0) const; + }; + + + + // So here comes the function that implements + // the function object. 
The base value is + // $x^{1/3}$, while random is a random + // number between -1 and 1 (note that + // rand() returns a random integer value + // between zero and RAND_MAX; to convert + // it to a floating point value between 0 and + // 2, we have to divide by RAND_MAX and + // multiply by two -- note that the first + // multiplication has to happen in floating + // point arithmetic, so that the division is + // done in non-truncating floating point mode + // as well; the final step is then to shift + // the interval [0,2] to [-1,1]). + // + // In a second step, we add the base value + // and a random value in [-0.1,0.1] together + // and return it, unless it is less than + // zero, in which case we take zero. + double InitializationValues::value (const Point<1> &p, + const unsigned int) const + { + const double base = std::pow(p(0), 1./3.); + const double random = 2.*rand()/RAND_MAX-1; + return std::max (base+.1*random, 0.); + } - // Next is the declaration of the main - // class. As in most of the previous example - // programs, the public interface of the - // class consists only of a constructor and a - // run function that does the actual - // work. The constructor takes an additional - // argument that indicates the number of the - // run we are presently performing. This - // value is only used at the very end when we - // generate graphical output with a filename - // that matches this number. - // - // The private section of the class has the - // usual assortment of functions setting up - // the computations, doing one nonlinear - // step, refineming the mesh, doing a line - // search for step length computations, - // etc. The energy function computes the - // value of the optimization functional on an - // arbitrary finite element function with - // nodal values given on the DoFHandler - // given as an argument. Since it does not - // depend on the state of this object, we - // declare this function as static. - // - // The member variables of this class are - // what we have seen before, and the - // variables that characterize the linear - // system to be solved in the next nonlinear - // step, as well as the present approximation - // of the solution. -template -class MinimizationProblem -{ - public: - MinimizationProblem (const unsigned int run_number); - void run (); - - private: - void initialize_solution (); - void setup_system_on_mesh (); - void assemble_step (); - double line_search (const Vector & update) const; - void do_step (); - void output_results () const; - void refine_grid (); - - static double energy (const DoFHandler &dof_handler, - const Vector &function); - - - const unsigned int run_number; - - Triangulation triangulation; - - FE_Q fe; - DoFHandler dof_handler; - - ConstraintMatrix hanging_node_constraints; - - SparsityPattern sparsity_pattern; - SparseMatrix matrix; - - Vector present_solution; - Vector residual; -}; - - - - // The constructor of this class is actually - // somewhat boring: -template -MinimizationProblem::MinimizationProblem (const unsigned int run_number) - : - run_number (run_number), - fe (1), - dof_handler (triangulation) -{} - - - // Then, here is the function that - // initializes the solution before the first - // non-linear iteration, by setting the - // initial values to the random function - // described above and making sure that the - // boundary values are set correctly. We will - // then only seek updates to this function - // with zero boundary values, so that the - // boundary values are always correct. 
- // - // Note how we have specialized this function - // to 1d only. We do this since the second - // part of the function, where we deal with - // boundary values, is only correct if we are - // in 1d. Not generating a general template - // for this function prevents the compiler - // from erroneously compiling this function - // for other space dimensions, then. -template <> -void MinimizationProblem<1>::initialize_solution () -{ - // The first part is to assign the correct - // size to the vector, and use library - // function that takes a function object, - // and interpolates the given vector living - // on a DoFHandler to this function - // object: - present_solution.reinit (dof_handler.n_dofs()); - VectorTools::interpolate (dof_handler, - InitializationValues(), - present_solution); - - // Then we still have to make sure that we - // get the boundary values right. This - // could have been done inside the - // InitializationValues class, but it - // is instructive to see how it can also be - // done, in particular since it is so - // simple in 1d. First, start out with an - // arbitrary cell on level 0, i.e. the - // coarse mesh: - DoFHandler<1>::cell_iterator cell; - cell = dof_handler.begin(0); - // Then move as far to the left as - // possible. Note that while in two or more - // space dimensions, there is is no - // guarantee as to the coordinate - // directions of a given face number of a - // cell, in 1d the zeroth face (and - // neighbor) is always the one to the left, - // and the first one the one to the - // right. Similarly, the zeroth child is - // the left one, the first child is the - // right one. - while (cell->at_boundary(0) == false) - cell = cell->neighbor(0); - // Now that we are at the leftmost coarse - // grid cell, go recursively through its - // left children until we find a terminal - // one: - while (cell->has_children() == true) - cell = cell->child(0); - // Then set the value of the solution - // corresponding to the zeroth degree of - // freedom and the zeroth vertex of the - // cell to zero. Note that the zeroth - // vertex is the left one, and that zero is - // the only valid second argument to the - // call to vertex_dof_index, since we - // have a scalar finite element; thus, - // there is only a single component. - present_solution(cell->vertex_dof_index(0,0)) = 0; - - // Now do all the same with the right - // boundary value, and set it to one: - cell = dof_handler.begin(0); - while (cell->at_boundary(1) == false) - cell = cell->neighbor(1); - while (cell->has_children()) - cell = cell->child(1); - present_solution(cell->vertex_dof_index(1,0)) = 1; -} - // The function that prepares the member - // variables of this class for assembling the - // linear system in each nonlinear step is - // also not very interesting. This has all - // been shown before in previous example - // programs. Note, however, that all this - // works in 1d just as in any other space - // dimension, and would not require any - // changes if we were to use the program in - // another space dimension. - // - // Note that this function is only called - // when the mesh has been changed (or before - // the first nonlinear step). It only - // initializes the variables to their right - // sizes, but since these sizes don't change - // as long as we don't change the mesh, we - // can use them for more than just one - // nonlinear iteration without reinitializing - // them. 
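// As the comment above says, the function that follows is routine; still, it
// may help to recall what the sparsity pattern it builds encodes: an entry
// (i,j) can be nonzero only if shape functions i and j overlap on at least
// one cell, which for linear elements in 1d gives a tridiagonal pattern. A
// standalone sketch using one std::set of column indices per row (the number
// of degrees of freedom and the set representation are invented for
// illustration; the SparsityPattern object used in the function stores the
// same information far more compactly):

#include <cstddef>
#include <initializer_list>
#include <iostream>
#include <set>
#include <vector>

int main ()
{
  const std::size_t n_dofs = 8;                       // nodes of a 1d mesh
  std::vector<std::set<std::size_t> > pattern (n_dofs);

  // Each cell [x_k, x_{k+1}] couples its two degrees of freedom:
  for (std::size_t cell=0; cell+1<n_dofs; ++cell)
    for (const std::size_t i : {cell, cell+1})
      for (const std::size_t j : {cell, cell+1})
        pattern[i].insert (j);

  for (std::size_t row=0; row<n_dofs; ++row)
    {
      std::cout << "row " << row << ":";
      for (const std::size_t col : pattern[row])
        std::cout << ' ' << col;
      std::cout << std::endl;
    }
}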
-template -void MinimizationProblem::setup_system_on_mesh () -{ - hanging_node_constraints.clear (); - DoFTools::make_hanging_node_constraints (dof_handler, - hanging_node_constraints); - hanging_node_constraints.close (); + // Next is the declaration of the main + // class. As in most of the previous example + // programs, the public interface of the + // class consists only of a constructor and a + // run function that does the actual + // work. The constructor takes an additional + // argument that indicates the number of the + // run we are presently performing. This + // value is only used at the very end when we + // generate graphical output with a filename + // that matches this number. + // + // The private section of the class has the + // usual assortment of functions setting up + // the computations, doing one nonlinear + // step, refineming the mesh, doing a line + // search for step length computations, + // etc. The energy function computes the + // value of the optimization functional on an + // arbitrary finite element function with + // nodal values given on the DoFHandler + // given as an argument. Since it does not + // depend on the state of this object, we + // declare this function as static. + // + // The member variables of this class are + // what we have seen before, and the + // variables that characterize the linear + // system to be solved in the next nonlinear + // step, as well as the present approximation + // of the solution. + template + class MinimizationProblem + { + public: + MinimizationProblem (const unsigned int run_number); + void run (); + + private: + void initialize_solution (); + void setup_system_on_mesh (); + void assemble_step (); + double line_search (const Vector & update) const; + void do_step (); + void output_results () const; + void refine_grid (); + + static double energy (const DoFHandler &dof_handler, + const Vector &function); + + + const unsigned int run_number; + + Triangulation triangulation; + + FE_Q fe; + DoFHandler dof_handler; + + ConstraintMatrix hanging_node_constraints; + + SparsityPattern sparsity_pattern; + SparseMatrix matrix; + + Vector present_solution; + Vector residual; + }; + + + + // The constructor of this class is actually + // somewhat boring: + template + MinimizationProblem::MinimizationProblem (const unsigned int run_number) + : + run_number (run_number), + fe (1), + dof_handler (triangulation) + {} + + + // Then, here is the function that + // initializes the solution before the first + // non-linear iteration, by setting the + // initial values to the random function + // described above and making sure that the + // boundary values are set correctly. We will + // then only seek updates to this function + // with zero boundary values, so that the + // boundary values are always correct. + // + // Note how we have specialized this function + // to 1d only. We do this since the second + // part of the function, where we deal with + // boundary values, is only correct if we are + // in 1d. Not generating a general template + // for this function prevents the compiler + // from erroneously compiling this function + // for other space dimensions, then. 
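// The effect of providing only an explicit specialization, as is done for
// initialize_solution below, can be seen with a toy class template. This is
// a standalone sketch (the names Toy and set_boundary_values are invented):

#include <iostream>

template <int dim>
struct Toy
{
  // Declared for every dimension, ...
  void set_boundary_values ();
};

// ...but defined only for the 1d case:
template <>
void Toy<1>::set_boundary_values ()
{
  std::cout << "1d boundary values set" << std::endl;
}

int main ()
{
  Toy<1> t1;
  t1.set_boundary_values ();    // fine

  Toy<2> t2;                    // creating the object is still possible
  // t2.set_boundary_values (); // but this call could never be linked,
                                // since no definition exists for dim==2
}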
+ template <> + void MinimizationProblem<1>::initialize_solution () + { + // The first part is to assign the correct + // size to the vector, and use library + // function that takes a function object, + // and interpolates the given vector living + // on a DoFHandler to this function + // object: + present_solution.reinit (dof_handler.n_dofs()); + VectorTools::interpolate (dof_handler, + InitializationValues(), + present_solution); + + // Then we still have to make sure that we + // get the boundary values right. This + // could have been done inside the + // InitializationValues class, but it + // is instructive to see how it can also be + // done, in particular since it is so + // simple in 1d. First, start out with an + // arbitrary cell on level 0, i.e. the + // coarse mesh: + DoFHandler<1>::cell_iterator cell; + cell = dof_handler.begin(0); + // Then move as far to the left as + // possible. Note that while in two or more + // space dimensions, there is is no + // guarantee as to the coordinate + // directions of a given face number of a + // cell, in 1d the zeroth face (and + // neighbor) is always the one to the left, + // and the first one the one to the + // right. Similarly, the zeroth child is + // the left one, the first child is the + // right one. + while (cell->at_boundary(0) == false) + cell = cell->neighbor(0); + // Now that we are at the leftmost coarse + // grid cell, go recursively through its + // left children until we find a terminal + // one: + while (cell->has_children() == true) + cell = cell->child(0); + // Then set the value of the solution + // corresponding to the zeroth degree of + // freedom and the zeroth vertex of the + // cell to zero. Note that the zeroth + // vertex is the left one, and that zero is + // the only valid second argument to the + // call to vertex_dof_index, since we + // have a scalar finite element; thus, + // there is only a single component. + present_solution(cell->vertex_dof_index(0,0)) = 0; + + // Now do all the same with the right + // boundary value, and set it to one: + cell = dof_handler.begin(0); + while (cell->at_boundary(1) == false) + cell = cell->neighbor(1); + while (cell->has_children()) + cell = cell->child(1); + present_solution(cell->vertex_dof_index(1,0)) = 1; + } - sparsity_pattern.reinit (dof_handler.n_dofs(), - dof_handler.n_dofs(), - dof_handler.max_couplings_between_dofs()); - DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); - hanging_node_constraints.condense (sparsity_pattern); + // The function that prepares the member + // variables of this class for assembling the + // linear system in each nonlinear step is + // also not very interesting. This has all + // been shown before in previous example + // programs. Note, however, that all this + // works in 1d just as in any other space + // dimension, and would not require any + // changes if we were to use the program in + // another space dimension. + // + // Note that this function is only called + // when the mesh has been changed (or before + // the first nonlinear step). It only + // initializes the variables to their right + // sizes, but since these sizes don't change + // as long as we don't change the mesh, we + // can use them for more than just one + // nonlinear iteration without reinitializing + // them. 
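// What InitializationValues and initialize_solution above produce can also
// be pictured without any library objects: fill the nodal values of a 1d
// grid with the randomly perturbed x^{1/3} profile, clamped at zero, and
// then pin the two boundary nodes to the prescribed values 0 and 1. A
// standalone sketch on a uniform grid (std::vector instead of
// Vector<double>; the number of nodes is arbitrary):

#include <algorithm>
#include <cmath>
#include <cstdlib>
#include <vector>

int main ()
{
  const unsigned int n_nodes = 65;
  std::vector<double> nodal_values (n_nodes);

  for (unsigned int i=0; i<n_nodes; ++i)
    {
      const double x      = 1.*i/(n_nodes-1);
      const double base   = std::pow (x, 1./3.);
      const double random = 2.*std::rand()/RAND_MAX - 1;     // in [-1,1]
      nodal_values[i]     = std::max (base + .1*random, 0.);
    }

  // Enforce the boundary conditions u(0)=0, u(1)=1 exactly, so that all
  // later updates can use zero boundary values:
  nodal_values.front() = 0;
  nodal_values.back()  = 1;
}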
+ template + void MinimizationProblem::setup_system_on_mesh () + { + hanging_node_constraints.clear (); + DoFTools::make_hanging_node_constraints (dof_handler, + hanging_node_constraints); + hanging_node_constraints.close (); - sparsity_pattern.compress(); -} + sparsity_pattern.reinit (dof_handler.n_dofs(), + dof_handler.n_dofs(), + dof_handler.max_couplings_between_dofs()); + DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); + hanging_node_constraints.condense (sparsity_pattern); + sparsity_pattern.compress(); + } - // Next is the function that assembles the - // linear system. The first part, - // initializing various local variables is - // what we have been doing previously - // already. -template -void MinimizationProblem::assemble_step () -{ - // The first two lines of the function - // clear the matrix and right hand side - // values of their prior content, which - // could possibly still be there from the - // previous nonlinear step. - matrix.reinit (sparsity_pattern); - residual.reinit (dof_handler.n_dofs()); - - // Then we initialize a FEValues object - // with a 4-point Gauss quadrature - // formula. This object will be used to - // compute the values and gradients of the - // shape functions at the quadrature - // points, which we need to assemble the - // matrix and right hand side of the - // nonlinear step as outlined in the - // introduction to this example program. In - // order to compute values and gradients, - // we need to pass the update_values - // and update_gradients flags to the - // constructor, and the - // update_JxW_values flag for the - // Jacobian times the weight at a - // quadrature point. In addition, we need - // to have the coordinate values of each - // quadrature point in real space for the - // $x-u^3$ terms; to get these from the - // FEValues object, we need to pass it - // the update_quadrature_points flag. - // - // It is a simple calculation to figure out - // that for linear elements, the integrals - // in the right hand side semilinear form - // is a polynomial of sixth order. Thus, - // the appropriate quadrature formula is - // the one we have chosen here. - QGauss quadrature_formula(4); - FEValues fe_values (fe, quadrature_formula, - update_values | update_gradients | - update_quadrature_points | update_JxW_values); - - // Next, here are the usual two convenience - // variables, followed by declarations for - // the local contributions to matrix and - // right hand side, as well as an array to - // hold the indices of the local degrees of - // freedom on each cell: - const unsigned int dofs_per_cell = fe.dofs_per_cell; - const unsigned int n_q_points = quadrature_formula.size(); - - FullMatrix cell_matrix (dofs_per_cell, dofs_per_cell); - Vector cell_rhs (dofs_per_cell); - - std::vector local_dof_indices (dofs_per_cell); - - // The next two variables are needed since - // the problem we consider is nonlinear, - // and thus the right hand side depends on - // the previous solution (in a Newton - // method, for example, the left hand side - // matrix would also depend on the previous - // solution, but as explained in the - // introduction, we only use a simple - // gradient-type method in which the matrix - // is a scaled Laplace-type matrix). In - // order to compute the values of the - // integrand for the right hand side, we - // therefore need to have the values and - // gradients of the previous solution at - // the quadrature points. 
We will get them - // from the FEValues object above, and - // will put them into the following two - // variables: - std::vector local_solution_values (n_q_points); - std::vector > local_solution_grads (n_q_points); - - // Now, here comes the main loop over all - // the cells of the mesh: - typename DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(), - endc = dof_handler.end(); - for (; cell!=endc; ++cell) - { - // First, clear the objects that hold - // the local matrix and right hand side - // contributions for this cell: - cell_matrix = 0; - cell_rhs = 0; - - // Then initialize the values and - // gradients of the shape functions at - // the quadrature points of this cell: - fe_values.reinit (cell); - - // And get the values and gradients of - // the previous solution at the - // quadrature points. To get them, we - // don't actually have to do much, - // except for giving the FEValues - // object the global node vector from - // which to compute this data, and a - // reference to the objects into which - // to put them. After the calls, the - // local_solution_values and - // local_solution_values variables - // will contain values and gradients - // for each of the quadrature points on - // this cell. - fe_values.get_function_values (present_solution, - local_solution_values); - fe_values.get_function_grads (present_solution, - local_solution_grads); - - // Then loop over all quadrature - // points: - for (unsigned int q_point=0; q_point u_prime = local_solution_grads[q_point]; - - // Then do the double loop over all - // shape functions to compute the - // local contribution to the - // matrix. The terms are simple - // equivalents of the formula - // stated in the introduction. Note - // how we extract the size of an - // element from the iterator to the - // present cell: - for (unsigned int i=0; idiameter() * - cell->diameter() - + - fe_values.shape_value(i,q_point) * - fe_values.shape_value(j,q_point)) * - fe_values.JxW(q_point); - - // And here comes the loop over all - // local degrees of freedom to form - // the right hand side. The formula - // looks a little convoluted, but - // is again a simple image of what - // was given in the introduction: - for (unsigned int i=0; iget_dof_indices (local_dof_indices); - for (unsigned int i=0; iif statement - // in front of the second function call. - // - // Note that we need zero boundary - // conditions on both ends, since the space - // in which search for the solution has - // fixed boundary conditions zero and one, - // and we have set the initial values to - // already satisfy them. Thus, the updates - // computed in each nonlinear step must - // have zero boundary values. - std::map boundary_values; - VectorTools::interpolate_boundary_values (dof_handler, - 0, - ZeroFunction(), - boundary_values); - if (dim == 1) + // Next is the function that assembles the + // linear system. The first part, + // initializing various local variables is + // what we have been doing previously + // already. + template + void MinimizationProblem::assemble_step () + { + // The first two lines of the function + // clear the matrix and right hand side + // values of their prior content, which + // could possibly still be there from the + // previous nonlinear step. + matrix.reinit (sparsity_pattern); + residual.reinit (dof_handler.n_dofs()); + + // Then we initialize a FEValues object + // with a 4-point Gauss quadrature + // formula. 
This object will be used to + // compute the values and gradients of the + // shape functions at the quadrature + // points, which we need to assemble the + // matrix and right hand side of the + // nonlinear step as outlined in the + // introduction to this example program. In + // order to compute values and gradients, + // we need to pass the update_values + // and update_gradients flags to the + // constructor, and the + // update_JxW_values flag for the + // Jacobian times the weight at a + // quadrature point. In addition, we need + // to have the coordinate values of each + // quadrature point in real space for the + // $x-u^3$ terms; to get these from the + // FEValues object, we need to pass it + // the update_quadrature_points flag. + // + // It is a simple calculation to figure out + // that for linear elements, the integrals + // in the right hand side semilinear form + // is a polynomial of sixth order. Thus, + // the appropriate quadrature formula is + // the one we have chosen here. + QGauss quadrature_formula(4); + FEValues fe_values (fe, quadrature_formula, + update_values | update_gradients | + update_quadrature_points | update_JxW_values); + + // Next, here are the usual two convenience + // variables, followed by declarations for + // the local contributions to matrix and + // right hand side, as well as an array to + // hold the indices of the local degrees of + // freedom on each cell: + const unsigned int dofs_per_cell = fe.dofs_per_cell; + const unsigned int n_q_points = quadrature_formula.size(); + + FullMatrix cell_matrix (dofs_per_cell, dofs_per_cell); + Vector cell_rhs (dofs_per_cell); + + std::vector local_dof_indices (dofs_per_cell); + + // The next two variables are needed since + // the problem we consider is nonlinear, + // and thus the right hand side depends on + // the previous solution (in a Newton + // method, for example, the left hand side + // matrix would also depend on the previous + // solution, but as explained in the + // introduction, we only use a simple + // gradient-type method in which the matrix + // is a scaled Laplace-type matrix). In + // order to compute the values of the + // integrand for the right hand side, we + // therefore need to have the values and + // gradients of the previous solution at + // the quadrature points. We will get them + // from the FEValues object above, and + // will put them into the following two + // variables: + std::vector local_solution_values (n_q_points); + std::vector > local_solution_grads (n_q_points); + + // Now, here comes the main loop over all + // the cells of the mesh: + typename DoFHandler::active_cell_iterator + cell = dof_handler.begin_active(), + endc = dof_handler.end(); + for (; cell!=endc; ++cell) + { + // First, clear the objects that hold + // the local matrix and right hand side + // contributions for this cell: + cell_matrix = 0; + cell_rhs = 0; + + // Then initialize the values and + // gradients of the shape functions at + // the quadrature points of this cell: + fe_values.reinit (cell); + + // And get the values and gradients of + // the previous solution at the + // quadrature points. To get them, we + // don't actually have to do much, + // except for giving the FEValues + // object the global node vector from + // which to compute this data, and a + // reference to the objects into which + // to put them. 
After the calls, the + // local_solution_values and + // local_solution_values variables + // will contain values and gradients + // for each of the quadrature points on + // this cell. + fe_values.get_function_values (present_solution, + local_solution_values); + fe_values.get_function_grads (present_solution, + local_solution_grads); + + // Then loop over all quadrature + // points: + for (unsigned int q_point=0; q_point u_prime = local_solution_grads[q_point]; + + // Then do the double loop over all + // shape functions to compute the + // local contribution to the + // matrix. The terms are simple + // equivalents of the formula + // stated in the introduction. Note + // how we extract the size of an + // element from the iterator to the + // present cell: + for (unsigned int i=0; idiameter() * + cell->diameter() + + + fe_values.shape_value(i,q_point) * + fe_values.shape_value(j,q_point)) * + fe_values.JxW(q_point); + + // And here comes the loop over all + // local degrees of freedom to form + // the right hand side. The formula + // looks a little convoluted, but + // is again a simple image of what + // was given in the introduction: + for (unsigned int i=0; iget_dof_indices (local_dof_indices); + for (unsigned int i=0; iif statement + // in front of the second function call. + // + // Note that we need zero boundary + // conditions on both ends, since the space + // in which search for the solution has + // fixed boundary conditions zero and one, + // and we have set the initial values to + // already satisfy them. Thus, the updates + // computed in each nonlinear step must + // have zero boundary values. + std::map boundary_values; VectorTools::interpolate_boundary_values (dof_handler, - 1, - ZeroFunction(), - boundary_values); - Vector dummy (residual.size()); - MatrixTools::apply_boundary_values (boundary_values, - matrix, - dummy, - residual); -} - - - // Once we have a search (update) direction, - // we need to figure out how far to go in - // this direction. This is what line search - // is good for, and this function does - // exactly this: compute and return the - // length of the update step. - // - // Since we already know the direction, we - // only have to solve the one-dimensional - // problem of minimizing the energy along - // this direction. Note, however, that in - // general we do not have the gradient of the - // energy functional in this direction, so we - // have to approximate it (and the second - // derivatives) using finite differences. - // - // In most applications, it is sufficient to - // find an approximate minimizer of this - // one-dimensional problem, or even just a - // point that may not be a minimizer but - // instead just satisfies a few conditions - // like those of Armijo and Goldstein. The - // rational for this is generally that - // evaluating the objective function too - // often is too expensive. However, here, we - // are a little more lenient, since the - // overall run-time is dominated by inverting - // the system matrix in each nonlinear - // step. Thus, we will do this minimization - // by using a fixed number of five Newton - // steps in this one-dimensional problem, and - // using a bisection algorithm as a substep - // in it. - // - // As is quite common in step length - // procedures, this function contains a fair - // number of heuristics and strategies that - // might not be obvious at first. Step length - // determination is notorious for its - // complications, and this implementation is - // not an exception. 
Note that if one tries - // to omit the special-casing, then one - // oftentimes encounters situations where the - // found step length is really not very good. -template -double -MinimizationProblem::line_search (const Vector &update) const -{ - // Start out with a zero step length: - double alpha = 0.; - Vector tmp (present_solution.size()); - - // Then do at most five Newton steps: - for (unsigned int step=0; step<5; ++step) - { - // At the present location, which is - // present_solution+alpha*update, - // evaluate the energy - tmp = present_solution; - tmp.add (alpha, update); - const double f_a = energy (dof_handler, tmp); - - // Then determine a finite difference - // step length dalpha, and also - // evaluate the energy functional at - // positions alpha+dalpha and - // alpha-dalpha along the search - // direction: - const double dalpha = (alpha != 0 ? alpha/100 : 0.01); - - tmp = present_solution; - tmp.add (alpha+dalpha, update); - const double f_a_plus = energy (dof_handler, tmp); - - tmp = present_solution; - tmp.add (alpha-dalpha, update); - const double f_a_minus = energy (dof_handler, tmp); - - // From these three data points, we can - // compute a finite difference - // approximation of the first and - // second derivatives: - const double f_a_prime = (f_a_plus-f_a_minus) / (2*dalpha); - const double f_a_doubleprime = ((f_a_plus-2*f_a+f_a_minus) / - (dalpha*dalpha)); - - // If the gradient is (relative to the - // energy value) too small, then this - // means that we have found a minimum - // of the energy functional along the - // search direction. In this case, - // abort here and return the found step - // length value: - if (std::fabs(f_a_prime) < 1e-7*std::fabs(f_a)) - break; - - // Alternatively, also abort if the - // curvature is too small, because we - // can't compute a Newton step - // then. This is somewhat - // unsatisfactory, since we are not at - // a minimum, and can certainly be - // improved. There are a number of - // other strategies for this case, - // which we leave for interested - // readers: - if (std::fabs(f_a_doubleprime) < 1e-7*std::fabs(f_a_prime)) - break; - - // Then compute the Newton step as the - // negative of the inverse Hessian - // applied to the gradient. - double step_length = -f_a_prime / f_a_doubleprime; - - // And do a number of correcting steps: - // if the energy at the predicted new - // position would be larger than at the - // present position, then halve the - // step length and try again. If this - // does not help after three such - // cycles, then simply give up and use - // the value we have. - for (unsigned int i=0; i<3; ++i) - { - tmp = present_solution; - tmp.add (alpha+step_length, update); - const double e = energy (dof_handler, tmp); - - if (e >= f_a) - step_length /= 2; - else - break; - } - - // After all this, update alpha and go - // on to the next Newton step. - alpha += step_length; - } - - // Finally, return with the computed step length. - return alpha; -} - + 0, + ZeroFunction(), + boundary_values); + if (dim == 1) + VectorTools::interpolate_boundary_values (dof_handler, + 1, + ZeroFunction(), + boundary_values); + Vector dummy (residual.size()); + MatrixTools::apply_boundary_values (boundary_values, + matrix, + dummy, + residual); + } - // The next function is again a rather boring - // one: it does one nonlinear step, by - // calling the function that assembles the - // linear system, then solving it, computing - // a step length, and finally updating the - // solution vector. 
This should all be mostly - // self-explanatory, given that we have shown - // the solution of a linear system before. -template -void MinimizationProblem::do_step () -{ - assemble_step (); - - Vector update (present_solution.size()); + // Once we have a search (update) direction, + // we need to figure out how far to go in + // this direction. This is what line search + // is good for, and this function does + // exactly this: compute and return the + // length of the update step. + // + // Since we already know the direction, we + // only have to solve the one-dimensional + // problem of minimizing the energy along + // this direction. Note, however, that in + // general we do not have the gradient of the + // energy functional in this direction, so we + // have to approximate it (and the second + // derivatives) using finite differences. + // + // In most applications, it is sufficient to + // find an approximate minimizer of this + // one-dimensional problem, or even just a + // point that may not be a minimizer but + // instead just satisfies a few conditions + // like those of Armijo and Goldstein. The + // rational for this is generally that + // evaluating the objective function too + // often is too expensive. However, here, we + // are a little more lenient, since the + // overall run-time is dominated by inverting + // the system matrix in each nonlinear + // step. Thus, we will do this minimization + // by using a fixed number of five Newton + // steps in this one-dimensional problem, and + // using a bisection algorithm as a substep + // in it. + // + // As is quite common in step length + // procedures, this function contains a fair + // number of heuristics and strategies that + // might not be obvious at first. Step length + // determination is notorious for its + // complications, and this implementation is + // not an exception. Note that if one tries + // to omit the special-casing, then one + // oftentimes encounters situations where the + // found step length is really not very good. + template + double + MinimizationProblem::line_search (const Vector &update) const { - SolverControl solver_control (residual.size(), - 1e-2*residual.l2_norm()); - SolverCG<> solver (solver_control); - - PreconditionSSOR<> preconditioner; - preconditioner.initialize(matrix); - - solver.solve (matrix, update, residual, - preconditioner); - hanging_node_constraints.distribute (update); + // Start out with a zero step length: + double alpha = 0.; + Vector tmp (present_solution.size()); + + // Then do at most five Newton steps: + for (unsigned int step=0; step<5; ++step) + { + // At the present location, which is + // present_solution+alpha*update, + // evaluate the energy + tmp = present_solution; + tmp.add (alpha, update); + const double f_a = energy (dof_handler, tmp); + + // Then determine a finite difference + // step length dalpha, and also + // evaluate the energy functional at + // positions alpha+dalpha and + // alpha-dalpha along the search + // direction: + const double dalpha = (alpha != 0 ? 
alpha/100 : 0.01); + + tmp = present_solution; + tmp.add (alpha+dalpha, update); + const double f_a_plus = energy (dof_handler, tmp); + + tmp = present_solution; + tmp.add (alpha-dalpha, update); + const double f_a_minus = energy (dof_handler, tmp); + + // From these three data points, we can + // compute a finite difference + // approximation of the first and + // second derivatives: + const double f_a_prime = (f_a_plus-f_a_minus) / (2*dalpha); + const double f_a_doubleprime = ((f_a_plus-2*f_a+f_a_minus) / + (dalpha*dalpha)); + + // If the gradient is (relative to the + // energy value) too small, then this + // means that we have found a minimum + // of the energy functional along the + // search direction. In this case, + // abort here and return the found step + // length value: + if (std::fabs(f_a_prime) < 1e-7*std::fabs(f_a)) + break; + + // Alternatively, also abort if the + // curvature is too small, because we + // can't compute a Newton step + // then. This is somewhat + // unsatisfactory, since we are not at + // a minimum, and can certainly be + // improved. There are a number of + // other strategies for this case, + // which we leave for interested + // readers: + if (std::fabs(f_a_doubleprime) < 1e-7*std::fabs(f_a_prime)) + break; + + // Then compute the Newton step as the + // negative of the inverse Hessian + // applied to the gradient. + double step_length = -f_a_prime / f_a_doubleprime; + + // And do a number of correcting steps: + // if the energy at the predicted new + // position would be larger than at the + // present position, then halve the + // step length and try again. If this + // does not help after three such + // cycles, then simply give up and use + // the value we have. + for (unsigned int i=0; i<3; ++i) + { + tmp = present_solution; + tmp.add (alpha+step_length, update); + const double e = energy (dof_handler, tmp); + + if (e >= f_a) + step_length /= 2; + else + break; + } + + // After all this, update alpha and go + // on to the next Newton step. + alpha += step_length; + } + + // Finally, return with the computed step length. + return alpha; } - const double step_length = line_search (update); - present_solution.add (step_length, update); -} + // The next function is again a rather boring + // one: it does one nonlinear step, by + // calling the function that assembles the + // linear system, then solving it, computing + // a step length, and finally updating the + // solution vector. This should all be mostly + // self-explanatory, given that we have shown + // the solution of a linear system before. + template + void MinimizationProblem::do_step () + { + assemble_step (); - // The same holds for the function that - // outputs the solution in gnuplot format - // into a file with a name that includes the - // number of the run we are presently - // performing. 
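// The safeguarded finite-difference Newton iteration used in line_search
// above can be tried on its own. The following standalone sketch applies
// the same five-step scheme to a sample one-dimensional energy; the function
// sample_energy and its minimizer near alpha=1.5 are invented for
// illustration:

#include <cmath>
#include <iostream>

double sample_energy (const double alpha)
{
  return (alpha-1.5)*(alpha-1.5) + 0.1*std::pow(alpha-1.5, 4);
}

int main ()
{
  double alpha = 0;
  for (unsigned int step=0; step<5; ++step)
    {
      // Central differences for the first and second derivative:
      const double dalpha    = (alpha != 0 ? alpha/100 : 0.01);
      const double f_a       = sample_energy (alpha);
      const double f_a_plus  = sample_energy (alpha+dalpha);
      const double f_a_minus = sample_energy (alpha-dalpha);

      const double f_a_prime       = (f_a_plus-f_a_minus) / (2*dalpha);
      const double f_a_doubleprime = (f_a_plus-2*f_a+f_a_minus) / (dalpha*dalpha);

      // Stop if the directional derivative or the curvature is too small:
      if (std::fabs(f_a_prime) < 1e-7*std::fabs(f_a))
        break;
      if (std::fabs(f_a_doubleprime) < 1e-7*std::fabs(f_a_prime))
        break;

      // Newton step, halved up to three times if it would increase the
      // energy:
      double step_length = -f_a_prime / f_a_doubleprime;
      for (unsigned int i=0; i<3; ++i)
        if (sample_energy (alpha+step_length) >= f_a)
          step_length /= 2;
        else
          break;

      alpha += step_length;
    }

  std::cout << "approximate minimizer: " << alpha << std::endl;  // close to 1.5
}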
-template -void -MinimizationProblem::output_results () const -{ - DataOut data_out; - data_out.attach_dof_handler (dof_handler); - data_out.add_data_vector (present_solution, "solution"); - data_out.build_patches (); - - std::ostringstream filename; - filename << "solution-" - << run_number - << ".gnuplot" - << std::ends; - - std::ofstream out (filename.str().c_str()); - data_out.write_gnuplot (out); -} + Vector update (present_solution.size()); + { + SolverControl solver_control (residual.size(), + 1e-2*residual.l2_norm()); + SolverCG<> solver (solver_control); + PreconditionSSOR<> preconditioner; + preconditioner.initialize(matrix); + solver.solve (matrix, update, residual, + preconditioner); + hanging_node_constraints.distribute (update); + } - // The function to compute error indicator - // and refine the mesh accordingly is a - // little more interesting. In particular, it - // shows some more of the techniques usually - // used in 1d applications. First, note that - // this again is a specialization that only - // works in 1d. However, to make later - // extension to higher space dimensions - // simpler, we define a constant integer - // dim at the beginning of the function; - // by using this constant as template - // argument in all places, we are actually - // able to write most of the code as if it - // were dimension independent, thus - // minimizing the amount of later changes. -template <> -void MinimizationProblem<1>::refine_grid () -{ - const unsigned int dim = 1; - - Vector error_indicators (triangulation.n_active_cells()); - - // Then define the quadrature formula, and - // what values we will want to extract from - // the solution. Here, we use the two-point - // trapezoidal rule, i.e. we evaluate the - // residual only at the end points of the - // cells. Incidentally, this also makes - // evaluating the jump terms between cells - // simpler. Note that for the error - // indicators, we not only need values and - // gradients of the solution, but also its - // second derivatives, as well as the - // physical location of quadrature points. - QTrapez quadrature; - FEValues fe_values (fe, quadrature, - update_values | update_gradients | - update_hessians | - update_quadrature_points | update_JxW_values); - - // The error indicator formula presented in - // the introduction requires us to compute - // jumps of the solution and gradient - // across cell boundaries. Since the - // solution itself is continuous, we only - // need to evaluate the gradient on the - // neighbor cells. To avoid some of the - // work needed to reinitialize a - // FEValues object on a cell, we define - // another such object here that we will - // only use for the neighbor cells. The - // data we need from the side of the - // present cell is provided by above - // object. - FEValues neighbor_fe_values (fe, quadrature, - update_gradients); - - // Then, as before, we need objects holding - // values and derivatives of the solution - // at quadrature points. Here, we also need - // second derivatives, which is simple, - // however: - std::vector local_values (quadrature.size()); - std::vector > local_gradients (quadrature.size()); - std::vector > local_2nd_derivs (quadrature.size()); - - // With all this, we can start the loop - // over all cells. Since we need to write - // the result for each cell into - // consecutive elements of a vector, we - // also keep a running index cell_index - // that we increase with each cell treated. 
- DoFHandler::active_cell_iterator - cell = dof_handler.begin_active (), - endc = dof_handler.end (); - for (unsigned int cell_index = 0; cell!=endc; ++cell, ++cell_index) - { - // After initializing the FEValues - // object on each cell, use it to - // evaluate solution and first and - // second derivatives of it at the - // quadrature points: - fe_values.reinit (cell); - fe_values.get_function_values (present_solution, local_values); - fe_values.get_function_grads (present_solution, local_gradients); - fe_values.get_function_2nd_derivatives (present_solution, local_2nd_derivs); - - // Given the formula in the - // introduction, the computation of the - // cell residuals should actually be - // relatively obvious. The result, - // multiplied by the appropriate power - // of the cell's size is then written - // into the vector of error indicators. - // - // Note that in the following - // computations, we have already made - // use of the fact that we are in 1d, - // since we extract the gradient as a - // scalar value. - double cell_residual_norm = 0; - for (unsigned int q=0; qdiameter() * cell->diameter(); - - // The next step is to evaluate the - // jump terms. To make computations - // somewhat simpler (and to free up the - // local_* variables for use on - // neighboring elements), we define - // some convenience variables for the - // positions of the left and right cell - // boundary point, as well as the - // values and gradients at these - // points. - // - // To be cautious, we don't blindly - // trust that the trapezoidal rule has - // its evaluation points as the left - // and right end point of the cell (it - // could in principle have them in the - // reverse order, i.e. the zeroth point - // is at x=1, and the first one at - // x=0), and use an assertion to - // actually check for this. If this - // would not be the case, an exception - // of the (predefined) class - // ExcInternalError would be - // thrown. Of course, this does not - // happen in this program, but it shows - // a way of defensive coding: if you - // are not sure of an assumption, guard - // it by a test. This also guards us - // against possible future changes in - // the library: the quadrature classes - // do not promise any particular order - // of their quadrature points, so the - // QTrapez class could in principle - // change the order of its two - // evaluation points. In that case, - // your code would tell you that - // something changed, rather than - // computing a wrong result when you - // upgrade to a new version of the - // library. (The point made here is - // theoretical: we are not going to - // change the order of evaluation - // points; the intent is simply how to - // add some defensive touches to a - // program that make sure that it - // really does what it is hoped to do.) - // - // Given that we are now sure that - // x_left and x_right, - // extracted from the zeroth and first - // quadrature point, are indeed the - // left and right vertex of the cell, - // we can also be sure that the values - // we extract for u_left et al. are - // the ones we expect them to be, since - // the order of these values must of - // course match the order of the - // quadrature points. 
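// For piecewise linear elements in 1d, the gradient is constant on each
// cell, so the jump of u' at a vertex is just the difference of two
// one-sided difference quotients. The following standalone sketch computes
// these raw jumps for invented node positions and nodal values (the
// indicator in the program uses the jump of the fifth power of u' together
// with additional weights; the sketch only shows the underlying gradient
// jump):

#include <iostream>
#include <vector>

int main ()
{
  const std::vector<double> x = {0., 0.25, 0.5, 1.};      // vertices
  const std::vector<double> u = {0., 0.6,  0.8, 1.};      // nodal values

  // The constant gradient u' on each of the three cells:
  std::vector<double> u_prime (x.size()-1);
  for (unsigned int cell=0; cell<u_prime.size(); ++cell)
    u_prime[cell] = (u[cell+1]-u[cell]) / (x[cell+1]-x[cell]);

  // The jump [u'] at each interior vertex, i.e. the gradient on the right
  // cell minus the gradient on the left cell:
  for (unsigned int vertex=1; vertex+1<x.size(); ++vertex)
    std::cout << "jump of u' at x=" << x[vertex] << ": "
              << u_prime[vertex]-u_prime[vertex-1] << std::endl;
}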
- const double x_left = fe_values.quadrature_point(0)[0]; - const double x_right = fe_values.quadrature_point(1)[0]; - - Assert (x_left == cell->vertex(0)[0], ExcInternalError()); - Assert (x_right == cell->vertex(1)[0], ExcInternalError()); - - const double u_left = local_values[0]; - const double u_right = local_values[1]; - - const double u_prime_left = local_gradients[0][0]; - const double u_prime_right = local_gradients[1][0]; - - // Next, we have to check whether this - // cell has a left neighbor: - if (cell->at_boundary(0) == false) - { - // If so, find its left - // neighbor. We do so by asking for - // the cell that is immediately - // adjacent to the left (the zeroth - // neighbor in 1d). However, this - // may be a cell that in itself has - // children, so to get to the - // active left neighbor, we have to - // recursively check whether that - // cell has children, and if so - // take its right child, since that - // is adjacent to the left of the - // present cell. Note that unless - // you are in 1d, there is no safe - // way to assume that the first - // child of the zeroth neighbor is - // indeed adjacent to the present - // cell. Rather, more than one of - // the children of a neighbor may - // be adjacent to the present - // cell. Also note that in two or - // higher space dimensions, a - // neighbor of an active cell may - // only be at most once refined, - // since we have the rule that - // there can only be one hanging - // node per face. This rule does - // not exist in 1d: neighboring - // cells may have totally - // independent refinement - // levels. Thus, we really need the - // while loop, not only an - // if clause. - DoFHandler::cell_iterator left_neighbor = cell->neighbor(0); - while (left_neighbor->has_children()) - left_neighbor = left_neighbor->child(1); - - // With the so-found neighbor, - // initialize the second - // FEValues object to it, - // extract the gradients of the - // solution there, and from this - // get the gradient at the - // interface (this is the first - // element of local_gradients, - // since the right end point of the - // neighbor cell has index 1) as a - // scalar value (this is the zeroth - // component of - // local_gradients[1]. - neighbor_fe_values.reinit (left_neighbor); - neighbor_fe_values.get_function_grads (present_solution, local_gradients); - - const double neighbor_u_prime_left = local_gradients[1][0]; - - // Then compute the jump, and add a - // suitable multiple to the error - // indicator for this cell: - const double left_jump = std::pow(x_left-std::pow(u_left,3), 2) * - (std::pow(neighbor_u_prime_left,5) - - std::pow(u_prime_left,5)); - error_indicators(cell_index) += left_jump * left_jump * - cell->diameter(); - } + const double step_length = line_search (update); + present_solution.add (step_length, update); + } - // Once we have done the left neighbor, - // we can play exactly the same game - // with the right neighbor: - if (cell->at_boundary(1) == false) - { - DoFHandler::cell_iterator right_neighbor = cell->neighbor(1); - while (right_neighbor->has_children()) - right_neighbor = right_neighbor->child(0); - neighbor_fe_values.reinit (right_neighbor); - neighbor_fe_values.get_function_grads (present_solution, local_gradients); - const double neighbor_u_prime_right = local_gradients[0][0]; + // The same holds for the function that + // outputs the solution in gnuplot format + // into a file with a name that includes the + // number of the run we are presently + // performing. 
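// For a 1d scalar field, a gnuplot-readable file is essentially a two-column
// listing of point coordinates and solution values, which gnuplot's "plot"
// command can draw directly. The following standalone sketch shows how such
// a file name is composed from a run number and how the data might be
// written by hand (invented values; the function below produces the file
// through the DataOut class instead):

#include <cmath>
#include <fstream>
#include <sstream>

int main ()
{
  const unsigned int run_number = 3;

  std::ostringstream filename;
  filename << "solution-" << run_number << ".gnuplot";

  std::ofstream out (filename.str().c_str());
  for (unsigned int i=0; i<=10; ++i)
    {
      const double x = 1.*i/10;
      out << x << ' ' << std::pow(x, 1./3.) << '\n';    // columns: x  u(x)
    }
}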
+ template + void + MinimizationProblem::output_results () const + { + DataOut data_out; + data_out.attach_dof_handler (dof_handler); + data_out.add_data_vector (present_solution, "solution"); + data_out.build_patches (); + + std::ostringstream filename; + filename << "solution-" + << run_number + << ".gnuplot" + << std::ends; + + std::ofstream out (filename.str().c_str()); + data_out.write_gnuplot (out); + } - const double right_jump = std::pow(x_right-std::pow(u_right,3), 2) * - (std::pow(neighbor_u_prime_right,5) - - std::pow(u_prime_right,5)); - error_indicators(cell_index) += right_jump * right_jump * - cell->diameter(); - } - } - // Now we have all the refinement - // indicators computed, and want to refine - // the grid. In contrast to previous - // examples, however, we would like to - // transfer the solution vector from the - // old to the new grid. This is what the - // SolutionTransfer class is good for, - // but it requires some preliminary - // work. First, we need to tag the cells - // that we want to refine or coarsen, as - // usual: - GridRefinement::refine_and_coarsen_fixed_number (triangulation, - error_indicators, - 0.3, 0.03); - // Then, however, we need an additional - // step: if, for example, you flag a cell - // that is once more refined than its - // neighbor, and that neighbor is not - // flagged for refinement, we would end up - // with a jump of two refinement levels - // across a cell interface. In 1d, this - // would in general be allowed, but not in - // higher space dimensions, and some mesh - // smoothing algorithms in 1d may also - // disallow this. To avoid these - // situations, the library will silently - // also have to refine the neighbor cell - // once. It does so by calling the - // Triangulation::prepare_coarsening_and_refinement - // function before actually doing the - // refinement and coarsening. This function - // flags a set of additional cells for - // refinement or coarsening, to enforce - // rules like the one-hanging-node - // rule. The cells that are flagged for - // refinement and coarsening after calling - // this function are exactly the ones that - // will actually be refined or - // coarsened. Since the - // SolutionTransfer class needs this - // information in order to store the data - // from the old mesh and transfer to the - // new one. - triangulation.prepare_coarsening_and_refinement(); - - // With this out of the way, we initialize - // a SolutionTransfer object with the - // present DoFHandler and attach the - // solution vector to it: - SolutionTransfer solution_transfer(dof_handler); - solution_transfer.prepare_for_coarsening_and_refinement (present_solution); - - // Then we do the actual refinement, and - // distribute degrees of freedom on the new - // mesh: - triangulation.execute_coarsening_and_refinement (); - dof_handler.distribute_dofs (fe); - - // Finally, we retrieve the old solution - // interpolated to the new mesh. Since the - // SolutionTransfer function does not - // actually store the values of the old - // solution, but rather indices, we need to - // preserve the old solution vector until - // we have gotten the new interpolated - // values. 
Thus, we have the new values - // written into a temporary vector, and - // only afterwards write them into the - // solution vector object: - Vector tmp (dof_handler.n_dofs()); - solution_transfer.interpolate (present_solution, tmp); - present_solution = tmp; - - // Here is some final thing, that is - // actually unnecessary in 1d, but - // necessary for higher space dimensions, - // so we show it anyway: the result of what - // the SolutionTransfer class provides - // is a vector that is interpolated from - // the old to the new mesh. Unfortunately, - // it does not necessarily have the right - // values at constrained (hanging) nodes, - // so we have to fix this up to make the - // solution conforming again. The simplest - // way to do this is this: - hanging_node_constraints.clear (); - DoFTools::make_hanging_node_constraints (dof_handler, - hanging_node_constraints); - hanging_node_constraints.close (); - hanging_node_constraints.distribute (present_solution); - // This is wasteful, since we create a - // ConstraintMatrix object that will be - // recreated again in the next call to - // setup_system_on_mesh immediately - // afterwards. A more efficient - // implementation would make sure that it - // is created only once. We don't care so - // much here, since in 1d there are no - // constraints, so all of these operations - // are really cheap, but we do not - // recommend this as general programming - // strategy. -} + // The function to compute error indicator + // and refine the mesh accordingly is a + // little more interesting. In particular, it + // shows some more of the techniques usually + // used in 1d applications. First, note that + // this again is a specialization that only + // works in 1d. However, to make later + // extension to higher space dimensions + // simpler, we define a constant integer + // dim at the beginning of the function; + // by using this constant as template + // argument in all places, we are actually + // able to write most of the code as if it + // were dimension independent, thus + // minimizing the amount of later changes. + template <> + void MinimizationProblem<1>::refine_grid () + { + const unsigned int dim = 1; + + Vector error_indicators (triangulation.n_active_cells()); + + // Then define the quadrature formula, and + // what values we will want to extract from + // the solution. Here, we use the two-point + // trapezoidal rule, i.e. we evaluate the + // residual only at the end points of the + // cells. Incidentally, this also makes + // evaluating the jump terms between cells + // simpler. Note that for the error + // indicators, we not only need values and + // gradients of the solution, but also its + // second derivatives, as well as the + // physical location of quadrature points. + QTrapez quadrature; + FEValues fe_values (fe, quadrature, + update_values | update_gradients | + update_hessians | + update_quadrature_points | update_JxW_values); + + // The error indicator formula presented in + // the introduction requires us to compute + // jumps of the solution and gradient + // across cell boundaries. Since the + // solution itself is continuous, we only + // need to evaluate the gradient on the + // neighbor cells. To avoid some of the + // work needed to reinitialize a + // FEValues object on a cell, we define + // another such object here that we will + // only use for the neighbor cells. The + // data we need from the side of the + // present cell is provided by above + // object. 
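// What the SolutionTransfer mechanism described above amounts to for
// piecewise linear elements in 1d is ordinary interpolation: on a uniformly
// refined grid, the value at every newly created midpoint node is the
// average of the two old vertex values, and the result is collected in a
// temporary vector before it overwrites the old solution. A standalone
// sketch (uniform refinement and invented old values; the library of course
// also handles adaptive refinement and coarsening):

#include <iostream>
#include <vector>

int main ()
{
  const std::vector<double> old_solution = {0., 0.4, 0.7, 1.};

  // Interpolate onto the once-refined grid, writing into a temporary:
  std::vector<double> tmp (2*old_solution.size()-1);
  for (unsigned int i=0; i<old_solution.size(); ++i)
    {
      tmp[2*i] = old_solution[i];                               // old vertices
      if (i+1 < old_solution.size())
        tmp[2*i+1] = (old_solution[i]+old_solution[i+1]) / 2;   // new midpoints
    }

  // Only now replace the solution vector:
  const std::vector<double> present_solution = tmp;

  for (const double value : present_solution)
    std::cout << value << ' ';
  std::cout << std::endl;
}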
+ FEValues neighbor_fe_values (fe, quadrature, + update_gradients); + + // Then, as before, we need objects holding + // values and derivatives of the solution + // at quadrature points. Here, we also need + // second derivatives, which is simple, + // however: + std::vector local_values (quadrature.size()); + std::vector > local_gradients (quadrature.size()); + std::vector > local_2nd_derivs (quadrature.size()); + + // With all this, we can start the loop + // over all cells. Since we need to write + // the result for each cell into + // consecutive elements of a vector, we + // also keep a running index cell_index + // that we increase with each cell treated. + DoFHandler::active_cell_iterator + cell = dof_handler.begin_active (), + endc = dof_handler.end (); + for (unsigned int cell_index = 0; cell!=endc; ++cell, ++cell_index) + { + // After initializing the FEValues + // object on each cell, use it to + // evaluate solution and first and + // second derivatives of it at the + // quadrature points: + fe_values.reinit (cell); + fe_values.get_function_values (present_solution, local_values); + fe_values.get_function_grads (present_solution, local_gradients); + fe_values.get_function_2nd_derivatives (present_solution, local_2nd_derivs); + + // Given the formula in the + // introduction, the computation of the + // cell residuals should actually be + // relatively obvious. The result, + // multiplied by the appropriate power + // of the cell's size is then written + // into the vector of error indicators. + // + // Note that in the following + // computations, we have already made + // use of the fact that we are in 1d, + // since we extract the gradient as a + // scalar value. + double cell_residual_norm = 0; + for (unsigned int q=0; qdiameter() * cell->diameter(); + + // The next step is to evaluate the + // jump terms. To make computations + // somewhat simpler (and to free up the + // local_* variables for use on + // neighboring elements), we define + // some convenience variables for the + // positions of the left and right cell + // boundary point, as well as the + // values and gradients at these + // points. + // + // To be cautious, we don't blindly + // trust that the trapezoidal rule has + // its evaluation points as the left + // and right end point of the cell (it + // could in principle have them in the + // reverse order, i.e. the zeroth point + // is at x=1, and the first one at + // x=0), and use an assertion to + // actually check for this. If this + // would not be the case, an exception + // of the (predefined) class + // ExcInternalError would be + // thrown. Of course, this does not + // happen in this program, but it shows + // a way of defensive coding: if you + // are not sure of an assumption, guard + // it by a test. This also guards us + // against possible future changes in + // the library: the quadrature classes + // do not promise any particular order + // of their quadrature points, so the + // QTrapez class could in principle + // change the order of its two + // evaluation points. In that case, + // your code would tell you that + // something changed, rather than + // computing a wrong result when you + // upgrade to a new version of the + // library. (The point made here is + // theoretical: we are not going to + // change the order of evaluation + // points; the intent is simply how to + // add some defensive touches to a + // program that make sure that it + // really does what it is hoped to do.) 
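The same kind of guard can also be exercised completely outside of any mesh or solver context. The following free-standing sketch (not part of the program; the function name is illustrative) asserts in debug mode that the two-point trapezoidal rule places its first quadrature point at the left end of the reference cell and its second one at the right end, which is precisely the assumption the code below relies on:

#include <deal.II/base/exceptions.h>
#include <deal.II/base/quadrature_lib.h>

void check_trapezoidal_point_order ()
{
  const dealii::QTrapez<1> quadrature;

  // In debug mode a violated condition aborts with a readable error
  // message; in optimized mode these checks compile away to nothing.
  Assert (quadrature.size() == 2,
          dealii::ExcMessage ("Expected the two-point trapezoidal rule."));
  Assert (quadrature.point(0)[0] == 0.,
          dealii::ExcInternalError());
  Assert (quadrature.point(1)[0] == 1.,
          dealii::ExcInternalError());
}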
+ // + // Given that we are now sure that + // x_left and x_right, + // extracted from the zeroth and first + // quadrature point, are indeed the + // left and right vertex of the cell, + // we can also be sure that the values + // we extract for u_left et al. are + // the ones we expect them to be, since + // the order of these values must of + // course match the order of the + // quadrature points. + const double x_left = fe_values.quadrature_point(0)[0]; + const double x_right = fe_values.quadrature_point(1)[0]; + + Assert (x_left == cell->vertex(0)[0], ExcInternalError()); + Assert (x_right == cell->vertex(1)[0], ExcInternalError()); + + const double u_left = local_values[0]; + const double u_right = local_values[1]; + + const double u_prime_left = local_gradients[0][0]; + const double u_prime_right = local_gradients[1][0]; + + // Next, we have to check whether this + // cell has a left neighbor: + if (cell->at_boundary(0) == false) + { + // If so, find its left + // neighbor. We do so by asking for + // the cell that is immediately + // adjacent to the left (the zeroth + // neighbor in 1d). However, this + // may be a cell that in itself has + // children, so to get to the + // active left neighbor, we have to + // recursively check whether that + // cell has children, and if so + // take its right child, since that + // is adjacent to the left of the + // present cell. Note that unless + // you are in 1d, there is no safe + // way to assume that the first + // child of the zeroth neighbor is + // indeed adjacent to the present + // cell. Rather, more than one of + // the children of a neighbor may + // be adjacent to the present + // cell. Also note that in two or + // higher space dimensions, a + // neighbor of an active cell may + // only be at most once refined, + // since we have the rule that + // there can only be one hanging + // node per face. This rule does + // not exist in 1d: neighboring + // cells may have totally + // independent refinement + // levels. Thus, we really need the + // while loop, not only an + // if clause. + DoFHandler::cell_iterator left_neighbor = cell->neighbor(0); + while (left_neighbor->has_children()) + left_neighbor = left_neighbor->child(1); + + // With the so-found neighbor, + // initialize the second + // FEValues object to it, + // extract the gradients of the + // solution there, and from this + // get the gradient at the + // interface (this is the first + // element of local_gradients, + // since the right end point of the + // neighbor cell has index 1) as a + // scalar value (this is the zeroth + // component of + // local_gradients[1]. 
+ neighbor_fe_values.reinit (left_neighbor); + neighbor_fe_values.get_function_grads (present_solution, local_gradients); + + const double neighbor_u_prime_left = local_gradients[1][0]; + + // Then compute the jump, and add a + // suitable multiple to the error + // indicator for this cell: + const double left_jump = std::pow(x_left-std::pow(u_left,3), 2) * + (std::pow(neighbor_u_prime_left,5) - + std::pow(u_prime_left,5)); + error_indicators(cell_index) += left_jump * left_jump * + cell->diameter(); + } + + // Once we have done the left neighbor, + // we can play exactly the same game + // with the right neighbor: + if (cell->at_boundary(1) == false) + { + DoFHandler::cell_iterator right_neighbor = cell->neighbor(1); + while (right_neighbor->has_children()) + right_neighbor = right_neighbor->child(0); + + neighbor_fe_values.reinit (right_neighbor); + neighbor_fe_values.get_function_grads (present_solution, local_gradients); + + const double neighbor_u_prime_right = local_gradients[0][0]; + + const double right_jump = std::pow(x_right-std::pow(u_right,3), 2) * + (std::pow(neighbor_u_prime_right,5) - + std::pow(u_prime_right,5)); + error_indicators(cell_index) += right_jump * right_jump * + cell->diameter(); + } + } + + // Now we have all the refinement + // indicators computed, and want to refine + // the grid. In contrast to previous + // examples, however, we would like to + // transfer the solution vector from the + // old to the new grid. This is what the + // SolutionTransfer class is good for, + // but it requires some preliminary + // work. First, we need to tag the cells + // that we want to refine or coarsen, as + // usual: + GridRefinement::refine_and_coarsen_fixed_number (triangulation, + error_indicators, + 0.3, 0.03); + // Then, however, we need an additional + // step: if, for example, you flag a cell + // that is once more refined than its + // neighbor, and that neighbor is not + // flagged for refinement, we would end up + // with a jump of two refinement levels + // across a cell interface. In 1d, this + // would in general be allowed, but not in + // higher space dimensions, and some mesh + // smoothing algorithms in 1d may also + // disallow this. To avoid these + // situations, the library will silently + // also have to refine the neighbor cell + // once. It does so by calling the + // Triangulation::prepare_coarsening_and_refinement + // function before actually doing the + // refinement and coarsening. This function + // flags a set of additional cells for + // refinement or coarsening, to enforce + // rules like the one-hanging-node + // rule. The cells that are flagged for + // refinement and coarsening after calling + // this function are exactly the ones that + // will actually be refined or + // coarsened. Since the + // SolutionTransfer class needs this + // information in order to store the data + // from the old mesh and transfer to the + // new one. + triangulation.prepare_coarsening_and_refinement(); + + // With this out of the way, we initialize + // a SolutionTransfer object with the + // present DoFHandler and attach the + // solution vector to it: + SolutionTransfer solution_transfer(dof_handler); + solution_transfer.prepare_for_coarsening_and_refinement (present_solution); + + // Then we do the actual refinement, and + // distribute degrees of freedom on the new + // mesh: + triangulation.execute_coarsening_and_refinement (); + dof_handler.distribute_dofs (fe); + + // Finally, we retrieve the old solution + // interpolated to the new mesh. 
Since the + // SolutionTransfer function does not + // actually store the values of the old + // solution, but rather indices, we need to + // preserve the old solution vector until + // we have gotten the new interpolated + // values. Thus, we have the new values + // written into a temporary vector, and + // only afterwards write them into the + // solution vector object: + Vector tmp (dof_handler.n_dofs()); + solution_transfer.interpolate (present_solution, tmp); + present_solution = tmp; + + // Here is some final thing, that is + // actually unnecessary in 1d, but + // necessary for higher space dimensions, + // so we show it anyway: the result of what + // the SolutionTransfer class provides + // is a vector that is interpolated from + // the old to the new mesh. Unfortunately, + // it does not necessarily have the right + // values at constrained (hanging) nodes, + // so we have to fix this up to make the + // solution conforming again. The simplest + // way to do this is this: + hanging_node_constraints.clear (); + DoFTools::make_hanging_node_constraints (dof_handler, + hanging_node_constraints); + hanging_node_constraints.close (); + hanging_node_constraints.distribute (present_solution); + // This is wasteful, since we create a + // ConstraintMatrix object that will be + // recreated again in the next call to + // setup_system_on_mesh immediately + // afterwards. A more efficient + // implementation would make sure that it + // is created only once. We don't care so + // much here, since in 1d there are no + // constraints, so all of these operations + // are really cheap, but we do not + // recommend this as general programming + // strategy. + } - // Before going over to the framework - // functions, we still need to look at the - // implementation of the function that - // computes the energy of a nodal vector in - // the functional considered in this example - // program. Its idea is simple: take a nodal - // vector and the DoFHandler object it is - // living on, then loop over all cells and - // add up the local contributions to the - // energy: -template -double -MinimizationProblem::energy (const DoFHandler &dof_handler, - const Vector &function) -{ - // First define the quadrature formula and - // a FEValues object with which to - // compute the values of the input function - // at the quadrature points. 
Note again - // that the integrand is a polynomial of - // degree six, so a 4-point Gauss formula - // is appropriate: - QGauss quadrature_formula(4); - FEValues fe_values (dof_handler.get_fe(), quadrature_formula, - update_values | update_gradients | - update_quadrature_points | update_JxW_values); - - const unsigned int n_q_points = quadrature_formula.size(); - - // Then, just as when we integrated the - // linear system, we need two variables - // that will hold the values and gradients - // of the given function at the quadrature - // points: - std::vector local_solution_values (n_q_points); - std::vector > local_solution_grads (n_q_points); - - // With this, define an energy variable, - // and loop over all the cells: - double energy = 0.; - - typename DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(), - endc = dof_handler.end(); - for (; cell!=endc; ++cell) - { - // On each cell, initialize the - // FEValues object, and extract - // values and gradients of the given - // function: - fe_values.reinit (cell); - fe_values.get_function_values (function, - local_solution_values); - fe_values.get_function_grads (function, - local_solution_grads); - - // Then loop over all quadrature points - // on this cell, and add up the - // contribution of each to the global - // energy: - for (unsigned int q_point=0; q_pointDoFHandler object it is + // living on, then loop over all cells and + // add up the local contributions to the + // energy: + template + double + MinimizationProblem::energy (const DoFHandler &dof_handler, + const Vector &function) + { + // First define the quadrature formula and + // a FEValues object with which to + // compute the values of the input function + // at the quadrature points. Note again + // that the integrand is a polynomial of + // degree six, so a 4-point Gauss formula + // is appropriate: + QGauss quadrature_formula(4); + FEValues fe_values (dof_handler.get_fe(), quadrature_formula, + update_values | update_gradients | + update_quadrature_points | update_JxW_values); + + const unsigned int n_q_points = quadrature_formula.size(); + + // Then, just as when we integrated the + // linear system, we need two variables + // that will hold the values and gradients + // of the given function at the quadrature + // points: + std::vector local_solution_values (n_q_points); + std::vector > local_solution_grads (n_q_points); + + // With this, define an energy variable, + // and loop over all the cells: + double energy = 0.; + + typename DoFHandler::active_cell_iterator + cell = dof_handler.begin_active(), + endc = dof_handler.end(); + for (; cell!=endc; ++cell) + { + // On each cell, initialize the + // FEValues object, and extract + // values and gradients of the given + // function: + fe_values.reinit (cell); + fe_values.get_function_values (function, + local_solution_values); + fe_values.get_function_grads (function, + local_solution_grads); + + // Then loop over all quadrature points + // on this cell, and add up the + // contribution of each to the global + // energy: + for (unsigned int q_point=0; q_pointrun(). It generate a coarse mesh, - // refines it a couple of times, and - // initializes the starting values. It then - // goes into a loop in which we first set up - // the member variables for the new mesh, and - // then do a fixed number of five gradient - // steps. 
If after this the energy has not - // significantly decreased compares to the - // last time we checked, we assume that we - // have converged and exit, otherwise we - // refine the mesh and start over. Once we - // have determined that the computations have - // converged somewhere, we output the - // results. -template -void MinimizationProblem::run () -{ - GridGenerator::hyper_cube (triangulation, 0., 1.); - triangulation.refine_global (4); - dof_handler.distribute_dofs (fe); - initialize_solution (); + // So here is the driver function, + // run(). It generate a coarse mesh, + // refines it a couple of times, and + // initializes the starting values. It then + // goes into a loop in which we first set up + // the member variables for the new mesh, and + // then do a fixed number of five gradient + // steps. If after this the energy has not + // significantly decreased compares to the + // last time we checked, we assume that we + // have converged and exit, otherwise we + // refine the mesh and start over. Once we + // have determined that the computations have + // converged somewhere, we output the + // results. + template + void MinimizationProblem::run () + { + GridGenerator::hyper_cube (triangulation, 0., 1.); + triangulation.refine_global (4); + dof_handler.distribute_dofs (fe); + initialize_solution (); - double last_energy = energy (dof_handler, present_solution); + double last_energy = energy (dof_handler, present_solution); - while (true) - { - setup_system_on_mesh (); + while (true) + { + setup_system_on_mesh (); - for (unsigned int iteration=0; iteration<5; ++iteration) - do_step (); + for (unsigned int iteration=0; iteration<5; ++iteration) + do_step (); - const double this_energy = energy (dof_handler, present_solution); - std::cout << " Energy: " << this_energy << std::endl; + const double this_energy = energy (dof_handler, present_solution); + std::cout << " Energy: " << this_energy << std::endl; - if ((last_energy-this_energy) < 1e-5*last_energy) - break; + if ((last_energy-this_energy) < 1e-5*last_energy) + break; - last_energy = this_energy; + last_energy = this_energy; - refine_grid (); - } + refine_grid (); + } - output_results (); + output_results (); - std::cout << std::endl; + std::cout << std::endl; + } } @@ -1371,6 +1374,9 @@ int main () { try { + using namespace dealii; + using namespace Step15; + deallog.depth_console (0); const unsigned int n_realizations = 10; diff --git a/deal.II/examples/step-16/step-16.cc b/deal.II/examples/step-16/step-16.cc index 817ff6c6c5..f4510f780a 100644 --- a/deal.II/examples/step-16/step-16.cc +++ b/deal.II/examples/step-16/step-16.cc @@ -5,7 +5,7 @@ /* $Id$ */ /* */ -/* Copyright (C) 2003, 2004, 2006, 2007, 2008, 2009, 2010 by the deal.II authors */ +/* Copyright (C) 2003, 2004, 2006, 2007, 2008, 2009, 2010, 2011 by the deal.II authors */ /* */ /* This file is subject to QPL and may not be distributed */ /* without copyright and license information. Please refer */ @@ -89,912 +89,915 @@ // The last step is as in all // previous programs: -using namespace dealii; - - - // @sect3{The LaplaceProblem class template} - - // This main class is basically the same - // class as in step-6. 
As far as member - // functions is concerned, the only addition - // is the assemble_multigrid - // function that assembles the matrices that - // correspond to the discrete operators on - // intermediate levels: -template -class LaplaceProblem +namespace Step16 { - public: - LaplaceProblem (const unsigned int deg); - void run (); - - private: - void setup_system (); - void assemble_system (); - void assemble_multigrid (); - void solve (); - void refine_grid (); - void output_results (const unsigned int cycle) const; - - Triangulation triangulation; - FE_Q fe; - MGDoFHandler mg_dof_handler; - - SparsityPattern sparsity_pattern; - SparseMatrix system_matrix; - - // We need an additional object for the - // hanging nodes constraints. They are - // handed to the transfer object in the - // multigrid. Since we call a compress - // inside the multigrid these constraints - // are not allowed to be inhomogeneous so - // we store them in different ConstraintMatrix - // objects. - ConstraintMatrix hanging_node_constraints; - ConstraintMatrix constraints; - - Vector solution; - Vector system_rhs; - - const unsigned int degree; - - // The following four objects are the - // only additional member variables, - // compared to step-6. They first three - // represent the - // operators that act on individual - // levels of the multilevel hierarchy, - // rather than on the finest mesh as do - // the objects above while the last object - // stores information about the boundary - // indices on each level and information - // about indices lying on a refinement - // edge between two different refinement - // levels. - // - // To facilitate having objects on each - // level of a multilevel hierarchy, - // deal.II has the MGLevelObject class - // template that provides storage for - // objects on each level. What we need - // here are matrices on each level, which - // implies that we also need sparsity - // patterns on each level. As outlined in - // the @ref mg_paper, the operators - // (matrices) that we need are actually - // twofold: one on the interior of each - // level, and one at the interface - // between each level and that part of - // the domain where the mesh is - // coarser. In fact, we will need the - // latter in two versions: for the - // direction from coarse to fine mesh and - // from fine to coarse. Fortunately, - // however, we here have a self-adjoint - // problem for which one of these is the - // transpose of the other, and so we only - // have to build one; we choose the one - // from coarse to fine. - MGLevelObject mg_sparsity_patterns; - MGLevelObject > mg_matrices; - MGLevelObject > mg_interface_matrices; - MGConstrainedDoFs mg_constrained_dofs; -}; - - - - // @sect3{Nonconstant coefficients} - - // The implementation of nonconstant - // coefficients is copied verbatim - // from step-5 and step-6: - -template -class Coefficient : public Function -{ - public: - Coefficient () : Function() {} - - virtual double value (const Point &p, - const unsigned int component = 0) const; - - virtual void value_list (const std::vector > &points, - std::vector &values, - const unsigned int component = 0) const; -}; - - - -template -double Coefficient::value (const Point &p, - const unsigned int) const -{ - if (p.square() < 0.5*0.5) - return 20; - else - return 1; -} + using namespace dealii; + + + // @sect3{The LaplaceProblem class template} + + // This main class is basically the same + // class as in step-6. 
As far as member + // functions is concerned, the only addition + // is the assemble_multigrid + // function that assembles the matrices that + // correspond to the discrete operators on + // intermediate levels: + template + class LaplaceProblem + { + public: + LaplaceProblem (const unsigned int deg); + void run (); + + private: + void setup_system (); + void assemble_system (); + void assemble_multigrid (); + void solve (); + void refine_grid (); + void output_results (const unsigned int cycle) const; + + Triangulation triangulation; + FE_Q fe; + MGDoFHandler mg_dof_handler; + + SparsityPattern sparsity_pattern; + SparseMatrix system_matrix; + + // We need an additional object for the + // hanging nodes constraints. They are + // handed to the transfer object in the + // multigrid. Since we call a compress + // inside the multigrid these constraints + // are not allowed to be inhomogeneous so + // we store them in different ConstraintMatrix + // objects. + ConstraintMatrix hanging_node_constraints; + ConstraintMatrix constraints; + + Vector solution; + Vector system_rhs; + + const unsigned int degree; + + // The following four objects are the + // only additional member variables, + // compared to step-6. They first three + // represent the + // operators that act on individual + // levels of the multilevel hierarchy, + // rather than on the finest mesh as do + // the objects above while the last object + // stores information about the boundary + // indices on each level and information + // about indices lying on a refinement + // edge between two different refinement + // levels. + // + // To facilitate having objects on each + // level of a multilevel hierarchy, + // deal.II has the MGLevelObject class + // template that provides storage for + // objects on each level. What we need + // here are matrices on each level, which + // implies that we also need sparsity + // patterns on each level. As outlined in + // the @ref mg_paper, the operators + // (matrices) that we need are actually + // twofold: one on the interior of each + // level, and one at the interface + // between each level and that part of + // the domain where the mesh is + // coarser. In fact, we will need the + // latter in two versions: for the + // direction from coarse to fine mesh and + // from fine to coarse. Fortunately, + // however, we here have a self-adjoint + // problem for which one of these is the + // transpose of the other, and so we only + // have to build one; we choose the one + // from coarse to fine. 
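The corresponding member declarations follow right below. Independently of this program, the storage class itself is easy to picture: an MGLevelObject is essentially an array indexed by level whose inclusive index range is set with resize(). A minimal sketch follows; the function name and the header location of MGLevelObject are assumptions made only for illustration:

#include <deal.II/base/mg_level_object.h>   // header location assumed
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/sparsity_pattern.h>

void allocate_level_objects (const unsigned int n_levels)
{
  dealii::MGLevelObject<dealii::SparsityPattern>       level_patterns;
  dealii::MGLevelObject<dealii::SparseMatrix<double> > level_matrices;

  // The two arguments are the coarsest and the finest level, as an
  // inclusive range:
  level_patterns.resize (0, n_levels-1);
  level_matrices.resize (0, n_levels-1);

  // Individual levels are then accessed like array elements: for example,
  // level_matrices[0] is the matrix on the coarsest level and
  // level_matrices[n_levels-1] the one on the finest level.
}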
+ MGLevelObject mg_sparsity_patterns; + MGLevelObject > mg_matrices; + MGLevelObject > mg_interface_matrices; + MGConstrainedDoFs mg_constrained_dofs; + }; + + + + // @sect3{Nonconstant coefficients} + + // The implementation of nonconstant + // coefficients is copied verbatim + // from step-5 and step-6: + + template + class Coefficient : public Function + { + public: + Coefficient () : Function() {} + + virtual double value (const Point &p, + const unsigned int component = 0) const; + + virtual void value_list (const std::vector > &points, + std::vector &values, + const unsigned int component = 0) const; + }; + + + + template + double Coefficient::value (const Point &p, + const unsigned int) const + { + if (p.square() < 0.5*0.5) + return 20; + else + return 1; + } -template -void Coefficient::value_list (const std::vector > &points, - std::vector &values, - const unsigned int component) const -{ - const unsigned int n_points = points.size(); + template + void Coefficient::value_list (const std::vector > &points, + std::vector &values, + const unsigned int component) const + { + const unsigned int n_points = points.size(); - Assert (values.size() == n_points, - ExcDimensionMismatch (values.size(), n_points)); + Assert (values.size() == n_points, + ExcDimensionMismatch (values.size(), n_points)); - Assert (component == 0, - ExcIndexRange (component, 0, 1)); + Assert (component == 0, + ExcIndexRange (component, 0, 1)); - for (unsigned int i=0; i::value (points[i]); -} + for (unsigned int i=0; i::value (points[i]); + } - // @sect3{The LaplaceProblem class implementation} + // @sect3{The LaplaceProblem class implementation} - // @sect4{LaplaceProblem::LaplaceProblem} + // @sect4{LaplaceProblem::LaplaceProblem} - // The constructor is left mostly - // unchanged. We take the polynomial degree - // of the finite elements to be used as a - // constructor argument and store it in a - // member variable. - // - // By convention, all adaptively refined - // triangulations in deal.II never change by - // more than one level across a face between - // cells. For our multigrid algorithms, - // however, we need a slightly stricter - // guarantee, namely that the mesh also does - // not change by more than refinement level - // across vertices that might connect two - // cells. In other words, we must prevent the - // following situation: - // - // @image html limit_level_difference_at_vertices.png "" - // - // This is achieved by passing the - // Triangulation::limit_level_difference_at_vertices - // flag to the constructor of the - // triangulation class. -template -LaplaceProblem::LaplaceProblem (const unsigned int degree) - : - triangulation (Triangulation:: - limit_level_difference_at_vertices), - fe (degree), - mg_dof_handler (triangulation), - degree(degree) -{} - - - - // @sect4{LaplaceProblem::setup_system} - - // The following function extends what the - // corresponding one in step-6 did. 
The top - // part, apart from the additional output, - // does the same: -template -void LaplaceProblem::setup_system () -{ - mg_dof_handler.distribute_dofs (fe); - - // Here we output not only the - // degrees of freedom on the finest - // level, but also in the - // multilevel structure - deallog << "Number of degrees of freedom: " - << mg_dof_handler.n_dofs(); - - for (unsigned int l=0;l + LaplaceProblem::LaplaceProblem (const unsigned int degree) + : + triangulation (Triangulation:: + limit_level_difference_at_vertices), + fe (degree), + mg_dof_handler (triangulation), + degree(degree) + {} + + + + // @sect4{LaplaceProblem::setup_system} + + // The following function extends what the + // corresponding one in step-6 did. The top + // part, apart from the additional output, + // does the same: + template + void LaplaceProblem::setup_system () + { + mg_dof_handler.distribute_dofs (fe); + + // Here we output not only the + // degrees of freedom on the finest + // level, but also in the + // multilevel structure + deallog << "Number of degrees of freedom: " + << mg_dof_handler.n_dofs(); + + for (unsigned int l=0;l::type dirichlet_boundary; + ZeroFunction homogeneous_dirichlet_bc (1); + dirichlet_boundary[0] = &homogeneous_dirichlet_bc; + VectorTools::interpolate_boundary_values (static_cast&>(mg_dof_handler), + dirichlet_boundary, + constraints); + constraints.close (); + hanging_node_constraints.close (); + constraints.condense (sparsity_pattern); + sparsity_pattern.compress(); + system_matrix.reinit (sparsity_pattern); + + // The multigrid constraints have to be + // initialized. They need to know about + // the boundary values as well, so we + // pass the dirichlet_boundary + // here as well. + mg_constrained_dofs.clear(); + mg_constrained_dofs.initialize(mg_dof_handler, dirichlet_boundary); + + + // Now for the things that concern the + // multigrid data structures. First, we + // resize the multi-level objects to hold + // matrices and sparsity patterns for every + // level. The coarse level is zero (this is + // mandatory right now but may change in a + // future revision). Note that these + // functions take a complete, inclusive + // range here (not a starting index and + // size), so the finest level is + // n_levels-1. We first have + // to resize the container holding the + // SparseMatrix classes, since they have to + // release their SparsityPattern before the + // can be destroyed upon resizing. + const unsigned int n_levels = triangulation.n_levels(); + + mg_interface_matrices.resize(0, n_levels-1); + mg_interface_matrices.clear (); + mg_matrices.resize(0, n_levels-1); + mg_matrices.clear (); + mg_sparsity_patterns.resize(0, n_levels-1); + + // Now, we have to provide a matrix on each + // level. To this end, we first use the + // MGTools::make_sparsity_pattern function + // to first generate a preliminary + // compressed sparsity pattern on each + // level (see the @ref Sparsity module for + // more information on this topic) and then + // copy it over to the one we really + // want. The next step is to initialize + // both kinds of level matrices with these + // sparsity patterns. + // + // It may be worth pointing out that the + // interface matrices only have entries for + // degrees of freedom that sit at or next + // to the interface between coarser and + // finer levels of the mesh. They are + // therefore even sparser than the matrices + // on the individual levels of our + // multigrid hierarchy. 
If we were more + // concerned about memory usage (and + // possibly the speed with which we can + // multiply with these matrices), we should + // use separate and different sparsity + // patterns for these two kinds of + // matrices. + for (unsigned int level=0; level::type dirichlet_boundary; - ZeroFunction homogeneous_dirichlet_bc (1); - dirichlet_boundary[0] = &homogeneous_dirichlet_bc; - VectorTools::interpolate_boundary_values (static_cast&>(mg_dof_handler), - dirichlet_boundary, - constraints); - constraints.close (); - hanging_node_constraints.close (); - constraints.condense (sparsity_pattern); - sparsity_pattern.compress(); - system_matrix.reinit (sparsity_pattern); - - // The multigrid constraints have to be - // initialized. They need to know about - // the boundary values as well, so we - // pass the dirichlet_boundary - // here as well. - mg_constrained_dofs.clear(); - mg_constrained_dofs.initialize(mg_dof_handler, dirichlet_boundary); - - - // Now for the things that concern the - // multigrid data structures. First, we - // resize the multi-level objects to hold - // matrices and sparsity patterns for every - // level. The coarse level is zero (this is - // mandatory right now but may change in a - // future revision). Note that these - // functions take a complete, inclusive - // range here (not a starting index and - // size), so the finest level is - // n_levels-1. We first have - // to resize the container holding the - // SparseMatrix classes, since they have to - // release their SparsityPattern before the - // can be destroyed upon resizing. - const unsigned int n_levels = triangulation.n_levels(); - - mg_interface_matrices.resize(0, n_levels-1); - mg_interface_matrices.clear (); - mg_matrices.resize(0, n_levels-1); - mg_matrices.clear (); - mg_sparsity_patterns.resize(0, n_levels-1); - - // Now, we have to provide a matrix on each - // level. To this end, we first use the - // MGTools::make_sparsity_pattern function - // to first generate a preliminary - // compressed sparsity pattern on each - // level (see the @ref Sparsity module for - // more information on this topic) and then - // copy it over to the one we really - // want. The next step is to initialize - // both kinds of level matrices with these - // sparsity patterns. + // matrix. This is not only simpler but also + // more efficient for large problems. // - // It may be worth pointing out that the - // interface matrices only have entries for - // degrees of freedom that sit at or next - // to the interface between coarser and - // finer levels of the mesh. They are - // therefore even sparser than the matrices - // on the individual levels of our - // multigrid hierarchy. If we were more - // concerned about memory usage (and - // possibly the speed with which we can - // multiply with these matrices), we should - // use separate and different sparsity - // patterns for these two kinds of - // matrices. 
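The level loop that follows implements this setup. Written out with its template arguments, the work done for a single level looks roughly like the sketch below; the free function and its parameter list are purely illustrative (in the program this happens inline in setup_system()), and the header paths are assumptions:

#include <deal.II/base/mg_level_object.h>
#include <deal.II/lac/compressed_sparsity_pattern.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/sparsity_pattern.h>
#include <deal.II/multigrid/mg_dof_handler.h>
#include <deal.II/multigrid/mg_tools.h>

template <int dim>
void setup_one_level (const dealii::MGDoFHandler<dim>                      &mg_dof_handler,
                      dealii::MGLevelObject<dealii::SparsityPattern>       &mg_sparsity_patterns,
                      dealii::MGLevelObject<dealii::SparseMatrix<double> > &mg_matrices,
                      dealii::MGLevelObject<dealii::SparseMatrix<double> > &mg_interface_matrices,
                      const unsigned int                                    level)
{
  // Build a preliminary, dynamically growing pattern for this level...
  dealii::CompressedSparsityPattern csp (mg_dof_handler.n_dofs(level),
                                         mg_dof_handler.n_dofs(level));
  dealii::MGTools::make_sparsity_pattern (mg_dof_handler, csp, level);

  // ...then copy it into the static pattern for the level and initialize
  // both the level matrix and the interface matrix with it:
  mg_sparsity_patterns[level].copy_from (csp);
  mg_matrices[level].reinit (mg_sparsity_patterns[level]);
  mg_interface_matrices[level].reinit (mg_sparsity_patterns[level]);
}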
- for (unsigned int level=0; level -void LaplaceProblem::assemble_system () -{ - const QGauss quadrature_formula(degree+1); - - FEValues fe_values (fe, quadrature_formula, - update_values | update_gradients | - update_quadrature_points | update_JxW_values); - - const unsigned int dofs_per_cell = fe.dofs_per_cell; - const unsigned int n_q_points = quadrature_formula.size(); - - FullMatrix cell_matrix (dofs_per_cell, dofs_per_cell); - Vector cell_rhs (dofs_per_cell); - - std::vector local_dof_indices (dofs_per_cell); - - const Coefficient coefficient; - std::vector coefficient_values (n_q_points); - - typename MGDoFHandler::active_cell_iterator - cell = mg_dof_handler.begin_active(), - endc = mg_dof_handler.end(); - for (; cell!=endc; ++cell) - { - cell_matrix = 0; - cell_rhs = 0; - - fe_values.reinit (cell); - - coefficient.value_list (fe_values.get_quadrature_points(), - coefficient_values); - - for (unsigned int q_point=0; q_point + void LaplaceProblem::assemble_system () + { + const QGauss quadrature_formula(degree+1); + + FEValues fe_values (fe, quadrature_formula, + update_values | update_gradients | + update_quadrature_points | update_JxW_values); + + const unsigned int dofs_per_cell = fe.dofs_per_cell; + const unsigned int n_q_points = quadrature_formula.size(); + + FullMatrix cell_matrix (dofs_per_cell, dofs_per_cell); + Vector cell_rhs (dofs_per_cell); + + std::vector local_dof_indices (dofs_per_cell); + + const Coefficient coefficient; + std::vector coefficient_values (n_q_points); + + typename MGDoFHandler::active_cell_iterator + cell = mg_dof_handler.begin_active(), + endc = mg_dof_handler.end(); + for (; cell!=endc; ++cell) + { + cell_matrix = 0; + cell_rhs = 0; + + fe_values.reinit (cell); + + coefficient.value_list (fe_values.get_quadrature_points(), + coefficient_values); + + for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); + constraints.distribute_local_to_global (cell_matrix, cell_rhs, + local_dof_indices, + system_matrix, system_rhs); + } + } + + + // @sect4{LaplaceProblem::assemble_multigrid} + + // The next function is the one that builds + // the linear operators (matrices) that + // define the multigrid method on each level + // of the mesh. The integration core is the + // same as above, but the loop below will go + // over all existing cells instead of just + // the active ones, and the results must be + // entered into the correct matrix. Note also + // that since we only do multi-level + // preconditioning, no right-hand side needs + // to be assembled here. + // + // Before we go there, however, we have to + // take care of a significant amount of book + // keeping: + template + void LaplaceProblem::assemble_multigrid () + { + QGauss quadrature_formula(1+degree); + + FEValues fe_values (fe, quadrature_formula, + update_values | update_gradients | + update_quadrature_points | update_JxW_values); + + const unsigned int dofs_per_cell = fe.dofs_per_cell; + const unsigned int n_q_points = quadrature_formula.size(); + + FullMatrix cell_matrix (dofs_per_cell, dofs_per_cell); + + std::vector local_dof_indices (dofs_per_cell); + + const Coefficient coefficient; + std::vector coefficient_values (n_q_points); + + // Next a few things that are specific to + // building the multigrid data structures + // (since we only need them in the current + // function, rather than also elsewhere, we + // build them here instead of the + // setup_system + // function). 
Some of the following may be + // a bit obscure if you're not familiar + // with the algorithm actually implemented + // in deal.II to support multilevel + // algorithms on adaptive meshes; if some + // of the things below seem strange, take a + // look at the @ref mg_paper. + // + // Our first job is to identify those + // degrees of freedom on each level that + // are located on interfaces between + // adaptively refined levels, and those + // that lie on the interface but also on + // the exterior boundary of the domain. As + // in many other parts of the library, we + // do this by using boolean masks, + // i.e. vectors of booleans each element of + // which indicates whether the + // corresponding degree of freedom index is + // an interface DoF or not. The MGConstraints + // already computed the information for us + // when we called initialize in setup_system(). + std::vector > interface_dofs + = mg_constrained_dofs.get_refinement_edge_indices (); + std::vector > boundary_interface_dofs + = mg_constrained_dofs.get_refinement_edge_boundary_indices (); + + // The indices just identified will later + // be used to decide where the assembled value + // has to be added into on each level. + // On the other hand, + // we also have to impose zero boundary + // conditions on the external boundary of + // each level. But this the MGConstraints + // knows it. So we simply ask for them by calling + // get_boundary_indices (). + // The third step is to construct + // constraints on all those degrees of + // freedom: their value should be zero + // after each application of the level + // operators. To this end, we construct + // ConstraintMatrix objects for each level, + // and add to each of these constraints for + // each degree of freedom. Due to the way + // the ConstraintMatrix stores its data, + // the function to add a constraint on a + // single degree of freedom and force it to + // be zero is called + // Constraintmatrix::add_line(); doing so + // for several degrees of freedom at once + // can be done using + // Constraintmatrix::add_lines(): + std::vector boundary_constraints (triangulation.n_levels()); + std::vector boundary_interface_constraints (triangulation.n_levels()); + for (unsigned int level=0; levelassemble_system, with two + // exceptions: (i) we don't need a right + // hand side, and more significantly (ii) we + // don't just loop over all active cells, + // but in fact all cells, active or + // not. Consequently, the correct iterator + // to use is MGDoFHandler::cell_iterator + // rather than + // MGDoFHandler::active_cell_iterator. Let's + // go about it: + typename MGDoFHandler::cell_iterator cell = mg_dof_handler.begin(), + endc = mg_dof_handler.end(); + + for (; cell!=endc; ++cell) + { + cell_matrix = 0; + fe_values.reinit (cell); + + coefficient.value_list (fe_values.get_quadrature_points(), + coefficient_values); + + for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); - constraints.distribute_local_to_global (cell_matrix, cell_rhs, - local_dof_indices, - system_matrix, system_rhs); - } -} - - - // @sect4{LaplaceProblem::assemble_multigrid} - - // The next function is the one that builds - // the linear operators (matrices) that - // define the multigrid method on each level - // of the mesh. The integration core is the - // same as above, but the loop below will go - // over all existing cells instead of just - // the active ones, and the results must be - // entered into the correct matrix. 
Note also - // that since we only do multi-level - // preconditioning, no right-hand side needs - // to be assembled here. - // - // Before we go there, however, we have to - // take care of a significant amount of book - // keeping: -template -void LaplaceProblem::assemble_multigrid () -{ - QGauss quadrature_formula(1+degree); - - FEValues fe_values (fe, quadrature_formula, - update_values | update_gradients | - update_quadrature_points | update_JxW_values); - - const unsigned int dofs_per_cell = fe.dofs_per_cell; - const unsigned int n_q_points = quadrature_formula.size(); - - FullMatrix cell_matrix (dofs_per_cell, dofs_per_cell); - - std::vector local_dof_indices (dofs_per_cell); - - const Coefficient coefficient; - std::vector coefficient_values (n_q_points); - - // Next a few things that are specific to - // building the multigrid data structures - // (since we only need them in the current - // function, rather than also elsewhere, we - // build them here instead of the - // setup_system - // function). Some of the following may be - // a bit obscure if you're not familiar - // with the algorithm actually implemented - // in deal.II to support multilevel - // algorithms on adaptive meshes; if some - // of the things below seem strange, take a - // look at the @ref mg_paper. - // - // Our first job is to identify those - // degrees of freedom on each level that - // are located on interfaces between - // adaptively refined levels, and those - // that lie on the interface but also on - // the exterior boundary of the domain. As - // in many other parts of the library, we - // do this by using boolean masks, - // i.e. vectors of booleans each element of - // which indicates whether the - // corresponding degree of freedom index is - // an interface DoF or not. The MGConstraints - // already computed the information for us - // when we called initialize in setup_system(). - std::vector > interface_dofs - = mg_constrained_dofs.get_refinement_edge_indices (); - std::vector > boundary_interface_dofs - = mg_constrained_dofs.get_refinement_edge_boundary_indices (); - - // The indices just identified will later - // be used to decide where the assembled value - // has to be added into on each level. - // On the other hand, - // we also have to impose zero boundary - // conditions on the external boundary of - // each level. But this the MGConstraints - // knows it. So we simply ask for them by calling - // get_boundary_indices (). - // The third step is to construct - // constraints on all those degrees of - // freedom: their value should be zero - // after each application of the level - // operators. To this end, we construct - // ConstraintMatrix objects for each level, - // and add to each of these constraints for - // each degree of freedom. Due to the way - // the ConstraintMatrix stores its data, - // the function to add a constraint on a - // single degree of freedom and force it to - // be zero is called - // Constraintmatrix::add_line(); doing so - // for several degrees of freedom at once - // can be done using - // Constraintmatrix::add_lines(): - std::vector boundary_constraints (triangulation.n_levels()); - std::vector boundary_interface_constraints (triangulation.n_levels()); - for (unsigned int level=0; levelassemble_system, with two - // exceptions: (i) we don't need a right - // hand side, and more significantly (ii) we - // don't just loop over all active cells, - // but in fact all cells, active or - // not. 
Consequently, the correct iterator - // to use is MGDoFHandler::cell_iterator - // rather than - // MGDoFHandler::active_cell_iterator. Let's - // go about it: - typename MGDoFHandler::cell_iterator cell = mg_dof_handler.begin(), - endc = mg_dof_handler.end(); - - for (; cell!=endc; ++cell) - { - cell_matrix = 0; - fe_values.reinit (cell); - - coefficient.value_list (fe_values.get_quadrature_points(), - coefficient_values); - - for (unsigned int q_point=0; q_pointget_mg_dof_indices (local_dof_indices); + + // Next, we need to copy local + // contributions into the level + // objects. We can do this in the same + // way as in the global assembly, using + // a constraint object that takes care + // of constrained degrees (which here + // are only boundary nodes, as the + // individual levels have no hanging + // node constraints). Note that the + // boundary_constraints + // object makes sure that the level + // matrices contains no contributions + // from degrees of freedom at the + // interface between cells of different + // refinement level. + boundary_constraints[cell->level()] + .distribute_local_to_global (cell_matrix, + local_dof_indices, + mg_matrices[cell->level()]); + + // The next step is again slightly more + // obscure (but explained in the @ref + // mg_paper): We need the remainder of + // the operator that we just copied + // into the mg_matrices + // object, namely the part on the + // interface between cells at the + // current level and cells one level + // coarser. This matrix exists in two + // directions: for interior DoFs (index + // $i$) of the current level to those + // sitting on the interface (index + // $j$), and the other way around. Of + // course, since we have a symmetric + // operator, one of these matrices is + // the transpose of the other. + // + // The way we assemble these matrices + // is as follows: since the are formed + // from parts of the local + // contributions, we first delete all + // those parts of the local + // contributions that we are not + // interested in, namely all those + // elements of the local matrix for + // which not $i$ is an interface DoF + // and $j$ is not. The result is one of + // the two matrices that we are + // interested in, and we then copy it + // into the + // mg_interface_matrices + // object. The + // boundary_interface_constraints + // object at the same time makes sure + // that we delete contributions from + // all degrees of freedom that are not + // only on the interface but also on + // the external boundary of the domain. + // + // The last part to remember is how to + // get the other matrix. Since it is + // only the transpose, we will later + // (in the solve() + // function) be able to just pass the + // transpose matrix where necessary. for (unsigned int i=0; iget_mg_dof_indices (local_dof_indices); - - // Next, we need to copy local - // contributions into the level - // objects. We can do this in the same - // way as in the global assembly, using - // a constraint object that takes care - // of constrained degrees (which here - // are only boundary nodes, as the - // individual levels have no hanging - // node constraints). Note that the - // boundary_constraints - // object makes sure that the level - // matrices contains no contributions - // from degrees of freedom at the - // interface between cells of different - // refinement level. 
- boundary_constraints[cell->level()] - .distribute_local_to_global (cell_matrix, - local_dof_indices, - mg_matrices[cell->level()]); - - // The next step is again slightly more - // obscure (but explained in the @ref - // mg_paper): We need the remainder of - // the operator that we just copied - // into the mg_matrices - // object, namely the part on the - // interface between cells at the - // current level and cells one level - // coarser. This matrix exists in two - // directions: for interior DoFs (index - // $i$) of the current level to those - // sitting on the interface (index - // $j$), and the other way around. Of - // course, since we have a symmetric - // operator, one of these matrices is - // the transpose of the other. - // - // The way we assemble these matrices - // is as follows: since the are formed - // from parts of the local - // contributions, we first delete all - // those parts of the local - // contributions that we are not - // interested in, namely all those - // elements of the local matrix for - // which not $i$ is an interface DoF - // and $j$ is not. The result is one of - // the two matrices that we are - // interested in, and we then copy it - // into the - // mg_interface_matrices - // object. The - // boundary_interface_constraints - // object at the same time makes sure - // that we delete contributions from - // all degrees of freedom that are not - // only on the interface but also on - // the external boundary of the domain. - // - // The last part to remember is how to - // get the other matrix. Since it is - // only the transpose, we will later - // (in the solve() - // function) be able to just pass the - // transpose matrix where necessary. - for (unsigned int i=0; ilevel()][local_dof_indices[i]]==true && - interface_dofs[cell->level()][local_dof_indices[j]]==false)) - cell_matrix(i,j) = 0; - - boundary_interface_constraints[cell->level()] - .distribute_local_to_global (cell_matrix, - local_dof_indices, - mg_interface_matrices[cell->level()]); - } -} + if( !(interface_dofs[cell->level()][local_dof_indices[i]]==true && + interface_dofs[cell->level()][local_dof_indices[j]]==false)) + cell_matrix(i,j) = 0; + boundary_interface_constraints[cell->level()] + .distribute_local_to_global (cell_matrix, + local_dof_indices, + mg_interface_matrices[cell->level()]); + } + } - // @sect4{LaplaceProblem::solve} - // This is the other function that is - // significantly different in support of the - // multigrid solver (or, in fact, the - // preconditioner for which we use the - // multigrid method). - // - // Let us start out by setting up two of the - // components of multilevel methods: transfer - // operators between levels, and a solver on - // the coarsest level. In finite element - // methods, the transfer operators are - // derived from the finite element function - // spaces involved and can often be computed - // in a generic way independent of the - // problem under consideration. In that case, - // we can use the MGTransferPrebuilt class - // that, given the constraints on the global - // level and an MGDoFHandler object computes - // the matrices corresponding to these - // transfer operators. - // - // The second part of the following lines - // deals with the coarse grid solver. Since - // our coarse grid is very coarse indeed, we - // decide for a direct solver (a Householder - // decomposition of the coarsest level - // matrix), even if its implementation is not - // particularly sophisticated. 
If our coarse - // mesh had many more cells than the five we - // have here, something better suited would - // obviously be necessary here. -template -void LaplaceProblem::solve () -{ + // @sect4{LaplaceProblem::solve} - // Create the object that deals with the transfer - // between different refinement levels. We need to - // pass it the hanging node constraints. - MGTransferPrebuilt > mg_transfer(hanging_node_constraints, mg_constrained_dofs); - // Now the prolongation matrix has to be built. - // This matrix needs to take the boundary values on - // each level into account and needs to know about - // the indices at the refinement egdes. The - // MGConstraints knows about that so - // pass it as an argument. - mg_transfer.build_matrices(mg_dof_handler); - - FullMatrix coarse_matrix; - coarse_matrix.copy_from (mg_matrices[0]); - MGCoarseGridHouseholder<> coarse_grid_solver; - coarse_grid_solver.initialize (coarse_matrix); - - // The next component of a multilevel - // solver or preconditioner is that we need - // a smoother on each level. A common - // choice for this is to use the - // application of a relaxation method (such - // as the SOR, Jacobi or Richardson method) - // or a small number of iterations of a - // solver method (such as CG or GMRES). The - // MGSmootherRelaxation and - // MGSmootherPrecondition classes provide - // support for these two kinds of - // smoothers. Here, we opt for the - // application of a single SOR - // iteration. To this end, we define an - // appropriate typedef and - // then setup a smoother object. - // - // Since this smoother needs temporary - // vectors to store intermediate results, - // we need to provide a VectorMemory - // object. Since these vectors will be - // reused over and over, the - // GrowingVectorMemory is more time - // efficient than the PrimitiveVectorMemory - // class in the current case. + // This is the other function that is + // significantly different in support of the + // multigrid solver (or, in fact, the + // preconditioner for which we use the + // multigrid method). // - // The last step is to initialize the - // smoother object with our level matrices - // and to set some smoothing parameters. - // The initialize() function - // can optionally take additional arguments - // that will be passed to the smoother - // object on each level. In the current - // case for the SOR smoother, this could, - // for example, include a relaxation - // parameter. However, we here leave these - // at their default values. The call to - // set_steps() indicates that - // we will use two pre- and two - // post-smoothing steps on each level; to - // use a variable number of smoother steps - // on different levels, more options can be - // set in the constructor call to the - // mg_smoother object. + // Let us start out by setting up two of the + // components of multilevel methods: transfer + // operators between levels, and a solver on + // the coarsest level. In finite element + // methods, the transfer operators are + // derived from the finite element function + // spaces involved and can often be computed + // in a generic way independent of the + // problem under consideration. In that case, + // we can use the MGTransferPrebuilt class + // that, given the constraints on the global + // level and an MGDoFHandler object computes + // the matrices corresponding to these + // transfer operators. 
// - // The last step results from the fact that - // we use the SOR method as a smoother - - // which is not symmetric - but we use the - // conjugate gradient iteration (which - // requires a symmetric preconditioner) - // below, we need to let the multilevel - // preconditioner make sure that we get a - // symmetric operator even for nonsymmetric - // smoothers: - typedef PreconditionSOR > Smoother; - GrowingVectorMemory<> vector_memory; - MGSmootherRelaxation, Smoother, Vector > - mg_smoother(vector_memory); - mg_smoother.initialize(mg_matrices); - mg_smoother.set_steps(2); - mg_smoother.set_symmetric(true); - - // The next preparatory step is that we - // must wrap our level and interface - // matrices in an object having the - // required multiplication functions. We - // will create two objects for the - // interface objects going from coarse to - // fine and the other way around; the - // multigrid algorithm will later use the - // transpose operator for the latter - // operation, allowing us to initialize - // both up and down versions of the - // operator with the matrices we already - // built: - MGMatrix<> mg_matrix(&mg_matrices); - MGMatrix<> mg_interface_up(&mg_interface_matrices); - MGMatrix<> mg_interface_down(&mg_interface_matrices); - - // Now, we are ready to set up the - // V-cycle operator and the - // multilevel preconditioner. - Multigrid > mg(mg_dof_handler, - mg_matrix, - coarse_grid_solver, - mg_transfer, - mg_smoother, - mg_smoother); - mg.set_edge_matrices(mg_interface_down, mg_interface_up); - - PreconditionMG, MGTransferPrebuilt > > - preconditioner(mg_dof_handler, mg, mg_transfer); - - // With all this together, we can finally - // get about solving the linear system in - // the usual way: - SolverControl solver_control (1000, 1e-12); - SolverCG<> cg (solver_control); - - solution = 0; - - cg.solve (system_matrix, solution, system_rhs, - preconditioner); - constraints.distribute (solution); - - std::cout << " " << solver_control.last_step() - << " CG iterations needed to obtain convergence." - << std::endl; -} - - - - // @sect4{Postprocessing} - - // The following two functions postprocess a - // solution once it is computed. In - // particular, the first one refines the mesh - // at the beginning of each cycle while the - // second one outputs results at the end of - // each such cycle. The functions are almost - // unchanged from those in step-6, with the - // exception of two minor differences: The - // KellyErrorEstimator::estimate function - // wants an argument of type DoFHandler, not - // MGDoFHandler, and so we have to cast from - // derived to base class; and we generate - // output in VTK format, to use the more - // modern visualization programs available - // today compared to those that were - // available when step-6 was written. 
-template -void LaplaceProblem::refine_grid () -{ - Vector estimated_error_per_cell (triangulation.n_active_cells()); - - KellyErrorEstimator::estimate (static_cast&>(mg_dof_handler), - QGauss(3), - typename FunctionMap::type(), - solution, - estimated_error_per_cell); - GridRefinement::refine_and_coarsen_fixed_number (triangulation, - estimated_error_per_cell, - 0.3, 0.03); - triangulation.execute_coarsening_and_refinement (); -} - - - -template -void LaplaceProblem::output_results (const unsigned int cycle) const -{ - DataOut data_out; - - data_out.attach_dof_handler (mg_dof_handler); - data_out.add_data_vector (solution, "solution"); - data_out.build_patches (); - - std::ostringstream filename; - filename << "solution-" - << cycle - << ".vtk"; - - std::ofstream output (filename.str().c_str()); - data_out.write_vtk (output); -} - - - // @sect4{LaplaceProblem::run} - - // Like several of the functions above, this - // is almost exactly a copy of of the - // corresponding function in step-6. The only - // difference is the call to - // assemble_multigrid that takes - // care of forming the matrices on every - // level that we need in the multigrid - // method. -template -void LaplaceProblem::run () -{ - for (unsigned int cycle=0; cycle<8; ++cycle) - { - std::cout << "Cycle " << cycle << ':' << std::endl; - - if (cycle == 0) - { - GridGenerator::hyper_ball (triangulation); + // The second part of the following lines + // deals with the coarse grid solver. Since + // our coarse grid is very coarse indeed, we + // decide for a direct solver (a Householder + // decomposition of the coarsest level + // matrix), even if its implementation is not + // particularly sophisticated. If our coarse + // mesh had many more cells than the five we + // have here, something better suited would + // obviously be necessary here. + template + void LaplaceProblem::solve () + { + + // Create the object that deals with the transfer + // between different refinement levels. We need to + // pass it the hanging node constraints. + MGTransferPrebuilt > mg_transfer(hanging_node_constraints, mg_constrained_dofs); + // Now the prolongation matrix has to be built. + // This matrix needs to take the boundary values on + // each level into account and needs to know about + // the indices at the refinement egdes. The + // MGConstraints knows about that so + // pass it as an argument. + mg_transfer.build_matrices(mg_dof_handler); + + FullMatrix coarse_matrix; + coarse_matrix.copy_from (mg_matrices[0]); + MGCoarseGridHouseholder<> coarse_grid_solver; + coarse_grid_solver.initialize (coarse_matrix); + + // The next component of a multilevel + // solver or preconditioner is that we need + // a smoother on each level. A common + // choice for this is to use the + // application of a relaxation method (such + // as the SOR, Jacobi or Richardson method) + // or a small number of iterations of a + // solver method (such as CG or GMRES). The + // MGSmootherRelaxation and + // MGSmootherPrecondition classes provide + // support for these two kinds of + // smoothers. Here, we opt for the + // application of a single SOR + // iteration. To this end, we define an + // appropriate typedef and + // then setup a smoother object. + // + // Since this smoother needs temporary + // vectors to store intermediate results, + // we need to provide a VectorMemory + // object. 
Since these vectors will be + // reused over and over, the + // GrowingVectorMemory is more time + // efficient than the PrimitiveVectorMemory + // class in the current case. + // + // The last step is to initialize the + // smoother object with our level matrices + // and to set some smoothing parameters. + // The initialize() function + // can optionally take additional arguments + // that will be passed to the smoother + // object on each level. In the current + // case for the SOR smoother, this could, + // for example, include a relaxation + // parameter. However, we here leave these + // at their default values. The call to + // set_steps() indicates that + // we will use two pre- and two + // post-smoothing steps on each level; to + // use a variable number of smoother steps + // on different levels, more options can be + // set in the constructor call to the + // mg_smoother object. + // + // The last step results from the fact that + // we use the SOR method as a smoother - + // which is not symmetric - but we use the + // conjugate gradient iteration (which + // requires a symmetric preconditioner) + // below, we need to let the multilevel + // preconditioner make sure that we get a + // symmetric operator even for nonsymmetric + // smoothers: + typedef PreconditionSOR > Smoother; + GrowingVectorMemory<> vector_memory; + MGSmootherRelaxation, Smoother, Vector > + mg_smoother(vector_memory); + mg_smoother.initialize(mg_matrices); + mg_smoother.set_steps(2); + mg_smoother.set_symmetric(true); + + // The next preparatory step is that we + // must wrap our level and interface + // matrices in an object having the + // required multiplication functions. We + // will create two objects for the + // interface objects going from coarse to + // fine and the other way around; the + // multigrid algorithm will later use the + // transpose operator for the latter + // operation, allowing us to initialize + // both up and down versions of the + // operator with the matrices we already + // built: + MGMatrix<> mg_matrix(&mg_matrices); + MGMatrix<> mg_interface_up(&mg_interface_matrices); + MGMatrix<> mg_interface_down(&mg_interface_matrices); + + // Now, we are ready to set up the + // V-cycle operator and the + // multilevel preconditioner. + Multigrid > mg(mg_dof_handler, + mg_matrix, + coarse_grid_solver, + mg_transfer, + mg_smoother, + mg_smoother); + mg.set_edge_matrices(mg_interface_down, mg_interface_up); + + PreconditionMG, MGTransferPrebuilt > > + preconditioner(mg_dof_handler, mg, mg_transfer); + + // With all this together, we can finally + // get about solving the linear system in + // the usual way: + SolverControl solver_control (1000, 1e-12); + SolverCG<> cg (solver_control); + + solution = 0; + + cg.solve (system_matrix, solution, system_rhs, + preconditioner); + constraints.distribute (solution); + + std::cout << " " << solver_control.last_step() + << " CG iterations needed to obtain convergence." + << std::endl; + } + + + + // @sect4{Postprocessing} + + // The following two functions postprocess a + // solution once it is computed. In + // particular, the first one refines the mesh + // at the beginning of each cycle while the + // second one outputs results at the end of + // each such cycle. 
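Before moving on to the postprocessing functions: the smoother and preconditioner setup shown above is the part of solve() that suffers most from the lost template arguments. In readable form it presumably amounts to the following sketch (again assuming SparseMatrix<double> level matrices and Vector<double> vectors):

   typedef PreconditionSOR<SparseMatrix<double> > Smoother;

   GrowingVectorMemory<> vector_memory;
   MGSmootherRelaxation<SparseMatrix<double>, Smoother, Vector<double> >
     mg_smoother (vector_memory);
   mg_smoother.initialize (mg_matrices);
   mg_smoother.set_steps (2);        // two pre- and two post-smoothing steps
   mg_smoother.set_symmetric (true); // symmetrize the nonsymmetric SOR smoother

   MGMatrix<> mg_matrix (&mg_matrices);
   MGMatrix<> mg_interface_up (&mg_interface_matrices);
   MGMatrix<> mg_interface_down (&mg_interface_matrices);

   Multigrid<Vector<double> > mg (mg_dof_handler,
                                  mg_matrix,
                                  coarse_grid_solver,
                                  mg_transfer,
                                  mg_smoother,
                                  mg_smoother);
   mg.set_edge_matrices (mg_interface_down, mg_interface_up);

   PreconditionMG<dim, Vector<double>, MGTransferPrebuilt<Vector<double> > >
     preconditioner (mg_dof_handler, mg, mg_transfer);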
The functions are almost + // unchanged from those in step-6, with the + // exception of two minor differences: The + // KellyErrorEstimator::estimate function + // wants an argument of type DoFHandler, not + // MGDoFHandler, and so we have to cast from + // derived to base class; and we generate + // output in VTK format, to use the more + // modern visualization programs available + // today compared to those that were + // available when step-6 was written. + template + void LaplaceProblem::refine_grid () + { + Vector estimated_error_per_cell (triangulation.n_active_cells()); + + KellyErrorEstimator::estimate (static_cast&>(mg_dof_handler), + QGauss(3), + typename FunctionMap::type(), + solution, + estimated_error_per_cell); + GridRefinement::refine_and_coarsen_fixed_number (triangulation, + estimated_error_per_cell, + 0.3, 0.03); + triangulation.execute_coarsening_and_refinement (); + } + + + + template + void LaplaceProblem::output_results (const unsigned int cycle) const + { + DataOut data_out; + + data_out.attach_dof_handler (mg_dof_handler); + data_out.add_data_vector (solution, "solution"); + data_out.build_patches (); + + std::ostringstream filename; + filename << "solution-" + << cycle + << ".vtk"; + + std::ofstream output (filename.str().c_str()); + data_out.write_vtk (output); + } + + + // @sect4{LaplaceProblem::run} + + // Like several of the functions above, this + // is almost exactly a copy of of the + // corresponding function in step-6. The only + // difference is the call to + // assemble_multigrid that takes + // care of forming the matrices on every + // level that we need in the multigrid + // method. + template + void LaplaceProblem::run () + { + for (unsigned int cycle=0; cycle<8; ++cycle) + { + std::cout << "Cycle " << cycle << ':' << std::endl; + + if (cycle == 0) + { + GridGenerator::hyper_ball (triangulation); - static const HyperBallBoundary boundary; - triangulation.set_boundary (0, boundary); + static const HyperBallBoundary boundary; + triangulation.set_boundary (0, boundary); - triangulation.refine_global (1); - } - else - refine_grid (); + triangulation.refine_global (1); + } + else + refine_grid (); - std::cout << " Number of active cells: " - << triangulation.n_active_cells() - << std::endl; + std::cout << " Number of active cells: " + << triangulation.n_active_cells() + << std::endl; - setup_system (); + setup_system (); - std::cout << " Number of degrees of freedom: " - << mg_dof_handler.n_dofs() - << " (by level: "; - for (unsigned int level=0; level laplace_problem(1); diff --git a/deal.II/examples/step-17/step-17.cc b/deal.II/examples/step-17/step-17.cc index 0a09bb1b44..191a0134d5 100644 --- a/deal.II/examples/step-17/step-17.cc +++ b/deal.II/examples/step-17/step-17.cc @@ -3,7 +3,7 @@ /* $Id$ */ /* */ -/* Copyright (C) 2000, 2004, 2005, 2006, 2007, 2008, 2009 by the deal.II authors */ +/* Copyright (C) 2000, 2004, 2005, 2006, 2007, 2008, 2009, 2011 by the deal.II authors */ /* */ /* This file is subject to QPL and may not be distributed */ /* without copyright and license information. Please refer */ @@ -99,1112 +99,1115 @@ // The last step is as in all // previous programs: -using namespace dealii; - - // Now, here comes the declaration of the - // main class and of various other things - // below it. As mentioned in the - // introduction, almost all of this has been - // copied verbatim from step-8, so we only - // comment on the few things that are - // different. 
There is one (cosmetic) change - // in that we let solve return a value, - // namely the number of iterations it took to - // converge, so that we can output this to - // the screen at the appropriate place. In - // addition, we introduce a stream-like - // variable pcout, explained below: -template -class ElasticProblem +namespace Step17 { - public: - ElasticProblem (); - ~ElasticProblem (); - void run (); - - private: - void setup_system (); - void assemble_system (); - unsigned int solve (); - void refine_grid (); - void output_results (const unsigned int cycle) const; - - // The first variable is basically only - // for convenience: in %parallel program, - // if each process outputs status - // information, then there quickly is a - // lot of clutter. Rather, we would want - // to only have one process output - // everything once, for example the one - // with process number - // zero. ConditionalOStream does - // exactly this: it acts as if it were a - // stream, but only forwards to a real, - // underlying stream if a flag is set. By - // setting this condition to - // this_mpi_process==0, we make sure - // that output is only generated from the - // first process and that we don't get - // the same lines of output over and over - // again, once per process. - // - // With this simple trick, we make sure - // that we don't have to guard each and - // every write to std::cout by a - // prefixed if(this_mpi_process==0). - ConditionalOStream pcout; - - // The next few variables are taken - // verbatim from step-8: - Triangulation triangulation; - DoFHandler dof_handler; - - FESystem fe; - - ConstraintMatrix hanging_node_constraints; - - // In step-8, this would have been the - // place where we would have declared the - // member variables for the sparsity - // pattern, the system matrix, right - // hand, and solution vector. We change - // these declarations to use %parallel - // PETSc objects instead (note that the - // fact that we use the %parallel versions - // is denoted the fact that we use the - // classes from the - // PETScWrappers::MPI namespace; - // sequential versions of these classes - // are in the PETScWrappers - // namespace, i.e. without the MPI - // part). Note also that we do not use a - // separate sparsity pattern, since PETSc - // manages that as part of its matrix - // data structures. - PETScWrappers::MPI::SparseMatrix system_matrix; - - PETScWrappers::MPI::Vector solution; - PETScWrappers::MPI::Vector system_rhs; - - // The next change is that we have to - // declare a variable that indicates the - // MPI communicator over which we are - // supposed to distribute our - // computations. Note that if this is a - // sequential job without support by MPI, - // then PETSc provides some dummy type - // for MPI_Comm, so we do not have to - // care here whether the job is really a - // %parallel one: - MPI_Comm mpi_communicator; - - // Then we have two variables that tell - // us where in the %parallel world we - // are. The first of the following - // variables, n_mpi_processes tells - // us how many MPI processes there exist - // in total, while the second one, - // this_mpi_process, indicates which - // is the number of the present process - // within this space of processes. The - // latter variable will have a unique - // value for each process between zero - // and (less than) - // n_mpi_processes. If this program - // is run on a single machine without MPI - // support, then their values are 1 - // and 0, respectively. 
- const unsigned int n_mpi_processes; - const unsigned int this_mpi_process; -}; - - - // The following is again taken from step-8 - // without change: -template -class RightHandSide : public Function -{ - public: - RightHandSide (); - - virtual void vector_value (const Point &p, - Vector &values) const; - - virtual void vector_value_list (const std::vector > &points, - std::vector > &value_list) const; -}; - - -template -RightHandSide::RightHandSide () : - Function (dim) -{} - - -template -inline -void RightHandSide::vector_value (const Point &p, - Vector &values) const -{ - Assert (values.size() == dim, - ExcDimensionMismatch (values.size(), dim)); - Assert (dim >= 2, ExcInternalError()); - - Point point_1, point_2; - point_1(0) = 0.5; - point_2(0) = -0.5; - - if (((p-point_1).square() < 0.2*0.2) || - ((p-point_2).square() < 0.2*0.2)) - values(0) = 1; - else - values(0) = 0; - - if (p.square() < 0.2*0.2) - values(1) = 1; - else - values(1) = 0; -} - - - -template -void RightHandSide::vector_value_list (const std::vector > &points, - std::vector > &value_list) const -{ - const unsigned int n_points = points.size(); - - Assert (value_list.size() == n_points, - ExcDimensionMismatch (value_list.size(), n_points)); - - for (unsigned int p=0; p::vector_value (points[p], - value_list[p]); -} - - - // The first step in the actual - // implementation of things is the - // constructor of the main class. Apart from - // initializing the same member variables - // that we already had in step-8, we here - // initialize the MPI communicator variable - // we shall use with the global MPI - // communicator linking all processes - // together (in more complex applications, - // one could here use a communicator object - // that only links a subset of all - // processes), and call the Utilities helper - // functions to determine the number of - // processes and where the present one fits - // into this picture. In addition, we make - // sure that output is only generated by the - // (globally) first process. As, - // this_mpi_process is determined after - // creation of pcout, we cannot set the - // condition through the constructor, i.e. by - // pcout(std::cout, this_mpi_process==0), but - // set the condition separately. -template -ElasticProblem::ElasticProblem () - : - pcout (std::cout), - dof_handler (triangulation), - fe (FE_Q(1), dim), - mpi_communicator (MPI_COMM_WORLD), - n_mpi_processes (Utilities::System::get_n_mpi_processes(mpi_communicator)), - this_mpi_process (Utilities::System::get_this_mpi_process(mpi_communicator)) -{ - pcout.set_condition(this_mpi_process == 0); -} - - - -template -ElasticProblem::~ElasticProblem () -{ - dof_handler.clear (); -} - - - // The second step is the function in which - // we set up the various variables for the - // global linear system to be solved. -template -void ElasticProblem::setup_system () -{ - // Before we even start out setting up the - // system, there is one thing to do for a - // %parallel program: we need to assign - // cells to each of the processes. We do - // this by splitting (partitioning) the - // mesh cells into as many chunks - // (subdomains) as there are processes - // in this MPI job (if this is a sequential - // job, then there is only one job and all - // cells will get a zero as subdomain - // indicator). This is done using an - // interface to the METIS library that does - // this in a very efficient way, trying to - // minimize the number of nodes on the - // interfaces between subdomains. 
All this - // is hidden behind the following call to a - // deal.II library function: - GridTools::partition_triangulation (n_mpi_processes, triangulation); - - // As for the linear system: First, we need - // to generate an enumeration for the - // degrees of freedom in our - // problem. Further below, we will show how - // we assign each cell to one of the MPI - // processes before we even get here. What - // we then need to do is to enumerate the - // degrees of freedom in a way so that all - // degrees of freedom associated with cells - // in subdomain zero (which resides on - // process zero) come before all DoFs - // associated with cells on subdomain one, - // before those on cells on process two, - // and so on. We need this since we have to - // split the global vectors for right hand - // side and solution, as well as the matrix - // into contiguous chunks of rows that live - // on each of the processors, and we will - // want to do this in a way that requires - // minimal communication. This is done - // using the following two functions, which - // first generates an initial ordering of - // all degrees of freedom, and then re-sort - // them according to above criterion: - dof_handler.distribute_dofs (fe); - DoFRenumbering::subdomain_wise (dof_handler); - - // While we're at it, let us also count how - // many degrees of freedom there exist on - // the present process: - const unsigned int n_local_dofs - = DoFTools::count_dofs_with_subdomain_association (dof_handler, - this_mpi_process); - - // Then we initialize the system matrix, - // solution, and right hand side - // vectors. Since they all need to work in - // %parallel, we have to pass them an MPI - // communication object, as well as their - // global sizes (both dimensions are equal - // to the number of degrees of freedom), - // and also how many rows out of this - // global size are to be stored locally - // (n_local_dofs). In addition, PETSc - // needs to know how to partition the - // columns in the chunk of the matrix that - // is stored locally; for square matrices, - // the columns should be partitioned in the - // same way as the rows (indicated by the - // second n_local_dofs in the call) but - // in the case of rectangular matrices one - // has to partition the columns in the same - // way as vectors are partitioned with - // which the matrix is multiplied, while - // rows have to partitioned in the same way - // as destination vectors of matrix-vector - // multiplications: - system_matrix.reinit (mpi_communicator, - dof_handler.n_dofs(), - dof_handler.n_dofs(), - n_local_dofs, - n_local_dofs, - dof_handler.max_couplings_between_dofs()); - - solution.reinit (mpi_communicator, dof_handler.n_dofs(), n_local_dofs); - system_rhs.reinit (mpi_communicator, dof_handler.n_dofs(), n_local_dofs); - - // Finally, we need to initialize the - // objects denoting hanging node - // constraints for the present grid. Note - // that since PETSc handles the sparsity - // pattern internally to the matrix, there - // is no need to set up an independent - // sparsity pattern here, and to condense - // it for constraints, as we have done in - // all other example programs. - hanging_node_constraints.clear (); - DoFTools::make_hanging_node_constraints (dof_handler, - hanging_node_constraints); - hanging_node_constraints.close (); -} - + using namespace dealii; + + // Now, here comes the declaration of the + // main class and of various other things + // below it. 
As mentioned in the + // introduction, almost all of this has been + // copied verbatim from step-8, so we only + // comment on the few things that are + // different. There is one (cosmetic) change + // in that we let solve return a value, + // namely the number of iterations it took to + // converge, so that we can output this to + // the screen at the appropriate place. In + // addition, we introduce a stream-like + // variable pcout, explained below: + template + class ElasticProblem + { + public: + ElasticProblem (); + ~ElasticProblem (); + void run (); + + private: + void setup_system (); + void assemble_system (); + unsigned int solve (); + void refine_grid (); + void output_results (const unsigned int cycle) const; + + // The first variable is basically only + // for convenience: in %parallel program, + // if each process outputs status + // information, then there quickly is a + // lot of clutter. Rather, we would want + // to only have one process output + // everything once, for example the one + // with process number + // zero. ConditionalOStream does + // exactly this: it acts as if it were a + // stream, but only forwards to a real, + // underlying stream if a flag is set. By + // setting this condition to + // this_mpi_process==0, we make sure + // that output is only generated from the + // first process and that we don't get + // the same lines of output over and over + // again, once per process. + // + // With this simple trick, we make sure + // that we don't have to guard each and + // every write to std::cout by a + // prefixed if(this_mpi_process==0). + ConditionalOStream pcout; + + // The next few variables are taken + // verbatim from step-8: + Triangulation triangulation; + DoFHandler dof_handler; + + FESystem fe; + + ConstraintMatrix hanging_node_constraints; + + // In step-8, this would have been the + // place where we would have declared the + // member variables for the sparsity + // pattern, the system matrix, right + // hand, and solution vector. We change + // these declarations to use %parallel + // PETSc objects instead (note that the + // fact that we use the %parallel versions + // is denoted the fact that we use the + // classes from the + // PETScWrappers::MPI namespace; + // sequential versions of these classes + // are in the PETScWrappers + // namespace, i.e. without the MPI + // part). Note also that we do not use a + // separate sparsity pattern, since PETSc + // manages that as part of its matrix + // data structures. + PETScWrappers::MPI::SparseMatrix system_matrix; + + PETScWrappers::MPI::Vector solution; + PETScWrappers::MPI::Vector system_rhs; + + // The next change is that we have to + // declare a variable that indicates the + // MPI communicator over which we are + // supposed to distribute our + // computations. Note that if this is a + // sequential job without support by MPI, + // then PETSc provides some dummy type + // for MPI_Comm, so we do not have to + // care here whether the job is really a + // %parallel one: + MPI_Comm mpi_communicator; + + // Then we have two variables that tell + // us where in the %parallel world we + // are. The first of the following + // variables, n_mpi_processes tells + // us how many MPI processes there exist + // in total, while the second one, + // this_mpi_process, indicates which + // is the number of the present process + // within this space of processes. The + // latter variable will have a unique + // value for each process between zero + // and (less than) + // n_mpi_processes. 
If this program + // is run on a single machine without MPI + // support, then their values are 1 + // and 0, respectively. + const unsigned int n_mpi_processes; + const unsigned int this_mpi_process; + }; + + + // The following is again taken from step-8 + // without change: + template + class RightHandSide : public Function + { + public: + RightHandSide (); + + virtual void vector_value (const Point &p, + Vector &values) const; + + virtual void vector_value_list (const std::vector > &points, + std::vector > &value_list) const; + }; + + + template + RightHandSide::RightHandSide () : + Function (dim) + {} + + + template + inline + void RightHandSide::vector_value (const Point &p, + Vector &values) const + { + Assert (values.size() == dim, + ExcDimensionMismatch (values.size(), dim)); + Assert (dim >= 2, ExcInternalError()); + + Point point_1, point_2; + point_1(0) = 0.5; + point_2(0) = -0.5; + + if (((p-point_1).square() < 0.2*0.2) || + ((p-point_2).square() < 0.2*0.2)) + values(0) = 1; + else + values(0) = 0; + + if (p.square() < 0.2*0.2) + values(1) = 1; + else + values(1) = 0; + } + + + + template + void RightHandSide::vector_value_list (const std::vector > &points, + std::vector > &value_list) const + { + const unsigned int n_points = points.size(); + + Assert (value_list.size() == n_points, + ExcDimensionMismatch (value_list.size(), n_points)); + + for (unsigned int p=0; p::vector_value (points[p], + value_list[p]); + } + + + // The first step in the actual + // implementation of things is the + // constructor of the main class. Apart from + // initializing the same member variables + // that we already had in step-8, we here + // initialize the MPI communicator variable + // we shall use with the global MPI + // communicator linking all processes + // together (in more complex applications, + // one could here use a communicator object + // that only links a subset of all + // processes), and call the Utilities helper + // functions to determine the number of + // processes and where the present one fits + // into this picture. In addition, we make + // sure that output is only generated by the + // (globally) first process. As, + // this_mpi_process is determined after + // creation of pcout, we cannot set the + // condition through the constructor, i.e. by + // pcout(std::cout, this_mpi_process==0), but + // set the condition separately. + template + ElasticProblem::ElasticProblem () + : + pcout (std::cout), + dof_handler (triangulation), + fe (FE_Q(1), dim), + mpi_communicator (MPI_COMM_WORLD), + n_mpi_processes (Utilities::System::get_n_mpi_processes(mpi_communicator)), + this_mpi_process (Utilities::System::get_this_mpi_process(mpi_communicator)) + { + pcout.set_condition(this_mpi_process == 0); + } + + + + template + ElasticProblem::~ElasticProblem () + { + dof_handler.clear (); + } + + + // The second step is the function in which + // we set up the various variables for the + // global linear system to be solved. + template + void ElasticProblem::setup_system () + { + // Before we even start out setting up the + // system, there is one thing to do for a + // %parallel program: we need to assign + // cells to each of the processes. We do + // this by splitting (partitioning) the + // mesh cells into as many chunks + // (subdomains) as there are processes + // in this MPI job (if this is a sequential + // job, then there is only one job and all + // cells will get a zero as subdomain + // indicator). 
This is done using an + // interface to the METIS library that does + // this in a very efficient way, trying to + // minimize the number of nodes on the + // interfaces between subdomains. All this + // is hidden behind the following call to a + // deal.II library function: + GridTools::partition_triangulation (n_mpi_processes, triangulation); + + // As for the linear system: First, we need + // to generate an enumeration for the + // degrees of freedom in our + // problem. Further below, we will show how + // we assign each cell to one of the MPI + // processes before we even get here. What + // we then need to do is to enumerate the + // degrees of freedom in a way so that all + // degrees of freedom associated with cells + // in subdomain zero (which resides on + // process zero) come before all DoFs + // associated with cells on subdomain one, + // before those on cells on process two, + // and so on. We need this since we have to + // split the global vectors for right hand + // side and solution, as well as the matrix + // into contiguous chunks of rows that live + // on each of the processors, and we will + // want to do this in a way that requires + // minimal communication. This is done + // using the following two functions, which + // first generates an initial ordering of + // all degrees of freedom, and then re-sort + // them according to above criterion: + dof_handler.distribute_dofs (fe); + DoFRenumbering::subdomain_wise (dof_handler); + + // While we're at it, let us also count how + // many degrees of freedom there exist on + // the present process: + const unsigned int n_local_dofs + = DoFTools::count_dofs_with_subdomain_association (dof_handler, + this_mpi_process); + + // Then we initialize the system matrix, + // solution, and right hand side + // vectors. Since they all need to work in + // %parallel, we have to pass them an MPI + // communication object, as well as their + // global sizes (both dimensions are equal + // to the number of degrees of freedom), + // and also how many rows out of this + // global size are to be stored locally + // (n_local_dofs). In addition, PETSc + // needs to know how to partition the + // columns in the chunk of the matrix that + // is stored locally; for square matrices, + // the columns should be partitioned in the + // same way as the rows (indicated by the + // second n_local_dofs in the call) but + // in the case of rectangular matrices one + // has to partition the columns in the same + // way as vectors are partitioned with + // which the matrix is multiplied, while + // rows have to partitioned in the same way + // as destination vectors of matrix-vector + // multiplications: + system_matrix.reinit (mpi_communicator, + dof_handler.n_dofs(), + dof_handler.n_dofs(), + n_local_dofs, + n_local_dofs, + dof_handler.max_couplings_between_dofs()); + + solution.reinit (mpi_communicator, dof_handler.n_dofs(), n_local_dofs); + system_rhs.reinit (mpi_communicator, dof_handler.n_dofs(), n_local_dofs); + + // Finally, we need to initialize the + // objects denoting hanging node + // constraints for the present grid. Note + // that since PETSc handles the sparsity + // pattern internally to the matrix, there + // is no need to set up an independent + // sparsity pattern here, and to condense + // it for constraints, as we have done in + // all other example programs. 
+ hanging_node_constraints.clear (); + DoFTools::make_hanging_node_constraints (dof_handler, + hanging_node_constraints); + hanging_node_constraints.close (); + } + + + // The third step is to actually assemble the + // matrix and right hand side of the + // problem. There are some things worth + // mentioning before we go into + // detail. First, we will be assembling the + // system in %parallel, i.e. each process will + // be responsible for assembling on cells + // that belong to this particular + // processor. Note that the degrees of + // freedom are split in a way such that all + // DoFs in the interior of cells and between + // cells belonging to the same subdomain + // belong to the process that owns the + // cell. However, even then we sometimes need + // to assemble on a cell with a neighbor that + // belongs to a different process, and in + // these cases when we write the local + // contributions into the global matrix or + // right hand side vector, we actually have + // to transfer these entries to the other + // process. Fortunately, we don't have to do + // this by hand, PETSc does all this for us + // by caching these elements locally, and + // sending them to the other processes as + // necessary when we call the compress() + // functions on the matrix and vector at the + // end of this function. + // + // The second point is that once we + // have handed over matrix and vector + // contributions to PETSc, it is a) + // hard, and b) very inefficient to + // get them back for + // modifications. This is not only + // the fault of PETSc, it is also a + // consequence of the distributed + // nature of this program: if an + // entry resides on another + // processor, then it is necessarily + // expensive to get it. The + // consequence of this is that where + // we previously first assembled the + // matrix and right hand side as if + // there were no hanging node + // constraints and boundary values, + // and then eliminated these in a + // second step, we should now try to + // do that while still assembling the + // local systems, and before handing + // these entries over to PETSc. At + // least as far as eliminating + // hanging nodes is concerned, this + // is actually possible, though + // removing boundary nodes isn't that + // simple. deal.II provides functions + // to do this first part: instead of + // copying elements by hand into the + // global matrix, we use the + // distribute_local_to_global + // functions below to take care of + // hanging nodes at the same + // time. The second step, elimination + // of boundary nodes, is then done in + // exactly the same way as in all + // previous example programs. + // + // So, here is the actual implementation: + template + void ElasticProblem::assemble_system () + { + // The infrastructure to assemble linear + // systems is the same as in all the other + // programs, and in particular unchanged + // from step-8. Note that we still use the + // deal.II full matrix and vector types for + // the local systems. 
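The scratch objects declared at the top of assemble_system(), shown next, also lost their template arguments in this rendering; they presumably read as follows (a sketch using the standard step-8/step-17 types):

   QGauss<dim>   quadrature_formula (2);
   FEValues<dim> fe_values (fe, quadrature_formula,
                            update_values | update_gradients |
                            update_quadrature_points | update_JxW_values);

   const unsigned int dofs_per_cell = fe.dofs_per_cell;
   const unsigned int n_q_points    = quadrature_formula.size();

   FullMatrix<double> cell_matrix (dofs_per_cell, dofs_per_cell);
   Vector<double>     cell_rhs (dofs_per_cell);

   std::vector<unsigned int> local_dof_indices (dofs_per_cell);

   std::vector<double> lambda_values (n_q_points);
   std::vector<double> mu_values (n_q_points);

   ConstantFunction<dim> lambda (1.), mu (1.);

   RightHandSide<dim>           right_hand_side;
   std::vector<Vector<double> > rhs_values (n_q_points,
                                            Vector<double>(dim));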
+ QGauss quadrature_formula(2); + FEValues fe_values (fe, quadrature_formula, + update_values | update_gradients | + update_quadrature_points | update_JxW_values); + + const unsigned int dofs_per_cell = fe.dofs_per_cell; + const unsigned int n_q_points = quadrature_formula.size(); + + FullMatrix cell_matrix (dofs_per_cell, dofs_per_cell); + Vector cell_rhs (dofs_per_cell); + + std::vector local_dof_indices (dofs_per_cell); + + std::vector lambda_values (n_q_points); + std::vector mu_values (n_q_points); + + ConstantFunction lambda(1.), mu(1.); + + RightHandSide right_hand_side; + std::vector > rhs_values (n_q_points, + Vector(dim)); + + + // The next thing is the loop over all + // elements. Note that we do not have to do + // all the work: our job here is only to + // assemble the system on cells that + // actually belong to this MPI process, all + // other cells will be taken care of by + // other processes. This is what the + // if-clause immediately after the for-loop + // takes care of: it queries the subdomain + // identifier of each cell, which is a + // number associated with each cell that + // tells which process handles it. In more + // generality, the subdomain id is used to + // split a domain into several parts (we do + // this above, at the beginning of + // setup_system), and which allows to + // identify which subdomain a cell is + // living on. In this application, we have + // each process handle exactly one + // subdomain, so we identify the terms + // subdomain and MPI process with + // each other. + // + // Apart from this, assembling the local + // system is relatively uneventful if you + // have understood how this is done in + // step-8, and only becomes interesting + // again once we start distributing it into + // the global matrix and right hand sides. + typename DoFHandler::active_cell_iterator + cell = dof_handler.begin_active(), + endc = dof_handler.end(); + for (; cell!=endc; ++cell) + if (cell->subdomain_id() == this_mpi_process) + { + cell_matrix = 0; + cell_rhs = 0; + + fe_values.reinit (cell); + + lambda.value_list (fe_values.get_quadrature_points(), lambda_values); + mu.value_list (fe_values.get_quadrature_points(), mu_values); + + for (unsigned int i=0; iget_dof_indices (local_dof_indices); + hanging_node_constraints + .distribute_local_to_global (cell_matrix, + local_dof_indices, + system_matrix); + + hanging_node_constraints + .distribute_local_to_global (cell_rhs, + local_dof_indices, + system_rhs); + } - // The third step is to actually assemble the - // matrix and right hand side of the - // problem. There are some things worth - // mentioning before we go into - // detail. First, we will be assembling the - // system in %parallel, i.e. each process will - // be responsible for assembling on cells - // that belong to this particular - // processor. Note that the degrees of - // freedom are split in a way such that all - // DoFs in the interior of cells and between - // cells belonging to the same subdomain - // belong to the process that owns the - // cell. However, even then we sometimes need - // to assemble on a cell with a neighbor that - // belongs to a different process, and in - // these cases when we write the local - // contributions into the global matrix or - // right hand side vector, we actually have - // to transfer these entries to the other - // process. 
Fortunately, we don't have to do - // this by hand, PETSc does all this for us - // by caching these elements locally, and - // sending them to the other processes as - // necessary when we call the compress() - // functions on the matrix and vector at the - // end of this function. - // - // The second point is that once we - // have handed over matrix and vector - // contributions to PETSc, it is a) - // hard, and b) very inefficient to - // get them back for - // modifications. This is not only - // the fault of PETSc, it is also a - // consequence of the distributed - // nature of this program: if an - // entry resides on another - // processor, then it is necessarily - // expensive to get it. The - // consequence of this is that where - // we previously first assembled the - // matrix and right hand side as if - // there were no hanging node - // constraints and boundary values, - // and then eliminated these in a - // second step, we should now try to - // do that while still assembling the - // local systems, and before handing - // these entries over to PETSc. At - // least as far as eliminating - // hanging nodes is concerned, this - // is actually possible, though - // removing boundary nodes isn't that - // simple. deal.II provides functions - // to do this first part: instead of - // copying elements by hand into the - // global matrix, we use the - // distribute_local_to_global - // functions below to take care of - // hanging nodes at the same - // time. The second step, elimination - // of boundary nodes, is then done in - // exactly the same way as in all - // previous example programs. - // - // So, here is the actual implementation: -template -void ElasticProblem::assemble_system () -{ - // The infrastructure to assemble linear - // systems is the same as in all the other - // programs, and in particular unchanged - // from step-8. Note that we still use the - // deal.II full matrix and vector types for - // the local systems. - QGauss quadrature_formula(2); - FEValues fe_values (fe, quadrature_formula, - update_values | update_gradients | - update_quadrature_points | update_JxW_values); - - const unsigned int dofs_per_cell = fe.dofs_per_cell; - const unsigned int n_q_points = quadrature_formula.size(); - - FullMatrix cell_matrix (dofs_per_cell, dofs_per_cell); - Vector cell_rhs (dofs_per_cell); - - std::vector local_dof_indices (dofs_per_cell); - - std::vector lambda_values (n_q_points); - std::vector mu_values (n_q_points); - - ConstantFunction lambda(1.), mu(1.); - - RightHandSide right_hand_side; - std::vector > rhs_values (n_q_points, - Vector(dim)); - - - // The next thing is the loop over all - // elements. Note that we do not have to do - // all the work: our job here is only to - // assemble the system on cells that - // actually belong to this MPI process, all - // other cells will be taken care of by - // other processes. This is what the - // if-clause immediately after the for-loop - // takes care of: it queries the subdomain - // identifier of each cell, which is a - // number associated with each cell that - // tells which process handles it. In more - // generality, the subdomain id is used to - // split a domain into several parts (we do - // this above, at the beginning of - // setup_system), and which allows to - // identify which subdomain a cell is - // living on. In this application, we have - // each process handle exactly one - // subdomain, so we identify the terms - // subdomain and MPI process with - // each other. 
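Stripped of the assembly details, the ownership test described in this paragraph is nothing more than the following filter around the usual cell loop (a sketch; the complete loop body appears in the new code earlier in this hunk):

   typename DoFHandler<dim>::active_cell_iterator
     cell = dof_handler.begin_active(),
     endc = dof_handler.end();
   for (; cell!=endc; ++cell)
     if (cell->subdomain_id() == this_mpi_process)
       {
         // assemble cell_matrix and cell_rhs only on the cells this
         // process owns; other processes take care of the rest
       }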
- // - // Apart from this, assembling the local - // system is relatively uneventful if you - // have understood how this is done in - // step-8, and only becomes interesting - // again once we start distributing it into - // the global matrix and right hand sides. - typename DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(), - endc = dof_handler.end(); - for (; cell!=endc; ++cell) - if (cell->subdomain_id() == this_mpi_process) + // The global matrix and right hand side + // vectors have now been formed. Note that + // since we took care of this already + // above, we do not have to condense away + // hanging node constraints any more. + // + // However, we still have to apply boundary + // values, in the same way as we always do: + std::map boundary_values; + VectorTools::interpolate_boundary_values (dof_handler, + 0, + ZeroFunction(dim), + boundary_values); + MatrixTools::apply_boundary_values (boundary_values, + system_matrix, solution, + system_rhs, false); + // The last argument to the call just + // performed allows for some + // optimizations. It controls + // whether we should also delete the + // column corresponding to a boundary + // node, or keep it (and passing + // true as above means: yes, do + // eliminate the column). If we do, + // then the resulting matrix will be + // symmetric again if it was before; + // if we don't, then it won't. The + // solution of the resulting system + // should be the same, though. The + // only reason why we may want to + // make the system symmetric again is + // that we would like to use the CG + // method, which only works with + // symmetric matrices. Experience + // tells that CG also works (and + // works almost as well) if we don't + // remove the columns associated with + // boundary nodes, which can be + // easily explained by the special + // structure of the + // non-symmetry. Since eliminating + // columns from dense matrices is not + // expensive, though, we let the + // function do it; not doing so is + // more important if the linear + // system is either non-symmetric + // anyway, or we are using the + // non-local version of this function + // (as in all the other example + // programs before) and want to save + // a few cycles during this + // operation. + } + + + + // The fourth step is to solve the linear + // system, with its distributed matrix and + // vector objects. Fortunately, PETSc offers + // a variety of sequential and %parallel + // solvers, for which we have written + // wrappers that have almost the same + // interface as is used for the deal.II + // solvers used in all previous example + // programs. + template + unsigned int ElasticProblem::solve () + { + // First, we have to set up a convergence + // monitor, and assign it the accuracy to + // which we would like to solve the linear + // system. Next, an actual solver object + // using PETSc's CG solver which also works + // with %parallel (distributed) vectors and + // matrices. And finally a preconditioner; + // we choose to use a block Jacobi + // preconditioner which works by computing + // an incomplete LU decomposition on each + // block (i.e. the chunk of matrix that is + // stored on each MPI process). That means + // that if you run the program with only + // one process, then you will use an ILU(0) + // as a preconditioner, while if it is run + // on many processes, then we will have a + // number of blocks on the diagonal and the + // preconditioner is the ILU(0) of each of + // these blocks. 
+ SolverControl solver_control (solution.size(), + 1e-8*system_rhs.l2_norm()); + PETScWrappers::SolverCG cg (solver_control, + mpi_communicator); + + PETScWrappers::PreconditionBlockJacobi preconditioner(system_matrix); + + // Then solve the system: + cg.solve (system_matrix, solution, system_rhs, + preconditioner); + + // The next step is to distribute hanging + // node constraints. This is a little + // tricky, since to fill in the value of a + // constrained node you need access to the + // values of the nodes to which it is + // constrained (for example, for a Q1 + // element in 2d, we need access to the two + // nodes on the big side of a hanging node + // face, to compute the value of the + // constrained node in the middle). Since + // PETSc (and, for that matter, the MPI + // model on which it is built) does not + // allow to query the value of another node + // in a simple way if we should need it, + // what we do here is to get a copy of the + // distributed vector where we keep all + // elements locally. This is simple, since + // the deal.II wrappers have a conversion + // constructor for the non-MPI vector + // class: + PETScWrappers::Vector localized_solution (solution); + + // Then we distribute hanging node + // constraints on this local copy, i.e. we + // compute the values of all constrained + // nodes: + hanging_node_constraints.distribute (localized_solution); + + // Then transfer everything back + // into the global vector. The + // following operation copies those + // elements of the localized + // solution that we store locally + // in the distributed solution, and + // does not touch the other + // ones. Since we do the same + // operation on all processors, we + // end up with a distributed vector + // that has all the constrained + // nodes fixed. + solution = localized_solution; + + // After this has happened, flush the PETSc + // buffers. This may or may not be strictly + // necessary here (the PETSc documentation + // is not very verbose on these things), + // but certainly doesn't hurt either. + solution.compress (); + + // Finally return the number of iterations + // it took to converge, to allow for some + // output: + return solver_control.last_step(); + } + + + + // Step five is to output the results we + // computed in this iteration. This is + // actually the same as done in step-8 + // before, with two small differences. First, + // all processes call this function, but not + // all of them need to do the work associated + // with generating output. In fact, they + // shouldn't, since we would try to write to + // the same file multiple times at once. So + // we let only the first job do this, and all + // the other ones idle around during this + // time (or start their work for the next + // iteration, or simply yield their CPUs to + // other jobs that happen to run at the same + // time). The second thing is that we not + // only output the solution vector, but also + // a vector that indicates which subdomain + // each cell belongs to. This will make for + // some nice pictures of partitioned domains. + // + // In practice, the present implementation of + // the output function is a major bottleneck + // of this program, since generating + // graphical output is expensive and doing so + // only on one process does, of course, not + // scale if we significantly increase the + // number of processes. 
In effect, this + // function will consume most of the run-time + // if you go to very large numbers of + // unknowns and processes, and real + // applications should limit the number of + // times they generate output through this + // function. + // + // The solution to this is to have + // each process generate output data + // only for it's own local cells, and + // write them to separate files, one + // file per process. This would + // distribute the work of generating + // the output to all processes + // equally. In a second step, + // separate from running this + // program, we would then take all + // the output files for a given cycle + // and merge these parts into one + // single output file. This has to be + // done sequentially, but can be done + // on a different machine, and should + // be relatively cheap. However, the + // necessary functionality for this + // is not yet implemented in the + // library, and since we are too + // close to the next release, we do + // not want to do such major + // destabilizing changes any + // more. This has been fixed in the + // meantime, though, and a better way + // to do things is explained in the + // step-18 example program. + template + void ElasticProblem::output_results (const unsigned int cycle) const + { + // One point to realize is that when we + // want to generate output on process zero + // only, we need to have access to all + // elements of the solution vector. So we + // need to get a local copy of the + // distributed vector, which is in fact + // simple: + const PETScWrappers::Vector localized_solution (solution); + // The thing to notice, however, is that + // we do this localization operation on all + // processes, not only the one that + // actually needs the data. This can't be + // avoided, however, with the communication + // model of MPI: MPI does not have a way to + // query data on another process, both + // sides have to initiate a communication + // at the same time. So even though most of + // the processes do not need the localized + // solution, we have to place the call here + // so that all processes execute it. + // + // (In reality, part of this work can in + // fact be avoided. What we do is send the + // local parts of all processes to all + // other processes. What we would really + // need to do is to initiate an operation + // on all processes where each process + // simply sends its local chunk of data to + // process zero, since this is the only one + // that actually needs it, i.e. we need + // something like a gather operation. PETSc + // can do this, but for simplicity's sake + // we don't attempt to make use of this + // here. We don't, since what we do is not + // very expensive in the grand scheme of + // things: it is one vector communication + // among all processes , which has to be + // compared to the number of communications + // we have to do when solving the linear + // system, setting up the block-ILU for the + // preconditioner, and other operations.) + + // This being done, process zero goes ahead + // with setting up the output file as in + // step-8, and attaching the (localized) + // solution vector to the output + // object:. (The code to generate the output + // file name is stolen and slightly + // modified from step-5, since we expect + // that we can do a number of cycles + // greater than 10, which is the maximum of + // what the code in step-8 could handle.) 
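The scalable alternative sketched in the previous paragraph is not part of this program (step-18 shows it properly), but the essential idea is merely to encode the process number in the file name so that every process can write its own, disjoint piece of data; restricting the DataOut object to locally owned cells then requires the additional machinery discussed in step-18. A purely hypothetical sketch of the naming scheme:

   // hypothetical per-process output file, e.g. "solution-3.17.gmv"
   // for cycle 3 on process 17
   std::ostringstream filename;
   filename << "solution-" << cycle
            << "." << this_mpi_process
            << ".gmv";
   std::ofstream output (filename.str().c_str());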
+ if (this_mpi_process == 0) { - cell_matrix = 0; - cell_rhs = 0; - - fe_values.reinit (cell); - - lambda.value_list (fe_values.get_quadrature_points(), lambda_values); - mu.value_list (fe_values.get_quadrature_points(), mu_values); - - for (unsigned int i=0; iget_dof_indices (local_dof_indices); - hanging_node_constraints - .distribute_local_to_global (cell_matrix, - local_dof_indices, - system_matrix); - - hanging_node_constraints - .distribute_local_to_global (cell_rhs, - local_dof_indices, - system_rhs); + std::ostringstream filename; + filename << "solution-" << cycle << ".gmv"; + + std::ofstream output (filename.str().c_str()); + + DataOut data_out; + data_out.attach_dof_handler (dof_handler); + + std::vector solution_names; + switch (dim) + { + case 1: + solution_names.push_back ("displacement"); + break; + case 2: + solution_names.push_back ("x_displacement"); + solution_names.push_back ("y_displacement"); + break; + case 3: + solution_names.push_back ("x_displacement"); + solution_names.push_back ("y_displacement"); + solution_names.push_back ("z_displacement"); + break; + default: + Assert (false, ExcInternalError()); + } + + data_out.add_data_vector (localized_solution, solution_names); + + // The only thing we do here + // additionally is that we also output + // one value per cell indicating which + // subdomain (i.e. MPI process) it + // belongs to. This requires some + // conversion work, since the data the + // library provides us with is not the + // one the output class expects, but + // this is not difficult. First, set up + // a vector of integers, one per cell, + // that is then filled by the number of + // subdomain each cell is in: + std::vector partition_int (triangulation.n_active_cells()); + GridTools::get_subdomain_association (triangulation, partition_int); + + // Then convert this integer vector + // into a floating point vector just as + // the output functions want to see: + const Vector partitioning(partition_int.begin(), + partition_int.end()); + + // And finally add this vector as well: + data_out.add_data_vector (partitioning, "partitioning"); + + // This all being done, generate the + // intermediate format and write it out + // in GMV output format: + data_out.build_patches (); + data_out.write_gmv (output); } - - // The global matrix and right hand side - // vectors have now been formed. Note that - // since we took care of this already - // above, we do not have to condense away - // hanging node constraints any more. - // - // However, we still have to apply boundary - // values, in the same way as we always do: - std::map boundary_values; - VectorTools::interpolate_boundary_values (dof_handler, - 0, - ZeroFunction(dim), - boundary_values); - MatrixTools::apply_boundary_values (boundary_values, - system_matrix, solution, - system_rhs, false); - // The last argument to the call just - // performed allows for some - // optimizations. It controls - // whether we should also delete the - // column corresponding to a boundary - // node, or keep it (and passing - // true as above means: yes, do - // eliminate the column). If we do, - // then the resulting matrix will be - // symmetric again if it was before; - // if we don't, then it won't. The - // solution of the resulting system - // should be the same, though. The - // only reason why we may want to - // make the system symmetric again is - // that we would like to use the CG - // method, which only works with - // symmetric matrices. 
Experience - // tells that CG also works (and - // works almost as well) if we don't - // remove the columns associated with - // boundary nodes, which can be - // easily explained by the special - // structure of the - // non-symmetry. Since eliminating - // columns from dense matrices is not - // expensive, though, we let the - // function do it; not doing so is - // more important if the linear - // system is either non-symmetric - // anyway, or we are using the - // non-local version of this function - // (as in all the other example - // programs before) and want to save - // a few cycles during this - // operation. -} - - - - // The fourth step is to solve the linear - // system, with its distributed matrix and - // vector objects. Fortunately, PETSc offers - // a variety of sequential and %parallel - // solvers, for which we have written - // wrappers that have almost the same - // interface as is used for the deal.II - // solvers used in all previous example - // programs. -template -unsigned int ElasticProblem::solve () -{ - // First, we have to set up a convergence - // monitor, and assign it the accuracy to - // which we would like to solve the linear - // system. Next, an actual solver object - // using PETSc's CG solver which also works - // with %parallel (distributed) vectors and - // matrices. And finally a preconditioner; - // we choose to use a block Jacobi - // preconditioner which works by computing - // an incomplete LU decomposition on each - // block (i.e. the chunk of matrix that is - // stored on each MPI process). That means - // that if you run the program with only - // one process, then you will use an ILU(0) - // as a preconditioner, while if it is run - // on many processes, then we will have a - // number of blocks on the diagonal and the - // preconditioner is the ILU(0) of each of - // these blocks. - SolverControl solver_control (solution.size(), - 1e-8*system_rhs.l2_norm()); - PETScWrappers::SolverCG cg (solver_control, - mpi_communicator); - - PETScWrappers::PreconditionBlockJacobi preconditioner(system_matrix); - - // Then solve the system: - cg.solve (system_matrix, solution, system_rhs, - preconditioner); - - // The next step is to distribute hanging - // node constraints. This is a little - // tricky, since to fill in the value of a - // constrained node you need access to the - // values of the nodes to which it is - // constrained (for example, for a Q1 - // element in 2d, we need access to the two - // nodes on the big side of a hanging node - // face, to compute the value of the - // constrained node in the middle). Since - // PETSc (and, for that matter, the MPI - // model on which it is built) does not - // allow to query the value of another node - // in a simple way if we should need it, - // what we do here is to get a copy of the - // distributed vector where we keep all - // elements locally. This is simple, since - // the deal.II wrappers have a conversion - // constructor for the non-MPI vector - // class: - PETScWrappers::Vector localized_solution (solution); - - // Then we distribute hanging node - // constraints on this local copy, i.e. we - // compute the values of all constrained - // nodes: - hanging_node_constraints.distribute (localized_solution); - - // Then transfer everything back - // into the global vector. The - // following operation copies those - // elements of the localized - // solution that we store locally - // in the distributed solution, and - // does not touch the other - // ones. 
Since we do the same - // operation on all processors, we - // end up with a distributed vector - // that has all the constrained - // nodes fixed. - solution = localized_solution; - - // After this has happened, flush the PETSc - // buffers. This may or may not be strictly - // necessary here (the PETSc documentation - // is not very verbose on these things), - // but certainly doesn't hurt either. - solution.compress (); - - // Finally return the number of iterations - // it took to converge, to allow for some - // output: - return solver_control.last_step(); -} - - - - // Step five is to output the results we - // computed in this iteration. This is - // actually the same as done in step-8 - // before, with two small differences. First, - // all processes call this function, but not - // all of them need to do the work associated - // with generating output. In fact, they - // shouldn't, since we would try to write to - // the same file multiple times at once. So - // we let only the first job do this, and all - // the other ones idle around during this - // time (or start their work for the next - // iteration, or simply yield their CPUs to - // other jobs that happen to run at the same - // time). The second thing is that we not - // only output the solution vector, but also - // a vector that indicates which subdomain - // each cell belongs to. This will make for - // some nice pictures of partitioned domains. - // - // In practice, the present implementation of - // the output function is a major bottleneck - // of this program, since generating - // graphical output is expensive and doing so - // only on one process does, of course, not - // scale if we significantly increase the - // number of processes. In effect, this - // function will consume most of the run-time - // if you go to very large numbers of - // unknowns and processes, and real - // applications should limit the number of - // times they generate output through this - // function. - // - // The solution to this is to have - // each process generate output data - // only for it's own local cells, and - // write them to separate files, one - // file per process. This would - // distribute the work of generating - // the output to all processes - // equally. In a second step, - // separate from running this - // program, we would then take all - // the output files for a given cycle - // and merge these parts into one - // single output file. This has to be - // done sequentially, but can be done - // on a different machine, and should - // be relatively cheap. However, the - // necessary functionality for this - // is not yet implemented in the - // library, and since we are too - // close to the next release, we do - // not want to do such major - // destabilizing changes any - // more. This has been fixed in the - // meantime, though, and a better way - // to do things is explained in the - // step-18 example program. -template -void ElasticProblem::output_results (const unsigned int cycle) const -{ - // One point to realize is that when we - // want to generate output on process zero - // only, we need to have access to all - // elements of the solution vector. So we - // need to get a local copy of the - // distributed vector, which is in fact - // simple: - const PETScWrappers::Vector localized_solution (solution); - // The thing to notice, however, is that - // we do this localization operation on all - // processes, not only the one that - // actually needs the data. 
This can't be - // avoided, however, with the communication - // model of MPI: MPI does not have a way to - // query data on another process, both - // sides have to initiate a communication - // at the same time. So even though most of - // the processes do not need the localized - // solution, we have to place the call here - // so that all processes execute it. - // - // (In reality, part of this work can in - // fact be avoided. What we do is send the - // local parts of all processes to all - // other processes. What we would really - // need to do is to initiate an operation - // on all processes where each process - // simply sends its local chunk of data to - // process zero, since this is the only one - // that actually needs it, i.e. we need - // something like a gather operation. PETSc - // can do this, but for simplicity's sake - // we don't attempt to make use of this - // here. We don't, since what we do is not - // very expensive in the grand scheme of - // things: it is one vector communication - // among all processes , which has to be - // compared to the number of communications - // we have to do when solving the linear - // system, setting up the block-ILU for the - // preconditioner, and other operations.) - - // This being done, process zero goes ahead - // with setting up the output file as in - // step-8, and attaching the (localized) - // solution vector to the output - // object:. (The code to generate the output - // file name is stolen and slightly - // modified from step-5, since we expect - // that we can do a number of cycles - // greater than 10, which is the maximum of - // what the code in step-8 could handle.) - if (this_mpi_process == 0) - { - std::ostringstream filename; - filename << "solution-" << cycle << ".gmv"; - - std::ofstream output (filename.str().c_str()); - - DataOut data_out; - data_out.attach_dof_handler (dof_handler); - - std::vector solution_names; - switch (dim) - { - case 1: - solution_names.push_back ("displacement"); - break; - case 2: - solution_names.push_back ("x_displacement"); - solution_names.push_back ("y_displacement"); - break; - case 3: - solution_names.push_back ("x_displacement"); - solution_names.push_back ("y_displacement"); - solution_names.push_back ("z_displacement"); - break; - default: - Assert (false, ExcInternalError()); - } - - data_out.add_data_vector (localized_solution, solution_names); - - // The only thing we do here - // additionally is that we also output - // one value per cell indicating which - // subdomain (i.e. MPI process) it - // belongs to. This requires some - // conversion work, since the data the - // library provides us with is not the - // one the output class expects, but - // this is not difficult. 
First, set up - // a vector of integers, one per cell, - // that is then filled by the number of - // subdomain each cell is in: - std::vector partition_int (triangulation.n_active_cells()); - GridTools::get_subdomain_association (triangulation, partition_int); - - // Then convert this integer vector - // into a floating point vector just as - // the output functions want to see: - const Vector partitioning(partition_int.begin(), - partition_int.end()); - - // And finally add this vector as well: - data_out.add_data_vector (partitioning, "partitioning"); - - // This all being done, generate the - // intermediate format and write it out - // in GMV output format: - data_out.build_patches (); - data_out.write_gmv (output); - } -} - - - - // The sixth step is to take the solution - // just computed, and evaluate some kind of - // refinement indicator to refine the - // mesh. The problem is basically the same as - // with distributing hanging node - // constraints: in order to compute the error - // indicator, we need access to all elements - // of the solution vector. We then compute - // the indicators for the cells that belong - // to the present process, but then we need - // to distribute the refinement indicators - // into a distributed vector so that all - // processes have the values of the - // refinement indicator for all cells. But - // then, in order for each process to refine - // its copy of the mesh, they need to have - // acces to all refinement indicators - // locally, so they have to copy the global - // vector back into a local one. That's a - // little convoluted, but thinking about it - // quite straightforward nevertheless. So - // here's how we do it: -template -void ElasticProblem::refine_grid () -{ - // So, first part: get a local copy of the - // distributed solution vector. This is - // necessary since the error estimator - // needs to get at the value of neighboring - // cells even if they do not belong to the - // subdomain associated with the present - // MPI process: - const PETScWrappers::Vector localized_solution (solution); - - // Second part: set up a vector of error - // indicators for all cells and let the - // Kelly class compute refinement - // indicators for all cells belonging to - // the present subdomain/process. Note that - // the last argument of the call indicates - // which subdomain we are interested - // in. The three arguments before it are - // various other default arguments that one - // usually doesn't need (and doesn't state - // values for, but rather uses the - // defaults), but which we have to state - // here explicitly since we want to modify - // the value of a following argument - // (i.e. the one indicating the subdomain): - Vector local_error_per_cell (triangulation.n_active_cells()); - KellyErrorEstimator::estimate (dof_handler, - QGauss(2), - typename FunctionMap::type(), - localized_solution, - local_error_per_cell, - std::vector(), - 0, - multithread_info.n_default_threads, - this_mpi_process); - - // Now all processes have computed error - // indicators for their own cells and - // stored them in the respective elements - // of the local_error_per_cell - // vector. The elements of this vector for - // cells not on the present process are - // zero. However, since all processes have - // a copy of a copy of the entire - // triangulation and need to keep these - // copies in synch, they need the values of - // refinement indicators for all cells of - // the triangulation. Thus, we need to - // distribute our results. 
We do this by - // creating a distributed vector where each - // process has its share, and sets the - // elements it has computed. We will then - // later generate a local sequential copy - // of this distributed vector to allow each - // process to access all elements of this - // vector. - // - // So in the first step, we need to set up - // a %parallel vector. For simplicity, every - // process will own a chunk with as many - // elements as this process owns cells, so - // that the first chunk of elements is - // stored with process zero, the next chunk - // with process one, and so on. It is - // important to remark, however, that these - // elements are not necessarily the ones we - // will write to. This is so, since the - // order in which cells are arranged, - // i.e. the order in which the elements of - // the vector correspond to cells, is not - // ordered according to the subdomain these - // cells belong to. In other words, if on - // this process we compute indicators for - // cells of a certain subdomain, we may - // write the results to more or less random - // elements if the distributed vector, that - // do not necessarily lie within the chunk - // of vector we own on the present - // process. They will subsequently have to - // be copied into another process's memory - // space then, an operation that PETSc does - // for us when we call the compress - // function. This inefficiency could be - // avoided with some more code, but we - // refrain from it since it is not a major - // factor in the program's total runtime. - // - // So here's how we do it: count how many - // cells belong to this process, set up a - // distributed vector with that many - // elements to be stored locally, and copy - // over the elements we computed locally, - // then compress the result. In fact, we - // really only copy the elements that are - // nonzero, so we may miss a few that we - // computed to zero, but this won't hurt - // since the original values of the vector - // is zero anyway. - const unsigned int n_local_cells - = GridTools::count_cells_with_subdomain_association (triangulation, - this_mpi_process); - PETScWrappers::MPI::Vector - distributed_all_errors (mpi_communicator, - triangulation.n_active_cells(), - n_local_cells); - - for (unsigned int i=0; i localized_all_errors (distributed_all_errors); - - // ...which we can the subsequently use to - // finally refine the grid: - GridRefinement::refine_and_coarsen_fixed_number (triangulation, - localized_all_errors, - 0.3, 0.03); - triangulation.execute_coarsening_and_refinement (); -} - - - - // Lastly, here is the driver function. It is - // almost unchanged from step-8, with the - // exception that we replace std::cout by - // the pcout stream. 
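  // (For orientation -- a sketch, not part of the hunk above: a
  // ConditionalOStream such as pcout wraps an ordinary stream and is
  // told at construction time whether the current process should
  // actually produce output. In this program it is a class member
  // initialized in the constructor's initializer list; as a
  // stand-alone object the same idea would read as follows, where
  // this_mpi_process is the rank stored elsewhere in the class:
  ConditionalOStream pcout (std::cout,
                            this_mpi_process == 0);
  pcout << "Only process 0 ever prints this." << std::endl;
  // All processes execute the statement, but on every rank other
  // than zero the stream simply swallows the output.)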
Apart from this, the - // only other cosmetic change is that we - // output how many degrees of freedom there - // are per process, and how many iterations - // it took for the linear solver to converge: -template -void ElasticProblem::run () -{ - for (unsigned int cycle=0; cycle<10; ++cycle) - { - pcout << "Cycle " << cycle << ':' << std::endl; - - if (cycle == 0) - { - GridGenerator::hyper_cube (triangulation, -1, 1); - triangulation.refine_global (3); - } - else - refine_grid (); - - pcout << " Number of active cells: " - << triangulation.n_active_cells() - << std::endl; - - setup_system (); - - pcout << " Number of degrees of freedom: " - << dof_handler.n_dofs() - << " (by partition:"; - for (unsigned int p=0; p + void ElasticProblem::refine_grid () + { + // So, first part: get a local copy of the + // distributed solution vector. This is + // necessary since the error estimator + // needs to get at the value of neighboring + // cells even if they do not belong to the + // subdomain associated with the present + // MPI process: + const PETScWrappers::Vector localized_solution (solution); + + // Second part: set up a vector of error + // indicators for all cells and let the + // Kelly class compute refinement + // indicators for all cells belonging to + // the present subdomain/process. Note that + // the last argument of the call indicates + // which subdomain we are interested + // in. The three arguments before it are + // various other default arguments that one + // usually doesn't need (and doesn't state + // values for, but rather uses the + // defaults), but which we have to state + // here explicitly since we want to modify + // the value of a following argument + // (i.e. the one indicating the subdomain): + Vector local_error_per_cell (triangulation.n_active_cells()); + KellyErrorEstimator::estimate (dof_handler, + QGauss(2), + typename FunctionMap::type(), + localized_solution, + local_error_per_cell, + std::vector(), + 0, + multithread_info.n_default_threads, + this_mpi_process); + + // Now all processes have computed error + // indicators for their own cells and + // stored them in the respective elements + // of the local_error_per_cell + // vector. The elements of this vector for + // cells not on the present process are + // zero. However, since all processes have + // a copy of a copy of the entire + // triangulation and need to keep these + // copies in synch, they need the values of + // refinement indicators for all cells of + // the triangulation. Thus, we need to + // distribute our results. We do this by + // creating a distributed vector where each + // process has its share, and sets the + // elements it has computed. We will then + // later generate a local sequential copy + // of this distributed vector to allow each + // process to access all elements of this + // vector. + // + // So in the first step, we need to set up + // a %parallel vector. For simplicity, every + // process will own a chunk with as many + // elements as this process owns cells, so + // that the first chunk of elements is + // stored with process zero, the next chunk + // with process one, and so on. It is + // important to remark, however, that these + // elements are not necessarily the ones we + // will write to. This is so, since the + // order in which cells are arranged, + // i.e. the order in which the elements of + // the vector correspond to cells, is not + // ordered according to the subdomain these + // cells belong to. 
In other words, if on + // this process we compute indicators for + // cells of a certain subdomain, we may + // write the results to more or less random + // elements if the distributed vector, that + // do not necessarily lie within the chunk + // of vector we own on the present + // process. They will subsequently have to + // be copied into another process's memory + // space then, an operation that PETSc does + // for us when we call the compress + // function. This inefficiency could be + // avoided with some more code, but we + // refrain from it since it is not a major + // factor in the program's total runtime. + // + // So here's how we do it: count how many + // cells belong to this process, set up a + // distributed vector with that many + // elements to be stored locally, and copy + // over the elements we computed locally, + // then compress the result. In fact, we + // really only copy the elements that are + // nonzero, so we may miss a few that we + // computed to zero, but this won't hurt + // since the original values of the vector + // is zero anyway. + const unsigned int n_local_cells + = GridTools::count_cells_with_subdomain_association (triangulation, + this_mpi_process); + PETScWrappers::MPI::Vector + distributed_all_errors (mpi_communicator, + triangulation.n_active_cells(), + n_local_cells); + + for (unsigned int i=0; i localized_all_errors (distributed_all_errors); + + // ...which we can the subsequently use to + // finally refine the grid: + GridRefinement::refine_and_coarsen_fixed_number (triangulation, + localized_all_errors, + 0.3, 0.03); + triangulation.execute_coarsening_and_refinement (); + } + + + + // Lastly, here is the driver function. It is + // almost unchanged from step-8, with the + // exception that we replace std::cout by + // the pcout stream. Apart from this, the + // only other cosmetic change is that we + // output how many degrees of freedom there + // are per process, and how many iterations + // it took for the linear solver to converge: + template + void ElasticProblem::run () + { + for (unsigned int cycle=0; cycle<10; ++cycle) + { + pcout << "Cycle " << cycle << ':' << std::endl; + + if (cycle == 0) + { + GridGenerator::hyper_cube (triangulation, -1, 1); + triangulation.refine_global (3); + } + else + refine_grid (); + + pcout << " Number of active cells: " + << triangulation.n_active_cells() + << std::endl; + + setup_system (); + + pcout << " Number of degrees of freedom: " + << dof_handler.n_dofs() + << " (by partition:"; + for (unsigned int p=0; p::run () // delegates work to the run function of // a master object, and only wraps everything // into some code to catch exceptions: -int main (int argc, char **argv) +int main (int argc, char **argv) { try { + using namespace dealii; + using namespace Step17; + // Here is the only real difference: // PETSc requires that we initialize it // at the beginning of the program, and @@ -1245,7 +1251,7 @@ int main (int argc, char **argv) elastic_problem.run (); } - PetscFinalize(); + PetscFinalize(); } catch (std::exception &exc) { @@ -1257,10 +1263,10 @@ int main (int argc, char **argv) << "Aborting!" << std::endl << "----------------------------------------------------" << std::endl; - + return 1; } - catch (...) + catch (...) 
{ std::cerr << std::endl << std::endl << "----------------------------------------------------" diff --git a/deal.II/examples/step-18/step-18.cc b/deal.II/examples/step-18/step-18.cc index cad5022f30..bc60888313 100644 --- a/deal.II/examples/step-18/step-18.cc +++ b/deal.II/examples/step-18/step-18.cc @@ -3,7 +3,7 @@ /* $Id$ */ /* */ -/* Copyright (C) 2000, 2004, 2005, 2006, 2007, 2008, 2009 by the deal.II authors */ +/* Copyright (C) 2000, 2004, 2005, 2006, 2007, 2008, 2009, 2011 by the deal.II authors */ /* */ /* This file is subject to QPL and may not be distributed */ /* without copyright and license information. Please refer */ @@ -69,17 +69,9 @@ // The last step is as in all // previous programs: -using namespace dealii; - - // So much for the header files. As a - // matter of good practice, I have - // started to put everything that - // corresponds to a certain project - // into a namespace of its own, named - // after the problem that we are - // solving: -namespace QuasiStaticElasticity +namespace Step18 { + using namespace dealii; // @sect3{The PointHistory class} @@ -116,20 +108,20 @@ namespace QuasiStaticElasticity }; - // @sect3{The stress-strain tensor} - - // Next, we define the linear relationship - // between the stress and the strain in - // elasticity. It is given by a tensor of - // rank 4 that is usually written in the - // form $C_{ijkl} = \mu (\delta_{ik} - // \delta_{jl} + \delta_{il} \delta_{jk}) + - // \lambda \delta_{ij} \delta_{kl}$. This - // tensor maps symmetric tensor of rank 2 - // to symmetric tensors of rank 2. A - // function implementing its creation for - // given values of the Lame constants - // lambda and mu is straightforward: + // @sect3{The stress-strain tensor} + + // Next, we define the linear relationship + // between the stress and the strain in + // elasticity. It is given by a tensor of + // rank 4 that is usually written in the + // form $C_{ijkl} = \mu (\delta_{ik} + // \delta_{jl} + \delta_{il} \delta_{jk}) + + // \lambda \delta_{ij} \delta_{kl}$. This + // tensor maps symmetric tensor of rank 2 + // to symmetric tensors of rank 2. A + // function implementing its creation for + // given values of the Lame constants + // lambda and mu is straightforward: template SymmetricTensor<4,dim> get_stress_strain_tensor (const double lambda, const double mu) @@ -137,42 +129,42 @@ namespace QuasiStaticElasticity SymmetricTensor<4,dim> tmp; for (unsigned int i=0; i strain; for (unsigned int i=0; i get_rotation_matrix (const std::vector > &grad_u) { @@ -394,20 +386,20 @@ namespace QuasiStaticElasticity // gradients. Note that we are in 2d, so // the rotation is a scalar: const double curl = (grad_u[1][0] - grad_u[0][1]); - + // From this, compute the angle of // rotation: const double angle = std::atan (curl); - // And from this, build the antisymmetric - // rotation matrix: + // And from this, build the antisymmetric + // rotation matrix: const double t[2][2] = {{ cos(angle), sin(angle) }, {-sin(angle), cos(angle) }}; return Tensor<2,2>(t); } - // The 3d case is a little more contrived: + // The 3d case is a little more contrived: Tensor<2,3> get_rotation_matrix (const std::vector > &grad_u) { @@ -415,9 +407,9 @@ namespace QuasiStaticElasticity // velocity field. 
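  // (Stepping back briefly: the hunks further up show only fragments
  // of the two helper functions get_stress_strain_tensor and
  // get_strain, because the unchanged lines in between were cut from
  // the diff context and the angle brackets did not survive
  // extraction. As a sketch of what their bodies presumably look
  // like -- the first one directly implements the formula for
  // $C_{ijkl}$ quoted above, the second the symmetric gradient of a
  // shape function:
  template <int dim>
  SymmetricTensor<4,dim>
  get_stress_strain_tensor (const double lambda, const double mu)
  {
    SymmetricTensor<4,dim> tmp;
    for (unsigned int i=0; i<dim; ++i)
      for (unsigned int j=0; j<dim; ++j)
        for (unsigned int k=0; k<dim; ++k)
          for (unsigned int l=0; l<dim; ++l)
            tmp[i][j][k][l] = (((i==k) && (j==l) ? mu : 0.0) +
                               ((i==l) && (j==k) ? mu : 0.0) +
                               ((i==j) && (k==l) ? lambda : 0.0));
    return tmp;
  }

  template <int dim>
  inline
  SymmetricTensor<2,dim>
  get_strain (const FEValues<dim> &fe_values,
              const unsigned int   shape_func,
              const unsigned int   q_point)
  {
    SymmetricTensor<2,dim> tmp;

    // The diagonal entries are the derivatives of each displacement
    // component in its own coordinate direction...
    for (unsigned int i=0; i<dim; ++i)
      tmp[i][i] = fe_values.shape_grad_component (shape_func, q_point, i)[i];

    // ...and the off-diagonal entries are the symmetrized mixed
    // derivatives:
    for (unsigned int i=0; i<dim; ++i)
      for (unsigned int j=i+1; j<dim; ++j)
        tmp[i][j] = (fe_values.shape_grad_component (shape_func, q_point, i)[j]
                     +
                     fe_values.shape_grad_component (shape_func, q_point, j)[i]) / 2;

    return tmp;
  }
  // Back now to the three-dimensional rotation matrix and the curl
  // just mentioned:)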
This time, it is a // real vector: const Point<3> curl (grad_u[2][1] - grad_u[1][2], - grad_u[0][2] - grad_u[2][0], - grad_u[1][0] - grad_u[0][1]); - + grad_u[0][2] - grad_u[2][0], + grad_u[1][0] - grad_u[0][1]); + // From this vector, using its magnitude, // compute the tangent of the angle of // rotation, and from it the actual @@ -425,42 +417,42 @@ namespace QuasiStaticElasticity const double tan_angle = std::sqrt(curl*curl); const double angle = std::atan (tan_angle); - // Now, here's one problem: if the angle - // of rotation is too small, that means - // that there is no rotation going on - // (for example a translational - // motion). In that case, the rotation - // matrix is the identity matrix. - // - // The reason why we stress that is that - // in this case we have that - // tan_angle==0. Further down, we - // need to divide by that number in the - // computation of the axis of rotation, - // and we would get into trouble when - // dividing doing so. Therefore, let's - // shortcut this and simply return the - // identity matrix if the angle of - // rotation is really small: + // Now, here's one problem: if the angle + // of rotation is too small, that means + // that there is no rotation going on + // (for example a translational + // motion). In that case, the rotation + // matrix is the identity matrix. + // + // The reason why we stress that is that + // in this case we have that + // tan_angle==0. Further down, we + // need to divide by that number in the + // computation of the axis of rotation, + // and we would get into trouble when + // dividing doing so. Therefore, let's + // shortcut this and simply return the + // identity matrix if the angle of + // rotation is really small: if (angle < 1e-9) { - static const double rotation[3][3] - = {{ 1, 0, 0}, { 0, 1, 0 }, { 0, 0, 1 } }; - static const Tensor<2,3> rot(rotation); - return rot; + static const double rotation[3][3] + = {{ 1, 0, 0}, { 0, 1, 0 }, { 0, 0, 1 } }; + static const Tensor<2,3> rot(rotation); + return rot; } - // Otherwise compute the real rotation - // matrix. The algorithm for this is not - // exactly obvious, but can be found in a - // number of books, particularly on - // computer games where rotation is a - // very frequent operation. Online, you - // can find a description at - // http://www.makegames.com/3drotation/ - // and (this particular form, with the - // signs as here) at - // http://www.gamedev.net/reference/articles/article1199.asp: + // Otherwise compute the real rotation + // matrix. The algorithm for this is not + // exactly obvious, but can be found in a + // number of books, particularly on + // computer games where rotation is a + // very frequent operation. 
Online, you + // can find a description at + // http://www.makegames.com/3drotation/ + // and (this particular form, with the + // signs as here) at + // http://www.gamedev.net/reference/articles/article1199.asp: const double c = std::cos(angle); const double s = std::sin(angle); const double t = 1-c; @@ -468,50 +460,50 @@ namespace QuasiStaticElasticity const Point<3> axis = curl/tan_angle; const double rotation[3][3] = {{ t*axis[0]*axis[0]+c, - t*axis[0]*axis[1]+s*axis[2], - t*axis[0]*axis[2]-s*axis[1]}, - { t*axis[0]*axis[1]-s*axis[2], - t*axis[1]*axis[1]+c, - t*axis[1]*axis[2]+s*axis[0]}, - { t*axis[0]*axis[2]+s*axis[1], - t*axis[1]*axis[1]-s*axis[0], - t*axis[2]*axis[2]+c } }; + t*axis[0]*axis[1]+s*axis[2], + t*axis[0]*axis[2]-s*axis[1]}, + { t*axis[0]*axis[1]-s*axis[2], + t*axis[1]*axis[1]+c, + t*axis[1]*axis[2]+s*axis[0]}, + { t*axis[0]*axis[2]+s*axis[1], + t*axis[1]*axis[1]-s*axis[0], + t*axis[2]*axis[2]+c } }; return Tensor<2,3>(rotation); } - + // @sect3{The TopLevel class} - + // This is the main class of the // program. Since the namespace already // indicates what problem we are solving, // let's call it by what it does: it // directs the flow of the program, i.e. it // is the toplevel driver. - // - // The member variables of this class are - // essentially as before, i.e. it has to - // have a triangulation, a DoF handler and - // associated objects such as constraints, - // variables that describe the linear - // system, etc. There are a good number of - // more member functions now, which we will - // explain below. - // - // The external interface of the class, - // however, is unchanged: it has a public - // constructor and desctructor, and it has - // a run function that initiated all - // the work. + // + // The member variables of this class are + // essentially as before, i.e. it has to + // have a triangulation, a DoF handler and + // associated objects such as constraints, + // variables that describe the linear + // system, etc. There are a good number of + // more member functions now, which we will + // explain below. + // + // The external interface of the class, + // however, is unchanged: it has a public + // constructor and desctructor, and it has + // a run function that initiated all + // the work. template - class TopLevel + class TopLevel { public: TopLevel (); ~TopLevel (); void run (); - + private: // The private interface is more // extensive than in step-17. First, we @@ -530,29 +522,29 @@ namespace QuasiStaticElasticity // output the solution vector on the // currect mesh: void create_coarse_grid (); - + void setup_system (); - + void assemble_system (); - + void solve_timestep (); unsigned int solve_linear_problem (); void output_results () const; - // All, except for the first two, of - // these functions are called in each - // timestep. Since the first time step - // is a little special, we have - // separate functions that describe - // what has to happen in a timestep: - // one for the first, and one for all - // following timesteps: + // All, except for the first two, of + // these functions are called in each + // timestep. Since the first time step + // is a little special, we have + // separate functions that describe + // what has to happen in a timestep: + // one for the first, and one for all + // following timesteps: void do_initial_timestep (); void do_timestep (); - + // Then we need a whole bunch of // functions that do various // things. 
The first one refines the @@ -572,12 +564,12 @@ namespace QuasiStaticElasticity // each quadrature point. void refine_initial_grid (); - // At the end of each time step, we - // want to move the mesh vertices - // around according to the incremental - // displacement computed in this time - // step. This is the function in which - // this is done: + // At the end of each time step, we + // want to move the mesh vertices + // around according to the incremental + // displacement computed in this time + // step. This is the function in which + // this is done: void move_mesh (); // Next are two functions that handle @@ -596,10 +588,10 @@ namespace QuasiStaticElasticity // timestep: void update_quadrature_point_history (); - // After the member functions, here are - // the member variables. The first ones - // have all been discussed in more - // detail in previous example programs: + // After the member functions, here are + // the member variables. The first ones + // have all been discussed in more + // detail in previous example programs: Triangulation triangulation; FESystem fe; @@ -636,98 +628,98 @@ namespace QuasiStaticElasticity // processors). std::vector > quadrature_point_history; - // The way this object is accessed is - // through a user pointer that each - // cell, face, or edge holds: it is a - // void* pointer that can be used - // by application programs to associate - // arbitrary data to cells, faces, or - // edges. What the program actually - // does with this data is within its - // own responsibility, the library just - // allocates some space for these - // pointers, and application programs - // can set and read the pointers for - // each of these objects. - - - // Further: we need the objects of - // linear systems to be solved, - // i.e. matrix, right hand side vector, - // and the solution vector. Since we - // anticipate solving big problems, we - // use the same types as in step-17, - // i.e. distributed %parallel matrices - // and vectors built on top of the - // PETSc library. Conveniently, they - // can also be used when running on - // only a single machine, in which case - // this machine happens to be the only - // one in our %parallel universe. - // - // However, as a difference to step-17, - // we do not store the solution vector - // -- which here is the incremental - // displacements computed in each time - // step -- in a distributed - // fashion. I.e., of course it must be - // a distributed vector when computing - // it, but immediately after that we - // make sure each processor has a - // complete copy. The reason is that we - // had already seen in step-17 that - // many functions needed a complete - // copy. While it is not hard to get - // it, this requires communication on - // the network, and is thus slow. In - // addition, these were repeatedly the - // same operations, which is certainly - // undesirable unless the gains of not - // always having to store the entire - // vector outweighs it. When writing - // this program, it turned out that we - // need a complete copy of the solution - // in so many places that it did not - // seem worthwhile to only get it when - // necessary. Instead, we opted to - // obtain the complete copy once and - // for all, and instead get rid of the - // distributed copy immediately. 
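  // In code, the scheme just described presumably looks roughly like
  // the following sketch (with illustrative tolerances; it belongs
  // in the solver routine further down, not at this point of the
  // class declaration):
  PETScWrappers::MPI::Vector
    distributed_incremental_displacement (mpi_communicator,
                                          dof_handler.n_dofs(),
                                          n_local_dofs);
  distributed_incremental_displacement = incremental_displacement;

  SolverControl           solver_control (dof_handler.n_dofs(),
                                          1e-16*system_rhs.l2_norm());
  PETScWrappers::SolverCG cg (solver_control, mpi_communicator);
  PETScWrappers::PreconditionBlockJacobi preconditioner (system_matrix);

  // Solve into the distributed vector...
  cg.solve (system_matrix, distributed_incremental_displacement,
            system_rhs, preconditioner);

  // ...and immediately copy the result into the sequential vector
  // that every process keeps in full; the distributed copy is then
  // simply discarded:
  incremental_displacement = distributed_incremental_displacement;
  hanging_node_constraints.distribute (incremental_displacement);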
Thus, - // note that the declaration of - // inremental_displacement does not - // denote a distribute vector as would - // be indicated by the middle namespace - // MPI: + // The way this object is accessed is + // through a user pointer that each + // cell, face, or edge holds: it is a + // void* pointer that can be used + // by application programs to associate + // arbitrary data to cells, faces, or + // edges. What the program actually + // does with this data is within its + // own responsibility, the library just + // allocates some space for these + // pointers, and application programs + // can set and read the pointers for + // each of these objects. + + + // Further: we need the objects of + // linear systems to be solved, + // i.e. matrix, right hand side vector, + // and the solution vector. Since we + // anticipate solving big problems, we + // use the same types as in step-17, + // i.e. distributed %parallel matrices + // and vectors built on top of the + // PETSc library. Conveniently, they + // can also be used when running on + // only a single machine, in which case + // this machine happens to be the only + // one in our %parallel universe. + // + // However, as a difference to step-17, + // we do not store the solution vector + // -- which here is the incremental + // displacements computed in each time + // step -- in a distributed + // fashion. I.e., of course it must be + // a distributed vector when computing + // it, but immediately after that we + // make sure each processor has a + // complete copy. The reason is that we + // had already seen in step-17 that + // many functions needed a complete + // copy. While it is not hard to get + // it, this requires communication on + // the network, and is thus slow. In + // addition, these were repeatedly the + // same operations, which is certainly + // undesirable unless the gains of not + // always having to store the entire + // vector outweighs it. When writing + // this program, it turned out that we + // need a complete copy of the solution + // in so many places that it did not + // seem worthwhile to only get it when + // necessary. Instead, we opted to + // obtain the complete copy once and + // for all, and instead get rid of the + // distributed copy immediately. Thus, + // note that the declaration of + // inremental_displacement does not + // denote a distribute vector as would + // be indicated by the middle namespace + // MPI: PETScWrappers::MPI::SparseMatrix system_matrix; PETScWrappers::MPI::Vector system_rhs; PETScWrappers::Vector incremental_displacement; - // The next block of variables is then - // related to the time dependent nature - // of the problem: they denote the - // length of the time interval which we - // want to simulate, the present time - // and number of time step, and length - // of present timestep: + // The next block of variables is then + // related to the time dependent nature + // of the problem: they denote the + // length of the time interval which we + // want to simulate, the present time + // and number of time step, and length + // of present timestep: double present_time; double present_timestep; double end_time; unsigned int timestep_no; - // Then a few variables that have to do - // with %parallel processing: first, a - // variable denoting the MPI - // communicator we use, and then two - // numbers telling us how many - // participating processors there are, - // and where in this world we - // are. 
Finally, a stream object that - // makes sure only one processor is - // actually generating output to the - // console. This is all the same as in - // step-17: + // Then a few variables that have to do + // with %parallel processing: first, a + // variable denoting the MPI + // communicator we use, and then two + // numbers telling us how many + // participating processors there are, + // and where in this world we + // are. Finally, a stream object that + // makes sure only one processor is + // actually generating output to the + // console. This is all the same as in + // step-17: MPI_Comm mpi_communicator; const unsigned int n_mpi_processes; @@ -736,13 +728,13 @@ namespace QuasiStaticElasticity ConditionalOStream pcout; - // Here is a vector where each entry - // denotes the numbers of degrees of - // freedom that are stored on the - // processor with that particular - // number: + // Here is a vector where each entry + // denotes the numbers of degrees of + // freedom that are stored on the + // processor with that particular + // number: std::vector local_dofs_per_process; - + // Next, how many degrees of freedom // the present processor stores. This // is, of course, an abbreviation to @@ -777,8 +769,8 @@ namespace QuasiStaticElasticity }; - // @sect3{The BodyForce class} - + // @sect3{The BodyForce class} + // Before we go on to the main // functionality of this program, we have // to define what forces will act on the @@ -808,39 +800,39 @@ namespace QuasiStaticElasticity // in the function, and we take as the // density 7700 kg/m^3, a value commonly // assumed for steel. - // - // To be a little more general and to be - // able to do computations in 2d as well, - // we realize that the body force is always - // a function returning a dim - // dimensional vector. We assume that - // gravity acts along the negative - // direction of the last, i.e. dim-1th - // coordinate. The rest of the - // implementation of this function should - // be mostly self-explanatory given similar - // definitions in previous example - // programs. Note that the body force is - // independent of the location; to avoid - // compiler warnings about unused function - // arguments, we therefore comment out the - // name of the first argument of the - // vector_value function: + // + // To be a little more general and to be + // able to do computations in 2d as well, + // we realize that the body force is always + // a function returning a dim + // dimensional vector. We assume that + // gravity acts along the negative + // direction of the last, i.e. dim-1th + // coordinate. The rest of the + // implementation of this function should + // be mostly self-explanatory given similar + // definitions in previous example + // programs. 
Note that the body force is + // independent of the location; to avoid + // compiler warnings about unused function + // arguments, we therefore comment out the + // name of the first argument of the + // vector_value function: template - class BodyForce : public Function + class BodyForce : public Function { public: BodyForce (); - + virtual void vector_value (const Point &p, - Vector &values) const; + Vector &values) const; virtual void vector_value_list (const std::vector > &points, - std::vector > &value_list) const; + std::vector > &value_list) const; }; @@ -855,14 +847,14 @@ namespace QuasiStaticElasticity inline void BodyForce::vector_value (const Point &/*p*/, - Vector &values) const + Vector &values) const { - Assert (values.size() == dim, + Assert (values.size() == dim, ExcDimensionMismatch (values.size(), dim)); const double g = 9.81; const double rho = 7700; - + values = 0; values(dim-1) = -rho * g; } @@ -872,86 +864,86 @@ namespace QuasiStaticElasticity template void BodyForce::vector_value_list (const std::vector > &points, - std::vector > &value_list) const + std::vector > &value_list) const { const unsigned int n_points = points.size(); - Assert (value_list.size() == n_points, + Assert (value_list.size() == n_points, ExcDimensionMismatch (value_list.size(), n_points)); for (unsigned int p=0; p::vector_value (points[p], - value_list[p]); + value_list[p]); } - // @sect3{The IncrementalBoundaryValue class} - - // In addition to body forces, movement can - // be induced by boundary forces and forced - // boundary displacement. The latter case - // is equivalent to forces being chosen in - // such a way that they induce certain - // displacement. - // - // For quasistatic displacement, typical - // boundary forces would be pressure on a - // body, or tangential friction against - // another body. We chose a somewhat - // simpler case here: we prescribe a - // certain movement of (parts of) the - // boundary, or at least of certain - // components of the displacement - // vector. We describe this by another - // vector-valued function that, for a given - // point on the boundary, returns the - // prescribed displacement. - // - // Since we have a time-dependent problem, - // the displacement increment of the - // boundary equals the displacement - // accumulated during the length of the - // timestep. The class therefore has to - // know both the present time and the - // length of the present time step, and can - // then approximate the incremental - // displacement as the present velocity - // times the present timestep. - // - // For the purposes of this - // program, we choose a simple form - // of boundary displacement: we - // displace the top boundary with - // constant velocity downwards. The - // rest of the boundary is either - // going to be fixed (and is then - // described using an object of - // type ZeroFunction) or free - // (Neumann-type, in which case - // nothing special has to be done). - // The implementation of the - // class describing the constant - // downward motion should then be - // obvious using the knowledge we - // gained through all the previous - // example programs: + // @sect3{The IncrementalBoundaryValue class} + + // In addition to body forces, movement can + // be induced by boundary forces and forced + // boundary displacement. The latter case + // is equivalent to forces being chosen in + // such a way that they induce certain + // displacement. 
+ // + // For quasistatic displacement, typical + // boundary forces would be pressure on a + // body, or tangential friction against + // another body. We chose a somewhat + // simpler case here: we prescribe a + // certain movement of (parts of) the + // boundary, or at least of certain + // components of the displacement + // vector. We describe this by another + // vector-valued function that, for a given + // point on the boundary, returns the + // prescribed displacement. + // + // Since we have a time-dependent problem, + // the displacement increment of the + // boundary equals the displacement + // accumulated during the length of the + // timestep. The class therefore has to + // know both the present time and the + // length of the present time step, and can + // then approximate the incremental + // displacement as the present velocity + // times the present timestep. + // + // For the purposes of this + // program, we choose a simple form + // of boundary displacement: we + // displace the top boundary with + // constant velocity downwards. The + // rest of the boundary is either + // going to be fixed (and is then + // described using an object of + // type ZeroFunction) or free + // (Neumann-type, in which case + // nothing special has to be done). + // The implementation of the + // class describing the constant + // downward motion should then be + // obvious using the knowledge we + // gained through all the previous + // example programs: template - class IncrementalBoundaryValues : public Function + class IncrementalBoundaryValues : public Function { public: IncrementalBoundaryValues (const double present_time, - const double present_timestep); - + const double present_timestep); + virtual void vector_value (const Point &p, - Vector &values) const; + Vector &values) const; virtual void vector_value_list (const std::vector > &points, - std::vector > &value_list) const; + std::vector > &value_list) const; private: const double velocity; @@ -963,12 +955,12 @@ namespace QuasiStaticElasticity template IncrementalBoundaryValues:: IncrementalBoundaryValues (const double present_time, - const double present_timestep) + const double present_timestep) : Function (dim), velocity (.1), present_time (present_time), - present_timestep (present_timestep) + present_timestep (present_timestep) {} @@ -976,9 +968,9 @@ namespace QuasiStaticElasticity void IncrementalBoundaryValues:: vector_value (const Point &/*p*/, - Vector &values) const + Vector &values) const { - Assert (values.size() == dim, + Assert (values.size() == dim, ExcDimensionMismatch (values.size(), dim)); values = 0; @@ -991,11 +983,11 @@ namespace QuasiStaticElasticity void IncrementalBoundaryValues:: vector_value_list (const std::vector > &points, - std::vector > &value_list) const + std::vector > &value_list) const { const unsigned int n_points = points.size(); - Assert (value_list.size() == n_points, + Assert (value_list.size() == n_points, ExcDimensionMismatch (value_list.size(), n_points)); for (unsigned int p=0; pTopLevel class} + // @sect3{Implementation of the TopLevel class} - // Now for the implementation of the main - // class. First, we initialize the + // Now for the implementation of the main + // class. First, we initialize the // stress-strain tensor, which we // have declared as a static const // variable. 
We chose Lame @@ -1019,11 +1011,11 @@ namespace QuasiStaticElasticity TopLevel::stress_strain_tensor = get_stress_strain_tensor (/*lambda = */ 9.695e10, /*mu = */ 7.617e10); - - // @sect4{The public interface} - + + // @sect4{The public interface} + // The next step is the definition of // constructors and descructors. There are // no surprises here: we choose linear and @@ -1048,32 +1040,32 @@ namespace QuasiStaticElasticity template - TopLevel::~TopLevel () + TopLevel::~TopLevel () { dof_handler.clear (); } - - - // The last of the public functions is the - // one that directs all the work, - // run(). It initializes the variables - // that describe where in time we presently - // are, then runs the first time step, then - // loops over all the other time - // steps. Note that for simplicity we use a - // fixed time step, whereas a more - // sophisticated program would of course - // have to choose it in some more + + + // The last of the public functions is the + // one that directs all the work, + // run(). It initializes the variables + // that describe where in time we presently + // are, then runs the first time step, then + // loops over all the other time + // steps. Note that for simplicity we use a + // fixed time step, whereas a more + // sophisticated program would of course + // have to choose it in some more // reasonable way adaptively: template - void TopLevel::run () + void TopLevel::run () { present_time = 0; present_timestep = 1; end_time = 10; timestep_no = 0; - + do_initial_timestep (); while (present_time < end_time) @@ -1081,23 +1073,23 @@ namespace QuasiStaticElasticity } - // @sect4{TopLevel::create_coarse_grid} - - // The next function in the order - // in which they were declared - // above is the one that creates - // the coarse grid from which we - // start. For this example program, - // we want to compute the - // deformation of a cylinder under - // axial compression. The first - // step therefore is to generate a - // mesh for a cylinder of length 3 - // and with inner and outer radii - // of 0.8 and 1, - // respectively. Fortunately, there - // is a library function for such a - // mesh. + // @sect4{TopLevel::create_coarse_grid} + + // The next function in the order + // in which they were declared + // above is the one that creates + // the coarse grid from which we + // start. For this example program, + // we want to compute the + // deformation of a cylinder under + // axial compression. The first + // step therefore is to generate a + // mesh for a cylinder of length 3 + // and with inner and outer radii + // of 0.8 and 1, + // respectively. Fortunately, there + // is a library function for such a + // mesh. 
// // In a second step, we have to associated // boundary conditions with the upper and @@ -1114,7 +1106,7 @@ namespace QuasiStaticElasticity void TopLevel::create_coarse_grid () { const double inner_radius = 0.8, - outer_radius = 1; + outer_radius = 1; GridGenerator::cylinder_shell (triangulation, 3, inner_radius, outer_radius); for (typename Triangulation::active_cell_iterator @@ -1123,115 +1115,115 @@ namespace QuasiStaticElasticity for (unsigned int f=0; f::faces_per_cell; ++f) if (cell->face(f)->at_boundary()) { - const Point face_center = cell->face(f)->center(); - + const Point face_center = cell->face(f)->center(); + if (face_center[2] == 0) cell->face(f)->set_boundary_indicator (0); else if (face_center[2] == 3) cell->face(f)->set_boundary_indicator (1); else if (std::sqrt(face_center[0]*face_center[0] + - face_center[1]*face_center[1]) - < - (inner_radius + outer_radius) / 2) + face_center[1]*face_center[1]) + < + (inner_radius + outer_radius) / 2) cell->face(f)->set_boundary_indicator (2); - else - cell->face(f)->set_boundary_indicator (3); + else + cell->face(f)->set_boundary_indicator (3); } - // In order to make sure that new - // vertices are placed correctly on mesh - // refinement, we have to associate - // objects describing those parts of the - // boundary that do not consist of - // straight parts. Corresponding to the - // cylinder shell generator function used - // above, there are classes that can be - // used to describe the geometry of - // cylinders. We need to use different - // objects for the inner and outer parts - // of the cylinder, with different radii; - // the second argument to the constructor - // indicates the axis around which the - // cylinder revolves -- in this case the - // z-axis. Note that the boundary objects - // need to live as long as the - // triangulation does; we can achieve - // this by making the objects static, - // which means that they live as long as - // the program runs: + // In order to make sure that new + // vertices are placed correctly on mesh + // refinement, we have to associate + // objects describing those parts of the + // boundary that do not consist of + // straight parts. Corresponding to the + // cylinder shell generator function used + // above, there are classes that can be + // used to describe the geometry of + // cylinders. We need to use different + // objects for the inner and outer parts + // of the cylinder, with different radii; + // the second argument to the constructor + // indicates the axis around which the + // cylinder revolves -- in this case the + // z-axis. Note that the boundary objects + // need to live as long as the + // triangulation does; we can achieve + // this by making the objects static, + // which means that they live as long as + // the program runs: static const CylinderBoundary inner_cylinder (inner_radius, 2); static const CylinderBoundary outer_cylinder (outer_radius, 2); - // We then attach these two objects to - // the triangulation, and make them - // correspond to boundary indicators 2 - // and 3: + // We then attach these two objects to + // the triangulation, and make them + // correspond to boundary indicators 2 + // and 3: triangulation.set_boundary (2, inner_cylinder); triangulation.set_boundary (3, outer_cylinder); - // There's one more thing we have to take - // care of (we should have done so above - // already, but for didactic reasons it - // was more appropriate to handle it - // after discussing boundary - // objects). 
%Boundary indicators in - // deal.II, for mostly historic reasons, - // serve a dual purpose: they describe - // the type of a boundary for other - // places in a program where different - // boundary conditions are implemented; - // and they describe which boundary - // object (as the ones associated above) - // should be queried when new boundary - // points need to be placed upon mesh - // refinement. In the prefix to this - // function, we have discussed the - // boundary condition issue, and the - // boundary geometry issue was mentioned - // just above. But there is a case where - // we have to be careful with geometry: - // what happens if a cell is refined that - // has two faces with different boundary - // indicators? For example one at the - // edges of the cylinder? In that case, - // the library wouldn't know where to put - // new points in the middle of edges (one - // of the twelve lines of a - // hexahedron). In fact, the library - // doesn't even care about the boundary - // indicator of adjacent faces when - // refining edges: it considers the - // boundary indicators associated with - // the edges themselves. So what do we - // want to happen with the edges of the - // cylinder shell: they sit on both faces - // with boundary indicators 2 or 3 (inner - // or outer shell) and 0 or 1 (for which - // no boundary objects have been - // specified, and for which the library - // therefore assumes straight - // lines). Obviously, we want these lines - // to follow the curved shells, so we - // have to assign all edges along faces - // with boundary indicators 2 or 3 these - // same boundary indicators to make sure - // they are refined using the appropriate - // geometry objects. This is easily done: + // There's one more thing we have to take + // care of (we should have done so above + // already, but for didactic reasons it + // was more appropriate to handle it + // after discussing boundary + // objects). %Boundary indicators in + // deal.II, for mostly historic reasons, + // serve a dual purpose: they describe + // the type of a boundary for other + // places in a program where different + // boundary conditions are implemented; + // and they describe which boundary + // object (as the ones associated above) + // should be queried when new boundary + // points need to be placed upon mesh + // refinement. In the prefix to this + // function, we have discussed the + // boundary condition issue, and the + // boundary geometry issue was mentioned + // just above. But there is a case where + // we have to be careful with geometry: + // what happens if a cell is refined that + // has two faces with different boundary + // indicators? For example one at the + // edges of the cylinder? In that case, + // the library wouldn't know where to put + // new points in the middle of edges (one + // of the twelve lines of a + // hexahedron). In fact, the library + // doesn't even care about the boundary + // indicator of adjacent faces when + // refining edges: it considers the + // boundary indicators associated with + // the edges themselves. So what do we + // want to happen with the edges of the + // cylinder shell: they sit on both faces + // with boundary indicators 2 or 3 (inner + // or outer shell) and 0 or 1 (for which + // no boundary objects have been + // specified, and for which the library + // therefore assumes straight + // lines). 
Obviously, we want these lines + // to follow the curved shells, so we + // have to assign all edges along faces + // with boundary indicators 2 or 3 these + // same boundary indicators to make sure + // they are refined using the appropriate + // geometry objects. This is easily done: for (typename Triangulation::active_face_iterator face=triangulation.begin_active_face(); face!=triangulation.end_face(); ++face) if (face->at_boundary()) - if ((face->boundary_indicator() == 2) - || - (face->boundary_indicator() == 3)) - for (unsigned int edge = 0; edge::lines_per_face; - ++edge) - face->line(edge) - ->set_boundary_indicator (face->boundary_indicator()); - - // Once all this is done, we can refine - // the mesh once globally: + if ((face->boundary_indicator() == 2) + || + (face->boundary_indicator() == 3)) + for (unsigned int edge = 0; edge::lines_per_face; + ++edge) + face->line(edge) + ->set_boundary_indicator (face->boundary_indicator()); + + // Once all this is done, we can refine + // the mesh once globally: triangulation.refine_global (1); - + // As the final step, we need to // set up a clean state of the @@ -1245,13 +1237,13 @@ namespace QuasiStaticElasticity // the following two function // calls: GridTools::partition_triangulation (n_mpi_processes, triangulation); - setup_quadrature_point_history (); + setup_quadrature_point_history (); } - - // @sect4{TopLevel::setup_system} + + // @sect4{TopLevel::setup_system} // The next function is the one // that sets up the data structures @@ -1282,28 +1274,28 @@ namespace QuasiStaticElasticity dof_handler.distribute_dofs (fe); DoFRenumbering::subdomain_wise (dof_handler); - // The next thing is to store some - // information for later use on how many - // cells or degrees of freedom the - // present processor, or any of the - // processors has to work on. First the - // cells local to this processor... + // The next thing is to store some + // information for later use on how many + // cells or degrees of freedom the + // present processor, or any of the + // processors has to work on. First the + // cells local to this processor... n_local_cells = GridTools::count_cells_with_subdomain_association (triangulation, this_mpi_process); - // ...and then a list of numbers of how - // many degrees of freedom each processor - // has to handle: + // ...and then a list of numbers of how + // many degrees of freedom each processor + // has to handle: local_dofs_per_process.resize (n_mpi_processes); for (unsigned int i=0; iCompressedSparsityPattern class - // here that was already introduced in - // step-11, rather than the - // SparsityPattern class that we have - // used in all other cases. The reason - // for this is that for the latter class - // to work we have to give an initial - // upper bound for the number of entries - // in each row, a task that is - // traditionally done by - // DoFHandler::max_couplings_between_dofs(). However, - // this function suffers from a serious - // problem: it has to compute an upper - // bound to the number of nonzero entries - // in each row, and this is a rather - // complicated task, in particular in - // 3d. In effect, while it is quite - // accurate in 2d, it often comes up with - // much too large a number in 3d, and in - // that case the SparsityPattern - // allocates much too much memory at - // first, often several 100 MBs. 
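  // (The hunk above cuts out the unchanged lines that actually build
  // this object; they presumably look something like the following
  // sketch:
  CompressedSparsityPattern sparsity_pattern (dof_handler.n_dofs(),
                                              dof_handler.n_dofs());
  DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern);
  hanging_node_constraints.condense (sparsity_pattern);
  // Returning now to the classical SparsityPattern and its overly
  // generous up-front allocation:)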
This is - // later corrected when - // DoFTools::make_sparsity_pattern is - // called and we realize that we don't - // need all that much memory, but at time - // it is already too late: for large - // problems, the temporary allocation of - // too much memory can lead to - // out-of-memory situations. - // - // In order to avoid this, we resort to - // the CompressedSparsityPattern - // class that is slower but does not - // require any up-front estimate on the - // number of nonzero entries per row. It - // therefore only ever allocates as much - // memory as it needs at any given time, - // and we can build it even for large 3d - // problems. - // - // It is also worth noting that the - // sparsity pattern we construct is - // global, i.e. comprises all degrees of - // freedom whether they will be owned by - // the processor we are on or another one - // (in case this program is run in - // %parallel via MPI). This of course is - // not optimal -- it limits the size of - // the problems we can solve, since - // storing the entire sparsity pattern - // (even if only for a short time) on - // each processor does not scale - // well. However, there are several more - // places in the program in which we do - // this, for example we always keep the - // global triangulation and DoF handler - // objects around, even if we only work - // on part of them. At present, deal.II - // does not have the necessary facilities - // to completely distribute these objects - // (a task that, indeed, is very hard to - // achieve with adaptive meshes, since - // well-balanced subdivisions of a domain - // tend to become unbalanced as the mesh - // is adaptively refined). - // - // With this data structure, we can then - // go to the PETSc sparse matrix and tell - // it to pre-allocate all the entries we - // will later want to write to: + // Note that we have used the + // CompressedSparsityPattern class + // here that was already introduced in + // step-11, rather than the + // SparsityPattern class that we have + // used in all other cases. The reason + // for this is that for the latter class + // to work we have to give an initial + // upper bound for the number of entries + // in each row, a task that is + // traditionally done by + // DoFHandler::max_couplings_between_dofs(). However, + // this function suffers from a serious + // problem: it has to compute an upper + // bound to the number of nonzero entries + // in each row, and this is a rather + // complicated task, in particular in + // 3d. In effect, while it is quite + // accurate in 2d, it often comes up with + // much too large a number in 3d, and in + // that case the SparsityPattern + // allocates much too much memory at + // first, often several 100 MBs. This is + // later corrected when + // DoFTools::make_sparsity_pattern is + // called and we realize that we don't + // need all that much memory, but at time + // it is already too late: for large + // problems, the temporary allocation of + // too much memory can lead to + // out-of-memory situations. + // + // In order to avoid this, we resort to + // the CompressedSparsityPattern + // class that is slower but does not + // require any up-front estimate on the + // number of nonzero entries per row. It + // therefore only ever allocates as much + // memory as it needs at any given time, + // and we can build it even for large 3d + // problems. + // + // It is also worth noting that the + // sparsity pattern we construct is + // global, i.e. 
comprises all degrees of + // freedom whether they will be owned by + // the processor we are on or another one + // (in case this program is run in + // %parallel via MPI). This of course is + // not optimal -- it limits the size of + // the problems we can solve, since + // storing the entire sparsity pattern + // (even if only for a short time) on + // each processor does not scale + // well. However, there are several more + // places in the program in which we do + // this, for example we always keep the + // global triangulation and DoF handler + // objects around, even if we only work + // on part of them. At present, deal.II + // does not have the necessary facilities + // to completely distribute these objects + // (a task that, indeed, is very hard to + // achieve with adaptive meshes, since + // well-balanced subdivisions of a domain + // tend to become unbalanced as the mesh + // is adaptively refined). + // + // With this data structure, we can then + // go to the PETSc sparse matrix and tell + // it to pre-allocate all the entries we + // will later want to write to: system_matrix.reinit (mpi_communicator, sparsity_pattern, local_dofs_per_process, local_dofs_per_process, this_mpi_process); - // After this point, no further explicit - // knowledge of the sparsity pattern is - // required any more and we can let the - // sparsity_pattern variable go out - // of scope without any problem. - - // The last task in this function - // is then only to reset the - // right hand side vector as well - // as the solution vector to its - // correct size; remember that - // the solution vector is a local - // one, unlike the right hand - // side that is a distributed - // %parallel one and therefore - // needs to know the MPI - // communicator over which it is - // supposed to transmit messages: + // After this point, no further explicit + // knowledge of the sparsity pattern is + // required any more and we can let the + // sparsity_pattern variable go out + // of scope without any problem. + + // The last task in this function + // is then only to reset the + // right hand side vector as well + // as the solution vector to its + // correct size; remember that + // the solution vector is a local + // one, unlike the right hand + // side that is a distributed + // %parallel one and therefore + // needs to know the MPI + // communicator over which it is + // supposed to transmit messages: system_rhs.reinit (mpi_communicator, dof_handler.n_dofs(), n_local_dofs); incremental_displacement.reinit (dof_handler.n_dofs()); } - // @sect4{TopLevel::assemble_system} - - // Again, assembling the system - // matrix and right hand side - // follows the same structure as in - // many example programs before. In - // particular, it is mostly - // equivalent to step-17, except - // for the different right hand - // side that now only has to take - // into account internal - // stresses. In addition, - // assembling the matrix is made - // significantly more transparent - // by using the SymmetricTensor - // class: note the elegance of - // forming the scalar products of - // symmetric tensors of rank 2 and - // 4. The implementation is also - // more general since it is - // independent of the fact that we - // may or may not be using an - // isotropic elasticity tensor. 
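// [Editorial aside -- not part of the patch.] The "elegance" referred to
// in the comment above is that, with SymmetricTensor objects, one entry
// of the elastic cell matrix can be written as a plain product. A hedged
// sketch of a single contribution; in the program itself the two strain
// tensors come from get_strain(fe_values, i, q_point) and the rank-4
// tensor is the stress_strain_tensor member:
template <int dim>
double sketch_elastic_matrix_entry (const SymmetricTensor<4,dim> &stress_strain_tensor,
                                    const SymmetricTensor<2,dim> &eps_phi_i,
                                    const SymmetricTensor<2,dim> &eps_phi_j,
                                    const double                  JxW)
{
  // The product of a rank-2 with a rank-4 symmetric tensor and the final
  // product of two rank-2 tensors are both double contractions, so the
  // expression below is the scalar eps_phi_i : C : eps_phi_j, weighted
  // by the quadrature factor:
  return (eps_phi_i * stress_strain_tensor * eps_phi_j) * JxW;
}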
- // - // The first part of the assembly routine - // is as always: + // @sect4{TopLevel::assemble_system} + + // Again, assembling the system + // matrix and right hand side + // follows the same structure as in + // many example programs before. In + // particular, it is mostly + // equivalent to step-17, except + // for the different right hand + // side that now only has to take + // into account internal + // stresses. In addition, + // assembling the matrix is made + // significantly more transparent + // by using the SymmetricTensor + // class: note the elegance of + // forming the scalar products of + // symmetric tensors of rank 2 and + // 4. The implementation is also + // more general since it is + // independent of the fact that we + // may or may not be using an + // isotropic elasticity tensor. + // + // The first part of the assembly routine + // is as always: template - void TopLevel::assemble_system () + void TopLevel::assemble_system () { system_rhs = 0; system_matrix = 0; - FEValues fe_values (fe, quadrature_formula, + FEValues fe_values (fe, quadrature_formula, update_values | update_gradients | - update_quadrature_points | update_JxW_values); + update_quadrature_points | update_JxW_values); const unsigned int dofs_per_cell = fe.dofs_per_cell; const unsigned int n_q_points = quadrature_formula.size(); @@ -1494,11 +1486,11 @@ namespace QuasiStaticElasticity BodyForce body_force; std::vector > body_force_values (n_q_points, - Vector(dim)); + Vector(dim)); - // As in step-17, we only need to loop - // over all cells that belong to the - // present processor: + // As in step-17, we only need to loop + // over all cells that belong to the + // present processor: typename DoFHandler::active_cell_iterator cell = dof_handler.begin_active(), endc = dof_handler.end(); @@ -1510,25 +1502,25 @@ namespace QuasiStaticElasticity fe_values.reinit (cell); - // Then loop over all indices i,j - // and quadrature points and - // assemble the system matrix - // contributions from this cell. - // Note how we extract the - // symmetric gradients (strains) of - // the shape functions at a given - // quadrature point from the - // FEValues object, and the - // elegance with which we form the - // triple contraction eps_phi_i : - // C : eps_phi_j; the latter - // needs to be compared to the - // clumsy computations needed in - // step-17, both in the - // introduction as well as in the - // respective place in the program: + // Then loop over all indices i,j + // and quadrature points and + // assemble the system matrix + // contributions from this cell. 
+ // Note how we extract the + // symmetric gradients (strains) of + // the shape functions at a given + // quadrature point from the + // FEValues object, and the + // elegance with which we form the + // triple contraction eps_phi_i : + // C : eps_phi_j; the latter + // needs to be compared to the + // clumsy computations needed in + // step-17, both in the + // introduction as well as in the + // respective place in the program: for (unsigned int i=0; i *local_quadrature_points_data - = reinterpret_cast*>(cell->user_pointer()); - // In addition, we need the values - // of the external body forces at - // the quadrature points on this - // cell: - body_force.vector_value_list (fe_values.get_quadrature_points(), - body_force_values); - // Then we can loop over all - // degrees of freedom on this cell - // and compute local contributions - // to the right hand side: - for (unsigned int i=0; i *local_quadrature_points_data + = reinterpret_cast*>(cell->user_pointer()); + // In addition, we need the values + // of the external body forces at + // the quadrature points on this + // cell: + body_force.vector_value_list (fe_values.get_quadrature_points(), + body_force_values); + // Then we can loop over all + // degrees of freedom on this cell + // and compute local contributions + // to the right hand side: + for (unsigned int i=0; i &old_stress = local_quadrature_points_data[q_point].old_stress; - + cell_rhs(i) += (body_force_values[q_point](component_i) * fe_values.shape_value (i,q_point) - - old_stress * + old_stress * get_strain (fe_values,i,q_point)) - * + * fe_values.JxW (q_point); } } @@ -1596,7 +1588,7 @@ namespace QuasiStaticElasticity // done exactly as in step-17: cell->get_dof_indices (local_dof_indices); - hanging_node_constraints + hanging_node_constraints .distribute_local_to_global (cell_matrix, local_dof_indices, system_matrix); @@ -1627,7 +1619,7 @@ namespace QuasiStaticElasticity // vector in the form of a // temporary vector which we then // copy into the sequential one. - + // We make up for this // complication by showing how // boundary values can be used @@ -1695,46 +1687,46 @@ namespace QuasiStaticElasticity std::map boundary_values; VectorTools:: interpolate_boundary_values (dof_handler, - 0, - ZeroFunction (dim), - boundary_values); + 0, + ZeroFunction (dim), + boundary_values); VectorTools:: interpolate_boundary_values (dof_handler, - 1, - IncrementalBoundaryValues(present_time, - present_timestep), - boundary_values, + 1, + IncrementalBoundaryValues(present_time, + present_timestep), + boundary_values, z_component); - + PETScWrappers::MPI::Vector tmp (mpi_communicator, dof_handler.n_dofs(), n_local_dofs); MatrixTools::apply_boundary_values (boundary_values, - system_matrix, tmp, - system_rhs, false); + system_matrix, tmp, + system_rhs, false); incremental_displacement = tmp; } - // @sect4{TopLevel::solve_timestep} + // @sect4{TopLevel::solve_timestep} - // The next function is the one that - // controls what all has to happen within a - // timestep. The order of things should be - // relatively self-explanatory from the - // function names: + // The next function is the one that + // controls what all has to happen within a + // timestep. The order of things should be + // relatively self-explanatory from the + // function names: template void TopLevel::solve_timestep () { pcout << " Assembling system..." 
<< std::flush; assemble_system (); pcout << " norm of rhs is " << system_rhs.l2_norm() - << std::endl; - + << std::endl; + const unsigned int n_iterations = solve_linear_problem (); - + pcout << " Solver converged in " << n_iterations - << " iterations." << std::endl; + << " iterations." << std::endl; pcout << " Updating quadrature point data..." << std::flush; update_quadrature_point_history (); @@ -1743,43 +1735,43 @@ namespace QuasiStaticElasticity - // @sect4{TopLevel::solve_linear_problem} - - // Solving the linear system again - // works mostly as before. The only - // difference is that we want to - // only keep a complete local copy - // of the solution vector instead - // of the distributed one that we - // get as output from PETSc's - // solver routines. To this end, we - // declare a local temporary - // variable for the distributed - // vector and initialize it with - // the contents of the local - // variable (remember that the - // apply_boundary_values - // function called in - // assemble_system preset the - // values of boundary nodes in this - // vector), solve with it, and at - // the end of the function copy it - // again into the complete local - // vector that we declared as a - // member variable. Hanging node - // constraints are then distributed - // only on the local copy, - // i.e. independently of each other - // on each of the processors: + // @sect4{TopLevel::solve_linear_problem} + + // Solving the linear system again + // works mostly as before. The only + // difference is that we want to + // only keep a complete local copy + // of the solution vector instead + // of the distributed one that we + // get as output from PETSc's + // solver routines. To this end, we + // declare a local temporary + // variable for the distributed + // vector and initialize it with + // the contents of the local + // variable (remember that the + // apply_boundary_values + // function called in + // assemble_system preset the + // values of boundary nodes in this + // vector), solve with it, and at + // the end of the function copy it + // again into the complete local + // vector that we declared as a + // member variable. Hanging node + // constraints are then distributed + // only on the local copy, + // i.e. independently of each other + // on each of the processors: template - unsigned int TopLevel::solve_linear_problem () + unsigned int TopLevel::solve_linear_problem () { PETScWrappers::MPI::Vector distributed_incremental_displacement (mpi_communicator, dof_handler.n_dofs(), n_local_dofs); distributed_incremental_displacement = incremental_displacement; - + SolverControl solver_control (dof_handler.n_dofs(), 1e-16*system_rhs.l2_norm()); PETScWrappers::SolverCG cg (solver_control, @@ -1793,7 +1785,7 @@ namespace QuasiStaticElasticity incremental_displacement = distributed_incremental_displacement; hanging_node_constraints.distribute (incremental_displacement); - + return solver_control.last_step(); } @@ -1812,63 +1804,63 @@ namespace QuasiStaticElasticity // file in any of the supported // output files, as mentioned in // the introduction. - // - // The crucial part of this function is to - // give the DataOut class a way to only - // work on the cells that the present - // process owns. This class is already - // well-equipped for that: it has two - // virtual functions first_cell and - // next_cell that return the first cell - // to be worked on, and given one cell - // return the next cell to be worked on. 
By - // default, these functions return the - // first active cell (i.e. the first one - // that has no children) and the next - // active cell. What we have to do here is - // derive a class from DataOut that - // overloads these two functions to only - // iterate over those cells with the right - // subdomain indicator. - // - // We do this at the beginning of this - // function. The first_cell function - // just starts with the first active cell, - // and then iterates to the next cells - // while the cell presently under - // consideration does not yet have the - // correct subdomain id. The only thing - // that needs to be taken care of is that - // we don't try to keep iterating when we - // have hit the end iterator. - // - // The next_cell function could be - // implemented in a similar way. However, - // we use this occasion as a pretext to - // introduce one more thing that the - // library offers: filtered - // iterators. These are wrappers for the - // iterator classes that just skip all - // cells (or faces, lines, etc) that do not - // satisfy a certain predicate (a predicate - // in computer-lingo is a function that - // when applied to a data element either - // returns true or false). In the present - // case, the predicate is that the cell has - // to have a certain subdomain id, and the - // library already has this predicate built - // in. If the cell iterator is not the end - // iterator, what we then have to do is to - // initialize such a filtered iterator with - // the present cell and the predicate, and - // then increase the iterator exactly - // once. While the more conventional loop - // would probably not have been much - // longer, this is definitely the more - // elegant way -- and then, these example - // programs also serve the purpose of - // introducing what is available in - // deal.II. + // + // The crucial part of this function is to + // give the DataOut class a way to only + // work on the cells that the present + // process owns. This class is already + // well-equipped for that: it has two + // virtual functions first_cell and + // next_cell that return the first cell + // to be worked on, and given one cell + // return the next cell to be worked on. By + // default, these functions return the + // first active cell (i.e. the first one + // that has no children) and the next + // active cell. What we have to do here is + // derive a class from DataOut that + // overloads these two functions to only + // iterate over those cells with the right + // subdomain indicator. + // + // We do this at the beginning of this + // function. The first_cell function + // just starts with the first active cell, + // and then iterates to the next cells + // while the cell presently under + // consideration does not yet have the + // correct subdomain id. The only thing + // that needs to be taken care of is that + // we don't try to keep iterating when we + // have hit the end iterator. + // + // The next_cell function could be + // implemented in a similar way. However, + // we use this occasion as a pretext to + // introduce one more thing that the + // library offers: filtered + // iterators. These are wrappers for the + // iterator classes that just skip all + // cells (or faces, lines, etc) that do not + // satisfy a certain predicate (a predicate + // in computer-lingo is a function that + // when applied to a data element either + // returns true or false). 
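// [Editorial aside -- not part of the patch.] A small sketch of the
// filtered iterator facility described above, used directly in a loop
// rather than hidden inside a DataOut subclass. The dof_handler and
// this_mpi_process names mirror the members used elsewhere in this
// program; the loop simply counts the cells owned by this process:
template <int dim>
unsigned int sketch_count_our_cells (const DoFHandler<dim> &dof_handler,
                                     const unsigned int     this_mpi_process)
{
  const IteratorFilters::SubdomainEqualTo predicate (this_mpi_process);

  // The FilteredIterator wraps the usual active cell iterator and, on
  // construction and on every increment, silently skips all cells for
  // which the predicate returns false:
  FilteredIterator<typename DoFHandler<dim>::active_cell_iterator>
    cell (predicate, dof_handler.begin_active()),
    endc (predicate, dof_handler.end());

  unsigned int n_our_cells = 0;
  for (; cell!=endc; ++cell)
    ++n_our_cells;

  return n_our_cells;
}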
In the present + // case, the predicate is that the cell has + // to have a certain subdomain id, and the + // library already has this predicate built + // in. If the cell iterator is not the end + // iterator, what we then have to do is to + // initialize such a filtered iterator with + // the present cell and the predicate, and + // then increase the iterator exactly + // once. While the more conventional loop + // would probably not have been much + // longer, this is definitely the more + // elegant way -- and then, these example + // programs also serve the purpose of + // introducing what is available in + // deal.II. template class FilteredDataOut : public DataOut { @@ -1877,7 +1869,7 @@ namespace QuasiStaticElasticity : subdomain_id (subdomain_id) {} - + virtual typename DoFHandler::cell_iterator first_cell () { @@ -1886,10 +1878,10 @@ namespace QuasiStaticElasticity while ((cell != this->dofs->end()) && (cell->subdomain_id() != subdomain_id)) ++cell; - + return cell; } - + virtual typename DoFHandler::cell_iterator next_cell (const typename DoFHandler::cell_iterator &old_cell) { @@ -1897,7 +1889,7 @@ namespace QuasiStaticElasticity { const IteratorFilters::SubdomainEqualTo predicate(subdomain_id); - + return ++(FilteredIterator ::active_cell_iterator> @@ -1906,35 +1898,35 @@ namespace QuasiStaticElasticity else return old_cell; } - + private: const unsigned int subdomain_id; }; - + template void TopLevel::output_results () const { - // With this newly defined class, declare - // an object that is going to generate - // the graphical output and attach the - // dof handler with it from which to get - // the solution vector: + // With this newly defined class, declare + // an object that is going to generate + // the graphical output and attach the + // dof handler with it from which to get + // the solution vector: FilteredDataOut data_out(this_mpi_process); data_out.attach_dof_handler (dof_handler); - // Then, just as in step-17, define the - // names of solution variables (which - // here are the displacement increments) - // and queue the solution vector for - // output. Note in the following switch - // how we make sure that if the space - // dimension should be unhandled that we - // throw an exception saying that we - // haven't implemented this case yet - // (another case of defensive - // programming): + // Then, just as in step-17, define the + // names of solution variables (which + // here are the displacement increments) + // and queue the solution vector for + // output. Note in the following switch + // how we make sure that if the space + // dimension should be unhandled that we + // throw an exception saying that we + // haven't implemented this case yet + // (another case of defensive + // programming): std::vector solution_names; switch (dim) { @@ -1958,74 +1950,74 @@ namespace QuasiStaticElasticity solution_names); - // The next thing is that we wanted to - // output something like the average norm - // of the stresses that we have stored in - // each cell. This may seem complicated, - // since on the present processor we only - // store the stresses in quadrature - // points on those cells that actually - // belong to the present process. In - // other words, it seems as if we can't - // compute the average stresses for all - // cells. However, remember that our - // class derived from DataOut only - // iterates over those cells that - // actually do belong to the present - // processor, i.e. 
we don't have to - // compute anything for all the other - // cells as this information would not be - // touched. The following little loop - // does this. We enclose the entire block - // into a pair of braces to make sure - // that the iterator variables do not - // remain accidentally visible beyond the - // end of the block in which they are - // used: + // The next thing is that we wanted to + // output something like the average norm + // of the stresses that we have stored in + // each cell. This may seem complicated, + // since on the present processor we only + // store the stresses in quadrature + // points on those cells that actually + // belong to the present process. In + // other words, it seems as if we can't + // compute the average stresses for all + // cells. However, remember that our + // class derived from DataOut only + // iterates over those cells that + // actually do belong to the present + // processor, i.e. we don't have to + // compute anything for all the other + // cells as this information would not be + // touched. The following little loop + // does this. We enclose the entire block + // into a pair of braces to make sure + // that the iterator variables do not + // remain accidentally visible beyond the + // end of the block in which they are + // used: Vector norm_of_stress (triangulation.n_active_cells()); { - // Loop over all the cells... + // Loop over all the cells... typename Triangulation::active_cell_iterator - cell = triangulation.begin_active(), - endc = triangulation.end(); + cell = triangulation.begin_active(), + endc = triangulation.end(); for (unsigned int index=0; cell!=endc; ++cell, ++index) - // ... and pick those that are - // relevant to us: - if (cell->subdomain_id() == this_mpi_process) - { - // On these cells, add up the - // stresses over all quadrature - // points... - SymmetricTensor<2,dim> accumulated_stress; - for (unsigned int q=0; - q*>(cell->user_pointer())[q] - .old_stress; - - // ...then write the norm of the - // average to their destination: - norm_of_stress(index) - = (accumulated_stress / - quadrature_formula.size()).norm(); - } - // And on the cells that we are not - // interested in, set the respective - // value in the vector to a bogus value - // (norms must be positive, and a large - // negative value should catch your - // eye) in order to make sure that if - // we were somehow wrong about our - // assumption that these elements would - // not appear in the output file, that - // we would find out by looking at the - // graphical output: - else - norm_of_stress(index) = -1e+20; + // ... and pick those that are + // relevant to us: + if (cell->subdomain_id() == this_mpi_process) + { + // On these cells, add up the + // stresses over all quadrature + // points... 
+ SymmetricTensor<2,dim> accumulated_stress; + for (unsigned int q=0; + q*>(cell->user_pointer())[q] + .old_stress; + + // ...then write the norm of the + // average to their destination: + norm_of_stress(index) + = (accumulated_stress / + quadrature_formula.size()).norm(); + } + // And on the cells that we are not + // interested in, set the respective + // value in the vector to a bogus value + // (norms must be positive, and a large + // negative value should catch your + // eye) in order to make sure that if + // we were somehow wrong about our + // assumption that these elements would + // not appear in the output file, that + // we would find out by looking at the + // graphical output: + else + norm_of_stress(index) = -1e+20; } - // Finally attach this vector as well to - // be treated for output: + // Finally attach this vector as well to + // be treated for output: data_out.add_data_vector (norm_of_stress, "norm_of_stress"); // As a last piece of data, let @@ -2051,7 +2043,7 @@ namespace QuasiStaticElasticity // vectors: data_out.build_patches (); - + // Now that we have generated the // intermediate format, let us // determine the name of the file @@ -2131,37 +2123,37 @@ namespace QuasiStaticElasticity data_out.write_deal_II_intermediate (output); } - - - // @sect4{TopLevel::do_initial_timestep} - - // This and the next function handle the - // overall structure of the first and - // following timesteps, respectively. The - // first timestep is slightly more involved - // because we want to compute it multiple - // times on successively refined meshes, - // each time starting from a clean - // state. At the end of these computations, - // in which we compute the incremental - // displacements each time, we use the last - // results obtained for the incremental - // displacements to compute the resulting - // stress updates and move the mesh - // accordingly. On this new mesh, we then - // output the solution and any additional - // data we consider important. - // - // All this is interspersed by generating - // output to the console to update the - // person watching the screen on what is - // going on. As in step-17, the use of - // pcout instead of std::cout makes - // sure that only one of the parallel - // processes is actually writing to the - // console, without having to explicitly - // code an if-statement in each place where - // we generate output: + + + // @sect4{TopLevel::do_initial_timestep} + + // This and the next function handle the + // overall structure of the first and + // following timesteps, respectively. The + // first timestep is slightly more involved + // because we want to compute it multiple + // times on successively refined meshes, + // each time starting from a clean + // state. At the end of these computations, + // in which we compute the incremental + // displacements each time, we use the last + // results obtained for the incremental + // displacements to compute the resulting + // stress updates and move the mesh + // accordingly. On this new mesh, we then + // output the solution and any additional + // data we consider important. + // + // All this is interspersed by generating + // output to the console to update the + // person watching the screen on what is + // going on. 
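// [Editorial aside -- not part of the patch.] The pcout object used in
// the functions above and below is, as in step-17, a ConditionalOStream
// member of the TopLevel class; its declaration lies outside the hunks
// shown in this patch. A minimal sketch of the idiom, with the condition
// chosen so that only the first MPI process actually writes:
void sketch_conditional_output (const unsigned int this_mpi_process)
{
  ConditionalOStream pcout (std::cout, this_mpi_process == 0);

  // Used like any other stream, but output is forwarded to std::cout
  // only on the process for which the condition above was true:
  pcout << "    Solver converged." << std::endl;
}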
As in step-17, the use of + // pcout instead of std::cout makes + // sure that only one of the parallel + // processes is actually writing to the + // console, without having to explicitly + // code an if-statement in each place where + // we generate output: template void TopLevel::do_initial_timestep () { @@ -2169,7 +2161,7 @@ namespace QuasiStaticElasticity ++timestep_no; pcout << "Timestep " << timestep_no << " at time " << present_time << std::endl; - + for (unsigned int cycle=0; cycle<2; ++cycle) { pcout << " Cycle " << cycle << ':' << std::endl; @@ -2208,14 +2200,14 @@ namespace QuasiStaticElasticity pcout << std::endl; } - - // @sect4{TopLevel::do_timestep} - // Subsequent timesteps are simpler, and - // probably do not require any more - // documentation given the explanations for - // the previous function above: + // @sect4{TopLevel::do_timestep} + + // Subsequent timesteps are simpler, and + // probably do not require any more + // documentation given the explanations for + // the previous function above: template void TopLevel::do_timestep () { @@ -2229,7 +2221,7 @@ namespace QuasiStaticElasticity present_time = end_time; } - + solve_timestep (); move_mesh (); @@ -2239,8 +2231,8 @@ namespace QuasiStaticElasticity } - // @sect4{TopLevel::refine_initial_grid} - + // @sect4{TopLevel::refine_initial_grid} + // The following function is called when // solving the first time step on // successively refined meshes. After each @@ -2251,8 +2243,8 @@ namespace QuasiStaticElasticity template void TopLevel::refine_initial_grid () { - // First, let each process compute error - // indicators for the cells it owns: + // First, let each process compute error + // indicators for the cells it owns: Vector error_per_cell (triangulation.n_active_cells()); KellyErrorEstimator::estimate (dof_handler, QGauss(2), @@ -2264,174 +2256,174 @@ namespace QuasiStaticElasticity multithread_info.n_default_threads, this_mpi_process); - // Then set up a global vector into which - // we merge the local indicators from - // each of the %parallel processes: + // Then set up a global vector into which + // we merge the local indicators from + // each of the %parallel processes: const unsigned int n_local_cells = GridTools::count_cells_with_subdomain_association (triangulation, this_mpi_process); PETScWrappers::MPI::Vector distributed_error_per_cell (mpi_communicator, - triangulation.n_active_cells(), - n_local_cells); - + triangulation.n_active_cells(), + n_local_cells); + for (unsigned int i=0; icell-@>vertex_dof_index(v,d) function - // that returns the index of the dth - // degree of freedom at vertex v of the - // given cell. In the present case, - // displacement in the k-th coordinate - // direction corresonds to the kth - // component of the finite element. Using a - // function like this bears a certain risk, - // because it uses knowledge of the order - // of elements that we have taken together - // for this program in the FESystem - // element. If we decided to add an - // additional variable, for example a - // pressure variable for stabilization, and - // happened to insert it as the first - // variable of the element, then the - // computation below will start to produce - // non-sensical results. In addition, this - // computation rests on other assumptions: - // first, that the element we use has, - // indeed, degrees of freedom that are - // associated with vertices. This is indeed - // the case for the present Q1 element, as - // would be for all Qp elements of - // polynomial order p. 
However, it - // would not hold for discontinuous - // elements, or elements for mixed - // formulations. Secondly, it also rests on - // the assumption that the displacement at - // a vertex is determined solely by the - // value of the degree of freedom - // associated with this vertex; in other - // words, all shape functions corresponding - // to other degrees of freedom are zero at - // this particular vertex. Again, this is - // the case for the present element, but is - // not so for all elements that are - // presently available in deal.II. Despite - // its risks, we choose to use this way in - // order to present a way to query - // individual degrees of freedom associated - // with vertices. - // - // In this context, it is instructive to - // point out what a more general way would - // be. For general finite elements, the way - // to go would be to take a quadrature - // formula with the quadrature points in - // the vertices of a cell. The QTrapez - // formula for the trapezoidal rule does - // exactly this. With this quadrature - // formula, we would then initialize an - // FEValues object in each cell, and - // use the - // FEValues::get_function_values - // function to obtain the values of the - // solution function in the quadrature - // points, i.e. the vertices of the - // cell. These are the only values that we - // really need, i.e. we are not at all - // interested in the weights (or the - // JxW values) associated with this - // particular quadrature formula, and this - // can be specified as the last argument in - // the constructor to FEValues. The - // only point of minor inconvenience in - // this scheme is that we have to figure - // out which quadrature point corresponds - // to the vertex we consider at present, as - // they may or may not be ordered in the - // same order. - // - // Another point worth explaining about - // this short function is the way in which - // the triangulation class exports - // information about its vertices: through - // the Triangulation::n_vertices - // function, it advertises how many - // vertices there are in the - // triangulation. Not all of them are - // actually in use all the time -- some are - // left-overs from cells that have been - // coarsened previously and remain in - // existence since deal.II never changes - // the number of a vertex once it has come - // into existence, even if vertices with - // lower number go away. Secondly, the - // location returned by cell-@>vertex(v) - // is not only a read-only object of type - // Point@, but in fact a reference - // that can be written to. This allows to - // move around the nodes of a mesh with - // relative ease, but it is worth pointing - // out that it is the responsibility of an - // application program using this feature - // to make sure that the resulting cells - // are still useful, i.e. are not distorted - // so much that the cell is degenerated - // (indicated, for example, by negative - // Jacobians). Note that we do not have any - // provisions in this function to actually - // ensure this, we just have faith. - // - // After this lengthy introduction, here - // are the full 20 or so lines of code: + + + + // @sect4{TopLevel::move_mesh} + + // At the end of each time step, we move + // the nodes of the mesh according to the + // incremental displacements computed in + // this time step. 
To do this, we keep a + // vector of flags that indicate for each + // vertex whether we have already moved it + // around, and then loop over all cells and + // move those vertices of the cell that + // have not been moved yet. It is worth + // noting that it does not matter from + // which of the cells adjacent to a vertex + // we move this vertex: since we compute + // the displacement using a continuous + // finite element, the displacement field + // is continuous as well and we can compute + // the displacement of a given vertex from + // each of the adjacent cells. We only have + // to make sure that we move each node + // exactly once, which is why we keep the + // vector of flags. + // + // There are two noteworthy things in this + // function. First, how we get the + // displacement field at a given vertex + // using the + // cell-@>vertex_dof_index(v,d) function + // that returns the index of the dth + // degree of freedom at vertex v of the + // given cell. In the present case, + // displacement in the k-th coordinate + // direction corresonds to the kth + // component of the finite element. Using a + // function like this bears a certain risk, + // because it uses knowledge of the order + // of elements that we have taken together + // for this program in the FESystem + // element. If we decided to add an + // additional variable, for example a + // pressure variable for stabilization, and + // happened to insert it as the first + // variable of the element, then the + // computation below will start to produce + // non-sensical results. In addition, this + // computation rests on other assumptions: + // first, that the element we use has, + // indeed, degrees of freedom that are + // associated with vertices. This is indeed + // the case for the present Q1 element, as + // would be for all Qp elements of + // polynomial order p. However, it + // would not hold for discontinuous + // elements, or elements for mixed + // formulations. Secondly, it also rests on + // the assumption that the displacement at + // a vertex is determined solely by the + // value of the degree of freedom + // associated with this vertex; in other + // words, all shape functions corresponding + // to other degrees of freedom are zero at + // this particular vertex. Again, this is + // the case for the present element, but is + // not so for all elements that are + // presently available in deal.II. Despite + // its risks, we choose to use this way in + // order to present a way to query + // individual degrees of freedom associated + // with vertices. + // + // In this context, it is instructive to + // point out what a more general way would + // be. For general finite elements, the way + // to go would be to take a quadrature + // formula with the quadrature points in + // the vertices of a cell. The QTrapez + // formula for the trapezoidal rule does + // exactly this. With this quadrature + // formula, we would then initialize an + // FEValues object in each cell, and + // use the + // FEValues::get_function_values + // function to obtain the values of the + // solution function in the quadrature + // points, i.e. the vertices of the + // cell. These are the only values that we + // really need, i.e. we are not at all + // interested in the weights (or the + // JxW values) associated with this + // particular quadrature formula, and this + // can be specified as the last argument in + // the constructor to FEValues. 
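// [Editorial aside -- not part of the patch.] A hedged sketch of the
// QTrapez-based alternative just described: evaluating a vector-valued
// solution at the vertices of one cell without using any knowledge of
// which degrees of freedom live where. The function and argument names
// are made up for illustration; only the library calls follow the real
// interface:
template <int dim>
void sketch_values_at_vertices (const FiniteElement<dim> &fe,
                                const typename DoFHandler<dim>::active_cell_iterator &cell,
                                const Vector<double>     &solution)
{
  // QTrapez places its quadrature points exactly at the vertices of the
  // reference cell; since we only need function values there, the
  // weights (and JxW values) are irrelevant and update_values suffices:
  const QTrapez<dim> vertex_quadrature;
  FEValues<dim>      fe_values (fe, vertex_quadrature, update_values);
  fe_values.reinit (cell);

  std::vector<Vector<double> > vertex_values (vertex_quadrature.size(),
                                              Vector<double>(dim));
  fe_values.get_function_values (solution, vertex_values);

  // Caveat from the text above: quadrature point q need not be vertex
  // number q of the cell, so the two numberings still have to be matched
  // up before vertex_values[q] is used.
}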
The + // only point of minor inconvenience in + // this scheme is that we have to figure + // out which quadrature point corresponds + // to the vertex we consider at present, as + // they may or may not be ordered in the + // same order. + // + // Another point worth explaining about + // this short function is the way in which + // the triangulation class exports + // information about its vertices: through + // the Triangulation::n_vertices + // function, it advertises how many + // vertices there are in the + // triangulation. Not all of them are + // actually in use all the time -- some are + // left-overs from cells that have been + // coarsened previously and remain in + // existence since deal.II never changes + // the number of a vertex once it has come + // into existence, even if vertices with + // lower number go away. Secondly, the + // location returned by cell-@>vertex(v) + // is not only a read-only object of type + // Point@, but in fact a reference + // that can be written to. This allows to + // move around the nodes of a mesh with + // relative ease, but it is worth pointing + // out that it is the responsibility of an + // application program using this feature + // to make sure that the resulting cells + // are still useful, i.e. are not distorted + // so much that the cell is degenerated + // (indicated, for example, by negative + // Jacobians). Note that we do not have any + // provisions in this function to actually + // ensure this, we just have faith. + // + // After this lengthy introduction, here + // are the full 20 or so lines of code: template void TopLevel::move_mesh () { @@ -2446,72 +2438,72 @@ namespace QuasiStaticElasticity if (vertex_touched[cell->vertex_index(v)] == false) { vertex_touched[cell->vertex_index(v)] = true; - + Point vertex_displacement; for (unsigned int d=0; dvertex_dof_index(v,d)); - + cell->vertex(v) += vertex_displacement; } } - // @sect4{TopLevel::setup_quadrature_point_history} - - // At the beginning of our computations, we - // needed to set up initial values of the - // history variables, such as the existing - // stresses in the material, that we store - // in each quadrature point. As mentioned - // above, we use the user_pointer for - // this that is available in each cell. - // - // To put this into larger perspective, we - // note that if we had previously available - // stresses in our model (which we assume - // do not exist for the purpose of this - // program), then we would need to - // interpolate the field of pre-existing - // stresses to the quadrature - // points. Likewise, if we were to simulate - // elasto-plastic materials with - // hardening/softening, then we would have - // to store additional history variables - // like the present yield stress of the - // accumulated plastic strains in each - // quadrature points. Pre-existing - // hardening or weakening would then be - // implemented by interpolating these - // variables in the present function as - // well. + // @sect4{TopLevel::setup_quadrature_point_history} + + // At the beginning of our computations, we + // needed to set up initial values of the + // history variables, such as the existing + // stresses in the material, that we store + // in each quadrature point. As mentioned + // above, we use the user_pointer for + // this that is available in each cell. 
+ // + // To put this into larger perspective, we + // note that if we had previously available + // stresses in our model (which we assume + // do not exist for the purpose of this + // program), then we would need to + // interpolate the field of pre-existing + // stresses to the quadrature + // points. Likewise, if we were to simulate + // elasto-plastic materials with + // hardening/softening, then we would have + // to store additional history variables + // like the present yield stress of the + // accumulated plastic strains in each + // quadrature points. Pre-existing + // hardening or weakening would then be + // implemented by interpolating these + // variables in the present function as + // well. template void TopLevel::setup_quadrature_point_history () { - // What we need to do here is to first - // count how many quadrature points are - // within the responsibility of this - // processor. This, of course, equals the - // number of cells that belong to this - // processor times the number of - // quadrature points our quadrature - // formula has on each cell. - // - // For good measure, we also set all user - // pointers of all cells, whether ours of - // not, to the null pointer. This way, if - // we ever access the user pointer of a - // cell which we should not have - // accessed, a segmentation fault will - // let us know that this should not have - // happened: + // What we need to do here is to first + // count how many quadrature points are + // within the responsibility of this + // processor. This, of course, equals the + // number of cells that belong to this + // processor times the number of + // quadrature points our quadrature + // formula has on each cell. + // + // For good measure, we also set all user + // pointers of all cells, whether ours of + // not, to the null pointer. This way, if + // we ever access the user pointer of a + // cell which we should not have + // accessed, a segmentation fault will + // let us know that this should not have + // happened: unsigned int our_cells = 0; for (typename Triangulation::active_cell_iterator cell = triangulation.begin_active(); cell != triangulation.end(); ++cell) if (cell->subdomain_id() == this_mpi_process) ++our_cells; - + triangulation.clear_user_data(); // Next, allocate as many quadrature @@ -2560,21 +2552,21 @@ namespace QuasiStaticElasticity history_index += quadrature_formula.size(); } - // At the end, for good measure make sure - // that our count of elements was correct - // and that we have both used up all - // objects we allocated previously, and - // not point to any objects beyond the - // end of the vector. Such defensive - // programming strategies are always good - // checks to avoid accidental errors and - // to guard against future changes to - // this function that forget to update - // all uses of a variable at the same - // time. Recall that constructs using the - // Assert macro are optimized away in - // optimized mode, so do not affect the - // run time of optimized runs: + // At the end, for good measure make sure + // that our count of elements was correct + // and that we have both used up all + // objects we allocated previously, and + // not point to any objects beyond the + // end of the vector. Such defensive + // programming strategies are always good + // checks to avoid accidental errors and + // to guard against future changes to + // this function that forget to update + // all uses of a variable at the same + // time. 
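// [Editorial aside -- not part of the patch.] The lines that actually
// attach the history data to the cells fall into the unchanged part of
// the file and are therefore not visible in this hunk. The following is
// only a sketch of the user_pointer mechanism described above: each cell
// we own is pointed at its slice of the (already correctly sized)
// quadrature_point_history array, and the pointer is cast back wherever
// the data is needed, as in assemble_system() and
// update_quadrature_point_history():
template <int dim>
void sketch_attach_history (Triangulation<dim>              &triangulation,
                            std::vector<PointHistory<dim> > &quadrature_point_history,
                            const unsigned int               n_q_points,
                            const unsigned int               this_mpi_process)
{
  unsigned int history_index = 0;
  for (typename Triangulation<dim>::active_cell_iterator
         cell = triangulation.begin_active();
       cell != triangulation.end(); ++cell)
    if (cell->subdomain_id() == this_mpi_process)
      {
        // Remember where this cell's first history entry lives...
        cell->set_user_pointer (&quadrature_point_history[history_index]);
        history_index += n_q_points;
      }

  // ...so that later a cell's data can be recovered with
  //   reinterpret_cast<PointHistory<dim>*>(cell->user_pointer())
  // exactly as done elsewhere in this program.
}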
Recall that constructs using the + // Assert macro are optimized away in + // optimized mode, so do not affect the + // run time of optimized runs: Assert (history_index == quadrature_point_history.size(), ExcInternalError()); } @@ -2582,113 +2574,113 @@ namespace QuasiStaticElasticity - // @sect4{TopLevel::update_quadrature_point_history} - - // At the end of each time step, we - // should have computed an - // incremental displacement update - // so that the material in its new - // configuration accomodates for - // the difference between the - // external body and boundary - // forces applied during this time - // step minus the forces exerted - // through pre-existing internal - // stresses. In order to have the - // pre-existing stresses available - // at the next time step, we - // therefore have to update the - // pre-existing stresses with the - // stresses due to the incremental - // displacement computed during the - // present time step. Ideally, the - // resulting sum of internal - // stresses would exactly counter - // all external forces. Indeed, a - // simple experiment can make sure - // that this is so: if we choose - // boundary conditions and body - // forces to be time independent, - // then the forcing terms (the sum - // of external forces and internal - // stresses) should be exactly - // zero. If you make this - // experiment, you will realize - // from the output of the norm of - // the right hand side in each time - // step that this is almost the - // case: it is not exactly zero, - // since in the first time step the - // incremental displacement and - // stress updates were computed - // relative to the undeformed mesh, - // which was then deformed. In the - // second time step, we again - // compute displacement and stress - // updates, but this time in the - // deformed mesh -- there, the - // resulting updates are very small - // but not quite zero. This can be - // iterated, and in each such - // iteration the residual, i.e. the - // norm of the right hand side - // vector, is reduced; if one makes - // this little experiment, one - // realizes that the norm of this - // residual decays exponentially - // with the number of iterations, - // and after an initial very rapid - // decline is reduced by roughly a - // factor of about 3.5 in each - // iteration (for one testcase I - // looked at, other testcases, and - // other numbers of unknowns change - // the factor, but not the - // exponential decay). - - // In a sense, this can then be considered - // as a quasi-timestepping scheme to - // resolve the nonlinear problem of solving - // large-deformation elasticity on a mesh - // that is moved along in a Lagrangian - // manner. - // - // Another complication is that the - // existing (old) stresses are defined on - // the old mesh, which we will move around - // after updating the stresses. If this - // mesh update involves rotations of the - // cell, then we need to also rotate the - // updated stress, since it was computed - // relative to the coordinate system of the - // old cell. - // - // Thus, what we need is the following: on - // each cell which the present processor - // owns, we need to extract the old stress - // from the data stored with each - // quadrature point, compute the stress - // update, add the two together, and then - // rotate the result together with the - // incremental rotation computed from the - // incremental displacement at the present - // quadrature point. 
We will detail these - // steps below: + // @sect4{TopLevel::update_quadrature_point_history} + + // At the end of each time step, we + // should have computed an + // incremental displacement update + // so that the material in its new + // configuration accomodates for + // the difference between the + // external body and boundary + // forces applied during this time + // step minus the forces exerted + // through pre-existing internal + // stresses. In order to have the + // pre-existing stresses available + // at the next time step, we + // therefore have to update the + // pre-existing stresses with the + // stresses due to the incremental + // displacement computed during the + // present time step. Ideally, the + // resulting sum of internal + // stresses would exactly counter + // all external forces. Indeed, a + // simple experiment can make sure + // that this is so: if we choose + // boundary conditions and body + // forces to be time independent, + // then the forcing terms (the sum + // of external forces and internal + // stresses) should be exactly + // zero. If you make this + // experiment, you will realize + // from the output of the norm of + // the right hand side in each time + // step that this is almost the + // case: it is not exactly zero, + // since in the first time step the + // incremental displacement and + // stress updates were computed + // relative to the undeformed mesh, + // which was then deformed. In the + // second time step, we again + // compute displacement and stress + // updates, but this time in the + // deformed mesh -- there, the + // resulting updates are very small + // but not quite zero. This can be + // iterated, and in each such + // iteration the residual, i.e. the + // norm of the right hand side + // vector, is reduced; if one makes + // this little experiment, one + // realizes that the norm of this + // residual decays exponentially + // with the number of iterations, + // and after an initial very rapid + // decline is reduced by roughly a + // factor of about 3.5 in each + // iteration (for one testcase I + // looked at, other testcases, and + // other numbers of unknowns change + // the factor, but not the + // exponential decay). + + // In a sense, this can then be considered + // as a quasi-timestepping scheme to + // resolve the nonlinear problem of solving + // large-deformation elasticity on a mesh + // that is moved along in a Lagrangian + // manner. + // + // Another complication is that the + // existing (old) stresses are defined on + // the old mesh, which we will move around + // after updating the stresses. If this + // mesh update involves rotations of the + // cell, then we need to also rotate the + // updated stress, since it was computed + // relative to the coordinate system of the + // old cell. + // + // Thus, what we need is the following: on + // each cell which the present processor + // owns, we need to extract the old stress + // from the data stored with each + // quadrature point, compute the stress + // update, add the two together, and then + // rotate the result together with the + // incremental rotation computed from the + // incremental displacement at the present + // quadrature point. 
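// [Editorial aside -- not part of the patch.] To attach a number to the
// decay factor quoted above: if every additional sweep over the same
// time step reduces the residual by roughly a factor of 3.5, then after
// $n$ sweeps the residual has dropped to about $3.5^{-n}$ of its initial
// value, e.g. by a factor of roughly $3.5^{10}\approx 2.8\times 10^5$
// after ten sweeps. The factor 3.5 itself is, as stated above, specific
// to one testcase; only the exponential character of the decay is
// generic.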
We will detail these + // steps below: template void TopLevel::update_quadrature_point_history () { - // First, set up an FEValues object - // by which we will evaluate the - // incremental displacements and the - // gradients thereof at the quadrature - // points, together with a vector that - // will hold this information: - FEValues fe_values (fe, quadrature_formula, + // First, set up an FEValues object + // by which we will evaluate the + // incremental displacements and the + // gradients thereof at the quadrature + // points, together with a vector that + // will hold this information: + FEValues fe_values (fe, quadrature_formula, update_values | update_gradients); std::vector > > displacement_increment_grads (quadrature_formula.size(), std::vector >(dim)); - + // Then loop over all cells and do the // job in the cells that belong to our // subdomain: @@ -2697,27 +2689,27 @@ namespace QuasiStaticElasticity cell != dof_handler.end(); ++cell) if (cell->subdomain_id() == this_mpi_process) { - // Next, get a pointer to the - // quadrature point history data - // local to the present cell, and, - // as a defensive measure, make - // sure that this pointer is within - // the bounds of the global array: + // Next, get a pointer to the + // quadrature point history data + // local to the present cell, and, + // as a defensive measure, make + // sure that this pointer is within + // the bounds of the global array: PointHistory *local_quadrature_points_history = reinterpret_cast *>(cell->user_pointer()); Assert (local_quadrature_points_history >= - &quadrature_point_history.front(), + &quadrature_point_history.front(), ExcInternalError()); Assert (local_quadrature_points_history < - &quadrature_point_history.back(), + &quadrature_point_history.back(), ExcInternalError()); - // Then initialize the FEValues - // object on the present cell, and - // extract the gradients of the - // displacement at the quadrature - // points for later computation of - // the strains + // Then initialize the FEValues + // object on the present cell, and + // extract the gradients of the + // displacement at the quadrature + // points for later computation of + // the strains fe_values.reinit (cell); fe_values.get_function_grads (incremental_displacement, displacement_increment_grads); @@ -2726,49 +2718,49 @@ namespace QuasiStaticElasticity // points of this cell: for (unsigned int q=0; q new_stress - = (local_quadrature_points_history[q].old_stress - + - (stress_strain_tensor * - get_strain (displacement_increment_grads[q]))); - - // Finally, we have to rotate - // the result. For this, we - // first have to compute a - // rotation matrix at the - // present quadrature point - // from the incremental - // displacements. In fact, it - // can be computed from the - // gradients, and we already - // have a function for that - // purpose: - const Tensor<2,dim> rotation - = get_rotation_matrix (displacement_increment_grads[q]); - // Note that the result, a - // rotation matrix, is in - // general an antisymmetric - // tensor of rank 2, so we must - // store it as a full tensor. 
- - // With this rotation matrix, - // we can compute the rotated - // tensor by contraction from - // the left and right, after we - // expand the symmetric tensor - // new_stress into a full - // tensor: - const SymmetricTensor<2,dim> rotated_new_stress - = symmetrize(transpose(rotation) * + // On each quadrature point, + // compute the strain increment + // from the gradients, and + // multiply it by the + // stress-strain tensor to get + // the stress update. Then add + // this update to the already + // existing strain at this + // point: + const SymmetricTensor<2,dim> new_stress + = (local_quadrature_points_history[q].old_stress + + + (stress_strain_tensor * + get_strain (displacement_increment_grads[q]))); + + // Finally, we have to rotate + // the result. For this, we + // first have to compute a + // rotation matrix at the + // present quadrature point + // from the incremental + // displacements. In fact, it + // can be computed from the + // gradients, and we already + // have a function for that + // purpose: + const Tensor<2,dim> rotation + = get_rotation_matrix (displacement_increment_grads[q]); + // Note that the result, a + // rotation matrix, is in + // general an antisymmetric + // tensor of rank 2, so we must + // store it as a full tensor. + + // With this rotation matrix, + // we can compute the rotated + // tensor by contraction from + // the left and right, after we + // expand the symmetric tensor + // new_stress into a full + // tensor: + const SymmetricTensor<2,dim> rotated_new_stress + = symmetrize(transpose(rotation) * static_cast >(new_stress) * rotation); // Note that while the @@ -2800,19 +2792,19 @@ namespace QuasiStaticElasticity // result to make it // exactly symmetric. - // The result of all these - // operations is then written - // back into the original - // place: - local_quadrature_points_history[q].old_stress - = rotated_new_stress; + // The result of all these + // operations is then written + // back into the original + // place: + local_quadrature_points_history[q].old_stress + = rotated_new_stress; } } } // This ends the project specific // namespace - // QuasiStaticElasticity. The + // Step18. The // rest is as usual and as already // shown in step-17: A main() // function that initializes and @@ -2824,20 +2816,23 @@ namespace QuasiStaticElasticity } -int main (int argc, char **argv) +int main (int argc, char **argv) { try { + using namespace dealii; + using namespace Step18; + PetscInitialize(&argc,&argv,0,0); { deallog.depth_console (0); - QuasiStaticElasticity::TopLevel<3> elastic_problem; + TopLevel<3> elastic_problem; elastic_problem.run (); } - PetscFinalize(); + PetscFinalize(); } catch (std::exception &exc) { @@ -2849,10 +2844,10 @@ int main (int argc, char **argv) << "Aborting!" << std::endl << "----------------------------------------------------" << std::endl; - + return 1; } - catch (...) + catch (...) { std::cerr << std::endl << std::endl << "----------------------------------------------------" diff --git a/deal.II/examples/step-19/step-19.cc b/deal.II/examples/step-19/step-19.cc index f1663db3c0..50717b36c4 100644 --- a/deal.II/examples/step-19/step-19.cc +++ b/deal.II/examples/step-19/step-19.cc @@ -3,7 +3,7 @@ /* $Id$ */ /* */ -/* Copyright (C) 2005, 2006 by the deal.II authors */ +/* Copyright (C) 2005, 2006, 2011 by the deal.II authors */ /* */ /* This file is subject to QPL and may not be distributed */ /* without copyright and license information. 
Please refer */ @@ -26,617 +26,624 @@ #include #include - // As mentioned in the first few - // tutorial programs, all names in - // deal.II are declared in a - // namespace dealii. To - // make using these function and - // class names simpler, we import the - // entire content of that namespace - // into the global scope: -using namespace dealii; - - // Before we start with the actual program, - // let us declare a few global variables that - // will be used to hold the parameters this - // program is going to use. Usually, global - // variables are frowned upon for a good - // reason, but since we have such a short - // program here that does only a single - // thing, we may stray from our usual line - // and make these variables global, rather - // than passing them around to all functions - // or encapsulating them into a class. - // - // The variables we have are: first, an - // object that will hold parameters of - // operation, such as output format (unless - // given on the command line); second, the - // names of input and output files; and third, - // the format in which the output is to be - // written: -ParameterHandler prm; -std::vector input_file_names; -std::string output_file; -std::string output_format; - - - // All the stuff this program does can be - // done from here on. As described in the - // introduction, what we have to do is - // declare what values the parameter file can - // have, parse the command line, read the - // input files, then write the output. We - // will do this in this order of operation, - // but before that let us declare a function - // that prints a message about how this - // program is to be used; the function first - // prints a general message, and then goes on - // to list the parameters that are allowed in - // the parameter file (the - // ParameterHandler class has a function - // to do exactly this; see the results - // section for what it prints): -void -print_usage_message () + // As mentioned in the first few tutorial + // programs, all names in deal.II are + // declared in a namespace + // dealii. To make using these + // function and class names simpler, we + // import the entire content of that + // namespace into the global scope. As done + // for all previous programs already, we'll + // also place everything we do here into a + // namespace of its own: +namespace Step19 { - static const char* message - = - "\n" - "Converter from deal.II intermediate format to other graphics formats.\n" - "\n" - "Usage:\n" - " ./step-19 [-p parameter_file] list_of_input_files \n" - " [-x output_format] [-o output_file]\n" - "\n" - "Parameter sequences in brackets can be omitted if a parameter file is\n" - "specified on the command line and if it provides values for these\n" - "missing parameters.\n" - "\n" - "The parameter file has the following format and allows the following\n" - "values (you can cut and paste this and use it for your own parameter\n" - "file):\n" - "\n"; - std::cout << message; - - prm.print_parameters (std::cout, ParameterHandler::Text); -} - - - // @sect4{Declaring parameters for the input file} - - // The second function is used to declare the - // parameters this program accepts from the - // input file. While we don't actually take - // many parameters from the input file except - // for, possibly, the output file name and - // format, we nevertheless want to show how - // to work with parameter files. 
- // - // In short, the ParameterHandler class - // works as follows: one declares the entries - // of parameters that can be given in input - // files together, and later on one can read - // an input file in which these parameters - // are set to their values. If a parameter is - // not listed in the input file, the default - // value specified in the declaration of that - // parameter is used. After that, the program - // can query the values assigned to certain - // parameters from the ParameterHandler - // object. - // - // Declaring parameters can be done using the - // ParameterHandler::declare_entry - // function. It's arguments are the name of a - // parameter, a default value (given as a - // string, even if the parameter is numeric - // in nature, and thirdly an object that - // describes constraints on values that may - // be passed to this parameter. In the - // example below, we use an object of type - // Patterns::Anything to denote that - // there are no constraints on file names - // (this is, of course, not true -- the - // operating system does have constraints, - // but from an application standpoint, almost - // all names are valid). In other cases, one - // may, for example, use - // Patterns::Integer to make sure that - // only parameters are accepted that can be - // interpreted as integer values (it is also - // possible to specify bounds for integer - // values, and all values outside this range - // are rejected), Patterns::Double for - // floating point values, classes that make - // sure that the given parameter value is a - // comma separated list of things, etc. Take - // a look at the Patterns namespace to - // see what is possible. - // - // The fourth argument to declare_entry - // is a help string that can be printed to - // document what this parameter is meant to - // be used for and other information you may - // consider important when declaring this - // parameter. The default value of this - // fourth argument is the empty string. - // - // I always wanted to have an example program - // describing the ParameterHandler class, - // because it is so particularly useful. It - // would have been useful in a number of - // previous example programs (for example, in - // order to let the tolerance for linear - // solvers, or the number of refinement steps - // be determined by a run-time parameter, - // rather than hard-coding them into the - // program), but it turned out that trying to - // explain this class there would have - // overloaded them with things that would - // have distracted from the main - // purpose. However, while writing this - // program, I realized that there aren't all - // that many parameters this program can - // usefully ask for, or better, it turned - // out: declaring and querying these - // parameters was already done centralized in - // one place of the libray, namely the - // DataOutInterface class that handles - // exactly this -- managing parameters for - // input and output. - // - // So the second function call in this - // function is to let the - // DataOutInterface declare a good number - // of parameters that control everything from - // the output format to what kind of output - // should be generated if output is written - // in a specific graphical format. For - // example, when writing data in encapsulated - // postscript (EPS) format, the result is - // just a 2d projection, not data that can be - // viewed and rotated with a - // viewer. 
Therefore, one has to choose the - // viewing angle and a number of other - // options up front, when output is - // generated, rather than playing around with - // them later on. The call to - // DataOutInterface::declare_parameters - // declares entries that allow to specify - // them in the parameter input file during - // run-time. If the parameter file does not - // contain entries for them, defaults are - // taken. - // - // As a final note: DataOutInterface is a - // template, because it is usually used to - // write output for a specific space - // dimension. However, this program is - // supposed to be used for all dimensions at - // the same time, so we don't know at compile - // time what the right dimension is when - // specifying the template - // parameter. Fortunately, declaring - // parameters is something that is space - // dimension independent, so we can just pick - // one arbitrarily. We pick 1, but it - // could have been any other number as well. -void declare_parameters () -{ - prm.declare_entry ("Output file", "", - Patterns::Anything(), - "The name of the output file to be generated"); - - DataOutInterface<1>::declare_parameters (prm); - - // Since everything that this program can - // usefully request in terms of input - // parameters is already handled by now, - // let us nevertheless show how to use - // input parameters in other - // circumstances. First, parameters are - // like files in a directory tree: they can - // be in the top-level directory, but you - // can also group them into subdirectories - // to make it easier to find them or to be - // able to use the same parameter name in - // different contexts. - // - // Let us first declare a dummy parameter - // in the top-level section; we assume that - // it will denote the number of iterations, - // and that useful numbers of iterations - // that a user should be able to specify - // are in the range 1...1000, with a - // default value of 42: - prm.declare_entry ("Dummy iterations", "42", - Patterns::Integer (1,1000), - "A dummy parameter asking for an integer"); - - // Next, let us declare a sub-section (the - // equivalent to a subdirectory). When - // entered, all following parameter - // declarations will be within this - // subsection. To also visually group these - // declarations with the subsection name, I - // like to use curly braces to force my - // editor to indent everything that goes - // into this sub-section by one level of - // indentation. In this sub-section, we - // shall have two entries, one that takes a - // boolean parameter and one that takes a - // selection list of values, separated by - // the '|' character: - prm.enter_subsection ("Dummy subsection"); + using namespace dealii; + + // Before we start with the actual program, + // let us declare a few global variables that + // will be used to hold the parameters this + // program is going to use. Usually, global + // variables are frowned upon for a good + // reason, but since we have such a short + // program here that does only a single + // thing, we may stray from our usual line + // and make these variables global, rather + // than passing them around to all functions + // or encapsulating them into a class. 
+ // + // The variables we have are: first, an + // object that will hold parameters of + // operation, such as output format (unless + // given on the command line); second, the + // names of input and output files; and third, + // the format in which the output is to be + // written: + ParameterHandler prm; + std::vector input_file_names; + std::string output_file; + std::string output_format; + + + // All the stuff this program does can be + // done from here on. As described in the + // introduction, what we have to do is + // declare what values the parameter file can + // have, parse the command line, read the + // input files, then write the output. We + // will do this in this order of operation, + // but before that let us declare a function + // that prints a message about how this + // program is to be used; the function first + // prints a general message, and then goes on + // to list the parameters that are allowed in + // the parameter file (the + // ParameterHandler class has a function + // to do exactly this; see the results + // section for what it prints): + void + print_usage_message () { - prm.declare_entry ("Dummy generate output", "true", - Patterns::Bool(), - "A dummy parameter that can be fed with either " - "'true' or 'false'"); - prm.declare_entry ("Dummy color of output", "red", - Patterns::Selection("red|black|blue"), - "A dummy parameter that shows how one can define a " - "parameter that can be assigned values from a finite " - "set of values"); + static const char* message + = + "\n" + "Converter from deal.II intermediate format to other graphics formats.\n" + "\n" + "Usage:\n" + " ./step-19 [-p parameter_file] list_of_input_files \n" + " [-x output_format] [-o output_file]\n" + "\n" + "Parameter sequences in brackets can be omitted if a parameter file is\n" + "specified on the command line and if it provides values for these\n" + "missing parameters.\n" + "\n" + "The parameter file has the following format and allows the following\n" + "values (you can cut and paste this and use it for your own parameter\n" + "file):\n" + "\n"; + std::cout << message; + + prm.print_parameters (std::cout, ParameterHandler::Text); } - prm.leave_subsection (); - // After this, we have left the subsection - // again. You should have gotten the idea - // by now how one can nest subsections to - // separate parameters. There are a number - // of other possible patterns describing - // possible values of parameters; in all - // cases, if you try to pass a parameter to - // the program that does not match the - // expectations of the pattern, it will - // reject the parameter file and ask you to - // fix it. After all, it does not make much - // sense if you had an entry that contained - // the entry "red" for the parameter - // "Generate output". -} - - // @sect4{Parsing the command line} - - // Our next task is to see what information - // has been provided on the command - // line. First, we need to be sure that there - // is at least one parameter: an input - // file. The format and the output file can - // be specified in the parameter file, but - // the list of input files can't, so at least - // one parameter needs to be there. Together - // with the name of the program (the zeroth - // parameter), argc must therefore be at - // least 2. 
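  // (For example, a hypothetical minimal invocation such as
  //   ./step-19 solution.d2
  // already consists of two command line arguments, namely the name of
  // the executable and one input file, so argc equals 2; the
  // file name here is of course made up and only serves as an
  // illustration.)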
If this is not the case, we print - // an error message and exit: -void -parse_command_line (const int argc, - char *const * argv) -{ - if (argc < 2) - { - print_usage_message (); - exit (1); - } - // Next, collect all parameters in a list - // that will be somewhat simpler to handle - // than the argc/argv mechanism. We - // omit the name of the executable at the - // zeroth index: - std::list args; - for (int i=1; i-p, then there must be a - // parameter file following (which - // we should then read), in case of - // -x it is the name of an - // output format. Finally, for - // -o it is the name of the - // output file. In all cases, once - // we've treated a parameter, we - // remove it from the list of - // parameters: - while (args.size()) + // @sect4{Declaring parameters for the input file} + + // The second function is used to declare the + // parameters this program accepts from the + // input file. While we don't actually take + // many parameters from the input file except + // for, possibly, the output file name and + // format, we nevertheless want to show how + // to work with parameter files. + // + // In short, the ParameterHandler class + // works as follows: one declares the entries + // of parameters that can be given in input + // files together, and later on one can read + // an input file in which these parameters + // are set to their values. If a parameter is + // not listed in the input file, the default + // value specified in the declaration of that + // parameter is used. After that, the program + // can query the values assigned to certain + // parameters from the ParameterHandler + // object. + // + // Declaring parameters can be done using the + // ParameterHandler::declare_entry + // function. It's arguments are the name of a + // parameter, a default value (given as a + // string, even if the parameter is numeric + // in nature, and thirdly an object that + // describes constraints on values that may + // be passed to this parameter. In the + // example below, we use an object of type + // Patterns::Anything to denote that + // there are no constraints on file names + // (this is, of course, not true -- the + // operating system does have constraints, + // but from an application standpoint, almost + // all names are valid). In other cases, one + // may, for example, use + // Patterns::Integer to make sure that + // only parameters are accepted that can be + // interpreted as integer values (it is also + // possible to specify bounds for integer + // values, and all values outside this range + // are rejected), Patterns::Double for + // floating point values, classes that make + // sure that the given parameter value is a + // comma separated list of things, etc. Take + // a look at the Patterns namespace to + // see what is possible. + // + // The fourth argument to declare_entry + // is a help string that can be printed to + // document what this parameter is meant to + // be used for and other information you may + // consider important when declaring this + // parameter. The default value of this + // fourth argument is the empty string. + // + // I always wanted to have an example program + // describing the ParameterHandler class, + // because it is so particularly useful. 
It + // would have been useful in a number of + // previous example programs (for example, in + // order to let the tolerance for linear + // solvers, or the number of refinement steps + // be determined by a run-time parameter, + // rather than hard-coding them into the + // program), but it turned out that trying to + // explain this class there would have + // overloaded them with things that would + // have distracted from the main + // purpose. However, while writing this + // program, I realized that there aren't all + // that many parameters this program can + // usefully ask for, or better, it turned + // out: declaring and querying these + // parameters was already done centralized in + // one place of the libray, namely the + // DataOutInterface class that handles + // exactly this -- managing parameters for + // input and output. + // + // So the second function call in this + // function is to let the + // DataOutInterface declare a good number + // of parameters that control everything from + // the output format to what kind of output + // should be generated if output is written + // in a specific graphical format. For + // example, when writing data in encapsulated + // postscript (EPS) format, the result is + // just a 2d projection, not data that can be + // viewed and rotated with a + // viewer. Therefore, one has to choose the + // viewing angle and a number of other + // options up front, when output is + // generated, rather than playing around with + // them later on. The call to + // DataOutInterface::declare_parameters + // declares entries that allow to specify + // them in the parameter input file during + // run-time. If the parameter file does not + // contain entries for them, defaults are + // taken. + // + // As a final note: DataOutInterface is a + // template, because it is usually used to + // write output for a specific space + // dimension. However, this program is + // supposed to be used for all dimensions at + // the same time, so we don't know at compile + // time what the right dimension is when + // specifying the template + // parameter. Fortunately, declaring + // parameters is something that is space + // dimension independent, so we can just pick + // one arbitrarily. We pick 1, but it + // could have been any other number as well. + void declare_parameters () + { + prm.declare_entry ("Output file", "", + Patterns::Anything(), + "The name of the output file to be generated"); + + DataOutInterface<1>::declare_parameters (prm); + + // Since everything that this program can + // usefully request in terms of input + // parameters is already handled by now, + // let us nevertheless show how to use + // input parameters in other + // circumstances. First, parameters are + // like files in a directory tree: they can + // be in the top-level directory, but you + // can also group them into subdirectories + // to make it easier to find them or to be + // able to use the same parameter name in + // different contexts. + // + // Let us first declare a dummy parameter + // in the top-level section; we assume that + // it will denote the number of iterations, + // and that useful numbers of iterations + // that a user should be able to specify + // are in the range 1...1000, with a + // default value of 42: + prm.declare_entry ("Dummy iterations", "42", + Patterns::Integer (1,1000), + "A dummy parameter asking for an integer"); + + // Next, let us declare a sub-section (the + // equivalent to a subdirectory). 
When + // entered, all following parameter + // declarations will be within this + // subsection. To also visually group these + // declarations with the subsection name, I + // like to use curly braces to force my + // editor to indent everything that goes + // into this sub-section by one level of + // indentation. In this sub-section, we + // shall have two entries, one that takes a + // boolean parameter and one that takes a + // selection list of values, separated by + // the '|' character: + prm.enter_subsection ("Dummy subsection"); { - if (args.front() == std::string("-p")) - { - if (args.size() == 1) - { - std::cerr << "Error: flag '-p' must be followed by the " - << "name of a parameter file." - << std::endl; - print_usage_message (); - exit (1); - } - args.pop_front (); - const std::string parameter_file = args.front (); - args.pop_front (); - - // Now read the input file: - prm.read_input (parameter_file); - - // Both the output file name as - // well as the format can be - // specified on the command - // line. We have therefore given - // them global variables that hold - // their values, but they can also - // be set in the parameter file. We - // therefore need to extract them - // from the parameter file here, - // because they may be overridden - // by later command line - // parameters: - if (output_file == "") - output_file = prm.get ("Output file"); - - if (output_format == "") - output_format = prm.get ("Output format"); - - // Finally, let us note that if we - // were interested in the values of - // the parameters declared above in - // the dummy subsection, we would - // write something like this to - // extract the value of the boolean - // flag (the prm.get function - // returns the value of a parameter - // as a string, whereas the - // prm.get_X functions return a - // value already converted to a - // different type): - prm.enter_subsection ("Dummy subsection"); - { - prm.get_bool ("Dummy generate output"); - } - prm.leave_subsection (); - // We would assign the result to a - // variable, or course, but don't - // here in order not to generate an - // unused variable that the - // compiler might warn about. - // - // Alas, let's move on to handling - // of output formats: - } - else if (args.front() == std::string("-x")) - { - if (args.size() == 1) - { - std::cerr << "Error: flag '-x' must be followed by the " - << "name of an output format." - << std::endl; - print_usage_message (); - exit (1); - } - args.pop_front (); - output_format = args.front(); - args.pop_front (); - } - else if (args.front() == std::string("-o")) - { - if (args.size() == 1) - { - std::cerr << "Error: flag '-o' must be followed by the " - << "name of an output file." - << std::endl; - print_usage_message (); - exit (1); - } - args.pop_front (); - output_file = args.front(); - args.pop_front (); - } - - // Otherwise, this is not a parameter - // that starts with a known minus - // sequence, and we should consider it - // to be the name of an input file. 
Let - // us therefore add this file to the - // list of input files: - else - { - input_file_names.push_back (args.front()); - args.pop_front (); - } + prm.declare_entry ("Dummy generate output", "true", + Patterns::Bool(), + "A dummy parameter that can be fed with either " + "'true' or 'false'"); + prm.declare_entry ("Dummy color of output", "red", + Patterns::Selection("red|black|blue"), + "A dummy parameter that shows how one can define a " + "parameter that can be assigned values from a finite " + "set of values"); } + prm.leave_subsection (); + // After this, we have left the subsection + // again. You should have gotten the idea + // by now how one can nest subsections to + // separate parameters. There are a number + // of other possible patterns describing + // possible values of parameters; in all + // cases, if you try to pass a parameter to + // the program that does not match the + // expectations of the pattern, it will + // reject the parameter file and ask you to + // fix it. After all, it does not make much + // sense if you had an entry that contained + // the entry "red" for the parameter + // "Generate output". + } - // Next check a few things and create - // errors if the checks fail. Firstly, - // there must be at least one input file - if (input_file_names.size() == 0) - { - std::cerr << "Error: No input file specified." << std::endl; - print_usage_message (); - exit (1); - } -} + // @sect4{Parsing the command line} + + // Our next task is to see what information + // has been provided on the command + // line. First, we need to be sure that there + // is at least one parameter: an input + // file. The format and the output file can + // be specified in the parameter file, but + // the list of input files can't, so at least + // one parameter needs to be there. Together + // with the name of the program (the zeroth + // parameter), argc must therefore be at + // least 2. If this is not the case, we print + // an error message and exit: + void + parse_command_line (const int argc, + char *const * argv) + { + if (argc < 2) + { + print_usage_message (); + exit (1); + } + + // Next, collect all parameters in a list + // that will be somewhat simpler to handle + // than the argc/argv mechanism. We + // omit the name of the executable at the + // zeroth index: + std::list args; + for (int i=1; i-p, then there must be a + // parameter file following (which + // we should then read), in case of + // -x it is the name of an + // output format. Finally, for + // -o it is the name of the + // output file. In all cases, once + // we've treated a parameter, we + // remove it from the list of + // parameters: + while (args.size()) + { + if (args.front() == std::string("-p")) + { + if (args.size() == 1) + { + std::cerr << "Error: flag '-p' must be followed by the " + << "name of a parameter file." + << std::endl; + print_usage_message (); + exit (1); + } + args.pop_front (); + const std::string parameter_file = args.front (); + args.pop_front (); + + // Now read the input file: + prm.read_input (parameter_file); + + // Both the output file name as + // well as the format can be + // specified on the command + // line. We have therefore given + // them global variables that hold + // their values, but they can also + // be set in the parameter file. 
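  // A parameter file that sets these values -- together with the dummy
  // entries declared above -- could, for instance, look like this
  // (the values shown are of course made up):
  //     set Output file      = solution.gnuplot
  //     set Output format    = gnuplot
  //     set Dummy iterations = 100
  //     subsection Dummy subsection
  //       set Dummy generate output = false
  //       set Dummy color of output = blue
  //     end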
We + // therefore need to extract them + // from the parameter file here, + // because they may be overridden + // by later command line + // parameters: + if (output_file == "") + output_file = prm.get ("Output file"); + + if (output_format == "") + output_format = prm.get ("Output format"); + + // Finally, let us note that if we + // were interested in the values of + // the parameters declared above in + // the dummy subsection, we would + // write something like this to + // extract the value of the boolean + // flag (the prm.get function + // returns the value of a parameter + // as a string, whereas the + // prm.get_X functions return a + // value already converted to a + // different type): + prm.enter_subsection ("Dummy subsection"); + { + prm.get_bool ("Dummy generate output"); + } + prm.leave_subsection (); + // We would assign the result to a + // variable, or course, but don't + // here in order not to generate an + // unused variable that the + // compiler might warn about. + // + // Alas, let's move on to handling + // of output formats: + } + else if (args.front() == std::string("-x")) + { + if (args.size() == 1) + { + std::cerr << "Error: flag '-x' must be followed by the " + << "name of an output format." + << std::endl; + print_usage_message (); + exit (1); + } + args.pop_front (); + output_format = args.front(); + args.pop_front (); + } + else if (args.front() == std::string("-o")) + { + if (args.size() == 1) + { + std::cerr << "Error: flag '-o' must be followed by the " + << "name of an output file." + << std::endl; + print_usage_message (); + exit (1); + } + args.pop_front (); + output_file = args.front(); + args.pop_front (); + } + + // Otherwise, this is not a parameter + // that starts with a known minus + // sequence, and we should consider it + // to be the name of an input file. Let + // us therefore add this file to the + // list of input files: + else + { + input_file_names.push_back (args.front()); + args.pop_front (); + } + } + + // Next check a few things and create + // errors if the checks fail. Firstly, + // there must be at least one input file + if (input_file_names.size() == 0) + { + std::cerr << "Error: No input file specified." << std::endl; + print_usage_message (); + exit (1); + } + } - // @sect4{Generating output} - - // Now that we have all the information, we - // need to read all the input files, merge - // them, and generate a single output - // file. This, after all, was the motivation, - // borne from the necessity encountered in - // the step-18 tutorial program, to write - // this program in the first place. - // - // So what we do first is to declare an - // object into which we will merge the data - // from all the input file, and read in the - // first file through a stream. Note that - // every time we open a file, we use the - // AssertThrow macro to check whether the - // file is really readable -- if it isn't - // then this will trigger an exception and - // corresponding output will be generated - // from the exception handler in main(): -template -void do_convert () -{ - DataOutReader merged_data; - { - std::ifstream input (input_file_names[0].c_str()); - AssertThrow (input, ExcIO()); - - merged_data.read (input); - } + // @sect4{Generating output} + + // Now that we have all the information, we + // need to read all the input files, merge + // them, and generate a single output + // file. 
This, after all, was the motivation, + // borne from the necessity encountered in + // the step-18 tutorial program, to write + // this program in the first place. + // + // So what we do first is to declare an + // object into which we will merge the data + // from all the input file, and read in the + // first file through a stream. Note that + // every time we open a file, we use the + // AssertThrow macro to check whether the + // file is really readable -- if it isn't + // then this will trigger an exception and + // corresponding output will be generated + // from the exception handler in main(): + template + void do_convert () + { + DataOutReader merged_data; - // For all the other input files, we read - // their data into an intermediate object, - // and then merge that into the first - // object declared above: - for (unsigned int i=1; i additional_data; - additional_data.read (input); - merged_data.merge (additional_data); - } - - // Once we have this, let us open an output - // stream, and parse what we got as the - // name of the output format into an - // identifier. Fortunately, the - // DataOutBase class has a function - // that does this parsing for us, i.e. it - // knows about all the presently supported - // output formats and makes sure that they - // can be specified in the parameter file - // or on the command line. Note that this - // ensures that if the library acquires the - // ability to output in other output - // formats, this program will be able to - // make use of this ability without having - // to be changed! - std::ofstream output_stream (output_file.c_str()); - AssertThrow (output_stream, ExcIO()); - - const DataOutBase::OutputFormat format - = DataOutBase::parse_output_format (output_format); - - // Finally, write the merged data to the - // output: - merged_data.write(output_stream, format); -} + merged_data.read (input); + } + // For all the other input files, we read + // their data into an intermediate object, + // and then merge that into the first + // object declared above: + for (unsigned int i=1; i additional_data; + additional_data.read (input); + merged_data.merge (additional_data); + } + + // Once we have this, let us open an output + // stream, and parse what we got as the + // name of the output format into an + // identifier. Fortunately, the + // DataOutBase class has a function + // that does this parsing for us, i.e. it + // knows about all the presently supported + // output formats and makes sure that they + // can be specified in the parameter file + // or on the command line. Note that this + // ensures that if the library acquires the + // ability to output in other output + // formats, this program will be able to + // make use of this ability without having + // to be changed! + std::ofstream output_stream (output_file.c_str()); + AssertThrow (output_stream, ExcIO()); + + const DataOutBase::OutputFormat format + = DataOutBase::parse_output_format (output_format); + + // Finally, write the merged data to the + // output: + merged_data.write(output_stream, format); + } - // @sect4{Dispatching output generation} - - // The function above takes template - // parameters relating to the space dimension - // of the output, and the dimension of the - // objects to be output. (For example, when - // outputting whole cells, these two - // dimensions are the same, but the - // intermediate files may contain only data - // pertaining to the faces of cells, in which - // case the first parameter will be one less - // than the space dimension.) 
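  // (A concrete instance: intermediate data written for the faces of
  // the cells of a three-dimensional triangulation would be read back
  // by a DataOutReader<2,3> object, i.e. by the do_convert<2,3>
  // instantiation used further down.)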
- // - // The problem is: at compile time, we of - // course don't know the dimensions used in - // the input files. We have to plan for all - // cases, therefore. This is a little clumsy, - // since we need to specify the dimensions - // statically at compile time, even though we - // will only know about them at run time. - // - // So here is what we do: from the first - // input file, we determine (using a function - // in DataOutBase that exists for this - // purpose) these dimensions. We then have a - // series of switches that dispatch, - // statically, to the do_convert - // functions with different template - // arguments. Not pretty, but works. Apart - // from this, the function does nothing -- - // except making sure that it covered the - // dimensions for which it was called, using - // the AssertThrow macro at places in the - // code that shouldn't be reached: -void convert () -{ - AssertThrow (input_file_names.size() > 0, - ExcMessage ("No input files specified.")); - - std::ifstream input(input_file_names[0].c_str()); - AssertThrow (input, ExcIO()); - - const std::pair - dimensions = DataOutBase::determine_intermediate_format_dimensions (input); - - switch (dimensions.first) - { - case 1: - switch (dimensions.second) - { - case 1: - do_convert <1,1> (); - return; - - case 2: - do_convert <1,2> (); - return; - } - AssertThrow (false, ExcNotImplemented()); - - case 2: - switch (dimensions.second) - { - case 2: - do_convert <2,2> (); - return; - - case 3: - do_convert <2,3> (); - return; - } - AssertThrow (false, ExcNotImplemented()); - - case 3: - switch (dimensions.second) - { - case 3: - do_convert <3,3> (); - return; - } - AssertThrow (false, ExcNotImplemented()); - } - - AssertThrow (false, ExcNotImplemented()); + + // @sect4{Dispatching output generation} + + // The function above takes template + // parameters relating to the space dimension + // of the output, and the dimension of the + // objects to be output. (For example, when + // outputting whole cells, these two + // dimensions are the same, but the + // intermediate files may contain only data + // pertaining to the faces of cells, in which + // case the first parameter will be one less + // than the space dimension.) + // + // The problem is: at compile time, we of + // course don't know the dimensions used in + // the input files. We have to plan for all + // cases, therefore. This is a little clumsy, + // since we need to specify the dimensions + // statically at compile time, even though we + // will only know about them at run time. + // + // So here is what we do: from the first + // input file, we determine (using a function + // in DataOutBase that exists for this + // purpose) these dimensions. We then have a + // series of switches that dispatch, + // statically, to the do_convert + // functions with different template + // arguments. Not pretty, but works. 
Apart + // from this, the function does nothing -- + // except making sure that it covered the + // dimensions for which it was called, using + // the AssertThrow macro at places in the + // code that shouldn't be reached: + void convert () + { + AssertThrow (input_file_names.size() > 0, + ExcMessage ("No input files specified.")); + + std::ifstream input(input_file_names[0].c_str()); + AssertThrow (input, ExcIO()); + + const std::pair + dimensions = DataOutBase::determine_intermediate_format_dimensions (input); + + switch (dimensions.first) + { + case 1: + switch (dimensions.second) + { + case 1: + do_convert <1,1> (); + return; + + case 2: + do_convert <1,2> (); + return; + } + AssertThrow (false, ExcNotImplemented()); + + case 2: + switch (dimensions.second) + { + case 2: + do_convert <2,2> (); + return; + + case 3: + do_convert <2,3> (); + return; + } + AssertThrow (false, ExcNotImplemented()); + + case 3: + switch (dimensions.second) + { + case 3: + do_convert <3,3> (); + return; + } + AssertThrow (false, ExcNotImplemented()); + } + + AssertThrow (false, ExcNotImplemented()); + } } - // @sect4{main()} + // @sect4{main()} - // Finally, the main program. There is not - // much more to do than to make sure - // parameters are declared, the command line - // is parsed (which includes reading - // parameter files), and finally making sure - // the input files are read and output is - // generated. Everything else just has to do - // with handling exceptions and making sure - // that appropriate output is generated if - // one is thrown. + // Finally, the main program. There is not + // much more to do than to make sure + // parameters are declared, the command line + // is parsed (which includes reading + // parameter files), and finally making sure + // the input files are read and output is + // generated. Everything else just has to do + // with handling exceptions and making sure + // that appropriate output is generated if + // one is thrown. int main (int argc, char ** argv) { try { + using namespace Step19; + declare_parameters (); parse_command_line (argc, argv); @@ -645,27 +652,27 @@ int main (int argc, char ** argv) catch (std::exception &exc) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Exception on processing: " << std::endl - << exc.what() << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; - + << exc.what() << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; + return 1; } catch (...) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Unknown exception!" << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << "Aborting!" 
<< std::endl + << "----------------------------------------------------" + << std::endl; return 1; }; - + return 0; -} +} diff --git a/deal.II/examples/step-20/step-20.cc b/deal.II/examples/step-20/step-20.cc index 73646d2e33..657ec43458 100644 --- a/deal.II/examples/step-20/step-20.cc +++ b/deal.II/examples/step-20/step-20.cc @@ -71,1135 +71,1138 @@ // The last step is as in all // previous programs: -using namespace dealii; - - // @sect3{The MixedLaplaceProblem class template} - - // Again, since this is an adaptation - // of step-6, the main class is - // almost the same as the one in that - // tutorial program. In terms of - // member functions, the main - // differences are that the - // constructor takes the degree of - // the Raviart-Thomas element as an - // argument (and that there is a - // corresponding member variable to - // store this value) and the addition - // of the compute_error function - // in which, no surprise, we will - // compute the difference between the - // exact and the numerical solution - // to determine convergence of our - // computations: -template -class MixedLaplaceProblem +namespace Step20 { - public: - MixedLaplaceProblem (const unsigned int degree); - void run (); - - private: - void make_grid_and_dofs (); - void assemble_system (); - void solve (); - void compute_errors () const; - void output_results () const; - - const unsigned int degree; - - Triangulation triangulation; - FESystem fe; - DoFHandler dof_handler; - - // The second difference is that - // the sparsity pattern, the - // system matrix, and solution - // and right hand side vectors - // are now blocked. What this - // means and what one can do with - // such objects is explained in - // the introduction to this - // program as well as further - // down below when we explain the - // linear solvers and - // preconditioners for this - // problem: - BlockSparsityPattern sparsity_pattern; - BlockSparseMatrix system_matrix; - - BlockVector solution; - BlockVector system_rhs; -}; - - - // @sect3{Right hand side, boundary values, and exact solution} - - // Our next task is to define the - // right hand side of our problem - // (i.e., the scalar right hand side - // for the pressure in the original - // Laplace equation), boundary values - // for the pressure, as well as a - // function that describes both the - // pressure and the velocity of the - // exact solution for later - // computations of the error. Note - // that these functions have one, - // one, and dim+1 components, - // respectively, and that we pass the - // number of components down to the - // Function@ base class. For - // the exact solution, we only - // declare the function that actually - // returns the entire solution vector - // (i.e. all components of it) at - // once. Here are the respective - // declarations: -template -class RightHandSide : public Function -{ - public: - RightHandSide () : Function(1) {} - - virtual double value (const Point &p, - const unsigned int component = 0) const; -}; - + using namespace dealii; + + // @sect3{The MixedLaplaceProblem class template} + + // Again, since this is an adaptation + // of step-6, the main class is + // almost the same as the one in that + // tutorial program. 
In terms of + // member functions, the main + // differences are that the + // constructor takes the degree of + // the Raviart-Thomas element as an + // argument (and that there is a + // corresponding member variable to + // store this value) and the addition + // of the compute_error function + // in which, no surprise, we will + // compute the difference between the + // exact and the numerical solution + // to determine convergence of our + // computations: + template + class MixedLaplaceProblem + { + public: + MixedLaplaceProblem (const unsigned int degree); + void run (); + + private: + void make_grid_and_dofs (); + void assemble_system (); + void solve (); + void compute_errors () const; + void output_results () const; + + const unsigned int degree; + + Triangulation triangulation; + FESystem fe; + DoFHandler dof_handler; + + // The second difference is that + // the sparsity pattern, the + // system matrix, and solution + // and right hand side vectors + // are now blocked. What this + // means and what one can do with + // such objects is explained in + // the introduction to this + // program as well as further + // down below when we explain the + // linear solvers and + // preconditioners for this + // problem: + BlockSparsityPattern sparsity_pattern; + BlockSparseMatrix system_matrix; + + BlockVector solution; + BlockVector system_rhs; + }; + + + // @sect3{Right hand side, boundary values, and exact solution} + + // Our next task is to define the + // right hand side of our problem + // (i.e., the scalar right hand side + // for the pressure in the original + // Laplace equation), boundary values + // for the pressure, as well as a + // function that describes both the + // pressure and the velocity of the + // exact solution for later + // computations of the error. Note + // that these functions have one, + // one, and dim+1 components, + // respectively, and that we pass the + // number of components down to the + // Function@ base class. For + // the exact solution, we only + // declare the function that actually + // returns the entire solution vector + // (i.e. all components of it) at + // once. Here are the respective + // declarations: + template + class RightHandSide : public Function + { + public: + RightHandSide () : Function(1) {} + virtual double value (const Point &p, + const unsigned int component = 0) const; + }; -template -class PressureBoundaryValues : public Function -{ - public: - PressureBoundaryValues () : Function(1) {} - - virtual double value (const Point &p, - const unsigned int component = 0) const; -}; -template -class ExactSolution : public Function -{ - public: - ExactSolution () : Function(dim+1) {} - - virtual void vector_value (const Point &p, - Vector &value) const; -}; - - - // And then we also have to define - // these respective functions, of - // course. 
Given our discussion in - // the introduction of how the - // solution should look like, the - // following computations should be - // straightforward: -template -double RightHandSide::value (const Point &/*p*/, - const unsigned int /*component*/) const -{ - return 0; -} + template + class PressureBoundaryValues : public Function + { + public: + PressureBoundaryValues () : Function(1) {} + virtual double value (const Point &p, + const unsigned int component = 0) const; + }; -template -double PressureBoundaryValues::value (const Point &p, - const unsigned int /*component*/) const -{ - const double alpha = 0.3; - const double beta = 1; - return -(alpha*p[0]*p[1]*p[1]/2 + beta*p[0] - alpha*p[0]*p[0]*p[0]/6); -} + template + class ExactSolution : public Function + { + public: + ExactSolution () : Function(dim+1) {} + + virtual void vector_value (const Point &p, + Vector &value) const; + }; + + + // And then we also have to define + // these respective functions, of + // course. Given our discussion in + // the introduction of how the + // solution should look like, the + // following computations should be + // straightforward: + template + double RightHandSide::value (const Point &/*p*/, + const unsigned int /*component*/) const + { + return 0; + } -template -void -ExactSolution::vector_value (const Point &p, - Vector &values) const -{ - Assert (values.size() == dim+1, - ExcDimensionMismatch (values.size(), dim+1)); + template + double PressureBoundaryValues::value (const Point &p, + const unsigned int /*component*/) const + { + const double alpha = 0.3; + const double beta = 1; + return -(alpha*p[0]*p[1]*p[1]/2 + beta*p[0] - alpha*p[0]*p[0]*p[0]/6); + } - const double alpha = 0.3; - const double beta = 1; - values(0) = alpha*p[1]*p[1]/2 + beta - alpha*p[0]*p[0]/2; - values(1) = alpha*p[0]*p[1]; - values(2) = -(alpha*p[0]*p[1]*p[1]/2 + beta*p[0] - alpha*p[0]*p[0]*p[0]/6); -} + template + void + ExactSolution::vector_value (const Point &p, + Vector &values) const + { + Assert (values.size() == dim+1, + ExcDimensionMismatch (values.size(), dim+1)); + const double alpha = 0.3; + const double beta = 1; - // @sect3{The inverse permeability tensor} - - // In addition to the other equation - // data, we also want to use a - // permeability tensor, or better -- - // because this is all that appears - // in the weak form -- the inverse of - // the permeability tensor, - // KInverse. For the purpose of - // verifying the exactness of the - // solution and determining - // convergence orders, this tensor is - // more in the way than helpful. We - // will therefore simply set it to - // the identity matrix. - // - // However, a spatially varying - // permeability tensor is - // indispensable in real-life porous - // media flow simulations, and we - // would like to use the opportunity - // to demonstrate the technique to - // use tensor valued functions. - // - // Possibly unsurprising, deal.II - // also has a base class not only for - // scalar and generally vector-valued - // functions (the Function base - // class) but also for functions that - // return tensors of fixed dimension - // and rank, the TensorFunction - // template. Here, the function under - // consideration returns a dim-by-dim - // matrix, i.e. a tensor of rank 2 - // and dimension dim. We then - // choose the template arguments of - // the base class appropriately. - // - // The interface that the - // TensorFunction class provides - // is essentially equivalent to the - // Function class. 
In particular, - // there exists a value_list - // function that takes a list of - // points at which to evaluate the - // function, and returns the values - // of the function in the second - // argument, a list of tensors: -template -class KInverse : public TensorFunction<2,dim> -{ - public: - KInverse () : TensorFunction<2,dim>() {} - - virtual void value_list (const std::vector > &points, - std::vector > &values) const; -}; - - - // The implementation is less - // interesting. As in previous - // examples, we add a check to the - // beginning of the class to make - // sure that the sizes of input and - // output parameters are the same - // (see step-5 for a discussion of - // this technique). Then we loop over - // all evaluation points, and for - // each one first clear the output - // tensor and then set all its - // diagonal elements to one - // (i.e. fill the tensor with the - // identity matrix): -template -void -KInverse::value_list (const std::vector > &points, - std::vector > &values) const -{ - Assert (points.size() == values.size(), - ExcDimensionMismatch (points.size(), values.size())); + values(0) = alpha*p[1]*p[1]/2 + beta - alpha*p[0]*p[0]/2; + values(1) = alpha*p[0]*p[1]; + values(2) = -(alpha*p[0]*p[1]*p[1]/2 + beta*p[0] - alpha*p[0]*p[0]*p[0]/6); + } - for (unsigned int p=0; pKInverse. For the purpose of + // verifying the exactness of the + // solution and determining + // convergence orders, this tensor is + // more in the way than helpful. We + // will therefore simply set it to + // the identity matrix. + // + // However, a spatially varying + // permeability tensor is + // indispensable in real-life porous + // media flow simulations, and we + // would like to use the opportunity + // to demonstrate the technique to + // use tensor valued functions. + // + // Possibly unsurprising, deal.II + // also has a base class not only for + // scalar and generally vector-valued + // functions (the Function base + // class) but also for functions that + // return tensors of fixed dimension + // and rank, the TensorFunction + // template. Here, the function under + // consideration returns a dim-by-dim + // matrix, i.e. a tensor of rank 2 + // and dimension dim. We then + // choose the template arguments of + // the base class appropriately. + // + // The interface that the + // TensorFunction class provides + // is essentially equivalent to the + // Function class. In particular, + // there exists a value_list + // function that takes a list of + // points at which to evaluate the + // function, and returns the values + // of the function in the second + // argument, a list of tensors: + template + class KInverse : public TensorFunction<2,dim> + { + public: + KInverse () : TensorFunction<2,dim>() {} + + virtual void value_list (const std::vector > &points, + std::vector > &values) const; + }; + + + // The implementation is less + // interesting. As in previous + // examples, we add a check to the + // beginning of the class to make + // sure that the sizes of input and + // output parameters are the same + // (see step-5 for a discussion of + // this technique). Then we loop over + // all evaluation points, and for + // each one first clear the output + // tensor and then set all its + // diagonal elements to one + // (i.e. 
fill the tensor with the + // identity matrix): + template + void + KInverse::value_list (const std::vector > &points, + std::vector > &values) const + { + Assert (points.size() == values.size(), + ExcDimensionMismatch (points.size(), values.size())); + for (unsigned int p=0; pfe variable. The - // FESystem class to which this - // variable belongs has a number of - // different constructors that all - // refer to binding simpler elements - // together into one larger - // element. In the present case, we - // want to couple a single RT(degree) - // element with a single DQ(degree) - // element. The constructor to - // FESystem that does this - // requires us to specity first the - // first base element (the - // FE_RaviartThomas object of - // given degree) and then the number - // of copies for this base element, - // and then similarly the kind and - // number of FE_DGQ - // elements. Note that the Raviart - // Thomas element already has dim - // vector components, so that the - // coupled element will have - // dim+1 vector components, the - // first dim of which correspond - // to the velocity variable whereas the - // last one corresponds to the - // pressure. - // - // It is also worth comparing the way - // we constructed this element from - // its base elements, with the way we - // have done so in step-8: there, we - // have built it as fe - // (FE_Q@(1), dim), i.e. we - // have simply used dim copies of - // the FE_Q(1) element, one copy - // for the displacement in each - // coordinate direction. -template -MixedLaplaceProblem::MixedLaplaceProblem (const unsigned int degree) - : - degree (degree), - fe (FE_RaviartThomas(degree), 1, - FE_DGQ(degree), 1), - dof_handler (triangulation) -{} - - - - // @sect4{MixedLaplaceProblem::make_grid_and_dofs} - - // This next function starts out with - // well-known functions calls that - // create and refine a mesh, and then - // associate degrees of freedom with - // it: -template -void MixedLaplaceProblem::make_grid_and_dofs () -{ - GridGenerator::hyper_cube (triangulation, -1, 1); - triangulation.refine_global (3); - - dof_handler.distribute_dofs (fe); - - // However, then things become - // different. As mentioned in the - // introduction, we want to - // subdivide the matrix into blocks - // corresponding to the two - // different kinds of variables, - // velocity and pressure. To this end, - // we first have to make sure that - // the indices corresponding to - // velocities and pressures are not - // intermingled: First all velocity - // degrees of freedom, then all - // pressure DoFs. This way, the - // global matrix separates nicely - // into a 2x2 system. To achieve - // this, we have to renumber - // degrees of freedom base on their - // vector component, an operation - // that conveniently is already - // implemented: - DoFRenumbering::component_wise (dof_handler); - - // The next thing is that we want - // to figure out the sizes of these - // blocks, so that we can allocate - // an appropriate amount of - // space. To this end, we call the - // DoFTools::count_dofs_per_component - // function that counts how many - // shape functions are non-zero for - // a particular vector - // component. We have dim+1 - // vector components, and we have - // to use the knowledge that for - // Raviart-Thomas elements all - // shape functions are nonzero in - // all components. 
In other words, - // the number of velocity shape - // functions equals the number of - // overall shape functions that are - // nonzero in the zeroth vector - // component. On the other hand, - // the number of pressure variables - // equals the number of shape - // functions that are nonzero in - // the dim-th component. Let us - // compute these numbers and then - // create some nice output with - // that: - std::vector dofs_per_component (dim+1); - DoFTools::count_dofs_per_component (dof_handler, dofs_per_component); - const unsigned int n_u = dofs_per_component[0], - n_p = dofs_per_component[dim]; - - std::cout << "Number of active cells: " - << triangulation.n_active_cells() - << std::endl - << "Total number of cells: " - << triangulation.n_cells() - << std::endl - << "Number of degrees of freedom: " - << dof_handler.n_dofs() - << " (" << n_u << '+' << n_p << ')' - << std::endl; - - // The next task is to allocate a - // sparsity pattern for the matrix - // that we will create. The way - // this works is that we first - // obtain a guess for the maximal - // number of nonzero entries per - // row (this could be done more - // efficiently in this case, but we - // only want to solve relatively - // small problems for which this is - // not so important). In the second - // step, we allocate a 2x2 block - // pattern and then reinitialize - // each of the blocks to its - // correct size using the n_u - // and n_p variables defined - // above that hold the number of - // velocity and pressure - // variables. In this second step, - // we only operate on the - // individual blocks of the - // system. In the third step, we - // therefore have to instruct the - // overlying block system to update - // its knowledge about the sizes of - // the blocks it manages; this - // happens with the - // sparsity_pattern.collect_sizes() - // call: - const unsigned int - n_couplings = dof_handler.max_couplings_between_dofs(); - - sparsity_pattern.reinit (2,2); - sparsity_pattern.block(0,0).reinit (n_u, n_u, n_couplings); - sparsity_pattern.block(1,0).reinit (n_p, n_u, n_couplings); - sparsity_pattern.block(0,1).reinit (n_u, n_p, n_couplings); - sparsity_pattern.block(1,1).reinit (n_p, n_p, n_couplings); - sparsity_pattern.collect_sizes(); - - // Now that the sparsity pattern - // and its blocks have the correct - // sizes, we actually need to - // construct the content of this - // pattern, and as usual compress - // it, before we also initialize a - // block matrix with this block - // sparsity pattern: - DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); - sparsity_pattern.compress(); - - system_matrix.reinit (sparsity_pattern); - - // Then we have to resize the - // solution and right hand side - // vectors in exactly the same way: - solution.reinit (2); - solution.block(0).reinit (n_u); - solution.block(1).reinit (n_p); - solution.collect_sizes (); - - system_rhs.reinit (2); - system_rhs.block(0).reinit (n_u); - system_rhs.block(1).reinit (n_p); - system_rhs.collect_sizes (); -} + for (unsigned int d=0; dFEValues objects for the cell - // terms, but also for face - // terms. After that, we define the - // usual abbreviations for variables, - // and the allocate space for the - // local matrix and right hand side - // contributions, and the array that - // holds the global numbers of the - // degrees of freedom local to the - // present cell. 
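  // (Written out as formulas, the cell contributions that the loop
  // further down computes are
  // $A_{ij} = \int (\varphi_i^u \cdot K^{-1} \varphi_j^u
  //   - \mathrm{div}\ \varphi_i^u\ \varphi_j^p
  //   - \varphi_i^p\ \mathrm{div}\ \varphi_j^u)\, dx$
  // over the cell, and
  // $F_i = -\int \varphi_i^p f\, dx$ over the cell minus
  // $\int \varphi_i^u \cdot n\ g\, ds$ over those of its faces that lie
  // on the boundary, where $\varphi_i^u$ and $\varphi_i^p$ denote the
  // velocity and pressure parts of shape function $i$, $K^{-1}$ the
  // inverse permeability, $f$ the right hand side, and $g$ the
  // pressure boundary values.)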
-template -void MixedLaplaceProblem::assemble_system () -{ - QGauss quadrature_formula(degree+2); - QGauss face_quadrature_formula(degree+2); - - FEValues fe_values (fe, quadrature_formula, - update_values | update_gradients | - update_quadrature_points | update_JxW_values); - FEFaceValues fe_face_values (fe, face_quadrature_formula, - update_values | update_normal_vectors | - update_quadrature_points | update_JxW_values); - - const unsigned int dofs_per_cell = fe.dofs_per_cell; - const unsigned int n_q_points = quadrature_formula.size(); - const unsigned int n_face_q_points = face_quadrature_formula.size(); - - FullMatrix local_matrix (dofs_per_cell, dofs_per_cell); - Vector local_rhs (dofs_per_cell); - - std::vector local_dof_indices (dofs_per_cell); - - // The next step is to declare - // objects that represent the - // source term, pressure boundary - // value, and coefficient in the - // equation. In addition to these - // objects that represent - // continuous functions, we also - // need arrays to hold their values - // at the quadrature points of - // individual cells (or faces, for - // the boundary values). Note that - // in the case of the coefficient, - // the array has to be one of - // matrices. - const RightHandSide right_hand_side; - const PressureBoundaryValues pressure_boundary_values; - const KInverse k_inverse; - - std::vector rhs_values (n_q_points); - std::vector boundary_values (n_face_q_points); - std::vector > k_inverse_values (n_q_points); - - // Finally, we need a couple of extractors - // that we will use to get at the velocity - // and pressure components of vector-valued - // shape functions. Their function and use - // is described in detail in the @ref - // vector_valued report. Essentially, we - // will use them as subscripts on the - // FEValues objects below: the FEValues - // object describes all vector components - // of shape functions, while after - // subscription, it will only refer to the - // velocities (a set of dim - // components starting at component zero) - // or the pressure (a scalar component - // located at position dim): - const FEValuesExtractors::Vector velocities (0); - const FEValuesExtractors::Scalar pressure (dim); - - // With all this in place, we can - // go on with the loop over all - // cells. 
The body of this loop has - // been discussed in the - // introduction, and will not be - // commented any further here: - typename DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(), - endc = dof_handler.end(); - for (; cell!=endc; ++cell) - { - fe_values.reinit (cell); - local_matrix = 0; - local_rhs = 0; - - right_hand_side.value_list (fe_values.get_quadrature_points(), - rhs_values); - k_inverse.value_list (fe_values.get_quadrature_points(), - k_inverse_values); - - for (unsigned int q=0; q phi_i_u = fe_values[velocities].value (i, q); - const double div_phi_i_u = fe_values[velocities].divergence (i, q); - const double phi_i_p = fe_values[pressure].value (i, q); - - for (unsigned int j=0; j phi_j_u = fe_values[velocities].value (j, q); - const double div_phi_j_u = fe_values[velocities].divergence (j, q); - const double phi_j_p = fe_values[pressure].value (j, q); - - local_matrix(i,j) += (phi_i_u * k_inverse_values[q] * phi_j_u - - div_phi_i_u * phi_j_p - - phi_i_p * div_phi_j_u) - * fe_values.JxW(q); - } - - local_rhs(i) += -phi_i_p * - rhs_values[q] * - fe_values.JxW(q); - } - - for (unsigned int face_no=0; - face_no::faces_per_cell; - ++face_no) - if (cell->at_boundary(face_no)) - { - fe_face_values.reinit (cell, face_no); - - pressure_boundary_values - .value_list (fe_face_values.get_quadrature_points(), - boundary_values); - - for (unsigned int q=0; qget_dof_indices (local_dof_indices); - for (unsigned int i=0; ife variable. The + // FESystem class to which this + // variable belongs has a number of + // different constructors that all + // refer to binding simpler elements + // together into one larger + // element. In the present case, we + // want to couple a single RT(degree) + // element with a single DQ(degree) + // element. The constructor to + // FESystem that does this + // requires us to specity first the + // first base element (the + // FE_RaviartThomas object of + // given degree) and then the number + // of copies for this base element, + // and then similarly the kind and + // number of FE_DGQ + // elements. Note that the Raviart + // Thomas element already has dim + // vector components, so that the + // coupled element will have + // dim+1 vector components, the + // first dim of which correspond + // to the velocity variable whereas the + // last one corresponds to the + // pressure. + // + // It is also worth comparing the way + // we constructed this element from + // its base elements, with the way we + // have done so in step-8: there, we + // have built it as fe + // (FE_Q@(1), dim), i.e. we + // have simply used dim copies of + // the FE_Q(1) element, one copy + // for the displacement in each + // coordinate direction. + template + MixedLaplaceProblem::MixedLaplaceProblem (const unsigned int degree) + : + degree (degree), + fe (FE_RaviartThomas(degree), 1, + FE_DGQ(degree), 1), + dof_handler (triangulation) + {} + + + + // @sect4{MixedLaplaceProblem::make_grid_and_dofs} + + // This next function starts out with + // well-known functions calls that + // create and refine a mesh, and then + // associate degrees of freedom with + // it: + template + void MixedLaplaceProblem::make_grid_and_dofs () + { + GridGenerator::hyper_cube (triangulation, -1, 1); + triangulation.refine_global (3); + + dof_handler.distribute_dofs (fe); + + // However, then things become + // different. 
As mentioned in the + // introduction, we want to + // subdivide the matrix into blocks + // corresponding to the two + // different kinds of variables, + // velocity and pressure. To this end, + // we first have to make sure that + // the indices corresponding to + // velocities and pressures are not + // intermingled: First all velocity + // degrees of freedom, then all + // pressure DoFs. This way, the + // global matrix separates nicely + // into a 2x2 system. To achieve + // this, we have to renumber + // degrees of freedom base on their + // vector component, an operation + // that conveniently is already + // implemented: + DoFRenumbering::component_wise (dof_handler); + + // The next thing is that we want + // to figure out the sizes of these + // blocks, so that we can allocate + // an appropriate amount of + // space. To this end, we call the + // DoFTools::count_dofs_per_component + // function that counts how many + // shape functions are non-zero for + // a particular vector + // component. We have dim+1 + // vector components, and we have + // to use the knowledge that for + // Raviart-Thomas elements all + // shape functions are nonzero in + // all components. In other words, + // the number of velocity shape + // functions equals the number of + // overall shape functions that are + // nonzero in the zeroth vector + // component. On the other hand, + // the number of pressure variables + // equals the number of shape + // functions that are nonzero in + // the dim-th component. Let us + // compute these numbers and then + // create some nice output with + // that: + std::vector dofs_per_component (dim+1); + DoFTools::count_dofs_per_component (dof_handler, dofs_per_component); + const unsigned int n_u = dofs_per_component[0], + n_p = dofs_per_component[dim]; + + std::cout << "Number of active cells: " + << triangulation.n_active_cells() + << std::endl + << "Total number of cells: " + << triangulation.n_cells() + << std::endl + << "Number of degrees of freedom: " + << dof_handler.n_dofs() + << " (" << n_u << '+' << n_p << ')' + << std::endl; + + // The next task is to allocate a + // sparsity pattern for the matrix + // that we will create. The way + // this works is that we first + // obtain a guess for the maximal + // number of nonzero entries per + // row (this could be done more + // efficiently in this case, but we + // only want to solve relatively + // small problems for which this is + // not so important). In the second + // step, we allocate a 2x2 block + // pattern and then reinitialize + // each of the blocks to its + // correct size using the n_u + // and n_p variables defined + // above that hold the number of + // velocity and pressure + // variables. In this second step, + // we only operate on the + // individual blocks of the + // system. 
In the third step, we + // therefore have to instruct the + // overlying block system to update + // its knowledge about the sizes of + // the blocks it manages; this + // happens with the + // sparsity_pattern.collect_sizes() + // call: + const unsigned int + n_couplings = dof_handler.max_couplings_between_dofs(); + + sparsity_pattern.reinit (2,2); + sparsity_pattern.block(0,0).reinit (n_u, n_u, n_couplings); + sparsity_pattern.block(1,0).reinit (n_p, n_u, n_couplings); + sparsity_pattern.block(0,1).reinit (n_u, n_p, n_couplings); + sparsity_pattern.block(1,1).reinit (n_p, n_p, n_couplings); + sparsity_pattern.collect_sizes(); + + // Now that the sparsity pattern + // and its blocks have the correct + // sizes, we actually need to + // construct the content of this + // pattern, and as usual compress + // it, before we also initialize a + // block matrix with this block + // sparsity pattern: + DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); + sparsity_pattern.compress(); + + system_matrix.reinit (sparsity_pattern); + + // Then we have to resize the + // solution and right hand side + // vectors in exactly the same way: + solution.reinit (2); + solution.block(0).reinit (n_u); + solution.block(1).reinit (n_p); + solution.collect_sizes (); + + system_rhs.reinit (2); + system_rhs.block(0).reinit (n_u); + system_rhs.block(1).reinit (n_p); + system_rhs.collect_sizes (); + } - // @sect3{Linear solvers and preconditioners} - - // The linear solvers and - // preconditioners we use in this - // example have been discussed in - // significant detail already in the - // introduction. We will therefore - // not discuss the rationale for - // these classes here any more, but - // rather only comment on - // implementational aspects. - - - // @sect4{The SchurComplement class template} - - // The next class is the Schur - // complement class. Its rationale - // has also been discussed in length - // in the introduction. The only - // things we would like to note is - // that the class, too, is derived - // from the Subscriptor class and - // that as mentioned above it stores - // pointers to the entire block - // matrix and the inverse of the mass - // matrix block using - // SmartPointer objects. - // - // The vmult function requires - // two temporary vectors that we do - // not want to re-allocate and free - // every time we call this - // function. Since here, we have full - // control over the use of these - // vectors (unlike above, where a - // class called by the vmult - // function required these vectors, - // not the vmult function - // itself), we allocate them - // directly, rather than going - // through the VectorMemory - // mechanism. However, again, these - // member variables do not carry any - // state between successive calls to - // the member functions of this class - // (i.e., we never care what values - // they were set to the last time a - // member function was called), we - // mark these vectors as mutable. 
- // - // The rest of the (short) - // implementation of this class is - // straightforward if you know the - // order of matrix-vector - // multiplications performed by the - // vmult function: -class SchurComplement : public Subscriptor -{ - public: - SchurComplement (const BlockSparseMatrix &A, - const IterativeInverse > &Minv); - void vmult (Vector &dst, - const Vector &src) const; + // @sect4{MixedLaplaceProblem::assemble_system} + // Similarly, the function that + // assembles the linear system has + // mostly been discussed already in + // the introduction to this + // example. At its top, what happens + // are all the usual steps, with the + // addition that we do not only + // allocate quadrature and + // FEValues objects for the cell + // terms, but also for face + // terms. After that, we define the + // usual abbreviations for variables, + // and the allocate space for the + // local matrix and right hand side + // contributions, and the array that + // holds the global numbers of the + // degrees of freedom local to the + // present cell. + template + void MixedLaplaceProblem::assemble_system () + { + QGauss quadrature_formula(degree+2); + QGauss face_quadrature_formula(degree+2); + + FEValues fe_values (fe, quadrature_formula, + update_values | update_gradients | + update_quadrature_points | update_JxW_values); + FEFaceValues fe_face_values (fe, face_quadrature_formula, + update_values | update_normal_vectors | + update_quadrature_points | update_JxW_values); + + const unsigned int dofs_per_cell = fe.dofs_per_cell; + const unsigned int n_q_points = quadrature_formula.size(); + const unsigned int n_face_q_points = face_quadrature_formula.size(); + + FullMatrix local_matrix (dofs_per_cell, dofs_per_cell); + Vector local_rhs (dofs_per_cell); + + std::vector local_dof_indices (dofs_per_cell); + + // The next step is to declare + // objects that represent the + // source term, pressure boundary + // value, and coefficient in the + // equation. In addition to these + // objects that represent + // continuous functions, we also + // need arrays to hold their values + // at the quadrature points of + // individual cells (or faces, for + // the boundary values). Note that + // in the case of the coefficient, + // the array has to be one of + // matrices. + const RightHandSide right_hand_side; + const PressureBoundaryValues pressure_boundary_values; + const KInverse k_inverse; + + std::vector rhs_values (n_q_points); + std::vector boundary_values (n_face_q_points); + std::vector > k_inverse_values (n_q_points); + + // Finally, we need a couple of extractors + // that we will use to get at the velocity + // and pressure components of vector-valued + // shape functions. Their function and use + // is described in detail in the @ref + // vector_valued report. Essentially, we + // will use them as subscripts on the + // FEValues objects below: the FEValues + // object describes all vector components + // of shape functions, while after + // subscription, it will only refer to the + // velocities (a set of dim + // components starting at component zero) + // or the pressure (a scalar component + // located at position dim): + const FEValuesExtractors::Vector velocities (0); + const FEValuesExtractors::Scalar pressure (dim); + + // With all this in place, we can + // go on with the loop over all + // cells. 
The body of this loop has + // been discussed in the + // introduction, and will not be + // commented any further here: + typename DoFHandler::active_cell_iterator + cell = dof_handler.begin_active(), + endc = dof_handler.end(); + for (; cell!=endc; ++cell) + { + fe_values.reinit (cell); + local_matrix = 0; + local_rhs = 0; + + right_hand_side.value_list (fe_values.get_quadrature_points(), + rhs_values); + k_inverse.value_list (fe_values.get_quadrature_points(), + k_inverse_values); + + for (unsigned int q=0; q phi_i_u = fe_values[velocities].value (i, q); + const double div_phi_i_u = fe_values[velocities].divergence (i, q); + const double phi_i_p = fe_values[pressure].value (i, q); + + for (unsigned int j=0; j phi_j_u = fe_values[velocities].value (j, q); + const double div_phi_j_u = fe_values[velocities].divergence (j, q); + const double phi_j_p = fe_values[pressure].value (j, q); + + local_matrix(i,j) += (phi_i_u * k_inverse_values[q] * phi_j_u + - div_phi_i_u * phi_j_p + - phi_i_p * div_phi_j_u) + * fe_values.JxW(q); + } + + local_rhs(i) += -phi_i_p * + rhs_values[q] * + fe_values.JxW(q); + } + + for (unsigned int face_no=0; + face_no::faces_per_cell; + ++face_no) + if (cell->at_boundary(face_no)) + { + fe_face_values.reinit (cell, face_no); + + pressure_boundary_values + .value_list (fe_face_values.get_quadrature_points(), + boundary_values); + + for (unsigned int q=0; qget_dof_indices (local_dof_indices); + for (unsigned int i=0; i > system_matrix; - const SmartPointer > > m_inverse; - - mutable Vector tmp1, tmp2; -}; + // @sect3{Linear solvers and preconditioners} + + // The linear solvers and + // preconditioners we use in this + // example have been discussed in + // significant detail already in the + // introduction. We will therefore + // not discuss the rationale for + // these classes here any more, but + // rather only comment on + // implementational aspects. + + + // @sect4{The SchurComplement class template} + + // The next class is the Schur + // complement class. Its rationale + // has also been discussed in length + // in the introduction. The only + // things we would like to note is + // that the class, too, is derived + // from the Subscriptor class and + // that as mentioned above it stores + // pointers to the entire block + // matrix and the inverse of the mass + // matrix block using + // SmartPointer objects. + // + // The vmult function requires + // two temporary vectors that we do + // not want to re-allocate and free + // every time we call this + // function. Since here, we have full + // control over the use of these + // vectors (unlike above, where a + // class called by the vmult + // function required these vectors, + // not the vmult function + // itself), we allocate them + // directly, rather than going + // through the VectorMemory + // mechanism. However, again, these + // member variables do not carry any + // state between successive calls to + // the member functions of this class + // (i.e., we never care what values + // they were set to the last time a + // member function was called), we + // mark these vectors as mutable. 
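As a brief recap of the algebra behind this class, using the same naming as the comments in the solve() function below ($M$ for the block(0,0) velocity mass matrix, $B^T$ and $B$ for the blocks (0,1) and (1,0), and $F$, $G$ for the two right hand side blocks), eliminating the velocity from the block system yields the equation whose operator vmult() applies:
$$
  \begin{pmatrix} M & B^T \\ B & 0 \end{pmatrix}
  \begin{pmatrix} U \\ P \end{pmatrix}
  =
  \begin{pmatrix} F \\ G \end{pmatrix}
  \quad\Longrightarrow\quad
  B M^{-1} B^T\, P \;=\; B M^{-1} F - G .
$$
A call to vmult(dst,src) therefore computes dst $= B M^{-1} B^T\,$src through exactly the three matrix-vector products listed in its implementation.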
+ // + // The rest of the (short) + // implementation of this class is + // straightforward if you know the + // order of matrix-vector + // multiplications performed by the + // vmult function: + class SchurComplement : public Subscriptor + { + public: + SchurComplement (const BlockSparseMatrix &A, + const IterativeInverse > &Minv); -SchurComplement::SchurComplement (const BlockSparseMatrix &A, - const IterativeInverse > &Minv) - : - system_matrix (&A), - m_inverse (&Minv), - tmp1 (A.block(0,0).m()), - tmp2 (A.block(0,0).m()) -{} + void vmult (Vector &dst, + const Vector &src) const; + private: + const SmartPointer > system_matrix; + const SmartPointer > > m_inverse; -void SchurComplement::vmult (Vector &dst, - const Vector &src) const -{ - system_matrix->block(0,1).vmult (tmp1, src); - m_inverse->vmult (tmp2, tmp1); - system_matrix->block(1,0).vmult (dst, tmp2); -} + mutable Vector tmp1, tmp2; + }; - // @sect4{The ApproximateSchurComplement class template} - - // The third component of our solver - // and preconditioner system is the - // class that approximates the Schur - // complement so we can form a - // an InverseIterate - // object that approximates the - // inverse of the Schur - // complement. It follows the same - // pattern as the Schur complement - // class, with the only exception - // that we do not multiply with the - // inverse mass matrix in vmult, - // but rather just do a single Jacobi - // step. Consequently, the class also - // does not have to store a pointer - // to an inverse mass matrix object. - // - // Since InverseIterate follows the - // standard convention for matrices, - // we need to provide a - // Tvmult function here as - // well. -class ApproximateSchurComplement : public Subscriptor -{ - public: - ApproximateSchurComplement (const BlockSparseMatrix &A); + SchurComplement::SchurComplement (const BlockSparseMatrix &A, + const IterativeInverse > &Minv) + : + system_matrix (&A), + m_inverse (&Minv), + tmp1 (A.block(0,0).m()), + tmp2 (A.block(0,0).m()) + {} - void vmult (Vector &dst, - const Vector &src) const; - void Tvmult (Vector &dst, - const Vector &src) const; - private: - const SmartPointer > system_matrix; - - mutable Vector tmp1, tmp2; -}; + void SchurComplement::vmult (Vector &dst, + const Vector &src) const + { + system_matrix->block(0,1).vmult (tmp1, src); + m_inverse->vmult (tmp2, tmp1); + system_matrix->block(1,0).vmult (dst, tmp2); + } -ApproximateSchurComplement::ApproximateSchurComplement (const BlockSparseMatrix &A) - : - system_matrix (&A), - tmp1 (A.block(0,0).m()), - tmp2 (A.block(0,0).m()) -{} + // @sect4{The ApproximateSchurComplement class template} + + // The third component of our solver + // and preconditioner system is the + // class that approximates the Schur + // complement so we can form a + // an InverseIterate + // object that approximates the + // inverse of the Schur + // complement. It follows the same + // pattern as the Schur complement + // class, with the only exception + // that we do not multiply with the + // inverse mass matrix in vmult, + // but rather just do a single Jacobi + // step. Consequently, the class also + // does not have to store a pointer + // to an inverse mass matrix object. + // + // Since InverseIterate follows the + // standard convention for matrices, + // we need to provide a + // Tvmult function here as + // well. 
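To make the preceding comment concrete: a single Jacobi step replaces the inner solve with (up to the relaxation parameter) one application of the inverse of the diagonal of $M$, so the operator implemented by the class below is, roughly,
$$
  \tilde S \;=\; B\,\mathrm{diag}(M)^{-1} B^T \;\approx\; B M^{-1} B^T \;=\; S,
$$
which is cheap to apply and is what the solve() function further down inverts approximately (by an inner CG iteration) in order to precondition the outer Schur complement solve.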
+ class ApproximateSchurComplement : public Subscriptor + { + public: + ApproximateSchurComplement (const BlockSparseMatrix &A); + void vmult (Vector &dst, + const Vector &src) const; + void Tvmult (Vector &dst, + const Vector &src) const; -void ApproximateSchurComplement::vmult (Vector &dst, - const Vector &src) const -{ - system_matrix->block(0,1).vmult (tmp1, src); - system_matrix->block(0,0).precondition_Jacobi (tmp2, tmp1); - system_matrix->block(1,0).vmult (dst, tmp2); -} + private: + const SmartPointer > system_matrix; + mutable Vector tmp1, tmp2; + }; -void ApproximateSchurComplement::Tvmult (Vector &dst, - const Vector &src) const -{ - system_matrix->block(1,0).Tvmult (dst, tmp2); - system_matrix->block(0,0).precondition_Jacobi (tmp2, tmp1); - system_matrix->block(0,1).Tvmult (tmp1, src); -} + ApproximateSchurComplement::ApproximateSchurComplement (const BlockSparseMatrix &A) + : + system_matrix (&A), + tmp1 (A.block(0,0).m()), + tmp2 (A.block(0,0).m()) + {} - // @sect4{MixedLaplace::solve} - - // After all these preparations, we - // can finally write the function - // that actually solves the linear - // problem. We will go through the - // two parts it has that each solve - // one of the two equations, the - // first one for the pressure - // (component 1 of the solution), - // then the velocities (component 0 - // of the solution). Both parts need - // an object representing the inverse - // mass matrix and an auxiliary - // vector, and we therefore declare - // these objects at the beginning of - // this function. -template -void MixedLaplaceProblem::solve () -{ - PreconditionIdentity identity; - IterativeInverse > - m_inverse; - m_inverse.initialize(system_matrix.block(0,0), identity); - m_inverse.solver.select("cg"); - static ReductionControl inner_control(1000, 0., 1.e-13); - m_inverse.solver.set_control(inner_control); - - Vector tmp (solution.block(0).size()); - - // Now on to the first - // equation. The right hand side of - // it is BM^{-1}F-G, which is what - // we compute in the first few - // lines. We then declare the - // objects representing the Schur - // complement, its approximation, - // and the inverse of the - // approximation. Finally, we - // declare a solver object and hand - // off all these matrices and - // vectors to it to compute block 1 - // (the pressure) of the solution: + void ApproximateSchurComplement::vmult (Vector &dst, + const Vector &src) const { - Vector schur_rhs (solution.block(1).size()); - - m_inverse.vmult (tmp, system_rhs.block(0)); - system_matrix.block(1,0).vmult (schur_rhs, tmp); - schur_rhs -= system_rhs.block(1); - - - SchurComplement - schur_complement (system_matrix, m_inverse); - - ApproximateSchurComplement - approximate_schur_complement (system_matrix); - - IterativeInverse > - preconditioner; - preconditioner.initialize(approximate_schur_complement, identity); - preconditioner.solver.select("cg"); - preconditioner.solver.set_control(inner_control); - - - SolverControl solver_control (solution.block(1).size(), - 1e-12*schur_rhs.l2_norm()); - SolverCG<> cg (solver_control); - - cg.solve (schur_complement, solution.block(1), schur_rhs, - preconditioner); - - std::cout << solver_control.last_step() - << " CG Schur complement iterations to obtain convergence." - << std::endl; + system_matrix->block(0,1).vmult (tmp1, src); + system_matrix->block(0,0).precondition_Jacobi (tmp2, tmp1); + system_matrix->block(1,0).vmult (dst, tmp2); } - // After we have the pressure, we - // can compute the velocity. 
The - // equation reads MU=-B^TP+F, and - // we solve it by first computing - // the right hand side, and then - // multiplying it with the object - // that represents the inverse of - // the mass matrix: + + void ApproximateSchurComplement::Tvmult (Vector &dst, + const Vector &src) const { - system_matrix.block(0,1).vmult (tmp, solution.block(1)); - tmp *= -1; - tmp += system_rhs.block(0); - - m_inverse.vmult (solution.block(0), tmp); + system_matrix->block(1,0).Tvmult (dst, tmp2); + system_matrix->block(0,0).precondition_Jacobi (tmp2, tmp1); + system_matrix->block(0,1).Tvmult (tmp1, src); } -} - // @sect3{MixedLaplaceProblem class implementation (continued)} - - // @sect4{MixedLaplace::compute_errors} - - // After we have dealt with the - // linear solver and preconditioners, - // we continue with the - // implementation of our main - // class. In particular, the next - // task is to compute the errors in - // our numerical solution, in both - // the pressures as well as - // velocities. - // - // To compute errors in the solution, - // we have already introduced the - // VectorTools::integrate_difference - // function in step-7 and - // step-11. However, there we only - // dealt with scalar solutions, - // whereas here we have a - // vector-valued solution with - // components that even denote - // different quantities and may have - // different orders of convergence - // (this isn't the case here, by - // choice of the used finite - // elements, but is frequently the - // case in mixed finite element - // applications). What we therefore - // have to do is to `mask' the - // components that we are interested - // in. This is easily done: the - // VectorTools::integrate_difference - // function takes as its last - // argument a pointer to a weight - // function (the parameter defaults - // to the null pointer, meaning unit - // weights). What we simply have to - // do is to pass a function object - // that equals one in the components - // we are interested in, and zero in - // the other ones. For example, to - // compute the pressure error, we - // should pass a function that - // represents the constant vector - // with a unit value in component - // dim, whereas for the velocity - // the constant vector should be one - // in the first dim components, - // and zero in the location of the - // pressure. - // - // In deal.II, the - // ComponentSelectFunction does - // exactly this: it wants to know how - // many vector components the - // function it is to represent should - // have (in our case this would be - // dim+1, for the joint - // velocity-pressure space) and which - // individual or range of components - // should be equal to one. We - // therefore define two such masks at - // the beginning of the function, - // following by an object - // representing the exact solution - // and a vector in which we will - // store the cellwise errors as - // computed by - // integrate_difference: -template -void MixedLaplaceProblem::compute_errors () const -{ - const ComponentSelectFunction - pressure_mask (dim, dim+1); - const ComponentSelectFunction - velocity_mask(std::make_pair(0, dim), dim+1); - - ExactSolution exact_solution; - Vector cellwise_errors (triangulation.n_active_cells()); - - // As already discussed in step-7, - // we have to realize that it is - // impossible to integrate the - // errors exactly. All we can do is - // approximate this integral using - // quadrature. 
This actually - // presents a slight twist here: if - // we naively chose an object of - // type QGauss@(degree+1) - // as one may be inclined to do - // (this is what we used for - // integrating the linear system), - // one realizes that the error is - // very small and does not follow - // the expected convergence curves - // at all. What is happening is - // that for the mixed finite - // elements used here, the Gauss - // points happen to be - // superconvergence points in which - // the pointwise error is much - // smaller (and converges with - // higher order) than anywhere - // else. These are therefore not - // particularly good points for - // ingration. To avoid this - // problem, we simply use a - // trapezoidal rule and iterate it - // degree+2 times in each - // coordinate direction (again as - // explained in step-7): - QTrapez<1> q_trapez; - QIterated quadrature (q_trapez, degree+2); - - // With this, we can then let the - // library compute the errors and - // output them to the screen: - VectorTools::integrate_difference (dof_handler, solution, exact_solution, - cellwise_errors, quadrature, - VectorTools::L2_norm, - &pressure_mask); - const double p_l2_error = cellwise_errors.l2_norm(); - - VectorTools::integrate_difference (dof_handler, solution, exact_solution, - cellwise_errors, quadrature, - VectorTools::L2_norm, - &velocity_mask); - const double u_l2_error = cellwise_errors.l2_norm(); - - std::cout << "Errors: ||e_p||_L2 = " << p_l2_error - << ", ||e_u||_L2 = " << u_l2_error - << std::endl; -} + // @sect4{MixedLaplace::solve} + + // After all these preparations, we + // can finally write the function + // that actually solves the linear + // problem. We will go through the + // two parts it has that each solve + // one of the two equations, the + // first one for the pressure + // (component 1 of the solution), + // then the velocities (component 0 + // of the solution). Both parts need + // an object representing the inverse + // mass matrix and an auxiliary + // vector, and we therefore declare + // these objects at the beginning of + // this function. + template + void MixedLaplaceProblem::solve () + { + PreconditionIdentity identity; + IterativeInverse > + m_inverse; + m_inverse.initialize(system_matrix.block(0,0), identity); + m_inverse.solver.select("cg"); + static ReductionControl inner_control(1000, 0., 1.e-13); + m_inverse.solver.set_control(inner_control); + + Vector tmp (solution.block(0).size()); + + // Now on to the first + // equation. The right hand side of + // it is BM^{-1}F-G, which is what + // we compute in the first few + // lines. We then declare the + // objects representing the Schur + // complement, its approximation, + // and the inverse of the + // approximation. Finally, we + // declare a solver object and hand + // off all these matrices and + // vectors to it to compute block 1 + // (the pressure) of the solution: + { + Vector schur_rhs (solution.block(1).size()); - // @sect4{MixedLaplace::output_results} - - // The last interesting function is - // the one in which we generate - // graphical output. Everything here - // looks obvious and familiar. Note - // how we construct unique names for - // all the solution variables at the - // beginning, like we did in step-8 - // and other programs later on. The - // only thing worth mentioning is - // that for higher order elements, in - // seems inappropriate to only show a - // single bilinear quadrilateral per - // cell in the graphical output. 
We - // therefore generate patches of size - // (degree+1)x(degree+1) to capture - // the full information content of - // the solution. See the step-7 - // tutorial program for more - // information on this. - // - // Note that we output the dim+1 - // components of the solution vector as a - // collection of individual scalars - // here. Most visualization programs will - // then only offer to visualize them - // individually, rather than allowing us to - // plot the flow field as a vector - // field. However, as explained in the - // corresponding function of step-22 or the - // @ref VVOutput "Generating graphical output" - // section of the @ref vector_valued module, - // instructing the DataOut class to identify - // components of the FESystem object as - // elements of a dim-dimensional - // vector is not actually very difficult and - // will then allow us to show results as - // vector plots. We skip this here for - // simplicity and refer to the links above - // for more information. -template -void MixedLaplaceProblem::output_results () const -{ - std::vector solution_names; - switch (dim) + m_inverse.vmult (tmp, system_rhs.block(0)); + system_matrix.block(1,0).vmult (schur_rhs, tmp); + schur_rhs -= system_rhs.block(1); + + + SchurComplement + schur_complement (system_matrix, m_inverse); + + ApproximateSchurComplement + approximate_schur_complement (system_matrix); + + IterativeInverse > + preconditioner; + preconditioner.initialize(approximate_schur_complement, identity); + preconditioner.solver.select("cg"); + preconditioner.solver.set_control(inner_control); + + + SolverControl solver_control (solution.block(1).size(), + 1e-12*schur_rhs.l2_norm()); + SolverCG<> cg (solver_control); + + cg.solve (schur_complement, solution.block(1), schur_rhs, + preconditioner); + + std::cout << solver_control.last_step() + << " CG Schur complement iterations to obtain convergence." + << std::endl; + } + + // After we have the pressure, we + // can compute the velocity. The + // equation reads MU=-B^TP+F, and + // we solve it by first computing + // the right hand side, and then + // multiplying it with the object + // that represents the inverse of + // the mass matrix: { - case 2: - solution_names.push_back ("u"); - solution_names.push_back ("v"); - solution_names.push_back ("p"); - break; - - case 3: - solution_names.push_back ("u"); - solution_names.push_back ("v"); - solution_names.push_back ("w"); - solution_names.push_back ("p"); - break; - - default: - Assert (false, ExcNotImplemented()); + system_matrix.block(0,1).vmult (tmp, solution.block(1)); + tmp *= -1; + tmp += system_rhs.block(0); + + m_inverse.vmult (solution.block(0), tmp); } - - - DataOut data_out; + } + + + // @sect3{MixedLaplaceProblem class implementation (continued)} + + // @sect4{MixedLaplace::compute_errors} + + // After we have dealt with the + // linear solver and preconditioners, + // we continue with the + // implementation of our main + // class. In particular, the next + // task is to compute the errors in + // our numerical solution, in both + // the pressures as well as + // velocities. + // + // To compute errors in the solution, + // we have already introduced the + // VectorTools::integrate_difference + // function in step-7 and + // step-11. 
However, there we only + // dealt with scalar solutions, + // whereas here we have a + // vector-valued solution with + // components that even denote + // different quantities and may have + // different orders of convergence + // (this isn't the case here, by + // choice of the used finite + // elements, but is frequently the + // case in mixed finite element + // applications). What we therefore + // have to do is to `mask' the + // components that we are interested + // in. This is easily done: the + // VectorTools::integrate_difference + // function takes as its last + // argument a pointer to a weight + // function (the parameter defaults + // to the null pointer, meaning unit + // weights). What we simply have to + // do is to pass a function object + // that equals one in the components + // we are interested in, and zero in + // the other ones. For example, to + // compute the pressure error, we + // should pass a function that + // represents the constant vector + // with a unit value in component + // dim, whereas for the velocity + // the constant vector should be one + // in the first dim components, + // and zero in the location of the + // pressure. + // + // In deal.II, the + // ComponentSelectFunction does + // exactly this: it wants to know how + // many vector components the + // function it is to represent should + // have (in our case this would be + // dim+1, for the joint + // velocity-pressure space) and which + // individual or range of components + // should be equal to one. We + // therefore define two such masks at + // the beginning of the function, + // following by an object + // representing the exact solution + // and a vector in which we will + // store the cellwise errors as + // computed by + // integrate_difference: + template + void MixedLaplaceProblem::compute_errors () const + { + const ComponentSelectFunction + pressure_mask (dim, dim+1); + const ComponentSelectFunction + velocity_mask(std::make_pair(0, dim), dim+1); + + ExactSolution exact_solution; + Vector cellwise_errors (triangulation.n_active_cells()); + + // As already discussed in step-7, + // we have to realize that it is + // impossible to integrate the + // errors exactly. All we can do is + // approximate this integral using + // quadrature. This actually + // presents a slight twist here: if + // we naively chose an object of + // type QGauss@(degree+1) + // as one may be inclined to do + // (this is what we used for + // integrating the linear system), + // one realizes that the error is + // very small and does not follow + // the expected convergence curves + // at all. What is happening is + // that for the mixed finite + // elements used here, the Gauss + // points happen to be + // superconvergence points in which + // the pointwise error is much + // smaller (and converges with + // higher order) than anywhere + // else. These are therefore not + // particularly good points for + // ingration. 
To avoid this + // problem, we simply use a + // trapezoidal rule and iterate it + // degree+2 times in each + // coordinate direction (again as + // explained in step-7): + QTrapez<1> q_trapez; + QIterated quadrature (q_trapez, degree+2); + + // With this, we can then let the + // library compute the errors and + // output them to the screen: + VectorTools::integrate_difference (dof_handler, solution, exact_solution, + cellwise_errors, quadrature, + VectorTools::L2_norm, + &pressure_mask); + const double p_l2_error = cellwise_errors.l2_norm(); + + VectorTools::integrate_difference (dof_handler, solution, exact_solution, + cellwise_errors, quadrature, + VectorTools::L2_norm, + &velocity_mask); + const double u_l2_error = cellwise_errors.l2_norm(); + + std::cout << "Errors: ||e_p||_L2 = " << p_l2_error + << ", ||e_u||_L2 = " << u_l2_error + << std::endl; + } + - data_out.attach_dof_handler (dof_handler); - data_out.add_data_vector (solution, solution_names); + // @sect4{MixedLaplace::output_results} + + // The last interesting function is + // the one in which we generate + // graphical output. Everything here + // looks obvious and familiar. Note + // how we construct unique names for + // all the solution variables at the + // beginning, like we did in step-8 + // and other programs later on. The + // only thing worth mentioning is + // that for higher order elements, in + // seems inappropriate to only show a + // single bilinear quadrilateral per + // cell in the graphical output. We + // therefore generate patches of size + // (degree+1)x(degree+1) to capture + // the full information content of + // the solution. See the step-7 + // tutorial program for more + // information on this. + // + // Note that we output the dim+1 + // components of the solution vector as a + // collection of individual scalars + // here. Most visualization programs will + // then only offer to visualize them + // individually, rather than allowing us to + // plot the flow field as a vector + // field. However, as explained in the + // corresponding function of step-22 or the + // @ref VVOutput "Generating graphical output" + // section of the @ref vector_valued module, + // instructing the DataOut class to identify + // components of the FESystem object as + // elements of a dim-dimensional + // vector is not actually very difficult and + // will then allow us to show results as + // vector plots. We skip this here for + // simplicity and refer to the links above + // for more information. + template + void MixedLaplaceProblem::output_results () const + { + std::vector solution_names; + switch (dim) + { + case 2: + solution_names.push_back ("u"); + solution_names.push_back ("v"); + solution_names.push_back ("p"); + break; - data_out.build_patches (degree+1); + case 3: + solution_names.push_back ("u"); + solution_names.push_back ("v"); + solution_names.push_back ("w"); + solution_names.push_back ("p"); + break; - std::ofstream output ("solution.gmv"); - data_out.write_gmv (output); -} + default: + Assert (false, ExcNotImplemented()); + } + DataOut data_out; - // @sect4{MixedLaplace::run} + data_out.attach_dof_handler (dof_handler); + data_out.add_data_vector (solution, solution_names); - // This is the final function of our - // main class. 
It's only job is to - // call the other functions in their - // natural order: -template -void MixedLaplaceProblem::run () -{ - make_grid_and_dofs(); - assemble_system (); - solve (); - compute_errors (); - output_results (); + data_out.build_patches (degree+1); + + std::ofstream output ("solution.gmv"); + data_out.write_gmv (output); + } + + + + // @sect4{MixedLaplace::run} + + // This is the final function of our + // main class. It's only job is to + // call the other functions in their + // natural order: + template + void MixedLaplaceProblem::run () + { + make_grid_and_dofs(); + assemble_system (); + solve (); + compute_errors (); + output_results (); + } } - + // @sect3{The main function} // The main function we stole from @@ -1212,10 +1215,13 @@ void MixedLaplaceProblem::run () // to the constructor of the mixed // laplace problem (here, we use // zero-th order elements). -int main () +int main () { try { + using namespace dealii; + using namespace Step20; + deallog.depth_console (0); MixedLaplaceProblem<2> mixed_laplace_problem(0); @@ -1231,10 +1237,10 @@ int main () << "Aborting!" << std::endl << "----------------------------------------------------" << std::endl; - + return 1; } - catch (...) + catch (...) { std::cerr << std::endl << std::endl << "----------------------------------------------------" diff --git a/deal.II/examples/step-21/step-21.cc b/deal.II/examples/step-21/step-21.cc index ac0357018b..87fdcab89a 100644 --- a/deal.II/examples/step-21/step-21.cc +++ b/deal.II/examples/step-21/step-21.cc @@ -66,1342 +66,1345 @@ // The last step is as in all // previous programs: -using namespace dealii; - - - // @sect3{The TwoPhaseFlowProblem class} - - // This is the main class of the program. It - // is close to the one of step-20, but with a - // few additional functions: - // - //
    - //
  • assemble_rhs_S assembles the - // right hand side of the saturation - // equation. As explained in the - // introduction, this can't be integrated - // into assemble_rhs since it depends - // on the velocity that is computed in the - // first part of the time step. - // - //
  • get_maximal_velocity does as its - // name suggests. This function is used in - // the computation of the time step size. - // - //
  • project_back_saturation resets - // all saturation degrees of freedom with - // values less than zero to zero, and all - // those with saturations greater than one - // to one. - //
- // - // The rest of the class should be pretty - // much obvious. The viscosity variable - // stores the viscosity $\mu$ that enters - // several of the formulas in the nonlinear - // equations. -template -class TwoPhaseFlowProblem +namespace Step21 { - public: - TwoPhaseFlowProblem (const unsigned int degree); - void run (); - - private: - void make_grid_and_dofs (); - void assemble_system (); - void assemble_rhs_S (); - double get_maximal_velocity () const; - void solve (); - void project_back_saturation (); - void output_results () const; - - const unsigned int degree; - - Triangulation triangulation; - FESystem fe; - DoFHandler dof_handler; - - BlockSparsityPattern sparsity_pattern; - BlockSparseMatrix system_matrix; - - const unsigned int n_refinement_steps; - - double time_step; - unsigned int timestep_number; - double viscosity; - - BlockVector solution; - BlockVector old_solution; - BlockVector system_rhs; -}; - - - // @sect3{Equation data} - - // @sect4{Pressure right hand side} - // At present, the right hand side of the - // pressure equation is simply the zero - // function. However, the rest of the program - // is fully equipped to deal with anything - // else, if this is desired: -template -class PressureRightHandSide : public Function -{ - public: - PressureRightHandSide () : Function(1) {} + using namespace dealii; - virtual double value (const Point &p, - const unsigned int component = 0) const; -}; + // @sect3{The TwoPhaseFlowProblem class} + // This is the main class of the program. It + // is close to the one of step-20, but with a + // few additional functions: + // + //
    + //
  • assemble_rhs_S assembles the + // right hand side of the saturation + // equation. As explained in the + // introduction, this can't be integrated + // into assemble_rhs since it depends + // on the velocity that is computed in the + // first part of the time step. + // + //
  • get_maximal_velocity does as its + // name suggests. This function is used in + // the computation of the time step size. + // + //
  • project_back_saturation resets + // all saturation degrees of freedom with + // values less than zero to zero, and all + // those with saturations greater than one + // to one. + //
+ // + // The rest of the class should be pretty + // much obvious. The viscosity variable + // stores the viscosity $\mu$ that enters + // several of the formulas in the nonlinear + // equations. + template + class TwoPhaseFlowProblem + { + public: + TwoPhaseFlowProblem (const unsigned int degree); + void run (); -template -double -PressureRightHandSide::value (const Point &/*p*/, - const unsigned int /*component*/) const -{ - return 0; -} + private: + void make_grid_and_dofs (); + void assemble_system (); + void assemble_rhs_S (); + double get_maximal_velocity () const; + void solve (); + void project_back_saturation (); + void output_results () const; + const unsigned int degree; - // @sect4{Pressure boundary values} - // The next are pressure boundary values. As - // mentioned in the introduction, we choose a - // linear pressure field: -template -class PressureBoundaryValues : public Function -{ - public: - PressureBoundaryValues () : Function(1) {} + Triangulation triangulation; + FESystem fe; + DoFHandler dof_handler; - virtual double value (const Point &p, - const unsigned int component = 0) const; -}; + BlockSparsityPattern sparsity_pattern; + BlockSparseMatrix system_matrix; + const unsigned int n_refinement_steps; -template -double -PressureBoundaryValues::value (const Point &p, - const unsigned int /*component*/) const -{ - return 1-p[0]; -} + double time_step; + unsigned int timestep_number; + double viscosity; + BlockVector solution; + BlockVector old_solution; + BlockVector system_rhs; + }; - // @sect4{Saturation boundary values} - // Then we also need boundary values on the - // inflow portions of the boundary. The - // question whether something is an inflow - // part is decided when assembling the right - // hand side, we only have to provide a - // functional description of the boundary - // values. This is as explained in the - // introduction: -template -class SaturationBoundaryValues : public Function -{ - public: - SaturationBoundaryValues () : Function(1) {} + // @sect3{Equation data} - virtual double value (const Point &p, - const unsigned int component = 0) const; -}; + // @sect4{Pressure right hand side} + // At present, the right hand side of the + // pressure equation is simply the zero + // function. However, the rest of the program + // is fully equipped to deal with anything + // else, if this is desired: + template + class PressureRightHandSide : public Function + { + public: + PressureRightHandSide () : Function(1) {} + virtual double value (const Point &p, + const unsigned int component = 0) const; + }; -template -double -SaturationBoundaryValues::value (const Point &p, - const unsigned int /*component*/) const -{ - if (p[0] == 0) - return 1; - else - return 0; -} + template + double + PressureRightHandSide::value (const Point &/*p*/, + const unsigned int /*component*/) const + { + return 0; + } - // @sect4{Initial data} - - // Finally, we need initial data. In reality, - // we only need initial data for the - // saturation, but we are lazy, so we will - // later, before the first time step, simply - // interpolate the entire solution for the - // previous time step from a function that - // contains all vector components. - // - // We therefore simply create a function that - // returns zero in all components. We do that - // by simply forward every function to the - // ZeroFunction class. Why not use that right - // away in the places of this program where - // we presently use the InitialValues - // class? 
Because this way it is simpler to - // later go back and choose a different - // function for initial values. -template -class InitialValues : public Function -{ - public: - InitialValues () : Function(dim+2) {} + // @sect4{Pressure boundary values} + // The next are pressure boundary values. As + // mentioned in the introduction, we choose a + // linear pressure field: + template + class PressureBoundaryValues : public Function + { + public: + PressureBoundaryValues () : Function(1) {} - virtual double value (const Point &p, - const unsigned int component = 0) const; + virtual double value (const Point &p, + const unsigned int component = 0) const; + }; - virtual void vector_value (const Point &p, - Vector &value) const; -}; + template + double + PressureBoundaryValues::value (const Point &p, + const unsigned int /*component*/) const + { + return 1-p[0]; + } -template -double -InitialValues::value (const Point &p, - const unsigned int component) const -{ - return ZeroFunction(dim+2).value (p, component); -} + // @sect4{Saturation boundary values} + // Then we also need boundary values on the + // inflow portions of the boundary. The + // question whether something is an inflow + // part is decided when assembling the right + // hand side, we only have to provide a + // functional description of the boundary + // values. This is as explained in the + // introduction: + template + class SaturationBoundaryValues : public Function + { + public: + SaturationBoundaryValues () : Function(1) {} -template -void -InitialValues::vector_value (const Point &p, - Vector &values) const -{ - ZeroFunction(dim+2).vector_value (p, values); -} + virtual double value (const Point &p, + const unsigned int component = 0) const; + }; + template + double + SaturationBoundaryValues::value (const Point &p, + const unsigned int /*component*/) const + { + if (p[0] == 0) + return 1; + else + return 0; + } - // @sect3{The inverse permeability tensor} - // As announced in the introduction, we - // implement two different permeability - // tensor fields. Each of them we put into a - // namespace of its own, so that it will be - // easy later to replace use of one by the - // other in the code. - // @sect4{Single curving crack permeability} + // @sect4{Initial data} - // The first function for the - // permeability was the one that - // models a single curving crack. It - // was already used at the end of - // step-20, and its functional form - // is given in the introduction of - // the present tutorial program. As - // in some previous programs, we have - // to declare a (seemingly - // unnecessary) default constructor - // of the KInverse class to avoid - // warnings from some compilers: -namespace SingleCurvingCrack -{ + // Finally, we need initial data. In reality, + // we only need initial data for the + // saturation, but we are lazy, so we will + // later, before the first time step, simply + // interpolate the entire solution for the + // previous time step from a function that + // contains all vector components. + // + // We therefore simply create a function that + // returns zero in all components. We do that + // by simply forward every function to the + // ZeroFunction class. Why not use that right + // away in the places of this program where + // we presently use the InitialValues + // class? Because this way it is simpler to + // later go back and choose a different + // function for initial values. 
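To illustrate the point about later swapping in different initial values, here is what a purely hypothetical, non-zero choice could look like; it is not part of this patch and assumes the component ordering velocity/pressure/saturation of the $RT_k\times DQ_k\times DQ_k$ element used below, i.e. the saturation living in component dim+1. The vector_value() function would have to be adapted accordingly.

    // Hypothetical alternative: a fully saturated strip of width 0.2 next to
    // the inflow boundary, zero velocity and pressure everywhere.
    template <int dim>
    double
    InitialValues<dim>::value (const Point<dim>  &p,
                               const unsigned int component) const
    {
      if (component == dim+1)              // the saturation component
        return (p[0] < 0.2 ? 1 : 0);
      else
        return 0;
    }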
template - class KInverse : public TensorFunction<2,dim> + class InitialValues : public Function { public: - KInverse () - : - TensorFunction<2,dim> () - {} + InitialValues () : Function(dim+2) {} + + virtual double value (const Point &p, + const unsigned int component = 0) const; + + virtual void vector_value (const Point &p, + Vector &value) const; - virtual void value_list (const std::vector > &points, - std::vector > &values) const; }; + template + double + InitialValues::value (const Point &p, + const unsigned int component) const + { + return ZeroFunction(dim+2).value (p, component); + } + + template void - KInverse::value_list (const std::vector > &points, - std::vector > &values) const + InitialValues::vector_value (const Point &p, + Vector &values) const { - Assert (points.size() == values.size(), - ExcDimensionMismatch (points.size(), values.size())); + ZeroFunction(dim+2).vector_value (p, values); + } - for (unsigned int p=0; pget_centers that computes the list of - // center points when called. - // - // Note that this class works just fine in - // both 2d and 3d, with the only difference - // being that we use more points in 3d: by - // experimenting we find that we need more - // exponentials in 3d than in 2d (we have - // more ground to cover, after all, if we - // want to keep the distance between centers - // roughly equal), so we choose 40 in 2d and - // 100 in 3d. For any other dimension, the - // function does presently not know what to - // do so simply throws an exception - // indicating exactly this. -namespace RandomMedium -{ - template - class KInverse : public TensorFunction<2,dim> - { - public: - KInverse () - : - TensorFunction<2,dim> () - {} + // @sect4{Single curving crack permeability} - virtual void value_list (const std::vector > &points, - std::vector > &values) const; + // The first function for the + // permeability was the one that + // models a single curving crack. It + // was already used at the end of + // step-20, and its functional form + // is given in the introduction of + // the present tutorial program. As + // in some previous programs, we have + // to declare a (seemingly + // unnecessary) default constructor + // of the KInverse class to avoid + // warnings from some compilers: + namespace SingleCurvingCrack + { + template + class KInverse : public TensorFunction<2,dim> + { + public: + KInverse () + : + TensorFunction<2,dim> () + {} - private: - static std::vector > centers; + virtual void value_list (const std::vector > &points, + std::vector > &values) const; + }; - static std::vector > get_centers (); - }; + template + void + KInverse::value_list (const std::vector > &points, + std::vector > &values) const + { + Assert (points.size() == values.size(), + ExcDimensionMismatch (points.size(), values.size())); + for (unsigned int p=0; p - std::vector > - KInverse::centers = KInverse::get_centers(); + const double distance_to_flowline + = std::fabs(points[p][1]-0.5-0.1*std::sin(10*points[p][0])); + const double permeability = std::max(std::exp(-(distance_to_flowline* + distance_to_flowline) + / (0.1 * 0.1)), + 0.01); - template - std::vector > - KInverse::get_centers () - { - const unsigned int N = (dim == 2 ? - 40 : - (dim == 3 ? 
- 100 : - throw ExcNotImplemented())); - - std::vector > centers_list (N); - for (unsigned int i=0; i(rand())/RAND_MAX; - - return centers_list; + for (unsigned int d=0; d - void - KInverse::value_list (const std::vector > &points, - std::vector > &values) const + // @sect4{Random medium permeability} + + // This function does as announced in the + // introduction, i.e. it creates an overlay + // of exponentials at random places. There is + // one thing worth considering for this + // class. The issue centers around the + // problem that the class creates the centers + // of the exponentials using a random + // function. If we therefore created the + // centers each time we create an object of + // the present type, we would get a different + // list of centers each time. That's not what + // we expect from classes of this type: they + // should reliably represent the same + // function. + // + // The solution to this problem is to make + // the list of centers a static member + // variable of this class, i.e. there exists + // exactly one such variable for the entire + // program, rather than for each object of + // this type. That's exactly what we are + // going to do. + // + // The next problem, however, is that we need + // a way to initialize this variable. Since + // this variable is initialized at the + // beginning of the program, we can't use a + // regular member function for that since + // there may not be an object of this type + // around at the time. The C++ standard + // therefore says that only non-member and + // static member functions can be used to + // initialize a static variable. We use the + // latter possibility by defining a function + // get_centers that computes the list of + // center points when called. + // + // Note that this class works just fine in + // both 2d and 3d, with the only difference + // being that we use more points in 3d: by + // experimenting we find that we need more + // exponentials in 3d than in 2d (we have + // more ground to cover, after all, if we + // want to keep the distance between centers + // roughly equal), so we choose 40 in 2d and + // 100 in 3d. For any other dimension, the + // function does presently not know what to + // do so simply throws an exception + // indicating exactly this. + namespace RandomMedium { - Assert (points.size() == values.size(), - ExcDimensionMismatch (points.size(), values.size())); + template + class KInverse : public TensorFunction<2,dim> + { + public: + KInverse () + : + TensorFunction<2,dim> () + {} - for (unsigned int p=0; p > &points, + std::vector > &values) const; - double permeability = 0; - for (unsigned int i=0; i > centers; - const double normalized_permeability - = std::min (std::max(permeability, 0.01), 4.); + static std::vector > get_centers (); + }; - for (unsigned int d=0; d + std::vector > + KInverse::centers = KInverse::get_centers(); - // @sect3{The inverse mobility and saturation functions} - // There are two more pieces of data that we - // need to describe, namely the inverse - // mobility function and the saturation - // curve. Their form is also given in the - // introduction: -double mobility_inverse (const double S, - const double viscosity) -{ - return 1.0 /(1.0/viscosity * S * S + (1-S) * (1-S)); -} + template + std::vector > + KInverse::get_centers () + { + const unsigned int N = (dim == 2 ? + 40 : + (dim == 3 ? 
+ 100 : + throw ExcNotImplemented())); + + std::vector > centers_list (N); + for (unsigned int i=0; i(rand())/RAND_MAX; + + return centers_list; + } -double f_saturation (const double S, - const double viscosity) -{ - return S*S /( S * S +viscosity * (1-S) * (1-S)); -} + template + void + KInverse::value_list (const std::vector > &points, + std::vector > &values) const + { + Assert (points.size() == values.size(), + ExcDimensionMismatch (points.size(), values.size())); + for (unsigned int p=0; psrc.size() CG - // iterations before the solver in - // the vmult() function - // converges. (This is, of course, a - // result of numerical round-off, - // since we know that on paper, the - // CG method converges in at most - // src.size() steps.) As - // a consequence, we set the maximum - // number of iterations equal to the - // maximum of the size of the linear - // system and 200. -template -class InverseMatrix : public Subscriptor -{ - public: - InverseMatrix (const Matrix &m); + const double normalized_permeability + = std::min (std::max(permeability, 0.01), 4.); - void vmult (Vector &dst, - const Vector &src) const; + for (unsigned int d=0; d matrix; -}; -template -InverseMatrix::InverseMatrix (const Matrix &m) - : - matrix (&m) -{} + // @sect3{The inverse mobility and saturation functions} + // There are two more pieces of data that we + // need to describe, namely the inverse + // mobility function and the saturation + // curve. Their form is also given in the + // introduction: + double mobility_inverse (const double S, + const double viscosity) + { + return 1.0 /(1.0/viscosity * S * S + (1-S) * (1-S)); + } + double f_saturation (const double S, + const double viscosity) + { + return S*S /( S * S +viscosity * (1-S) * (1-S)); + } -template -void InverseMatrix::vmult (Vector &dst, - const Vector &src) const -{ - SolverControl solver_control (std::max(src.size(), 200U), - 1e-8*src.l2_norm()); - SolverCG<> cg (solver_control); - dst = 0; - cg.solve (*matrix, dst, src, PreconditionIdentity()); -} + // @sect3{Linear solvers and preconditioners} + + // The linear solvers we use are also + // completely analogous to the ones + // used in step-20. The following + // classes are therefore copied + // verbatim from there. There is a + // single change: if the size of a + // linear system is small, i.e. when + // the mesh is very coarse, then it + // is sometimes not sufficient to set + // a maximum of + // src.size() CG + // iterations before the solver in + // the vmult() function + // converges. (This is, of course, a + // result of numerical round-off, + // since we know that on paper, the + // CG method converges in at most + // src.size() steps.) As + // a consequence, we set the maximum + // number of iterations equal to the + // maximum of the size of the linear + // system and 200. 
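  // As a brief usage sketch (the following three lines are only
  // illustrative and not part of the program; the names mirror, but are
  // not identical to, those used in solve() further down): once the
  // wrapper class below exists, applying the inverse of a matrix looks
  // just like an ordinary matrix-vector product, which is what lets us
  // compose the Schur complement operator out of such building blocks:

  InverseMatrix<SparseMatrix<double> > M_inverse (system_matrix.block(0,0));
  Vector<double>                       x (system_rhs.block(0).size());
  M_inverse.vmult (x, system_rhs.block(0));   // effectively solves M*x=b with CG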
+ template + class InverseMatrix : public Subscriptor + { + public: + InverseMatrix (const Matrix &m); -class SchurComplement : public Subscriptor -{ - public: - SchurComplement (const BlockSparseMatrix &A, - const InverseMatrix > &Minv); + void vmult (Vector &dst, + const Vector &src) const; - void vmult (Vector &dst, - const Vector &src) const; + private: + const SmartPointer matrix; + }; - private: - const SmartPointer > system_matrix; - const SmartPointer > > m_inverse; - mutable Vector tmp1, tmp2; -}; + template + InverseMatrix::InverseMatrix (const Matrix &m) + : + matrix (&m) + {} -SchurComplement:: -SchurComplement (const BlockSparseMatrix &A, - const InverseMatrix > &Minv) - : - system_matrix (&A), - m_inverse (&Minv), - tmp1 (A.block(0,0).m()), - tmp2 (A.block(0,0).m()) -{} + template + void InverseMatrix::vmult (Vector &dst, + const Vector &src) const + { + SolverControl solver_control (std::max(src.size(), 200U), + 1e-8*src.l2_norm()); + SolverCG<> cg (solver_control); + dst = 0; -void SchurComplement::vmult (Vector &dst, - const Vector &src) const -{ - system_matrix->block(0,1).vmult (tmp1, src); - m_inverse->vmult (tmp2, tmp1); - system_matrix->block(1,0).vmult (dst, tmp2); -} + cg.solve (*matrix, dst, src, PreconditionIdentity()); + } -class ApproximateSchurComplement : public Subscriptor -{ - public: - ApproximateSchurComplement (const BlockSparseMatrix &A); + class SchurComplement : public Subscriptor + { + public: + SchurComplement (const BlockSparseMatrix &A, + const InverseMatrix > &Minv); - void vmult (Vector &dst, - const Vector &src) const; + void vmult (Vector &dst, + const Vector &src) const; - private: - const SmartPointer > system_matrix; + private: + const SmartPointer > system_matrix; + const SmartPointer > > m_inverse; - mutable Vector tmp1, tmp2; -}; + mutable Vector tmp1, tmp2; + }; -ApproximateSchurComplement:: -ApproximateSchurComplement (const BlockSparseMatrix &A) - : - system_matrix (&A), - tmp1 (A.block(0,0).m()), - tmp2 (A.block(0,0).m()) -{} + SchurComplement:: + SchurComplement (const BlockSparseMatrix &A, + const InverseMatrix > &Minv) + : + system_matrix (&A), + m_inverse (&Minv), + tmp1 (A.block(0,0).m()), + tmp2 (A.block(0,0).m()) + {} -void ApproximateSchurComplement::vmult (Vector &dst, - const Vector &src) const -{ - system_matrix->block(0,1).vmult (tmp1, src); - system_matrix->block(0,0).precondition_Jacobi (tmp2, tmp1); - system_matrix->block(1,0).vmult (dst, tmp2); -} + void SchurComplement::vmult (Vector &dst, + const Vector &src) const + { + system_matrix->block(0,1).vmult (tmp1, src); + m_inverse->vmult (tmp2, tmp1); + system_matrix->block(1,0).vmult (dst, tmp2); + } + class ApproximateSchurComplement : public Subscriptor + { + public: + ApproximateSchurComplement (const BlockSparseMatrix &A); - // @sect3{TwoPhaseFlowProblem class implementation} + void vmult (Vector &dst, + const Vector &src) const; - // Here now the implementation of the main - // class. Much of it is actually copied from - // step-20, so we won't comment on it in much - // detail. You should try to get familiar - // with that program first, then most of what - // is happening here should be mostly clear. + private: + const SmartPointer > system_matrix; - // @sect4{TwoPhaseFlowProblem::TwoPhaseFlowProblem} - // First for the constructor. We use $RT_k - // \times DQ_k \times DQ_k$ spaces. The time - // step is set to zero initially, but will be - // computed before it is needed first, as - // described in a subsection of the - // introduction. 
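  // As a small illustration of what this element combination provides (the
  // following fragment is only a sketch, not part of the program; dim and
  // degree stand for the usual template parameter and constructor
  // argument): the combined element has dim+2 vector components -- dim for
  // the Raviart-Thomas velocity, one for the pressure and one for the
  // saturation -- which is also why the InitialValues object further up was
  // constructed with Function<dim>(dim+2):

  const FESystem<dim> fe_sketch (FE_RaviartThomas<dim>(degree), 1,
                                 FE_DGQ<dim>(degree),           1,
                                 FE_DGQ<dim>(degree),           1);
  Assert (fe_sketch.n_components() == dim+2, ExcInternalError());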
-template -TwoPhaseFlowProblem::TwoPhaseFlowProblem (const unsigned int degree) - : - degree (degree), - fe (FE_RaviartThomas(degree), 1, - FE_DGQ(degree), 1, - FE_DGQ(degree), 1), - dof_handler (triangulation), - n_refinement_steps (5), - time_step (0), - viscosity (0.2) -{} + mutable Vector tmp1, tmp2; + }; + ApproximateSchurComplement:: + ApproximateSchurComplement (const BlockSparseMatrix &A) + : + system_matrix (&A), + tmp1 (A.block(0,0).m()), + tmp2 (A.block(0,0).m()) + {} - // @sect4{TwoPhaseFlowProblem::make_grid_and_dofs} - // This next function starts out with - // well-known functions calls that create and - // refine a mesh, and then associate degrees - // of freedom with it. It does all the same - // things as in step-20, just now for three - // components instead of two. -template -void TwoPhaseFlowProblem::make_grid_and_dofs () -{ - GridGenerator::hyper_cube (triangulation, 0, 1); - triangulation.refine_global (n_refinement_steps); - - dof_handler.distribute_dofs (fe); - DoFRenumbering::component_wise (dof_handler); - - std::vector dofs_per_component (dim+2); - DoFTools::count_dofs_per_component (dof_handler, dofs_per_component); - const unsigned int n_u = dofs_per_component[0], - n_p = dofs_per_component[dim], - n_s = dofs_per_component[dim+1]; - - std::cout << "Number of active cells: " - << triangulation.n_active_cells() - << std::endl - << "Number of degrees of freedom: " - << dof_handler.n_dofs() - << " (" << n_u << '+' << n_p << '+'<< n_s <<')' - << std::endl - << std::endl; - - const unsigned int - n_couplings = dof_handler.max_couplings_between_dofs(); - - sparsity_pattern.reinit (3,3); - sparsity_pattern.block(0,0).reinit (n_u, n_u, n_couplings); - sparsity_pattern.block(1,0).reinit (n_p, n_u, n_couplings); - sparsity_pattern.block(2,0).reinit (n_s, n_u, n_couplings); - sparsity_pattern.block(0,1).reinit (n_u, n_p, n_couplings); - sparsity_pattern.block(1,1).reinit (n_p, n_p, n_couplings); - sparsity_pattern.block(2,1).reinit (n_s, n_p, n_couplings); - sparsity_pattern.block(0,2).reinit (n_u, n_s, n_couplings); - sparsity_pattern.block(1,2).reinit (n_p, n_s, n_couplings); - sparsity_pattern.block(2,2).reinit (n_s, n_s, n_couplings); - - sparsity_pattern.collect_sizes(); - - DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); - sparsity_pattern.compress(); - - - system_matrix.reinit (sparsity_pattern); - - - solution.reinit (3); - solution.block(0).reinit (n_u); - solution.block(1).reinit (n_p); - solution.block(2).reinit (n_s); - solution.collect_sizes (); - - old_solution.reinit (3); - old_solution.block(0).reinit (n_u); - old_solution.block(1).reinit (n_p); - old_solution.block(2).reinit (n_s); - old_solution.collect_sizes (); - - system_rhs.reinit (3); - system_rhs.block(0).reinit (n_u); - system_rhs.block(1).reinit (n_p); - system_rhs.block(2).reinit (n_s); - system_rhs.collect_sizes (); -} + void ApproximateSchurComplement::vmult (Vector &dst, + const Vector &src) const + { + system_matrix->block(0,1).vmult (tmp1, src); + system_matrix->block(0,0).precondition_Jacobi (tmp2, tmp1); + system_matrix->block(1,0).vmult (dst, tmp2); + } - // @sect4{TwoPhaseFlowProblem::assemble_system} - - // This is the function that assembles the - // linear system, or at least everything - // except the (1,3) block that depends on the - // still-unknown velocity computed during - // this time step (we deal with this in - // assemble_rhs_S). Much of it - // is again as in step-20, but we have to - // deal with some nonlinearity this time. 
- // However, the top of the function is pretty - // much as usual (note that we set matrix and - // right hand side to zero at the beginning - // — something we didn't have to do for - // stationary problems since there we use - // each matrix object only once and it is - // empty at the beginning anyway). - // - // Note that in its present form, the - // function uses the permeability implemented - // in the RandomMedium::KInverse - // class. Switching to the single curved - // crack permeability function is as simple - // as just changing the namespace name. -template -void TwoPhaseFlowProblem::assemble_system () -{ - system_matrix=0; - system_rhs=0; - QGauss quadrature_formula(degree+2); - QGauss face_quadrature_formula(degree+2); - FEValues fe_values (fe, quadrature_formula, - update_values | update_gradients | - update_quadrature_points | update_JxW_values); - FEFaceValues fe_face_values (fe, face_quadrature_formula, - update_values | update_normal_vectors | - update_quadrature_points | update_JxW_values); - const unsigned int dofs_per_cell = fe.dofs_per_cell; + // @sect3{TwoPhaseFlowProblem class implementation} - const unsigned int n_q_points = quadrature_formula.size(); - const unsigned int n_face_q_points = face_quadrature_formula.size(); + // Here now the implementation of the main + // class. Much of it is actually copied from + // step-20, so we won't comment on it in much + // detail. You should try to get familiar + // with that program first, then most of what + // is happening here should be mostly clear. - FullMatrix local_matrix (dofs_per_cell, dofs_per_cell); - Vector local_rhs (dofs_per_cell); + // @sect4{TwoPhaseFlowProblem::TwoPhaseFlowProblem} + // First for the constructor. We use $RT_k + // \times DQ_k \times DQ_k$ spaces. The time + // step is set to zero initially, but will be + // computed before it is needed first, as + // described in a subsection of the + // introduction. + template + TwoPhaseFlowProblem::TwoPhaseFlowProblem (const unsigned int degree) + : + degree (degree), + fe (FE_RaviartThomas(degree), 1, + FE_DGQ(degree), 1, + FE_DGQ(degree), 1), + dof_handler (triangulation), + n_refinement_steps (5), + time_step (0), + viscosity (0.2) + {} + + + + // @sect4{TwoPhaseFlowProblem::make_grid_and_dofs} + + // This next function starts out with + // well-known functions calls that create and + // refine a mesh, and then associate degrees + // of freedom with it. It does all the same + // things as in step-20, just now for three + // components instead of two. 
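  // Schematically -- this is only a sketch, using the sign conventions of
  // the assembly routine further down -- the matrix whose block structure
  // is set up here has the form
  // $\left(\begin{array}{ccc} M(S^n) & B^T & 0 \\ B & 0 & 0 \\ 0 & 0 & Q \end{array}\right)$,
  // where $M(S^n)$ is the velocity mass matrix weighted by
  // $K^{-1}\lambda^{-1}(S^n)$, $B$ and $B^T$ are the (negative)
  // divergence-pressure couplings, and $Q$ is the saturation mass matrix;
  // the velocity-dependent coupling to the saturation does not enter the
  // matrix at all but only the right hand side assembled in assemble_rhs_S.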
+ template + void TwoPhaseFlowProblem::make_grid_and_dofs () + { + GridGenerator::hyper_cube (triangulation, 0, 1); + triangulation.refine_global (n_refinement_steps); + + dof_handler.distribute_dofs (fe); + DoFRenumbering::component_wise (dof_handler); + + std::vector dofs_per_component (dim+2); + DoFTools::count_dofs_per_component (dof_handler, dofs_per_component); + const unsigned int n_u = dofs_per_component[0], + n_p = dofs_per_component[dim], + n_s = dofs_per_component[dim+1]; + + std::cout << "Number of active cells: " + << triangulation.n_active_cells() + << std::endl + << "Number of degrees of freedom: " + << dof_handler.n_dofs() + << " (" << n_u << '+' << n_p << '+'<< n_s <<')' + << std::endl + << std::endl; + + const unsigned int + n_couplings = dof_handler.max_couplings_between_dofs(); + + sparsity_pattern.reinit (3,3); + sparsity_pattern.block(0,0).reinit (n_u, n_u, n_couplings); + sparsity_pattern.block(1,0).reinit (n_p, n_u, n_couplings); + sparsity_pattern.block(2,0).reinit (n_s, n_u, n_couplings); + sparsity_pattern.block(0,1).reinit (n_u, n_p, n_couplings); + sparsity_pattern.block(1,1).reinit (n_p, n_p, n_couplings); + sparsity_pattern.block(2,1).reinit (n_s, n_p, n_couplings); + sparsity_pattern.block(0,2).reinit (n_u, n_s, n_couplings); + sparsity_pattern.block(1,2).reinit (n_p, n_s, n_couplings); + sparsity_pattern.block(2,2).reinit (n_s, n_s, n_couplings); + + sparsity_pattern.collect_sizes(); + + DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); + sparsity_pattern.compress(); + + + system_matrix.reinit (sparsity_pattern); + + + solution.reinit (3); + solution.block(0).reinit (n_u); + solution.block(1).reinit (n_p); + solution.block(2).reinit (n_s); + solution.collect_sizes (); + + old_solution.reinit (3); + old_solution.block(0).reinit (n_u); + old_solution.block(1).reinit (n_p); + old_solution.block(2).reinit (n_s); + old_solution.collect_sizes (); + + system_rhs.reinit (3); + system_rhs.block(0).reinit (n_u); + system_rhs.block(1).reinit (n_p); + system_rhs.block(2).reinit (n_s); + system_rhs.collect_sizes (); + } - std::vector local_dof_indices (dofs_per_cell); - const PressureRightHandSide pressure_right_hand_side; - const PressureBoundaryValues pressure_boundary_values; - const RandomMedium::KInverse k_inverse; + // @sect4{TwoPhaseFlowProblem::assemble_system} + + // This is the function that assembles the + // linear system, or at least everything + // except the (1,3) block that depends on the + // still-unknown velocity computed during + // this time step (we deal with this in + // assemble_rhs_S). Much of it + // is again as in step-20, but we have to + // deal with some nonlinearity this time. + // However, the top of the function is pretty + // much as usual (note that we set matrix and + // right hand side to zero at the beginning + // — something we didn't have to do for + // stationary problems since there we use + // each matrix object only once and it is + // empty at the beginning anyway). + // + // Note that in its present form, the + // function uses the permeability implemented + // in the RandomMedium::KInverse + // class. Switching to the single curved + // crack permeability function is as simple + // as just changing the namespace name. 
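  // Concretely -- as an illustration of the remark above, not a change to
  // the program -- switching permeability models amounts to replacing the
  // declaration used in the function below,

  const RandomMedium::KInverse<dim>        k_inverse;

  // by the single curving crack variant,

  const SingleCurvingCrack::KInverse<dim>  k_inverse;

  // with no other change anywhere in the program.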
+ template + void TwoPhaseFlowProblem::assemble_system () + { + system_matrix=0; + system_rhs=0; - std::vector pressure_rhs_values (n_q_points); - std::vector boundary_values (n_face_q_points); - std::vector > k_inverse_values (n_q_points); + QGauss quadrature_formula(degree+2); + QGauss face_quadrature_formula(degree+2); - std::vector > old_solution_values(n_q_points, Vector(dim+2)); - std::vector > > old_solution_grads(n_q_points, - std::vector > (dim+2)); + FEValues fe_values (fe, quadrature_formula, + update_values | update_gradients | + update_quadrature_points | update_JxW_values); + FEFaceValues fe_face_values (fe, face_quadrature_formula, + update_values | update_normal_vectors | + update_quadrature_points | update_JxW_values); - const FEValuesExtractors::Vector velocities (0); - const FEValuesExtractors::Scalar pressure (dim); - const FEValuesExtractors::Scalar saturation (dim+1); + const unsigned int dofs_per_cell = fe.dofs_per_cell; - typename DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(), - endc = dof_handler.end(); - for (; cell!=endc; ++cell) - { - fe_values.reinit (cell); - local_matrix = 0; - local_rhs = 0; - - // Here's the first significant - // difference: We have to get the - // values of the saturation function of - // the previous time step at the - // quadrature points. To this end, we - // can use the - // FEValues::get_function_values - // (previously already used in step-9, - // step-14 and step-15), a function - // that takes a solution vector and - // returns a list of function values at - // the quadrature points of the present - // cell. In fact, it returns the - // complete vector-valued solution at - // each quadrature point, i.e. not only - // the saturation but also the - // velocities and pressure: - fe_values.get_function_values (old_solution, old_solution_values); - - // Then we also have to get the values - // of the pressure right hand side and - // of the inverse permeability tensor - // at the quadrature points: - pressure_right_hand_side.value_list (fe_values.get_quadrature_points(), - pressure_rhs_values); - k_inverse.value_list (fe_values.get_quadrature_points(), - k_inverse_values); - - // With all this, we can now loop over - // all the quadrature points and shape - // functions on this cell and assemble - // those parts of the matrix and right - // hand side that we deal with in this - // function. The individual terms in - // the contributions should be - // self-explanatory given the explicit - // form of the bilinear form stated in - // the introduction: - for (unsigned int q=0; q phi_i_u = fe_values[velocities].value (i, q); - const double div_phi_i_u = fe_values[velocities].divergence (i, q); - const double phi_i_p = fe_values[pressure].value (i, q); - const double phi_i_s = fe_values[saturation].value (i, q); - - for (unsigned int j=0; j phi_j_u = fe_values[velocities].value (j, q); - const double div_phi_j_u = fe_values[velocities].divergence (j, q); - const double phi_j_p = fe_values[pressure].value (j, q); - const double phi_j_s = fe_values[saturation].value (j, q); - - local_matrix(i,j) += (phi_i_u * k_inverse_values[q] * - mobility_inverse(old_s,viscosity) * phi_j_u - - div_phi_i_u * phi_j_p - - phi_i_p * div_phi_j_u - + phi_i_s * phi_j_s) - * fe_values.JxW(q); - } - - local_rhs(i) += (-phi_i_p * pressure_rhs_values[q])* - fe_values.JxW(q); - } - - - // Next, we also have to deal with the - // pressure boundary values. 
This, - // again is as in step-20: - for (unsigned int face_no=0; - face_no::faces_per_cell; - ++face_no) - if (cell->at_boundary(face_no)) - { - fe_face_values.reinit (cell, face_no); - - pressure_boundary_values - .value_list (fe_face_values.get_quadrature_points(), - boundary_values); - - for (unsigned int q=0; q - phi_i_u = fe_face_values[velocities].value (i, q); - - local_rhs(i) += -(phi_i_u * - fe_face_values.normal_vector(q) * - boundary_values[q] * - fe_face_values.JxW(q)); - } - } - - // The final step in the loop - // over all cells is to - // transfer local contributions - // into the global matrix and - // right hand side vector: - cell->get_dof_indices (local_dof_indices); - for (unsigned int i=0; i local_matrix (dofs_per_cell, dofs_per_cell); + Vector local_rhs (dofs_per_cell); - // So much for assembly of matrix and right - // hand side. Note that we do not have to - // interpolate and apply boundary values - // since they have all been taken care of in - // the weak form already. + std::vector local_dof_indices (dofs_per_cell); + const PressureRightHandSide pressure_right_hand_side; + const PressureBoundaryValues pressure_boundary_values; + const RandomMedium::KInverse k_inverse; - // @sect4{TwoPhaseFlowProblem::assemble_rhs_S} + std::vector pressure_rhs_values (n_q_points); + std::vector boundary_values (n_face_q_points); + std::vector > k_inverse_values (n_q_points); - // As explained in the introduction, we can - // only evaluate the right hand side of the - // saturation equation once the velocity has - // been computed. We therefore have this - // separate function to this end. -template -void TwoPhaseFlowProblem::assemble_rhs_S () -{ - QGauss quadrature_formula(degree+2); - QGauss face_quadrature_formula(degree+2); - FEValues fe_values (fe, quadrature_formula, - update_values | update_gradients | - update_quadrature_points | update_JxW_values); - FEFaceValues fe_face_values (fe, face_quadrature_formula, - update_values | update_normal_vectors | - update_quadrature_points | update_JxW_values); - FEFaceValues fe_face_values_neighbor (fe, face_quadrature_formula, - update_values); - - const unsigned int dofs_per_cell = fe.dofs_per_cell; - const unsigned int n_q_points = quadrature_formula.size(); - const unsigned int n_face_q_points = face_quadrature_formula.size(); - - Vector local_rhs (dofs_per_cell); - - std::vector > old_solution_values(n_q_points, Vector(dim+2)); - std::vector > old_solution_values_face(n_face_q_points, Vector(dim+2)); - std::vector > old_solution_values_face_neighbor(n_face_q_points, Vector(dim+2)); - std::vector > present_solution_values(n_q_points, Vector(dim+2)); - std::vector > present_solution_values_face(n_face_q_points, Vector(dim+2)); - - std::vector neighbor_saturation (n_face_q_points); - std::vector local_dof_indices (dofs_per_cell); - - SaturationBoundaryValues saturation_boundary_values; - - const FEValuesExtractors::Scalar saturation (dim+1); - - typename DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(), - endc = dof_handler.end(); - for (; cell!=endc; ++cell) - { - local_rhs = 0; - fe_values.reinit (cell); - - fe_values.get_function_values (old_solution, old_solution_values); - fe_values.get_function_values (solution, present_solution_values); - - // First for the cell terms. 
These are, - // following the formulas in the - // introduction, $(S^n,\sigma)-(F(S^n) - // \mathbf{v}^{n+1},\nabla sigma)$, - // where $\sigma$ is the saturation - // component of the test function: - for (unsigned int q=0; q present_u; - for (unsigned int d=0; d grad_phi_i_s = fe_values[saturation].gradient (i, q); - - local_rhs(i) += (time_step * - f_saturation(old_s,viscosity) * - present_u * - grad_phi_i_s - + - old_s * phi_i_s) - * - fe_values.JxW(q); - } - - // Secondly, we have to deal with the - // flux parts on the face - // boundaries. This was a bit more - // involved because we first have to - // determine which are the influx and - // outflux parts of the cell - // boundary. If we have an influx - // boundary, we need to evaluate the - // saturation on the other side of the - // face (or the boundary values, if we - // are at the boundary of the domain). - // - // All this is a bit tricky, but has - // been explained in some detail - // already in step-9. Take a look there - // how this is supposed to work! - for (unsigned int face_no=0; face_no::faces_per_cell; - ++face_no) - { - fe_face_values.reinit (cell, face_no); - - fe_face_values.get_function_values (old_solution, old_solution_values_face); - fe_face_values.get_function_values (solution, present_solution_values_face); - - if (cell->at_boundary(face_no)) - saturation_boundary_values - .value_list (fe_face_values.get_quadrature_points(), - neighbor_saturation); - else - { - const typename DoFHandler::active_cell_iterator - neighbor = cell->neighbor(face_no); - const unsigned int - neighbor_face = cell->neighbor_of_neighbor(face_no); - - fe_face_values_neighbor.reinit (neighbor, neighbor_face); - - fe_face_values_neighbor - .get_function_values (old_solution, - old_solution_values_face_neighbor); - - for (unsigned int q=0; q present_u_face; - for (unsigned int d=0; d= 0); - - for (unsigned int i=0; iget_dof_indices (local_dof_indices); - for (unsigned int i=0; i > old_solution_values(n_q_points, Vector(dim+2)); + std::vector > > old_solution_grads(n_q_points, + std::vector > (dim+2)); + + const FEValuesExtractors::Vector velocities (0); + const FEValuesExtractors::Scalar pressure (dim); + const FEValuesExtractors::Scalar saturation (dim+1); + typename DoFHandler::active_cell_iterator + cell = dof_handler.begin_active(), + endc = dof_handler.end(); + for (; cell!=endc; ++cell) + { + fe_values.reinit (cell); + local_matrix = 0; + local_rhs = 0; + + // Here's the first significant + // difference: We have to get the + // values of the saturation function of + // the previous time step at the + // quadrature points. To this end, we + // can use the + // FEValues::get_function_values + // (previously already used in step-9, + // step-14 and step-15), a function + // that takes a solution vector and + // returns a list of function values at + // the quadrature points of the present + // cell. In fact, it returns the + // complete vector-valued solution at + // each quadrature point, i.e. 
not only + // the saturation but also the + // velocities and pressure: + fe_values.get_function_values (old_solution, old_solution_values); + + // Then we also have to get the values + // of the pressure right hand side and + // of the inverse permeability tensor + // at the quadrature points: + pressure_right_hand_side.value_list (fe_values.get_quadrature_points(), + pressure_rhs_values); + k_inverse.value_list (fe_values.get_quadrature_points(), + k_inverse_values); + + // With all this, we can now loop over + // all the quadrature points and shape + // functions on this cell and assemble + // those parts of the matrix and right + // hand side that we deal with in this + // function. The individual terms in + // the contributions should be + // self-explanatory given the explicit + // form of the bilinear form stated in + // the introduction: + for (unsigned int q=0; q phi_i_u = fe_values[velocities].value (i, q); + const double div_phi_i_u = fe_values[velocities].divergence (i, q); + const double phi_i_p = fe_values[pressure].value (i, q); + const double phi_i_s = fe_values[saturation].value (i, q); + + for (unsigned int j=0; j phi_j_u = fe_values[velocities].value (j, q); + const double div_phi_j_u = fe_values[velocities].divergence (j, q); + const double phi_j_p = fe_values[pressure].value (j, q); + const double phi_j_s = fe_values[saturation].value (j, q); + + local_matrix(i,j) += (phi_i_u * k_inverse_values[q] * + mobility_inverse(old_s,viscosity) * phi_j_u + - div_phi_i_u * phi_j_p + - phi_i_p * div_phi_j_u + + phi_i_s * phi_j_s) + * fe_values.JxW(q); + } + + local_rhs(i) += (-phi_i_p * pressure_rhs_values[q])* + fe_values.JxW(q); + } + + + // Next, we also have to deal with the + // pressure boundary values. This, + // again is as in step-20: + for (unsigned int face_no=0; + face_no::faces_per_cell; + ++face_no) + if (cell->at_boundary(face_no)) + { + fe_face_values.reinit (cell, face_no); + + pressure_boundary_values + .value_list (fe_face_values.get_quadrature_points(), + boundary_values); + + for (unsigned int q=0; q + phi_i_u = fe_face_values[velocities].value (i, q); + + local_rhs(i) += -(phi_i_u * + fe_face_values.normal_vector(q) * + boundary_values[q] * + fe_face_values.JxW(q)); + } + } + + // The final step in the loop + // over all cells is to + // transfer local contributions + // into the global matrix and + // right hand side vector: + cell->get_dof_indices (local_dof_indices); + for (unsigned int i=0; i -void TwoPhaseFlowProblem::solve () -{ - const InverseMatrix > - m_inverse (system_matrix.block(0,0)); - Vector tmp (solution.block(0).size()); - Vector schur_rhs (solution.block(1).size()); - Vector tmp2 (solution.block(2).size()); + // @sect4{TwoPhaseFlowProblem::assemble_rhs_S} - // First the pressure, using the pressure - // Schur complement of the first two - // equations: + // As explained in the introduction, we can + // only evaluate the right hand side of the + // saturation equation once the velocity has + // been computed. We therefore have this + // separate function to this end. 
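  // Written out -- a sketch that follows the terms actually assembled in
  // the function below -- the local right hand side contribution for a
  // cell $K$ and a saturation test function $\sigma_i$ is
  // $(S^n,\sigma_i)_K
  //  + \triangle t\, (F(S^n)\,\mathbf{v}^{n+1}, \nabla\sigma_i)_K
  //  - \triangle t \sum_{e\subset\partial K}
  //    (F(S^*)\,\mathbf{v}^{n+1}\cdot\mathbf{n}, \sigma_i)_e$,
  // where $S^*$ denotes the upwind saturation on each face: the value taken
  // from the upstream neighbor (or from the boundary values) on inflow
  // parts of $\partial K$, and the value from the present cell on outflow
  // parts.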
+ template + void TwoPhaseFlowProblem::assemble_rhs_S () { - m_inverse.vmult (tmp, system_rhs.block(0)); - system_matrix.block(1,0).vmult (schur_rhs, tmp); - schur_rhs -= system_rhs.block(1); + QGauss quadrature_formula(degree+2); + QGauss face_quadrature_formula(degree+2); + FEValues fe_values (fe, quadrature_formula, + update_values | update_gradients | + update_quadrature_points | update_JxW_values); + FEFaceValues fe_face_values (fe, face_quadrature_formula, + update_values | update_normal_vectors | + update_quadrature_points | update_JxW_values); + FEFaceValues fe_face_values_neighbor (fe, face_quadrature_formula, + update_values); + + const unsigned int dofs_per_cell = fe.dofs_per_cell; + const unsigned int n_q_points = quadrature_formula.size(); + const unsigned int n_face_q_points = face_quadrature_formula.size(); + + Vector local_rhs (dofs_per_cell); + + std::vector > old_solution_values(n_q_points, Vector(dim+2)); + std::vector > old_solution_values_face(n_face_q_points, Vector(dim+2)); + std::vector > old_solution_values_face_neighbor(n_face_q_points, Vector(dim+2)); + std::vector > present_solution_values(n_q_points, Vector(dim+2)); + std::vector > present_solution_values_face(n_face_q_points, Vector(dim+2)); + + std::vector neighbor_saturation (n_face_q_points); + std::vector local_dof_indices (dofs_per_cell); + + SaturationBoundaryValues saturation_boundary_values; + + const FEValuesExtractors::Scalar saturation (dim+1); + + typename DoFHandler::active_cell_iterator + cell = dof_handler.begin_active(), + endc = dof_handler.end(); + for (; cell!=endc; ++cell) + { + local_rhs = 0; + fe_values.reinit (cell); + + fe_values.get_function_values (old_solution, old_solution_values); + fe_values.get_function_values (solution, present_solution_values); + + // First for the cell terms. These are, + // following the formulas in the + // introduction, $(S^n,\sigma)-(F(S^n) + // \mathbf{v}^{n+1},\nabla sigma)$, + // where $\sigma$ is the saturation + // component of the test function: + for (unsigned int q=0; q present_u; + for (unsigned int d=0; d grad_phi_i_s = fe_values[saturation].gradient (i, q); + + local_rhs(i) += (time_step * + f_saturation(old_s,viscosity) * + present_u * + grad_phi_i_s + + + old_s * phi_i_s) + * + fe_values.JxW(q); + } + + // Secondly, we have to deal with the + // flux parts on the face + // boundaries. This was a bit more + // involved because we first have to + // determine which are the influx and + // outflux parts of the cell + // boundary. If we have an influx + // boundary, we need to evaluate the + // saturation on the other side of the + // face (or the boundary values, if we + // are at the boundary of the domain). + // + // All this is a bit tricky, but has + // been explained in some detail + // already in step-9. Take a look there + // how this is supposed to work! 
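  // In short -- a sketch of the logic in the face loop that follows, not
  // additional code -- at each face quadrature point we look at the sign of
  // the normal velocity and pick the saturation value accordingly:
  //
  //   const bool is_outflow
  //     = (present_u_face * fe_face_values.normal_vector(q) >= 0);
  //   const double upwind_s = (is_outflow
  //                            ?
  //                            old_solution_values_face[q](dim+1)
  //                            :
  //                            neighbor_saturation[q]);
  //
  // where neighbor_saturation holds either the neighboring cell's old
  // saturation or, on the boundary of the domain, the prescribed boundary
  // values.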
+ for (unsigned int face_no=0; face_no::faces_per_cell; + ++face_no) + { + fe_face_values.reinit (cell, face_no); + + fe_face_values.get_function_values (old_solution, old_solution_values_face); + fe_face_values.get_function_values (solution, present_solution_values_face); + + if (cell->at_boundary(face_no)) + saturation_boundary_values + .value_list (fe_face_values.get_quadrature_points(), + neighbor_saturation); + else + { + const typename DoFHandler::active_cell_iterator + neighbor = cell->neighbor(face_no); + const unsigned int + neighbor_face = cell->neighbor_of_neighbor(face_no); + + fe_face_values_neighbor.reinit (neighbor, neighbor_face); + + fe_face_values_neighbor + .get_function_values (old_solution, + old_solution_values_face_neighbor); + + for (unsigned int q=0; q present_u_face; + for (unsigned int d=0; d= 0); + + for (unsigned int i=0; iget_dof_indices (local_dof_indices); + for (unsigned int i=0; i - preconditioner (approximate_schur_complement); + // After all these preparations, we finally + // solve the linear system for velocity and + // pressure in the same way as in + // step-20. After that, we have to deal with + // the saturation equation (see below): + template + void TwoPhaseFlowProblem::solve () + { + const InverseMatrix > + m_inverse (system_matrix.block(0,0)); + Vector tmp (solution.block(0).size()); + Vector schur_rhs (solution.block(1).size()); + Vector tmp2 (solution.block(2).size()); - SolverControl solver_control (solution.block(1).size(), - 1e-12*schur_rhs.l2_norm()); - SolverCG<> cg (solver_control); + // First the pressure, using the pressure + // Schur complement of the first two + // equations: + { + m_inverse.vmult (tmp, system_rhs.block(0)); + system_matrix.block(1,0).vmult (schur_rhs, tmp); + schur_rhs -= system_rhs.block(1); - cg.solve (schur_complement, solution.block(1), schur_rhs, - preconditioner); - std::cout << " " - << solver_control.last_step() - << " CG Schur complement iterations for pressure." - << std::endl; - } + SchurComplement + schur_complement (system_matrix, m_inverse); - // Now the velocity: - { - system_matrix.block(0,1).vmult (tmp, solution.block(1)); - tmp *= -1; - tmp += system_rhs.block(0); + ApproximateSchurComplement + approximate_schur_complement (system_matrix); - m_inverse.vmult (solution.block(0), tmp); - } + InverseMatrix + preconditioner (approximate_schur_complement); - // Finally, we have to take care of the - // saturation equation. The first business - // we have here is to determine the time - // step using the formula in the - // introduction. Knowing the shape of our - // domain and that we created the mesh by - // regular subdivision of cells, we can - // compute the diameter of each of our - // cells quite easily (in fact we use the - // linear extensions in coordinate - // directions of the cells, not the - // diameter). Note that we will learn a - // more general way to do this in step-24, - // where we use the - // GridTools::minimal_cell_diameter - // function. - // - // The maximal velocity we compute using a - // helper function to compute the maximal - // velocity defined below, and with all - // this we can evaluate our new time step - // length: - time_step = std::pow(0.5, double(n_refinement_steps)) / - get_maximal_velocity(); - - // The next step is to assemble the right - // hand side, and then to pass everything - // on for solution. 
At the end, we project - // back saturations onto the physically - // reasonable range: - assemble_rhs_S (); - { - SolverControl solver_control (system_matrix.block(2,2).m(), - 1e-8*system_rhs.block(2).l2_norm()); - SolverCG<> cg (solver_control); - cg.solve (system_matrix.block(2,2), solution.block(2), system_rhs.block(2), - PreconditionIdentity()); + SolverControl solver_control (solution.block(1).size(), + 1e-12*schur_rhs.l2_norm()); + SolverCG<> cg (solver_control); - project_back_saturation (); + cg.solve (schur_complement, solution.block(1), schur_rhs, + preconditioner); - std::cout << " " - << solver_control.last_step() - << " CG iterations for saturation." - << std::endl; - } + std::cout << " " + << solver_control.last_step() + << " CG Schur complement iterations for pressure." + << std::endl; + } + // Now the velocity: + { + system_matrix.block(0,1).vmult (tmp, solution.block(1)); + tmp *= -1; + tmp += system_rhs.block(0); - old_solution = solution; -} + m_inverse.vmult (solution.block(0), tmp); + } + // Finally, we have to take care of the + // saturation equation. The first business + // we have here is to determine the time + // step using the formula in the + // introduction. Knowing the shape of our + // domain and that we created the mesh by + // regular subdivision of cells, we can + // compute the diameter of each of our + // cells quite easily (in fact we use the + // linear extensions in coordinate + // directions of the cells, not the + // diameter). Note that we will learn a + // more general way to do this in step-24, + // where we use the + // GridTools::minimal_cell_diameter + // function. + // + // The maximal velocity we compute using a + // helper function to compute the maximal + // velocity defined below, and with all + // this we can evaluate our new time step + // length: + time_step = std::pow(0.5, double(n_refinement_steps)) / + get_maximal_velocity(); + + // The next step is to assemble the right + // hand side, and then to pass everything + // on for solution. At the end, we project + // back saturations onto the physically + // reasonable range: + assemble_rhs_S (); + { - // @sect4{TwoPhaseFlowProblem::output_results} + SolverControl solver_control (system_matrix.block(2,2).m(), + 1e-8*system_rhs.block(2).l2_norm()); + SolverCG<> cg (solver_control); + cg.solve (system_matrix.block(2,2), solution.block(2), system_rhs.block(2), + PreconditionIdentity()); - // There is nothing surprising here. Since - // the program will do a lot of time steps, - // we create an output file only every fifth - // time step. -template -void TwoPhaseFlowProblem::output_results () const -{ - if (timestep_number % 5 != 0) - return; + project_back_saturation (); - std::vector solution_names; - switch (dim) - { - case 2: - solution_names.push_back ("u"); - solution_names.push_back ("v"); - solution_names.push_back ("p"); - solution_names.push_back ("S"); - break; - - case 3: - solution_names.push_back ("u"); - solution_names.push_back ("v"); - solution_names.push_back ("w"); - solution_names.push_back ("p"); - solution_names.push_back ("S"); - break; - - default: - Assert (false, ExcNotImplemented()); + std::cout << " " + << solver_control.last_step() + << " CG iterations for saturation." 
+ << std::endl; } - DataOut data_out; - data_out.attach_dof_handler (dof_handler); - data_out.add_data_vector (solution, solution_names); + old_solution = solution; + } - data_out.build_patches (degree+1); - std::ostringstream filename; - filename << "solution-" << timestep_number << ".vtk"; + // @sect4{TwoPhaseFlowProblem::output_results} - std::ofstream output (filename.str().c_str()); - data_out.write_vtk (output); -} + // There is nothing surprising here. Since + // the program will do a lot of time steps, + // we create an output file only every fifth + // time step. + template + void TwoPhaseFlowProblem::output_results () const + { + if (timestep_number % 5 != 0) + return; + std::vector solution_names; + switch (dim) + { + case 2: + solution_names.push_back ("u"); + solution_names.push_back ("v"); + solution_names.push_back ("p"); + solution_names.push_back ("S"); + break; + + case 3: + solution_names.push_back ("u"); + solution_names.push_back ("v"); + solution_names.push_back ("w"); + solution_names.push_back ("p"); + solution_names.push_back ("S"); + break; + + default: + Assert (false, ExcNotImplemented()); + } + DataOut data_out; - // @sect4{TwoPhaseFlowProblem::project_back_saturation} - - // In this function, we simply run over all - // saturation degrees of freedom and make - // sure that if they should have left the - // physically reasonable range, that they be - // reset to the interval $[0,1]$. To do this, - // we only have to loop over all saturation - // components of the solution vector; these - // are stored in the block 2 (block 0 are the - // velocities, block 1 are the pressures). - // - // It may be instructive to note that this - // function almost never triggers when the - // time step is chosen as mentioned in the - // introduction. However, if we choose the - // timestep only slightly larger, we get - // plenty of values outside the proper - // range. Strictly speaking, the function is - // therefore unnecessary if we choose the - // time step small enough. In a sense, the - // function is therefore only a safety device - // to avoid situations where our entire - // solution becomes unphysical because - // individual degrees of freedom have become - // unphysical a few time steps earlier. -template -void -TwoPhaseFlowProblem::project_back_saturation () -{ - for (unsigned int i=0; i 1) - solution.block(2)(i) = 1; -} + data_out.attach_dof_handler (dof_handler); + data_out.add_data_vector (solution, solution_names); + data_out.build_patches (degree+1); - // @sect4{TwoPhaseFlowProblem::get_maximal_velocity} + std::ostringstream filename; + filename << "solution-" << timestep_number << ".vtk"; - // The following function is used in - // determining the maximal allowable time - // step. What it does is to loop over all - // quadrature points in the domain and find - // what the maximal magnitude of the velocity - // is. 
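  // For reference -- a sketch of the formula already used in solve() above:
  // on the uniformly refined unit square or cube the cells have edge length
  // $h = 2^{-n_\text{ref}}$, so the time step chosen there is
  // $\triangle t^{n+1}
  //    = \frac{2^{-n_\text{ref}}}
  //           {\max_{\mathbf{x}}\|\mathbf{v}^{n+1}(\mathbf{x})\|}$,
  // with the maximum of the velocity approximated by the maximum over all
  // quadrature points, which is exactly what the function below computes.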
-template -double -TwoPhaseFlowProblem::get_maximal_velocity () const -{ - QGauss quadrature_formula(degree+2); - const unsigned int n_q_points - = quadrature_formula.size(); - - FEValues fe_values (fe, quadrature_formula, - update_values); - std::vector > solution_values(n_q_points, - Vector(dim+2)); - double max_velocity = 0; - - typename DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(), - endc = dof_handler.end(); - for (; cell!=endc; ++cell) - { - fe_values.reinit (cell); - fe_values.get_function_values (solution, solution_values); - - for (unsigned int q=0; q velocity; - for (unsigned int i=0; iConstraintMatrix() as the - // second argument. - // - // The second point worth mentioning is that - // we only compute the length of the present - // time step in the middle of solving the - // linear system corresponding to each time - // step. We can therefore output the present - // end time of a time step only at the end of - // the time step. -template -void TwoPhaseFlowProblem::run () -{ - make_grid_and_dofs(); + // @sect4{TwoPhaseFlowProblem::project_back_saturation} + // In this function, we simply run over all + // saturation degrees of freedom and make + // sure that if they should have left the + // physically reasonable range, that they be + // reset to the interval $[0,1]$. To do this, + // we only have to loop over all saturation + // components of the solution vector; these + // are stored in the block 2 (block 0 are the + // velocities, block 1 are the pressures). + // + // It may be instructive to note that this + // function almost never triggers when the + // time step is chosen as mentioned in the + // introduction. However, if we choose the + // timestep only slightly larger, we get + // plenty of values outside the proper + // range. Strictly speaking, the function is + // therefore unnecessary if we choose the + // time step small enough. In a sense, the + // function is therefore only a safety device + // to avoid situations where our entire + // solution becomes unphysical because + // individual degrees of freedom have become + // unphysical a few time steps earlier. + template + void + TwoPhaseFlowProblem::project_back_saturation () { - ConstraintMatrix constraints; - constraints.close(); - - VectorTools::project (dof_handler, - constraints, - QGauss(degree+2), - InitialValues(), - old_solution); + for (unsigned int i=0; i 1) + solution.block(2)(i) = 1; } - timestep_number = 1; - double time = 0; - do - { - std::cout << "Timestep " << timestep_number - << std::endl; + // @sect4{TwoPhaseFlowProblem::get_maximal_velocity} - assemble_system (); + // The following function is used in + // determining the maximal allowable time + // step. What it does is to loop over all + // quadrature points in the domain and find + // what the maximal magnitude of the velocity + // is. 
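  // (An aside on the projection function above -- the following line is
  // only an equivalent reformulation, not a change to the program: the two
  // if statements in its loop body could also be written as a single
  // clamping assignment,
  //
  //   solution.block(2)(i) = std::min (1., std::max (0., solution.block(2)(i)));
  //
  // which makes explicit that saturations are simply cut off at the ends of
  // the physically meaningful interval $[0,1]$.)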
+ template + double + TwoPhaseFlowProblem::get_maximal_velocity () const + { + QGauss quadrature_formula(degree+2); + const unsigned int n_q_points + = quadrature_formula.size(); + + FEValues fe_values (fe, quadrature_formula, + update_values); + std::vector > solution_values(n_q_points, + Vector(dim+2)); + double max_velocity = 0; + + typename DoFHandler::active_cell_iterator + cell = dof_handler.begin_active(), + endc = dof_handler.end(); + for (; cell!=endc; ++cell) + { + fe_values.reinit (cell); + fe_values.get_function_values (solution, solution_values); + + for (unsigned int q=0; q velocity; + for (unsigned int i=0; isection on improved ILU - // we're going to discuss this issue - // in more detail. - - // There is one more change compared - // to previous tutorial programs: - // There is no reason in sorting the - // dim velocity - // components individually. In fact, - // rather than first enumerating all - // $x$-velocities, then all - // $y$-velocities, etc, we would like - // to keep all velocities at the same - // location together and only - // separate between velocities (all - // components) and pressures. By - // default, this is not what the - // DoFRenumbering::component_wise - // function does: it treats each - // vector component separately; what - // we have to do is group several - // components into "blocks" and pass - // this block structure to that - // function. Consequently, we - // allocate a vector - // block_component with - // as many elements as there are - // components and describe all - // velocity components to correspond - // to block 0, while the pressure - // component will form block 1: -template -void StokesProblem::setup_dofs () -{ - A_preconditioner.reset (); - system_matrix.clear (); - - dof_handler.distribute_dofs (fe); - DoFRenumbering::Cuthill_McKee (dof_handler); - - std::vector block_component (dim+1,0); - block_component[dim] = 1; - DoFRenumbering::component_wise (dof_handler, block_component); - - // Now comes the implementation of - // Dirichlet boundary conditions, which - // should be evident after the discussion - // in the introduction. All that changed is - // that the function already appears in the - // setup functions, whereas we were used to - // see it in some assembly routine. Further - // down below where we set up the mesh, we - // will associate the top boundary where we - // impose Dirichlet boundary conditions - // with boundary indicator 1. We will have - // to pass this boundary indicator as - // second argument to the function below - // interpolating boundary values. There is - // one more thing, though. The function - // describing the Dirichlet conditions was - // defined for all components, both - // velocity and pressure. However, the - // Dirichlet conditions are to be set for - // the velocity only. To this end, we use - // a component_mask that - // filters out the pressure component, so - // that the condensation is performed on - // velocity degrees of freedom only. Since - // we use adaptively refined grids the - // constraint matrix needs to be first - // filled with hanging node constraints - // generated from the DoF handler. Note the - // order of the two functions — we - // first compute the hanging node - // constraints, and then insert the - // boundary values into the constraint - // matrix. This makes sure that we respect - // H1 conformity on boundaries - // with hanging nodes (in three space - // dimensions), where the hanging node - // needs to dominate the Dirichlet boundary - // values. 
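  // In formulas -- a sketch of what the following function implements, up
  // to the sign convention used for the divergence coupling $B$: the Schur
  // complement of the Stokes system is $S = B A^{-1} B^T$, and its action
  // on a pressure vector is computed in three steps,
  //
  //   system_matrix->block(0,1).vmult (tmp1, src);   // tmp1 = B^T src
  //   A_inverse->vmult (tmp2, tmp1);                 // tmp2 = A^{-1} tmp1
  //   system_matrix->block(1,0).vmult (dst, tmp2);   // dst  = B tmp2
  //
  // i.e. exactly the sequence of matrix-vector products spelled out below.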
+ template + void SchurComplement::vmult (Vector &dst, + const Vector &src) const { - constraints.clear (); - std::vector component_mask (dim+1, true); - component_mask[dim] = false; - DoFTools::make_hanging_node_constraints (dof_handler, - constraints); - VectorTools::interpolate_boundary_values (dof_handler, - 1, - BoundaryValues(), - constraints, - component_mask); + system_matrix->block(0,1).vmult (tmp1, src); + A_inverse->vmult (tmp2, tmp1); + system_matrix->block(1,0).vmult (dst, tmp2); } - constraints.close (); - - // In analogy to step-20, we count the dofs - // in the individual components. We could - // do this in the same way as there, but we - // want to operate on the block structure - // we used already for the renumbering: The - // function - // DoFTools::count_dofs_per_block - // does the same as - // DoFTools::count_dofs_per_component, - // but now grouped as velocity and pressure - // block via block_component. - std::vector dofs_per_block (2); - DoFTools::count_dofs_per_block (dof_handler, dofs_per_block, block_component); - const unsigned int n_u = dofs_per_block[0], - n_p = dofs_per_block[1]; - - std::cout << " Number of active cells: " - << triangulation.n_active_cells() - << std::endl - << " Number of degrees of freedom: " - << dof_handler.n_dofs() - << " (" << n_u << '+' << n_p << ')' - << std::endl; - - // The next task is to allocate a - // sparsity pattern for the system matrix - // we will create. We could do this in - // the same way as in step-20, - // i.e. directly build an object of type - // SparsityPattern through - // DoFTools::make_sparsity_pattern. However, - // there is a major reason not to do so: - // In 3D, the function - // DoFTools::max_couplings_between_dofs - // yields a conservative but rather large - // number for the coupling between the - // individual dofs, so that the memory - // initially provided for the creation of - // the sparsity pattern of the matrix is - // far too much -- so much actually that - // the initial sparsity pattern won't - // even fit into the physical memory of - // most systems already for - // moderately-sized 3D problems, see also - // the discussion in step-18. Instead, - // we first build a temporary object that - // uses a different data structure that - // doesn't require allocating more memory - // than necessary but isn't suitable for - // use as a basis of SparseMatrix or - // BlockSparseMatrix objects; in a second - // step we then copy this object into an - // object of BlockSparsityPattern. This - // is entirely analgous to what we - // already did in step-11 and step-18. + + // @sect3{StokesProblem class implementation} + + // @sect4{StokesProblem::StokesProblem} + + // The constructor of this class + // looks very similar to the one of + // step-20. The constructor + // initializes the variables for the + // polynomial degree, triangulation, + // finite element system and the dof + // handler. The underlying polynomial + // functions are of order + // degree+1 for the + // vector-valued velocity components + // and of order degree + // for the pressure. This gives the + // LBB-stable element pair + // $Q_{degree+1}^d\times Q_{degree}$, + // often referred to as the + // Taylor-Hood element. 
// - // There is one snag again here, though: - // it turns out that using the - // CompressedSparsityPattern (or the - // block version - // BlockCompressedSparsityPattern we - // would use here) has a bottleneck that - // makes the algorithm to build the - // sparsity pattern be quadratic in the - // number of degrees of freedom. This - // doesn't become noticable until we get - // well into the range of several 100,000 - // degrees of freedom, but eventually - // dominates the setup of the linear - // system when we get to more than a - // million degrees of freedom. This is - // due to the data structures used in the - // CompressedSparsityPattern class, - // nothing that can easily be - // changed. Fortunately, there is an easy - // solution: the - // CompressedSimpleSparsityPattern class - // (and its block variant - // BlockCompressedSimpleSparsityPattern) - // has exactly the same interface, uses a - // different %internal data structure and - // is linear in the number of degrees of - // freedom and therefore much more - // efficient for large problems. As - // another alternative, we could also - // have chosen the class - // BlockCompressedSetSparsityPattern that - // uses yet another strategy for %internal - // memory management. Though, that class - // turns out to be more memory-demanding - // than - // BlockCompressedSimpleSparsityPattern - // for this example. + // Note that we initialize the triangulation + // with a MeshSmoothing argument, which + // ensures that the refinement of cells is + // done in a way that the approximation of + // the PDE solution remains well-behaved + // (problems arise if grids are too + // unstructered), see the documentation of + // Triangulation::MeshSmoothing + // for details. + template + StokesProblem::StokesProblem (const unsigned int degree) + : + degree (degree), + triangulation (Triangulation::maximum_smoothing), + fe (FE_Q(degree+1), dim, + FE_Q(degree), 1), + dof_handler (triangulation) + {} + + + // @sect4{StokesProblem::setup_dofs} + + // Given a mesh, this function + // associates the degrees of freedom + // with it and creates the + // corresponding matrices and + // vectors. At the beginning it also + // releases the pointer to the + // preconditioner object (if the + // shared pointer pointed at anything + // at all at this point) since it + // will definitely not be needed any + // more after this point and will + // have to be re-computed after + // assembling the matrix, and unties + // the sparse matrix from its + // sparsity pattern object. // - // Consequently, this is the class that - // we will use for our intermediate - // sparsity representation. All this is - // done inside a new scope, which means - // that the memory of csp - // will be released once the information - // has been copied to - // sparsity_pattern. + // We then proceed with distributing + // degrees of freedom and renumbering + // them: In order to make the ILU + // preconditioner (in 3D) work + // efficiently, it is important to + // enumerate the degrees of freedom + // in such a way that it reduces the + // bandwidth of the matrix, or maybe + // more importantly: in such a way + // that the ILU is as close as + // possible to a real LU + // decomposition. On the other hand, + // we need to preserve the block + // structure of velocity and pressure + // already seen in in step-20 and + // step-21. This is done in two + // steps: First, all dofs are + // renumbered to improve the ILU and + // then we renumber once again by + // components. 
Since + // DoFRenumbering::component_wise + // does not touch the renumbering + // within the individual blocks, the + // basic renumbering from the first + // step remains. As for how the + // renumber degrees of freedom to + // improve the ILU: deal.II has a + // number of algorithms that attempt + // to find orderings to improve ILUs, + // or reduce the bandwidth of + // matrices, or optimize some other + // aspect. The DoFRenumbering + // namespace shows a comparison of + // the results we obtain with several + // of these algorithms based on the + // testcase discussed here in this + // tutorial program. Here, we will + // use the traditional Cuthill-McKee + // algorithm already used in some of + // the previous tutorial programs. + // In the + // section on improved ILU + // we're going to discuss this issue + // in more detail. + + // There is one more change compared + // to previous tutorial programs: + // There is no reason in sorting the + // dim velocity + // components individually. In fact, + // rather than first enumerating all + // $x$-velocities, then all + // $y$-velocities, etc, we would like + // to keep all velocities at the same + // location together and only + // separate between velocities (all + // components) and pressures. By + // default, this is not what the + // DoFRenumbering::component_wise + // function does: it treats each + // vector component separately; what + // we have to do is group several + // components into "blocks" and pass + // this block structure to that + // function. Consequently, we + // allocate a vector + // block_component with + // as many elements as there are + // components and describe all + // velocity components to correspond + // to block 0, while the pressure + // component will form block 1: + template + void StokesProblem::setup_dofs () { - BlockCompressedSimpleSparsityPattern csp (2,2); - - csp.block(0,0).reinit (n_u, n_u); - csp.block(1,0).reinit (n_p, n_u); - csp.block(0,1).reinit (n_u, n_p); - csp.block(1,1).reinit (n_p, n_p); - - csp.collect_sizes(); + A_preconditioner.reset (); + system_matrix.clear (); + + dof_handler.distribute_dofs (fe); + DoFRenumbering::Cuthill_McKee (dof_handler); + + std::vector block_component (dim+1,0); + block_component[dim] = 1; + DoFRenumbering::component_wise (dof_handler, block_component); + + // Now comes the implementation of + // Dirichlet boundary conditions, which + // should be evident after the discussion + // in the introduction. All that changed is + // that the function already appears in the + // setup functions, whereas we were used to + // see it in some assembly routine. Further + // down below where we set up the mesh, we + // will associate the top boundary where we + // impose Dirichlet boundary conditions + // with boundary indicator 1. We will have + // to pass this boundary indicator as + // second argument to the function below + // interpolating boundary values. There is + // one more thing, though. The function + // describing the Dirichlet conditions was + // defined for all components, both + // velocity and pressure. However, the + // Dirichlet conditions are to be set for + // the velocity only. To this end, we use + // a component_mask that + // filters out the pressure component, so + // that the condensation is performed on + // velocity degrees of freedom only. Since + // we use adaptively refined grids the + // constraint matrix needs to be first + // filled with hanging node constraints + // generated from the DoF handler. 
Note the + // order of the two functions — we + // first compute the hanging node + // constraints, and then insert the + // boundary values into the constraint + // matrix. This makes sure that we respect + // H1 conformity on boundaries + // with hanging nodes (in three space + // dimensions), where the hanging node + // needs to dominate the Dirichlet boundary + // values. + { + constraints.clear (); + std::vector component_mask (dim+1, true); + component_mask[dim] = false; + DoFTools::make_hanging_node_constraints (dof_handler, + constraints); + VectorTools::interpolate_boundary_values (dof_handler, + 1, + BoundaryValues(), + constraints, + component_mask); + } - DoFTools::make_sparsity_pattern (dof_handler, csp, constraints, false); - sparsity_pattern.copy_from (csp); - } + constraints.close (); + + // In analogy to step-20, we count the dofs + // in the individual components. We could + // do this in the same way as there, but we + // want to operate on the block structure + // we used already for the renumbering: The + // function + // DoFTools::count_dofs_per_block + // does the same as + // DoFTools::count_dofs_per_component, + // but now grouped as velocity and pressure + // block via block_component. + std::vector dofs_per_block (2); + DoFTools::count_dofs_per_block (dof_handler, dofs_per_block, block_component); + const unsigned int n_u = dofs_per_block[0], + n_p = dofs_per_block[1]; + + std::cout << " Number of active cells: " + << triangulation.n_active_cells() + << std::endl + << " Number of degrees of freedom: " + << dof_handler.n_dofs() + << " (" << n_u << '+' << n_p << ')' + << std::endl; - // Finally, the system matrix, - // solution and right hand side are - // created from the block - // structure as in step-20: - system_matrix.reinit (sparsity_pattern); - - solution.reinit (2); - solution.block(0).reinit (n_u); - solution.block(1).reinit (n_p); - solution.collect_sizes (); - - system_rhs.reinit (2); - system_rhs.block(0).reinit (n_u); - system_rhs.block(1).reinit (n_p); - system_rhs.collect_sizes (); -} + // The next task is to allocate a + // sparsity pattern for the system matrix + // we will create. We could do this in + // the same way as in step-20, + // i.e. directly build an object of type + // SparsityPattern through + // DoFTools::make_sparsity_pattern. However, + // there is a major reason not to do so: + // In 3D, the function + // DoFTools::max_couplings_between_dofs + // yields a conservative but rather large + // number for the coupling between the + // individual dofs, so that the memory + // initially provided for the creation of + // the sparsity pattern of the matrix is + // far too much -- so much actually that + // the initial sparsity pattern won't + // even fit into the physical memory of + // most systems already for + // moderately-sized 3D problems, see also + // the discussion in step-18. Instead, + // we first build a temporary object that + // uses a different data structure that + // doesn't require allocating more memory + // than necessary but isn't suitable for + // use as a basis of SparseMatrix or + // BlockSparseMatrix objects; in a second + // step we then copy this object into an + // object of BlockSparsityPattern. This + // is entirely analgous to what we + // already did in step-11 and step-18. 
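  // As a side remark: for a scalar problem, the same two-step idiom
  // would -- as a minimal sketch, assuming a DoFHandler<dim> object
  // dof_handler and a SparsityPattern object sparsity_pattern as in
  // those earlier programs -- simply read
  //
  //   CompressedSimpleSparsityPattern scalar_csp (dof_handler.n_dofs());
  //   DoFTools::make_sparsity_pattern (dof_handler, scalar_csp);
  //   sparsity_pattern.copy_from (scalar_csp);
  //
  // The block variant we build below follows exactly the same pattern,
  // just with one sub-pattern for each of the four blocks of the
  // Stokes matrix.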
+ // + // There is one snag again here, though: + // it turns out that using the + // CompressedSparsityPattern (or the + // block version + // BlockCompressedSparsityPattern we + // would use here) has a bottleneck that + // makes the algorithm to build the + // sparsity pattern be quadratic in the + // number of degrees of freedom. This + // doesn't become noticable until we get + // well into the range of several 100,000 + // degrees of freedom, but eventually + // dominates the setup of the linear + // system when we get to more than a + // million degrees of freedom. This is + // due to the data structures used in the + // CompressedSparsityPattern class, + // nothing that can easily be + // changed. Fortunately, there is an easy + // solution: the + // CompressedSimpleSparsityPattern class + // (and its block variant + // BlockCompressedSimpleSparsityPattern) + // has exactly the same interface, uses a + // different %internal data structure and + // is linear in the number of degrees of + // freedom and therefore much more + // efficient for large problems. As + // another alternative, we could also + // have chosen the class + // BlockCompressedSetSparsityPattern that + // uses yet another strategy for %internal + // memory management. Though, that class + // turns out to be more memory-demanding + // than + // BlockCompressedSimpleSparsityPattern + // for this example. + // + // Consequently, this is the class that + // we will use for our intermediate + // sparsity representation. All this is + // done inside a new scope, which means + // that the memory of csp + // will be released once the information + // has been copied to + // sparsity_pattern. + { + BlockCompressedSimpleSparsityPattern csp (2,2); + csp.block(0,0).reinit (n_u, n_u); + csp.block(1,0).reinit (n_p, n_u); + csp.block(0,1).reinit (n_u, n_p); + csp.block(1,1).reinit (n_p, n_p); - // @sect4{StokesProblem::assemble_system} + csp.collect_sizes(); - // The assembly process follows the - // discussion in step-20 and in the - // introduction. We use the well-known - // abbreviations for the data structures - // that hold the local matrix, right - // hand side, and global - // numbering of the degrees of freedom - // for the present cell. -template -void StokesProblem::assemble_system () -{ - system_matrix=0; - system_rhs=0; - - QGauss quadrature_formula(degree+2); - - FEValues fe_values (fe, quadrature_formula, - update_values | - update_quadrature_points | - update_JxW_values | - update_gradients); - - const unsigned int dofs_per_cell = fe.dofs_per_cell; - - const unsigned int n_q_points = quadrature_formula.size(); - - FullMatrix local_matrix (dofs_per_cell, dofs_per_cell); - Vector local_rhs (dofs_per_cell); - - std::vector local_dof_indices (dofs_per_cell); - - const RightHandSide right_hand_side; - std::vector > rhs_values (n_q_points, - Vector(dim+1)); - - // Next, we need two objects that work as - // extractors for the FEValues - // object. Their use is explained in detail - // in the report on @ref vector_valued : - const FEValuesExtractors::Vector velocities (0); - const FEValuesExtractors::Scalar pressure (dim); - - // As an extension over step-20 and - // step-21, we include a few - // optimizations that make assembly - // much faster for this particular - // problem. 
The improvements are - // based on the observation that we - // do a few calculations too many - // times when we do as in step-20: - // The symmetric gradient actually - // has dofs_per_cell - // different values per quadrature - // point, but we extract it - // dofs_per_cell*dofs_per_cell - // times from the FEValues object - - // for both the loop over - // i and the inner - // loop over j. In 3d, - // that means evaluating it - // $89^2=7921$ instead of $89$ - // times, a not insignificant - // difference. - // - // So what we're - // going to do here is to avoid - // such repeated calculations by - // getting a vector of rank-2 - // tensors (and similarly for - // the divergence and the basis - // function value on pressure) - // at the quadrature point prior - // to starting the loop over the - // dofs on the cell. First, we - // create the respective objects - // that will hold these - // values. Then, we start the - // loop over all cells and the loop - // over the quadrature points, - // where we first extract these - // values. There is one more - // optimization we implement here: - // the local matrix (as well as - // the global one) is going to - // be symmetric, since all - // the operations involved are - // symmetric with respect to $i$ - // and $j$. This is implemented by - // simply running the inner loop - // not to dofs_per_cell, - // but only up to i, - // the index of the outer loop. - std::vector > phi_grads_u (dofs_per_cell); - std::vector div_phi_u (dofs_per_cell); - std::vector phi_p (dofs_per_cell); - - typename DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(), - endc = dof_handler.end(); - for (; cell!=endc; ++cell) - { - fe_values.reinit (cell); - local_matrix = 0; - local_rhs = 0; - - right_hand_side.vector_value_list(fe_values.get_quadrature_points(), - rhs_values); - - for (unsigned int q=0; q phi_p[i] - // * phi_p[j] , yielding a - // pressure mass matrix in the - // $(1,1)$ block of the matrix as - // discussed in the - // introduction. That this term only - // ends up in the $(1,1)$ block stems - // from the fact that both of the - // factors in phi_p[i] * - // phi_p[j] are only non-zero - // when all the other terms vanish - // (and the other way around). - // - // Note also that operator* is - // overloaded for symmetric - // tensors, yielding the scalar - // product between the two - // tensors in the first line of - // the local matrix - // contribution. - - // Before we can write the local data - // into the global matrix (and - // simultaneously use the - // ConstraintMatrix object to apply - // Dirichlet boundary conditions and - // eliminate hanging node - // constraints, as we discussed in - // the introduction), we have to be - // careful about one thing, - // though. We have only build up half - // of the local matrix because of - // symmetry, but we're going to save - // the full system matrix in order to - // use the standard functions for - // solution. This is done by flipping - // the indices in case we are - // pointing into the empty part of - // the local matrix. 
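  // (A quick count to back up the numbers quoted above: with the
  // element combination used in this program -- FE_Q(degree+1) for the
  // dim velocity components and FE_Q(degree) for the pressure, with
  // degree==1 as set in main() -- a cell in 3d carries
  // $3\cdot 3^3 + 2^3 = 81 + 8 = 89$ degrees of freedom. The naive
  // approach therefore extracts the symmetric gradient $89^2=7921$
  // times per quadrature point, and the symmetry trick just described
  // computes only a little more than half of the $89^2$ local matrix
  // entries.)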
- for (unsigned int i=0; iget_dof_indices (local_dof_indices); - constraints.distribute_local_to_global (local_matrix, local_rhs, - local_dof_indices, - system_matrix, system_rhs); + DoFTools::make_sparsity_pattern (dof_handler, csp, constraints, false); + sparsity_pattern.copy_from (csp); } - // Before we're going to solve this - // linear system, we generate a - // preconditioner for the - // velocity-velocity matrix, i.e., - // block(0,0) in the - // system matrix. As mentioned - // above, this depends on the - // spatial dimension. Since the two - // classes described by the - // InnerPreconditioner::type - // typedef have the same interface, - // we do not have to do anything - // different whether we want to use - // a sparse direct solver or an - // ILU: - std::cout << " Computing preconditioner..." << std::endl << std::flush; - - A_preconditioner - = std_cxx1x::shared_ptr::type>(new typename InnerPreconditioner::type()); - A_preconditioner->initialize (system_matrix.block(0,0), - typename InnerPreconditioner::type::AdditionalData()); - -} + // Finally, the system matrix, + // solution and right hand side are + // created from the block + // structure as in step-20: + system_matrix.reinit (sparsity_pattern); + + solution.reinit (2); + solution.block(0).reinit (n_u); + solution.block(1).reinit (n_p); + solution.collect_sizes (); + + system_rhs.reinit (2); + system_rhs.block(0).reinit (n_u); + system_rhs.block(1).reinit (n_p); + system_rhs.collect_sizes (); + } + // @sect4{StokesProblem::assemble_system} - // @sect4{StokesProblem::solve} - - // After the discussion in the introduction - // and the definition of the respective - // classes above, the implementation of the - // solve function is rather - // straigt-forward and done in a similar way - // as in step-20. To start with, we need an - // object of the InverseMatrix - // class that represents the inverse of the - // matrix A. As described in the - // introduction, the inverse is generated - // with the help of an inner preconditioner - // of type - // InnerPreconditioner::type. -template -void StokesProblem::solve () -{ - const InverseMatrix, - typename InnerPreconditioner::type> - A_inverse (system_matrix.block(0,0), *A_preconditioner); - Vector tmp (solution.block(0).size()); - - // This is as in step-20. We generate the - // right hand side $B A^{-1} F - G$ for the - // Schur complement and an object that - // represents the respective linear - // operation $B A^{-1} B^T$, now with a - // template parameter indicating the - // preconditioner - in accordance with the - // definition of the class. + // The assembly process follows the + // discussion in step-20 and in the + // introduction. We use the well-known + // abbreviations for the data structures + // that hold the local matrix, right + // hand side, and global + // numbering of the degrees of freedom + // for the present cell. + template + void StokesProblem::assemble_system () { - Vector schur_rhs (solution.block(1).size()); - A_inverse.vmult (tmp, system_rhs.block(0)); - system_matrix.block(1,0).vmult (schur_rhs, tmp); - schur_rhs -= system_rhs.block(1); - - SchurComplement::type> - schur_complement (system_matrix, A_inverse); - - // The usual control structures for - // the solver call are created... - SolverControl solver_control (solution.block(1).size(), - 1e-6*schur_rhs.l2_norm()); - SolverCG<> cg (solver_control); - - // Now to the preconditioner to the - // Schur complement. 
As explained in - // the introduction, the - // preconditioning is done by a mass - // matrix in the pressure variable. It - // is stored in the $(1,1)$ block of - // the system matrix (that is not used - // anywhere else but in - // preconditioning). - // - // Actually, the solver needs to have - // the preconditioner in the form - // $P^{-1}$, so we need to create an - // inverse operation. Once again, we - // use an object of the class - // InverseMatrix, which - // implements the vmult - // operation that is needed by the - // solver. In this case, we have to - // invert the pressure mass matrix. As - // it already turned out in earlier - // tutorial programs, the inversion of - // a mass matrix is a rather cheap and - // straight-forward operation (compared - // to, e.g., a Laplace matrix). The CG - // method with ILU preconditioning - // converges in 5-10 steps, - // independently on the mesh size. - // This is precisely what we do here: - // We choose another ILU preconditioner - // and take it along to the - // InverseMatrix object via the - // corresponding template parameter. A - // CG solver is then called within the - // vmult operation of the inverse - // matrix. + system_matrix=0; + system_rhs=0; + + QGauss quadrature_formula(degree+2); + + FEValues fe_values (fe, quadrature_formula, + update_values | + update_quadrature_points | + update_JxW_values | + update_gradients); + + const unsigned int dofs_per_cell = fe.dofs_per_cell; + + const unsigned int n_q_points = quadrature_formula.size(); + + FullMatrix local_matrix (dofs_per_cell, dofs_per_cell); + Vector local_rhs (dofs_per_cell); + + std::vector local_dof_indices (dofs_per_cell); + + const RightHandSide right_hand_side; + std::vector > rhs_values (n_q_points, + Vector(dim+1)); + + // Next, we need two objects that work as + // extractors for the FEValues + // object. Their use is explained in detail + // in the report on @ref vector_valued : + const FEValuesExtractors::Vector velocities (0); + const FEValuesExtractors::Scalar pressure (dim); + + // As an extension over step-20 and + // step-21, we include a few + // optimizations that make assembly + // much faster for this particular + // problem. The improvements are + // based on the observation that we + // do a few calculations too many + // times when we do as in step-20: + // The symmetric gradient actually + // has dofs_per_cell + // different values per quadrature + // point, but we extract it + // dofs_per_cell*dofs_per_cell + // times from the FEValues object - + // for both the loop over + // i and the inner + // loop over j. In 3d, + // that means evaluating it + // $89^2=7921$ instead of $89$ + // times, a not insignificant + // difference. // - // An alternative that is cheaper to - // build, but needs more iterations - // afterwards, would be to choose a - // SSOR preconditioner with factor - // 1.2. It needs about twice the number - // of iterations, but the costs for its - // generation are almost neglible. - SparseILU preconditioner; - preconditioner.initialize (system_matrix.block(1,1), - SparseILU::AdditionalData()); - - InverseMatrix,SparseILU > - m_inverse (system_matrix.block(1,1), preconditioner); - - // With the Schur complement and an - // efficient preconditioner at hand, we - // can solve the respective equation - // for the pressure (i.e. 
block 0 in - // the solution vector) in the usual - // way: - cg.solve (schur_complement, solution.block(1), schur_rhs, - m_inverse); - - // After this first solution step, the - // hanging node constraints have to be - // distributed to the solution in order - // to achieve a consistent pressure - // field. - constraints.distribute (solution); - - std::cout << " " - << solver_control.last_step() - << " outer CG Schur complement iterations for pressure" - << std::endl; - } + // So what we're + // going to do here is to avoid + // such repeated calculations by + // getting a vector of rank-2 + // tensors (and similarly for + // the divergence and the basis + // function value on pressure) + // at the quadrature point prior + // to starting the loop over the + // dofs on the cell. First, we + // create the respective objects + // that will hold these + // values. Then, we start the + // loop over all cells and the loop + // over the quadrature points, + // where we first extract these + // values. There is one more + // optimization we implement here: + // the local matrix (as well as + // the global one) is going to + // be symmetric, since all + // the operations involved are + // symmetric with respect to $i$ + // and $j$. This is implemented by + // simply running the inner loop + // not to dofs_per_cell, + // but only up to i, + // the index of the outer loop. + std::vector > phi_grads_u (dofs_per_cell); + std::vector div_phi_u (dofs_per_cell); + std::vector phi_p (dofs_per_cell); + + typename DoFHandler::active_cell_iterator + cell = dof_handler.begin_active(), + endc = dof_handler.end(); + for (; cell!=endc; ++cell) + { + fe_values.reinit (cell); + local_matrix = 0; + local_rhs = 0; + + right_hand_side.vector_value_list(fe_values.get_quadrature_points(), + rhs_values); + + for (unsigned int q=0; q phi_p[i] + // * phi_p[j] , yielding a + // pressure mass matrix in the + // $(1,1)$ block of the matrix as + // discussed in the + // introduction. That this term only + // ends up in the $(1,1)$ block stems + // from the fact that both of the + // factors in phi_p[i] * + // phi_p[j] are only non-zero + // when all the other terms vanish + // (and the other way around). + // + // Note also that operator* is + // overloaded for symmetric + // tensors, yielding the scalar + // product between the two + // tensors in the first line of + // the local matrix + // contribution. + + // Before we can write the local data + // into the global matrix (and + // simultaneously use the + // ConstraintMatrix object to apply + // Dirichlet boundary conditions and + // eliminate hanging node + // constraints, as we discussed in + // the introduction), we have to be + // careful about one thing, + // though. We have only build up half + // of the local matrix because of + // symmetry, but we're going to save + // the full system matrix in order to + // use the standard functions for + // solution. This is done by flipping + // the indices in case we are + // pointing into the empty part of + // the local matrix. + for (unsigned int i=0; iget_dof_indices (local_dof_indices); + constraints.distribute_local_to_global (local_matrix, local_rhs, + local_dof_indices, + system_matrix, system_rhs); + } + + // Before we're going to solve this + // linear system, we generate a + // preconditioner for the + // velocity-velocity matrix, i.e., + // block(0,0) in the + // system matrix. As mentioned + // above, this depends on the + // spatial dimension. 
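  // As a reminder, the InnerPreconditioner helper referred to here is
  // declared near the top of this program; stripped of its comments it
  // essentially reads
  //
  //   template <int dim> struct InnerPreconditioner;
  //
  //   template <> struct InnerPreconditioner<2>
  //   { typedef SparseDirectUMFPACK type; };
  //
  //   template <> struct InnerPreconditioner<3>
  //   { typedef SparseILU<double> type; };
  //
  // i.e. a sparse direct solve in 2d and an ILU decomposition in 3d.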
Since the two + // classes described by the + // InnerPreconditioner::type + // typedef have the same interface, + // we do not have to do anything + // different whether we want to use + // a sparse direct solver or an + // ILU: + std::cout << " Computing preconditioner..." << std::endl << std::flush; + + A_preconditioner + = std_cxx1x::shared_ptr::type>(new typename InnerPreconditioner::type()); + A_preconditioner->initialize (system_matrix.block(0,0), + typename InnerPreconditioner::type::AdditionalData()); - // As in step-20, we finally need to - // solve for the velocity equation where - // we plug in the solution to the - // pressure equation. This involves only - // objects we already know - so we simply - // multiply $p$ by $B^T$, subtract the - // right hand side and multiply by the - // inverse of $A$. At the end, we need to - // distribute the constraints from - // hanging nodes in order to obtain a - // constistent flow field: - { - system_matrix.block(0,1).vmult (tmp, solution.block(1)); - tmp *= -1; - tmp += system_rhs.block(0); + } - A_inverse.vmult (solution.block(0), tmp); - constraints.distribute (solution); - } -} + // @sect4{StokesProblem::solve} + + // After the discussion in the introduction + // and the definition of the respective + // classes above, the implementation of the + // solve function is rather + // straigt-forward and done in a similar way + // as in step-20. To start with, we need an + // object of the InverseMatrix + // class that represents the inverse of the + // matrix A. As described in the + // introduction, the inverse is generated + // with the help of an inner preconditioner + // of type + // InnerPreconditioner::type. + template + void StokesProblem::solve () + { + const InverseMatrix, + typename InnerPreconditioner::type> + A_inverse (system_matrix.block(0,0), *A_preconditioner); + Vector tmp (solution.block(0).size()); + + // This is as in step-20. We generate the + // right hand side $B A^{-1} F - G$ for the + // Schur complement and an object that + // represents the respective linear + // operation $B A^{-1} B^T$, now with a + // template parameter indicating the + // preconditioner - in accordance with the + // definition of the class. + { + Vector schur_rhs (solution.block(1).size()); + A_inverse.vmult (tmp, system_rhs.block(0)); + system_matrix.block(1,0).vmult (schur_rhs, tmp); + schur_rhs -= system_rhs.block(1); + + SchurComplement::type> + schur_complement (system_matrix, A_inverse); + + // The usual control structures for + // the solver call are created... + SolverControl solver_control (solution.block(1).size(), + 1e-6*schur_rhs.l2_norm()); + SolverCG<> cg (solver_control); + + // Now to the preconditioner to the + // Schur complement. As explained in + // the introduction, the + // preconditioning is done by a mass + // matrix in the pressure variable. It + // is stored in the $(1,1)$ block of + // the system matrix (that is not used + // anywhere else but in + // preconditioning). + // + // Actually, the solver needs to have + // the preconditioner in the form + // $P^{-1}$, so we need to create an + // inverse operation. Once again, we + // use an object of the class + // InverseMatrix, which + // implements the vmult + // operation that is needed by the + // solver. In this case, we have to + // invert the pressure mass matrix. As + // it already turned out in earlier + // tutorial programs, the inversion of + // a mass matrix is a rather cheap and + // straight-forward operation (compared + // to, e.g., a Laplace matrix). 
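  // (For reference, the vmult function of the InverseMatrix class
  // defined further up in this program is -- up to details -- nothing
  // but a preconditioned CG solve:
  //
  //   template <class Matrix, class Preconditioner>
  //   void InverseMatrix<Matrix,Preconditioner>::
  //   vmult (Vector<double> &dst, const Vector<double> &src) const
  //   {
  //     SolverControl solver_control (src.size(), 1e-6*src.l2_norm());
  //     SolverCG<>    cg (solver_control);
  //     dst = 0;
  //     cg.solve (*matrix, dst, src, *preconditioner);
  //   }
  //
  // so each application of this "inverse" costs only a handful of CG
  // iterations on the pressure mass matrix.)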
The CG + // method with ILU preconditioning + // converges in 5-10 steps, + // independently on the mesh size. + // This is precisely what we do here: + // We choose another ILU preconditioner + // and take it along to the + // InverseMatrix object via the + // corresponding template parameter. A + // CG solver is then called within the + // vmult operation of the inverse + // matrix. + // + // An alternative that is cheaper to + // build, but needs more iterations + // afterwards, would be to choose a + // SSOR preconditioner with factor + // 1.2. It needs about twice the number + // of iterations, but the costs for its + // generation are almost neglible. + SparseILU preconditioner; + preconditioner.initialize (system_matrix.block(1,1), + SparseILU::AdditionalData()); + + InverseMatrix,SparseILU > + m_inverse (system_matrix.block(1,1), preconditioner); + + // With the Schur complement and an + // efficient preconditioner at hand, we + // can solve the respective equation + // for the pressure (i.e. block 0 in + // the solution vector) in the usual + // way: + cg.solve (schur_complement, solution.block(1), schur_rhs, + m_inverse); + + // After this first solution step, the + // hanging node constraints have to be + // distributed to the solution in order + // to achieve a consistent pressure + // field. + constraints.distribute (solution); + + std::cout << " " + << solver_control.last_step() + << " outer CG Schur complement iterations for pressure" + << std::endl; + } - // @sect4{StokesProblem::output_results} - - // The next function generates graphical - // output. In this example, we are going to - // use the VTK file format. We attach - // names to the individual variables in the - // problem: velocity to the - // dim components of velocity - // and pressure to the - // pressure. - // - // Not all visualization programs have the - // ability to group individual vector - // components into a vector to provide - // vector plots; in particular, this holds - // for some VTK-based visualization - // programs. In this case, the logical - // grouping of components into vectors - // should already be described in the file - // containing the data. In other words, - // what we need to do is provide our output - // writers with a way to know which of the - // components of the finite element - // logically form a vector (with $d$ - // components in $d$ space dimensions) - // rather than letting them assume that we - // simply have a bunch of scalar fields. - // This is achieved using the members of - // the - // DataComponentInterpretation - // namespace: as with the filename, we - // create a vector in which the first - // dim components refer to the - // velocities and are given the tag - // DataComponentInterpretation::component_is_part_of_vector; - // we finally push one tag - // DataComponentInterpretation::component_is_scalar - // to describe the grouping of the pressure - // variable. - - // The rest of the function is then - // the same as in step-20. -template -void -StokesProblem::output_results (const unsigned int refinement_cycle) const -{ - std::vector solution_names (dim, "velocity"); - solution_names.push_back ("pressure"); + // As in step-20, we finally need to + // solve for the velocity equation where + // we plug in the solution to the + // pressure equation. This involves only + // objects we already know - so we simply + // multiply $p$ by $B^T$, subtract the + // right hand side and multiply by the + // inverse of $A$. 
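  // Written out as formulas, the two solves of this function are
  // therefore
  //   $B A^{-1} B^T P = B A^{-1} F - G$   (the CG iteration above), and
  //   $A U = F - B^T P$                   (the step that follows here),
  // where $U$ and $P$ denote the velocity and pressure blocks of the
  // solution vector.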
At the end, we need to + // distribute the constraints from + // hanging nodes in order to obtain a + // constistent flow field: + { + system_matrix.block(0,1).vmult (tmp, solution.block(1)); + tmp *= -1; + tmp += system_rhs.block(0); - std::vector - data_component_interpretation - (dim, DataComponentInterpretation::component_is_part_of_vector); - data_component_interpretation - .push_back (DataComponentInterpretation::component_is_scalar); - - DataOut data_out; - data_out.attach_dof_handler (dof_handler); - data_out.add_data_vector (solution, solution_names, - DataOut::type_dof_data, - data_component_interpretation); - data_out.build_patches (); - - std::ostringstream filename; - filename << "solution-" - << Utilities::int_to_string (refinement_cycle, 2) - << ".vtk"; - - std::ofstream output (filename.str().c_str()); - data_out.write_vtk (output); -} + A_inverse.vmult (solution.block(0), tmp); + constraints.distribute (solution); + } + } - // @sect4{StokesProblem::refine_mesh} - - // This is the last interesting function of - // the StokesProblem class. - // As indicated by its name, it takes the - // solution to the problem and refines the - // mesh where this is needed. The procedure - // is the same as in the respective step in - // step-6, with the exception that we base - // the refinement only on the change in - // pressure, i.e., we call the Kelly error - // estimator with a mask - // object. Additionally, we do not coarsen - // the grid again: -template -void -StokesProblem::refine_mesh () -{ - Vector estimated_error_per_cell (triangulation.n_active_cells()); - - std::vector component_mask (dim+1, false); - component_mask[dim] = true; - KellyErrorEstimator::estimate (dof_handler, - QGauss(degree+1), - typename FunctionMap::type(), - solution, - estimated_error_per_cell, - component_mask); - - GridRefinement::refine_and_coarsen_fixed_number (triangulation, - estimated_error_per_cell, - 0.3, 0.0); - triangulation.execute_coarsening_and_refinement (); -} + // @sect4{StokesProblem::output_results} - // @sect4{StokesProblem::run} - - // The last step in the Stokes class is, as - // usual, the function that generates the - // initial grid and calls the other - // functions in the respective order. - // - // We start off with a rectangle of size $4 - // \times 1$ (in 2d) or $4 \times 1 \times - // 1$ (in 3d), placed in $R^2/R^3$ as - // $(-2,2)\times(-1,0)$ or - // $(-2,2)\times(0,1)\times(-1,1)$, - // respectively. It is natural to start - // with equal mesh size in each direction, - // so we subdivide the initial rectangle - // four times in the first coordinate - // direction. To limit the scope of the - // variables involved in the creation of - // the mesh to the range where we actually - // need them, we put the entire block - // between a pair of braces: -template -void StokesProblem::run () -{ + // The next function generates graphical + // output. In this example, we are going to + // use the VTK file format. We attach + // names to the individual variables in the + // problem: velocity to the + // dim components of velocity + // and pressure to the + // pressure. + // + // Not all visualization programs have the + // ability to group individual vector + // components into a vector to provide + // vector plots; in particular, this holds + // for some VTK-based visualization + // programs. In this case, the logical + // grouping of components into vectors + // should already be described in the file + // containing the data. 
In other words, + // what we need to do is provide our output + // writers with a way to know which of the + // components of the finite element + // logically form a vector (with $d$ + // components in $d$ space dimensions) + // rather than letting them assume that we + // simply have a bunch of scalar fields. + // This is achieved using the members of + // the + // DataComponentInterpretation + // namespace: as with the filename, we + // create a vector in which the first + // dim components refer to the + // velocities and are given the tag + // DataComponentInterpretation::component_is_part_of_vector; + // we finally push one tag + // DataComponentInterpretation::component_is_scalar + // to describe the grouping of the pressure + // variable. + + // The rest of the function is then + // the same as in step-20. + template + void + StokesProblem::output_results (const unsigned int refinement_cycle) const { - std::vector subdivisions (dim, 1); - subdivisions[0] = 4; - - const Point bottom_left = (dim == 2 ? - Point(-2,-1) : - Point(-2,0,-1)); - const Point top_right = (dim == 2 ? - Point(2,0) : - Point(2,1,0)); - - GridGenerator::subdivided_hyper_rectangle (triangulation, - subdivisions, - bottom_left, - top_right); - } + std::vector solution_names (dim, "velocity"); + solution_names.push_back ("pressure"); - // A boundary indicator of 1 is set to all - // boundaries that are subject to Dirichlet - // boundary conditions, i.e. to faces that - // are located at 0 in the last coordinate - // direction. See the example description - // above for details. - for (typename Triangulation::active_cell_iterator - cell = triangulation.begin_active(); - cell != triangulation.end(); ++cell) - for (unsigned int f=0; f::faces_per_cell; ++f) - if (cell->face(f)->center()[dim-1] == 0) - cell->face(f)->set_all_boundary_indicators(1); - - - // We then apply an initial refinement - // before solving for the first time. In - // 3D, there are going to be more degrees - // of freedom, so we refine less there: - triangulation.refine_global (4-dim); - - // As first seen in step-6, we cycle over - // the different refinement levels and - // refine (except for the first cycle), - // setup the degrees of freedom and - // matrices, assemble, solve and create - // output: - for (unsigned int refinement_cycle = 0; refinement_cycle<6; - ++refinement_cycle) - { - std::cout << "Refinement cycle " << refinement_cycle << std::endl; - - if (refinement_cycle > 0) - refine_mesh (); + std::vector + data_component_interpretation + (dim, DataComponentInterpretation::component_is_part_of_vector); + data_component_interpretation + .push_back (DataComponentInterpretation::component_is_scalar); + + DataOut data_out; + data_out.attach_dof_handler (dof_handler); + data_out.add_data_vector (solution, solution_names, + DataOut::type_dof_data, + data_component_interpretation); + data_out.build_patches (); + + std::ostringstream filename; + filename << "solution-" + << Utilities::int_to_string (refinement_cycle, 2) + << ".vtk"; + + std::ofstream output (filename.str().c_str()); + data_out.write_vtk (output); + } - setup_dofs (); - std::cout << " Assembling..." << std::endl << std::flush; - assemble_system (); + // @sect4{StokesProblem::refine_mesh} + + // This is the last interesting function of + // the StokesProblem class. + // As indicated by its name, it takes the + // solution to the problem and refines the + // mesh where this is needed. 
The procedure + // is the same as in the respective step in + // step-6, with the exception that we base + // the refinement only on the change in + // pressure, i.e., we call the Kelly error + // estimator with a mask + // object. Additionally, we do not coarsen + // the grid again: + template + void + StokesProblem::refine_mesh () + { + Vector estimated_error_per_cell (triangulation.n_active_cells()); + + std::vector component_mask (dim+1, false); + component_mask[dim] = true; + KellyErrorEstimator::estimate (dof_handler, + QGauss(degree+1), + typename FunctionMap::type(), + solution, + estimated_error_per_cell, + component_mask); + + GridRefinement::refine_and_coarsen_fixed_number (triangulation, + estimated_error_per_cell, + 0.3, 0.0); + triangulation.execute_coarsening_and_refinement (); + } - std::cout << " Solving..." << std::flush; - solve (); - output_results (refinement_cycle); + // @sect4{StokesProblem::run} - std::cout << std::endl; + // The last step in the Stokes class is, as + // usual, the function that generates the + // initial grid and calls the other + // functions in the respective order. + // + // We start off with a rectangle of size $4 + // \times 1$ (in 2d) or $4 \times 1 \times + // 1$ (in 3d), placed in $R^2/R^3$ as + // $(-2,2)\times(-1,0)$ or + // $(-2,2)\times(0,1)\times(-1,1)$, + // respectively. It is natural to start + // with equal mesh size in each direction, + // so we subdivide the initial rectangle + // four times in the first coordinate + // direction. To limit the scope of the + // variables involved in the creation of + // the mesh to the range where we actually + // need them, we put the entire block + // between a pair of braces: + template + void StokesProblem::run () + { + { + std::vector subdivisions (dim, 1); + subdivisions[0] = 4; + + const Point bottom_left = (dim == 2 ? + Point(-2,-1) : + Point(-2,0,-1)); + const Point top_right = (dim == 2 ? + Point(2,0) : + Point(2,1,0)); + + GridGenerator::subdivided_hyper_rectangle (triangulation, + subdivisions, + bottom_left, + top_right); } + + // A boundary indicator of 1 is set to all + // boundaries that are subject to Dirichlet + // boundary conditions, i.e. to faces that + // are located at 0 in the last coordinate + // direction. See the example description + // above for details. + for (typename Triangulation::active_cell_iterator + cell = triangulation.begin_active(); + cell != triangulation.end(); ++cell) + for (unsigned int f=0; f::faces_per_cell; ++f) + if (cell->face(f)->center()[dim-1] == 0) + cell->face(f)->set_all_boundary_indicators(1); + + + // We then apply an initial refinement + // before solving for the first time. In + // 3D, there are going to be more degrees + // of freedom, so we refine less there: + triangulation.refine_global (4-dim); + + // As first seen in step-6, we cycle over + // the different refinement levels and + // refine (except for the first cycle), + // setup the degrees of freedom and + // matrices, assemble, solve and create + // output: + for (unsigned int refinement_cycle = 0; refinement_cycle<6; + ++refinement_cycle) + { + std::cout << "Refinement cycle " << refinement_cycle << std::endl; + + if (refinement_cycle > 0) + refine_mesh (); + + setup_dofs (); + + std::cout << " Assembling..." << std::endl << std::flush; + assemble_system (); + + std::cout << " Solving..." 
<< std::flush; + solve (); + + output_results (refinement_cycle); + + std::cout << std::endl; + } + } } @@ -1322,6 +1325,9 @@ int main () { try { + using namespace dealii; + using namespace Step22; + deallog.depth_console (0); StokesProblem<2> flow_problem(1); diff --git a/deal.II/examples/step-23/step-23.cc b/deal.II/examples/step-23/step-23.cc index 396d127240..c5b7cfb4db 100644 --- a/deal.II/examples/step-23/step-23.cc +++ b/deal.II/examples/step-23/step-23.cc @@ -4,7 +4,7 @@ /* $Id$ */ /* Version: $Name: $ */ /* */ -/* Copyright (C) 2006, 2007, 2008, 2009 by the deal.II authors */ +/* Copyright (C) 2006, 2007, 2008, 2009, 2011 by the deal.II authors */ /* */ /* This file is subject to QPL and may not be distributed */ /* without copyright and license information. Please refer */ @@ -111,660 +111,663 @@ // The last step is as in all // previous programs: -using namespace dealii; - - - // @sect3{The WaveEquation class} - - // Next comes the declaration of the main - // class. It's public interface of functions - // is like in most of the other tutorial - // programs. Worth mentioning is that we now - // have to store four matrices instead of - // one: the mass matrix $M$, the Laplace - // matrix $A$, the matrix $M+k^2\theta^2A$ - // used for solving for $U^n$, and a copy of - // the mass matrix with boundary conditions - // applied used for solving for $V^n$. Note - // that it is a bit wasteful to have an - // additional copy of the mass matrix - // around. We will discuss strategies for how - // to avoid this in the section on possible - // improvements. - // - // Likewise, we need solution vectors for - // $U^n,V^n$ as well as for the corresponding - // vectors at the previous time step, - // $U^{n-1},V^{n-1}$. The - // system_rhs will be used for - // whatever right hand side vector we have - // when solving one of the two linear systems - // in each time step. These will be solved in - // the two functions solve_u and - // solve_v. - // - // Finally, the variable - // theta is used to - // indicate the parameter $\theta$ - // that is used to define which time - // stepping scheme to use, as - // explained in the introduction. The - // rest is self-explanatory. -template -class WaveEquation +namespace Step23 { - public: - WaveEquation (); - void run (); - - private: - void setup_system (); - void solve_u (); - void solve_v (); - void output_results () const; - - Triangulation triangulation; - FE_Q fe; - DoFHandler dof_handler; - - ConstraintMatrix constraints; - - SparsityPattern sparsity_pattern; - SparseMatrix mass_matrix; - SparseMatrix laplace_matrix; - SparseMatrix matrix_u; - SparseMatrix matrix_v; - - Vector solution_u, solution_v; - Vector old_solution_u, old_solution_v; - Vector system_rhs; - - double time, time_step; - unsigned int timestep_number; - const double theta; -}; - - - - // @sect3{Equation data} - - // Before we go on filling in the - // details of the main class, let us - // define the equation data - // corresponding to the problem, - // i.e. initial and boundary values - // for both the solution $u$ and its - // time derivative $v$, as well as a - // right hand side class. We do so - // using classes derived from the - // Function class template that has - // been used many times before, so - // the following should not be a - // surprise. 
- // - // Let's start with initial values - // and choose zero for both the value - // $u$ as well as its time - // derivative, the velocity $v$: -template -class InitialValuesU : public Function -{ - public: - InitialValuesU () : Function() {} - - virtual double value (const Point &p, - const unsigned int component = 0) const; -}; - - -template -class InitialValuesV : public Function -{ - public: - InitialValuesV () : Function() {} - - virtual double value (const Point &p, - const unsigned int component = 0) const; -}; - - - -template -double InitialValuesU::value (const Point &/*p*/, - const unsigned int component) const -{ - Assert (component == 0, ExcInternalError()); - return 0; -} - - - -template -double InitialValuesV::value (const Point &/*p*/, - const unsigned int component) const -{ - Assert (component == 0, ExcInternalError()); - return 0; -} - - - - // Secondly, we have the right hand - // side forcing term. Boring as we - // are, we choose zero here as well: -template -class RightHandSide : public Function -{ - public: - RightHandSide () : Function() {} - - virtual double value (const Point &p, - const unsigned int component = 0) const; -}; - - - -template -double RightHandSide::value (const Point &/*p*/, - const unsigned int component) const -{ - Assert (component == 0, ExcInternalError()); - return 0; -} - - - - // Finally, we have boundary values for $u$ - // and $v$. They are as described in the - // introduction, one being the time - // derivative of the other: -template -class BoundaryValuesU : public Function -{ - public: - BoundaryValuesU () : Function() {} - - virtual double value (const Point &p, - const unsigned int component = 0) const; -}; - - - - -template -class BoundaryValuesV : public Function -{ - public: - BoundaryValuesV () : Function() {} - - virtual double value (const Point &p, - const unsigned int component = 0) const; -}; - - - - -template -double BoundaryValuesU::value (const Point &p, - const unsigned int component) const -{ - Assert (component == 0, ExcInternalError()); - - if ((this->get_time() <= 0.5) && - (p[0] < 0) && - (p[1] < 1./3) && - (p[1] > -1./3)) - return std::sin (this->get_time() * 4 * numbers::PI); - else + using namespace dealii; + + + // @sect3{The WaveEquation class} + + // Next comes the declaration of the main + // class. It's public interface of functions + // is like in most of the other tutorial + // programs. Worth mentioning is that we now + // have to store four matrices instead of + // one: the mass matrix $M$, the Laplace + // matrix $A$, the matrix $M+k^2\theta^2A$ + // used for solving for $U^n$, and a copy of + // the mass matrix with boundary conditions + // applied used for solving for $V^n$. Note + // that it is a bit wasteful to have an + // additional copy of the mass matrix + // around. We will discuss strategies for how + // to avoid this in the section on possible + // improvements. + // + // Likewise, we need solution vectors for + // $U^n,V^n$ as well as for the corresponding + // vectors at the previous time step, + // $U^{n-1},V^{n-1}$. The + // system_rhs will be used for + // whatever right hand side vector we have + // when solving one of the two linear systems + // in each time step. These will be solved in + // the two functions solve_u and + // solve_v. + // + // Finally, the variable + // theta is used to + // indicate the parameter $\theta$ + // that is used to define which time + // stepping scheme to use, as + // explained in the introduction. The + // rest is self-explanatory. 
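  // (As a quick reminder of what the parameter means: $\theta=0$
  // corresponds to the explicit Euler scheme, $\theta=1$ to the
  // implicit Euler scheme, and $\theta=\frac 12$ -- the value the
  // constructor below sets -- to the Crank-Nicolson scheme.)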
+ template + class WaveEquation + { + public: + WaveEquation (); + void run (); + + private: + void setup_system (); + void solve_u (); + void solve_v (); + void output_results () const; + + Triangulation triangulation; + FE_Q fe; + DoFHandler dof_handler; + + ConstraintMatrix constraints; + + SparsityPattern sparsity_pattern; + SparseMatrix mass_matrix; + SparseMatrix laplace_matrix; + SparseMatrix matrix_u; + SparseMatrix matrix_v; + + Vector solution_u, solution_v; + Vector old_solution_u, old_solution_v; + Vector system_rhs; + + double time, time_step; + unsigned int timestep_number; + const double theta; + }; + + + + // @sect3{Equation data} + + // Before we go on filling in the + // details of the main class, let us + // define the equation data + // corresponding to the problem, + // i.e. initial and boundary values + // for both the solution $u$ and its + // time derivative $v$, as well as a + // right hand side class. We do so + // using classes derived from the + // Function class template that has + // been used many times before, so + // the following should not be a + // surprise. + // + // Let's start with initial values + // and choose zero for both the value + // $u$ as well as its time + // derivative, the velocity $v$: + template + class InitialValuesU : public Function + { + public: + InitialValuesU () : Function() {} + + virtual double value (const Point &p, + const unsigned int component = 0) const; + }; + + + template + class InitialValuesV : public Function + { + public: + InitialValuesV () : Function() {} + + virtual double value (const Point &p, + const unsigned int component = 0) const; + }; + + + + template + double InitialValuesU::value (const Point &/*p*/, + const unsigned int component) const + { + Assert (component == 0, ExcInternalError()); return 0; -} + } -template -double BoundaryValuesV::value (const Point &p, - const unsigned int component) const -{ - Assert (component == 0, ExcInternalError()); - - if ((this->get_time() <= 0.5) && - (p[0] < 0) && - (p[1] < 1./3) && - (p[1] > -1./3)) - return (std::cos (this->get_time() * 4 * numbers::PI) * - 4 * numbers::PI); - else + template + double InitialValuesV::value (const Point &/*p*/, + const unsigned int component) const + { + Assert (component == 0, ExcInternalError()); return 0; -} + } + // Secondly, we have the right hand + // side forcing term. Boring as we + // are, we choose zero here as well: + template + class RightHandSide : public Function + { + public: + RightHandSide () : Function() {} - // @sect3{Implementation of the WaveEquation class} - - // The implementation of the actual logic is - // actually fairly short, since we relegate - // things like assembling the matrices and - // right hand side vectors to the - // library. The rest boils down to not much - // more than 130 lines of actual code, a - // significant fraction of which is - // boilerplate code that can be taken from - // previous example programs (e.g. the - // functions that solve linear systems, or - // that generate output). - // - // Let's start with the constructor (for an - // explanation of the choice of time step, - // see the section on Courant, Friedrichs, - // and Lewy in the introduction): -template -WaveEquation::WaveEquation () : - fe (1), - dof_handler (triangulation), - time_step (1./64), - theta (0.5) -{} - - - // @sect4{WaveEquation::setup_system} - - // The next function is the one that - // sets up the mesh, DoFHandler, and - // matrices and vectors at the - // beginning of the program, - // i.e. 
before the first time - // step. The first few lines are - // pretty much standard if you've - // read through the tutorial programs - // at least up to step-6: -template -void WaveEquation::setup_system () -{ - GridGenerator::hyper_cube (triangulation, -1, 1); - triangulation.refine_global (7); - - std::cout << "Number of active cells: " - << triangulation.n_active_cells() - << std::endl; - - dof_handler.distribute_dofs (fe); - - std::cout << "Number of degrees of freedom: " - << dof_handler.n_dofs() - << std::endl - << std::endl; - - sparsity_pattern.reinit (dof_handler.n_dofs(), - dof_handler.n_dofs(), - dof_handler.max_couplings_between_dofs()); - DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); - sparsity_pattern.compress(); - - // Then comes a block where we have to - // initialize the 3 matrices we need in the - // course of the program: the mass matrix, - // the laplace matrix, and the matrix - // $M+k^2\theta^2A$ used when solving for - // $U^n$ in each time step. - // - // When setting up these matrices, note - // that they all make use of the same - // sparsity pattern object. Finally, the - // reason why matrices and sparsity - // patterns are separate objects in deal.II - // (unlike in many other finite element or - // linear algebra classes) becomes clear: - // in a significant fraction of - // applications, one has to hold several - // matrices that happen to have the same - // sparsity pattern, and there is no reason - // for them not to share this information, - // rather than re-building and wasting - // memory on it several times. - // - // After initializing all of these - // matrices, we call library functions that - // build the Laplace and mass matrices. All - // they need is a DoFHandler object and a - // quadrature formula object that is to be - // used for numerical integration. Note - // that in many respects these functions - // are better than what we would usually do - // in application programs, for example - // because they automatically parallelize - // building the matrices if multiple - // processors are available in a - // machine. The matrices for solving linear - // systems will be filled in the run() - // method because we need to re-apply - // boundary conditions every time step. - mass_matrix.reinit (sparsity_pattern); - laplace_matrix.reinit (sparsity_pattern); - matrix_u.reinit (sparsity_pattern); - matrix_v.reinit (sparsity_pattern); - - MatrixCreator::create_mass_matrix (dof_handler, QGauss(3), - mass_matrix); - MatrixCreator::create_laplace_matrix (dof_handler, QGauss(3), - laplace_matrix); - - // The rest of the function is spent on - // setting vector sizes to the correct - // value. The final line closes the hanging - // node constraints object. Since we work - // on a uniformly refined mesh, no - // constraints exist or have been computed - // (i.e. there was no need to call - // DoFTools::make_hanging_node_constraints - // as in other programs), but we need a - // constraints object in one place further - // down below anyway. 
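  // (If the mesh were adaptively refined, the only change needed at
  // this point would be to actually fill that object before closing
  // it -- sketched here with the same names as used below:
  //
  //   DoFTools::make_hanging_node_constraints (dof_handler, constraints);
  //   constraints.close ();
  //
  // i.e. the same call we already saw in the Stokes program earlier in
  // this patch.)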
- solution_u.reinit (dof_handler.n_dofs()); - solution_v.reinit (dof_handler.n_dofs()); - old_solution_u.reinit (dof_handler.n_dofs()); - old_solution_v.reinit (dof_handler.n_dofs()); - system_rhs.reinit (dof_handler.n_dofs()); - - constraints.close (); -} + virtual double value (const Point &p, + const unsigned int component = 0) const; + }; - // @sect4{WaveEquation::solve_u and WaveEquation::solve_v} - - // The next two functions deal with solving - // the linear systems associated with the - // equations for $U^n$ and $V^n$. Both are - // not particularly interesting as they - // pretty much follow the scheme used in all - // the previous tutorial programs. - // - // One can make little experiments with - // preconditioners for the two matrices we - // have to invert. As it turns out, however, - // for the matrices at hand here, using - // Jacobi or SSOR preconditioners reduces the - // number of iterations necessary to solve - // the linear system slightly, but due to the - // cost of applying the preconditioner it is - // no win in terms of run-time. It is not - // much of a loss either, but let's keep it - // simple and just do without: -template -void WaveEquation::solve_u () -{ - SolverControl solver_control (1000, 1e-8*system_rhs.l2_norm()); - SolverCG<> cg (solver_control); - cg.solve (matrix_u, solution_u, system_rhs, - PreconditionIdentity()); + template + double RightHandSide::value (const Point &/*p*/, + const unsigned int component) const + { + Assert (component == 0, ExcInternalError()); + return 0; + } - std::cout << " u-equation: " << solver_control.last_step() - << " CG iterations." - << std::endl; -} -template -void WaveEquation::solve_v () -{ - SolverControl solver_control (1000, 1e-8*system_rhs.l2_norm()); - SolverCG<> cg (solver_control); + // Finally, we have boundary values for $u$ + // and $v$. They are as described in the + // introduction, one being the time + // derivative of the other: + template + class BoundaryValuesU : public Function + { + public: + BoundaryValuesU () : Function() {} - cg.solve (matrix_v, solution_v, system_rhs, - PreconditionIdentity()); + virtual double value (const Point &p, + const unsigned int component = 0) const; + }; - std::cout << " v-equation: " << solver_control.last_step() - << " CG iterations." - << std::endl; -} - // @sect4{WaveEquation::output_results} + template + class BoundaryValuesV : public Function + { + public: + BoundaryValuesV () : Function() {} - // Likewise, the following function is pretty - // much what we've done before. The only - // thing worth mentioning is how here we - // generate a string representation of the - // time step number padded with leading zeros - // to 3 character length using the - // Utilities::int_to_string function's second - // argument. 
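  // (For example, in time step 42 the call
  // Utilities::int_to_string(timestep_number,3) used below yields the
  // string "042", and the file written is solution-042.gnuplot.)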
-template -void WaveEquation::output_results () const -{ - DataOut data_out; + virtual double value (const Point &p, + const unsigned int component = 0) const; + }; - data_out.attach_dof_handler (dof_handler); - data_out.add_data_vector (solution_u, "U"); - data_out.add_data_vector (solution_v, "V"); - data_out.build_patches (); - const std::string filename = "solution-" + - Utilities::int_to_string (timestep_number, 3) + - ".gnuplot"; - std::ofstream output (filename.c_str()); - data_out.write_gnuplot (output); -} + template + double BoundaryValuesU::value (const Point &p, + const unsigned int component) const + { + Assert (component == 0, ExcInternalError()); + if ((this->get_time() <= 0.5) && + (p[0] < 0) && + (p[1] < 1./3) && + (p[1] > -1./3)) + return std::sin (this->get_time() * 4 * numbers::PI); + else + return 0; + } - // @sect4{WaveEquation::run} - // The following is really the only - // interesting function of the program. It - // contains the loop over all time steps, but - // before we get to that we have to set up - // the grid, DoFHandler, and matrices. In - // addition, we have to somehow get started - // with initial values. To this end, we use - // the VectorTools::project function that - // takes an object that describes a - // continuous function and computes the $L^2$ - // projection of this function onto the - // finite element space described by the - // DoFHandler object. Can't be any simpler - // than that: -template -void WaveEquation::run () -{ - setup_system(); - - VectorTools::project (dof_handler, constraints, QGauss(3), - InitialValuesU(), - old_solution_u); - VectorTools::project (dof_handler, constraints, QGauss(3), - InitialValuesV(), - old_solution_v); - - // The next thing is to loop over all the - // time steps until we reach the end time - // ($T=5$ in this case). In each time step, - // we first have to solve for $U^n$, using - // the equation $(M^n + k^2\theta^2 A^n)U^n - // =$ $(M^{n,n-1} - k^2\theta(1-\theta) - // A^{n,n-1})U^{n-1} + kM^{n,n-1}V^{n-1} +$ - // $k\theta \left[k \theta F^n + k(1-\theta) - // F^{n-1} \right]$. Note that we use the - // same mesh for all time steps, so that - // $M^n=M^{n,n-1}=M$ and - // $A^n=A^{n,n-1}=A$. What we therefore - // have to do first is to add up $MU^{n-1} - // - k^2\theta(1-\theta) AU^{n-1} + kMV^{n-1}$ and - // the forcing terms, and put the result - // into the system_rhs - // vector. (For these additions, we need a - // temporary vector that we declare before - // the loop to avoid repeated memory - // allocations in each time step.) - // - // The one thing to realize here is how we - // communicate the time variable to the - // object describing the right hand side: - // each object derived from the Function - // class has a time field that can be set - // using the Function::set_time and read by - // Function::get_time. In essence, using - // this mechanism, all functions of space - // and time are therefore considered - // functions of space evaluated at a - // particular time. This matches well what - // we typically need in finite element - // programs, where we almost always work on - // a single time step at a time, and where - // it never happens that, for example, one - // would like to evaluate a space-time - // function for all times at any given - // spatial location. 
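  // (In other words, the pattern the loop below uses to evaluate the
  // time dependent right hand side $f(\mathbf x,t)$ at time $t$ is
  // simply -- sketched with a hypothetical evaluation point p --
  //
  //   RightHandSide<dim> rhs_function;
  //   rhs_function.set_time (t);
  //   const double f_of_x_and_t = rhs_function.value (p);
  //
  // and the boundary value objects further down are handled in exactly
  // the same way.)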
- Vector tmp (solution_u.size()); - Vector forcing_terms (solution_u.size()); - - for (timestep_number=1, time=time_step; - time<=5; - time+=time_step, ++timestep_number) - { - std::cout << "Time step " << timestep_number - << " at t=" << time - << std::endl; - - mass_matrix.vmult (system_rhs, old_solution_u); - - mass_matrix.vmult (tmp, old_solution_v); - system_rhs.add (time_step, tmp); - - laplace_matrix.vmult (tmp, old_solution_u); - system_rhs.add (-theta * (1-theta) * time_step * time_step, tmp); - - RightHandSide rhs_function; - rhs_function.set_time (time); - VectorTools::create_right_hand_side (dof_handler, QGauss(2), - rhs_function, tmp); - forcing_terms = tmp; - forcing_terms *= theta * time_step; - - rhs_function.set_time (time-time_step); - VectorTools::create_right_hand_side (dof_handler, QGauss(2), - rhs_function, tmp); - - forcing_terms.add ((1-theta) * time_step, tmp); - - system_rhs.add (theta * time_step, forcing_terms); - - // After so constructing the right hand - // side vector of the first equation, - // all we have to do is apply the - // correct boundary values. As for the - // right hand side, this is a - // space-time function evaluated at a - // particular time, which we - // interpolate at boundary nodes and - // then use the result to apply - // boundary values as we usually - // do. The result is then handed off to - // the solve_u() function: - { - BoundaryValuesU boundary_values_u_function; - boundary_values_u_function.set_time (time); - - std::map boundary_values; - VectorTools::interpolate_boundary_values (dof_handler, - 0, - boundary_values_u_function, - boundary_values); - - // The matrix for solve_u() is the same in - // every time steps, so one could think - // that it is enough to do this only once - // at the beginning of the - // simulation. However, since we need to - // apply boundary values to the linear - // system (which eliminate some matrix rows - // and columns and give contributions to - // the right hand side), we have to refill - // the matrix in every time steps before we - // actually apply boundary data. The actual - // content is very simple: it is the sum of - // the mass matrix and a weighted Laplace - // matrix: - matrix_u.copy_from (mass_matrix); - matrix_u.add (theta * theta * time_step * time_step, laplace_matrix); - MatrixTools::apply_boundary_values (boundary_values, - matrix_u, - solution_u, - system_rhs); - } - solve_u (); + template + double BoundaryValuesV::value (const Point &p, + const unsigned int component) const + { + Assert (component == 0, ExcInternalError()); + if ((this->get_time() <= 0.5) && + (p[0] < 0) && + (p[1] < 1./3) && + (p[1] > -1./3)) + return (std::cos (this->get_time() * 4 * numbers::PI) * + 4 * numbers::PI); + else + return 0; + } - // The second step, i.e. solving for - // $V^n$, works similarly, except that - // this time the matrix on the left is - // the mass matrix (which we copy again - // in order to be able to apply - // boundary conditions, and the right - // hand side is $MV^{n-1} - k\left[ - // \theta A U^n + (1-\theta) - // AU^{n-1}\right]$ plus forcing - // terms. 
%Boundary values are applied - // in the same way as before, except - // that now we have to use the - // BoundaryValuesV class: - laplace_matrix.vmult (system_rhs, solution_u); - system_rhs *= -theta * time_step; - mass_matrix.vmult (tmp, old_solution_v); - system_rhs += tmp; - laplace_matrix.vmult (tmp, old_solution_u); - system_rhs.add (-time_step * (1-theta), tmp); - system_rhs += forcing_terms; + // @sect3{Implementation of the WaveEquation class} + // The implementation of the actual logic is + // actually fairly short, since we relegate + // things like assembling the matrices and + // right hand side vectors to the + // library. The rest boils down to not much + // more than 130 lines of actual code, a + // significant fraction of which is + // boilerplate code that can be taken from + // previous example programs (e.g. the + // functions that solve linear systems, or + // that generate output). + // + // Let's start with the constructor (for an + // explanation of the choice of time step, + // see the section on Courant, Friedrichs, + // and Lewy in the introduction): + template + WaveEquation::WaveEquation () : + fe (1), + dof_handler (triangulation), + time_step (1./64), + theta (0.5) + {} + + + // @sect4{WaveEquation::setup_system} + + // The next function is the one that + // sets up the mesh, DoFHandler, and + // matrices and vectors at the + // beginning of the program, + // i.e. before the first time + // step. The first few lines are + // pretty much standard if you've + // read through the tutorial programs + // at least up to step-6: + template + void WaveEquation::setup_system () + { + GridGenerator::hyper_cube (triangulation, -1, 1); + triangulation.refine_global (7); + + std::cout << "Number of active cells: " + << triangulation.n_active_cells() + << std::endl; + + dof_handler.distribute_dofs (fe); + + std::cout << "Number of degrees of freedom: " + << dof_handler.n_dofs() + << std::endl + << std::endl; + + sparsity_pattern.reinit (dof_handler.n_dofs(), + dof_handler.n_dofs(), + dof_handler.max_couplings_between_dofs()); + DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); + sparsity_pattern.compress(); + + // Then comes a block where we have to + // initialize the 3 matrices we need in the + // course of the program: the mass matrix, + // the laplace matrix, and the matrix + // $M+k^2\theta^2A$ used when solving for + // $U^n$ in each time step. + // + // When setting up these matrices, note + // that they all make use of the same + // sparsity pattern object. Finally, the + // reason why matrices and sparsity + // patterns are separate objects in deal.II + // (unlike in many other finite element or + // linear algebra classes) becomes clear: + // in a significant fraction of + // applications, one has to hold several + // matrices that happen to have the same + // sparsity pattern, and there is no reason + // for them not to share this information, + // rather than re-building and wasting + // memory on it several times. + // + // After initializing all of these + // matrices, we call library functions that + // build the Laplace and mass matrices. All + // they need is a DoFHandler object and a + // quadrature formula object that is to be + // used for numerical integration. Note + // that in many respects these functions + // are better than what we would usually do + // in application programs, for example + // because they automatically parallelize + // building the matrices if multiple + // processors are available in a + // machine. 
The matrices for solving linear + // systems will be filled in the run() + // method because we need to re-apply + // boundary conditions every time step. + mass_matrix.reinit (sparsity_pattern); + laplace_matrix.reinit (sparsity_pattern); + matrix_u.reinit (sparsity_pattern); + matrix_v.reinit (sparsity_pattern); + + MatrixCreator::create_mass_matrix (dof_handler, QGauss(3), + mass_matrix); + MatrixCreator::create_laplace_matrix (dof_handler, QGauss(3), + laplace_matrix); + + // The rest of the function is spent on + // setting vector sizes to the correct + // value. The final line closes the hanging + // node constraints object. Since we work + // on a uniformly refined mesh, no + // constraints exist or have been computed + // (i.e. there was no need to call + // DoFTools::make_hanging_node_constraints + // as in other programs), but we need a + // constraints object in one place further + // down below anyway. + solution_u.reinit (dof_handler.n_dofs()); + solution_v.reinit (dof_handler.n_dofs()); + old_solution_u.reinit (dof_handler.n_dofs()); + old_solution_v.reinit (dof_handler.n_dofs()); + system_rhs.reinit (dof_handler.n_dofs()); + + constraints.close (); + } + + + // @sect4{WaveEquation::solve_u and WaveEquation::solve_v} + + // The next two functions deal with solving + // the linear systems associated with the + // equations for $U^n$ and $V^n$. Both are + // not particularly interesting as they + // pretty much follow the scheme used in all + // the previous tutorial programs. + // + // One can make little experiments with + // preconditioners for the two matrices we + // have to invert. As it turns out, however, + // for the matrices at hand here, using + // Jacobi or SSOR preconditioners reduces the + // number of iterations necessary to solve + // the linear system slightly, but due to the + // cost of applying the preconditioner it is + // no win in terms of run-time. It is not + // much of a loss either, but let's keep it + // simple and just do without: + template + void WaveEquation::solve_u () + { + SolverControl solver_control (1000, 1e-8*system_rhs.l2_norm()); + SolverCG<> cg (solver_control); + + cg.solve (matrix_u, solution_u, system_rhs, + PreconditionIdentity()); + + std::cout << " u-equation: " << solver_control.last_step() + << " CG iterations." + << std::endl; + } + + + template + void WaveEquation::solve_v () + { + SolverControl solver_control (1000, 1e-8*system_rhs.l2_norm()); + SolverCG<> cg (solver_control); + + cg.solve (matrix_v, solution_v, system_rhs, + PreconditionIdentity()); + + std::cout << " v-equation: " << solver_control.last_step() + << " CG iterations." + << std::endl; + } + + + + // @sect4{WaveEquation::output_results} + + // Likewise, the following function is pretty + // much what we've done before. The only + // thing worth mentioning is how here we + // generate a string representation of the + // time step number padded with leading zeros + // to 3 character length using the + // Utilities::int_to_string function's second + // argument. 
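A small, hypothetical illustration of that second argument, not taken from the
patch itself: for a time step number of 7, the call

    const std::string padded = Utilities::int_to_string (7, 3);

returns the string "007", so that file names such as solution-007.gnuplot sort
in their natural order.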
+ template + void WaveEquation::output_results () const + { + DataOut data_out; + + data_out.attach_dof_handler (dof_handler); + data_out.add_data_vector (solution_u, "U"); + data_out.add_data_vector (solution_v, "V"); + + data_out.build_patches (); + + const std::string filename = "solution-" + + Utilities::int_to_string (timestep_number, 3) + + ".gnuplot"; + std::ofstream output (filename.c_str()); + data_out.write_gnuplot (output); + } + + + + + // @sect4{WaveEquation::run} + + // The following is really the only + // interesting function of the program. It + // contains the loop over all time steps, but + // before we get to that we have to set up + // the grid, DoFHandler, and matrices. In + // addition, we have to somehow get started + // with initial values. To this end, we use + // the VectorTools::project function that + // takes an object that describes a + // continuous function and computes the $L^2$ + // projection of this function onto the + // finite element space described by the + // DoFHandler object. Can't be any simpler + // than that: + template + void WaveEquation::run () + { + setup_system(); + + VectorTools::project (dof_handler, constraints, QGauss(3), + InitialValuesU(), + old_solution_u); + VectorTools::project (dof_handler, constraints, QGauss(3), + InitialValuesV(), + old_solution_v); + + // The next thing is to loop over all the + // time steps until we reach the end time + // ($T=5$ in this case). In each time step, + // we first have to solve for $U^n$, using + // the equation $(M^n + k^2\theta^2 A^n)U^n + // =$ $(M^{n,n-1} - k^2\theta(1-\theta) + // A^{n,n-1})U^{n-1} + kM^{n,n-1}V^{n-1} +$ + // $k\theta \left[k \theta F^n + k(1-\theta) + // F^{n-1} \right]$. Note that we use the + // same mesh for all time steps, so that + // $M^n=M^{n,n-1}=M$ and + // $A^n=A^{n,n-1}=A$. What we therefore + // have to do first is to add up $MU^{n-1} + // - k^2\theta(1-\theta) AU^{n-1} + kMV^{n-1}$ and + // the forcing terms, and put the result + // into the system_rhs + // vector. (For these additions, we need a + // temporary vector that we declare before + // the loop to avoid repeated memory + // allocations in each time step.) + // + // The one thing to realize here is how we + // communicate the time variable to the + // object describing the right hand side: + // each object derived from the Function + // class has a time field that can be set + // using the Function::set_time and read by + // Function::get_time. In essence, using + // this mechanism, all functions of space + // and time are therefore considered + // functions of space evaluated at a + // particular time. This matches well what + // we typically need in finite element + // programs, where we almost always work on + // a single time step at a time, and where + // it never happens that, for example, one + // would like to evaluate a space-time + // function for all times at any given + // spatial location. 
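A minimal, hypothetical sketch of this mechanism, using the BoundaryValuesU
class declared earlier in this file (the point and the two times are made up
purely for illustration):

    BoundaryValuesU<2> g;
    g.set_time (0.25);
    const double g_early = g.value (Point<2>(-1., 0.));  // boundary data at t=0.25
    g.set_time (1.);
    const double g_late  = g.value (Point<2>(-1., 0.));  // same object, now at t=1

Each call to value() is purely spatial; the time enters only through the value
stored by set_time() and read back with get_time() inside the function.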
+ Vector tmp (solution_u.size()); + Vector forcing_terms (solution_u.size()); + + for (timestep_number=1, time=time_step; + time<=5; + time+=time_step, ++timestep_number) { - BoundaryValuesV boundary_values_v_function; - boundary_values_v_function.set_time (time); - - std::map boundary_values; - VectorTools::interpolate_boundary_values (dof_handler, - 0, - boundary_values_v_function, - boundary_values); - matrix_v.copy_from (mass_matrix); - MatrixTools::apply_boundary_values (boundary_values, - matrix_v, - solution_v, - system_rhs); + std::cout << "Time step " << timestep_number + << " at t=" << time + << std::endl; + + mass_matrix.vmult (system_rhs, old_solution_u); + + mass_matrix.vmult (tmp, old_solution_v); + system_rhs.add (time_step, tmp); + + laplace_matrix.vmult (tmp, old_solution_u); + system_rhs.add (-theta * (1-theta) * time_step * time_step, tmp); + + RightHandSide rhs_function; + rhs_function.set_time (time); + VectorTools::create_right_hand_side (dof_handler, QGauss(2), + rhs_function, tmp); + forcing_terms = tmp; + forcing_terms *= theta * time_step; + + rhs_function.set_time (time-time_step); + VectorTools::create_right_hand_side (dof_handler, QGauss(2), + rhs_function, tmp); + + forcing_terms.add ((1-theta) * time_step, tmp); + + system_rhs.add (theta * time_step, forcing_terms); + + // After so constructing the right hand + // side vector of the first equation, + // all we have to do is apply the + // correct boundary values. As for the + // right hand side, this is a + // space-time function evaluated at a + // particular time, which we + // interpolate at boundary nodes and + // then use the result to apply + // boundary values as we usually + // do. The result is then handed off to + // the solve_u() function: + { + BoundaryValuesU boundary_values_u_function; + boundary_values_u_function.set_time (time); + + std::map boundary_values; + VectorTools::interpolate_boundary_values (dof_handler, + 0, + boundary_values_u_function, + boundary_values); + + // The matrix for solve_u() is the same in + // every time steps, so one could think + // that it is enough to do this only once + // at the beginning of the + // simulation. However, since we need to + // apply boundary values to the linear + // system (which eliminate some matrix rows + // and columns and give contributions to + // the right hand side), we have to refill + // the matrix in every time steps before we + // actually apply boundary data. The actual + // content is very simple: it is the sum of + // the mass matrix and a weighted Laplace + // matrix: + matrix_u.copy_from (mass_matrix); + matrix_u.add (theta * theta * time_step * time_step, laplace_matrix); + MatrixTools::apply_boundary_values (boundary_values, + matrix_u, + solution_u, + system_rhs); + } + solve_u (); + + + // The second step, i.e. solving for + // $V^n$, works similarly, except that + // this time the matrix on the left is + // the mass matrix (which we copy again + // in order to be able to apply + // boundary conditions, and the right + // hand side is $MV^{n-1} - k\left[ + // \theta A U^n + (1-\theta) + // AU^{n-1}\right]$ plus forcing + // terms. 
%Boundary values are applied + // in the same way as before, except + // that now we have to use the + // BoundaryValuesV class: + laplace_matrix.vmult (system_rhs, solution_u); + system_rhs *= -theta * time_step; + + mass_matrix.vmult (tmp, old_solution_v); + system_rhs += tmp; + + laplace_matrix.vmult (tmp, old_solution_u); + system_rhs.add (-time_step * (1-theta), tmp); + + system_rhs += forcing_terms; + + { + BoundaryValuesV boundary_values_v_function; + boundary_values_v_function.set_time (time); + + std::map boundary_values; + VectorTools::interpolate_boundary_values (dof_handler, + 0, + boundary_values_v_function, + boundary_values); + matrix_v.copy_from (mass_matrix); + MatrixTools::apply_boundary_values (boundary_values, + matrix_v, + solution_v, + system_rhs); + } + solve_v (); + + // Finally, after both solution + // components have been computed, we + // output the result, compute the + // energy in the solution, and go on to + // the next time step after shifting + // the present solution into the + // vectors that hold the solution at + // the previous time step. Note the + // function + // SparseMatrix::matrix_norm_square + // that can compute + // $\left$ and + // $\left$ in one step, + // saving us the expense of a temporary + // vector and several lines of code: + output_results (); + + std::cout << " Total energy: " + << (mass_matrix.matrix_norm_square (solution_v) + + laplace_matrix.matrix_norm_square (solution_u)) / 2 + << std::endl; + + old_solution_u = solution_u; + old_solution_v = solution_v; } - solve_v (); - - // Finally, after both solution - // components have been computed, we - // output the result, compute the - // energy in the solution, and go on to - // the next time step after shifting - // the present solution into the - // vectors that hold the solution at - // the previous time step. Note the - // function - // SparseMatrix::matrix_norm_square - // that can compute - // $\left$ and - // $\left$ in one step, - // saving us the expense of a temporary - // vector and several lines of code: - output_results (); - - std::cout << " Total energy: " - << (mass_matrix.matrix_norm_square (solution_v) + - laplace_matrix.matrix_norm_square (solution_u)) / 2 - << std::endl; - - old_solution_u = solution_u; - old_solution_v = solution_v; - } + } } @@ -774,11 +777,15 @@ void WaveEquation::run () // program. There is nothing here that hasn't // been shown in several of the previous // programs: -int main () +int main () { try { + using namespace dealii; + using namespace Step23; + deallog.depth_console (0); + WaveEquation<2> wave_equation_solver; wave_equation_solver.run (); } @@ -795,7 +802,7 @@ int main () return 1; } - catch (...) + catch (...) { std::cerr << std::endl << std::endl << "----------------------------------------------------" @@ -806,6 +813,6 @@ int main () << std::endl; return 1; } - + return 0; } diff --git a/deal.II/examples/step-24/step-24.cc b/deal.II/examples/step-24/step-24.cc index a200b6b377..a4d70c28e8 100644 --- a/deal.II/examples/step-24/step-24.cc +++ b/deal.II/examples/step-24/step-24.cc @@ -1,14 +1,14 @@ /* $Id$ */ /* Version: $Name: $ */ /* */ -/* Copyright (C) 2006, 2007, 2008, 2009 by the deal.II authors */ +/* Copyright (C) 2006, 2007, 2008, 2009, 2011 by the deal.II authors */ /* Author: Xing Jin, Wolfgang Bangerth, Texas A&M University, 2006 */ /* */ /* This file is subject to QPL and may not be distributed */ /* without copyright and license information. 
Please refer */ /* to the file deal.II/doc/license.html for the text and */ /* further information on this license. */ - + // @sect3{Include files} @@ -54,596 +54,599 @@ // The last step is as in all // previous programs: -using namespace dealii; - - // @sect3{The "forward problem" class template} - - // The first part of the main class is - // exactly as in step-23 - // (except for the name): -template -class TATForwardProblem +namespace Step24 { - public: - TATForwardProblem (); - void run (); - - private: - void setup_system (); - void solve_p (); - void solve_v (); - void output_results () const; - - Triangulation triangulation; - FE_Q fe; - DoFHandler dof_handler; - - ConstraintMatrix constraints; - - SparsityPattern sparsity_pattern; - SparseMatrix system_matrix; - SparseMatrix mass_matrix; - SparseMatrix laplace_matrix; - - Vector solution_p, solution_v; - Vector old_solution_p, old_solution_v; - Vector system_rhs_p, system_rhs_v; - - double time, time_step; - unsigned int timestep_number; - const double theta; - - // Here's what's new: first, we need - // that boundary mass matrix $B$ that - // came out of the absorbing boundary - // condition. Likewise, since this time - // we consider a realistic medium, we - // must have a measure of the wave speed - // $c_0$ that will enter all the - // formulas with the Laplace matrix - // (which we still define as $(\nabla - // \phi_i,\nabla \phi_j)$): - SparseMatrix boundary_matrix; - const double wave_speed; - - // The last thing we have to take care of - // is that we wanted to evaluate the - // solution at a certain number of - // detector locations. We need an array - // to hold these locations, declared here - // and filled in the constructor: - std::vector > detector_locations; -}; - - - // @sect3{Equation data} - - // As usual, we have to define our - // initial values, boundary - // conditions, and right hand side - // functions. Except things are a bit - // simpler this time: we are to - // consider a problem that is driven - // by initial conditions, so there is - // no right hand side function - // (though you could look up in - // step-23 to see how this can be - // done. Secondly, there are no - // boundary conditions: the entire - // boundary of the domain consists of - // absorbing boundary - // conditions. That only leaves - // initial conditions, and there - // things are simple too since for - // this particular application only - // nonzero initial conditions for the - // pressure are prescribed, not for - // the velocity (which is zero at the - // initial time). - // - // So this is all we need: a class that - // specifies initial conditions for the - // pressure. In the physical setting - // considered in this program, these are - // small absorbers, which we model as a - // series of little circles where we assume - // that the pressure surplus is one, whereas - // no absorption and therefore no pressure - // surplus is anywhere else. 
This is how we - // do things (note that if we wanted to - // expand this program to not only compile - // but also to run, we would have to - // initialize the sources with - // three-dimensional source locations): -template -class InitialValuesP : public Function -{ - public: - InitialValuesP () - : - Function() - {} - - virtual double value (const Point &p, - const unsigned int component = 0) const; - - private: - struct Source - { - Source (const Point &l, - const double r) - : - location (l), - radius (r) - {} - - const Point location; - const double radius; - }; -}; - - -template -double InitialValuesP::value (const Point &p, - const unsigned int /*component*/) const -{ - static const Source sources[] = {Source (Point (0, 0), 0.025), - Source (Point (-0.135, 0), 0.05), - Source (Point (0.17, 0), 0.03), - Source (Point (-0.25, 0), 0.02), - Source (Point (-0.05, -0.15), 0.015)}; - static const unsigned int n_sources = sizeof(sources)/sizeof(sources[0]); - - for (unsigned int i=0; iTATForwardProblem class} - - // Let's start again with the - // constructor. Setting the member variables - // is straightforward. We use the acoustic - // wave speed of mineral oil (in millimeters - // per microsecond, a common unit in - // experimental biomedical imaging) since - // this is where many of the experiments we - // want to compare the output with are made - // in. The Crank-Nicolson scheme is used - // again, i.e. theta is set to 0.5. The time - // step is later selected to satisfy $k = - // \frac hc$ -template -TATForwardProblem::TATForwardProblem () - : - fe (1), - dof_handler (triangulation), - theta (0.5), - wave_speed (1.437) -{ - // The second task in the constructor is to - // initialize the array that holds the - // detector locations. The results of this - // program were compared with experiments - // in which the step size of the detector - // spacing is 2.25 degree, corresponding to - // 160 detector locations. The radius of - // the scanning circle is selected to be - // half way between the center and the - // boundary to avoid that the remaining - // reflections from the imperfect boundary - // condition spoils our numerical results. + // The first part of the main class is + // exactly as in step-23 + // (except for the name): + template + class TATForwardProblem + { + public: + TATForwardProblem (); + void run (); + + private: + void setup_system (); + void solve_p (); + void solve_v (); + void output_results () const; + + Triangulation triangulation; + FE_Q fe; + DoFHandler dof_handler; + + ConstraintMatrix constraints; + + SparsityPattern sparsity_pattern; + SparseMatrix system_matrix; + SparseMatrix mass_matrix; + SparseMatrix laplace_matrix; + + Vector solution_p, solution_v; + Vector old_solution_p, old_solution_v; + Vector system_rhs_p, system_rhs_v; + + double time, time_step; + unsigned int timestep_number; + const double theta; + + // Here's what's new: first, we need + // that boundary mass matrix $B$ that + // came out of the absorbing boundary + // condition. Likewise, since this time + // we consider a realistic medium, we + // must have a measure of the wave speed + // $c_0$ that will enter all the + // formulas with the Laplace matrix + // (which we still define as $(\nabla + // \phi_i,\nabla \phi_j)$): + SparseMatrix boundary_matrix; + const double wave_speed; + + // The last thing we have to take care of + // is that we wanted to evaluate the + // solution at a certain number of + // detector locations. 
We need an array + // to hold these locations, declared here + // and filled in the constructor: + std::vector > detector_locations; + }; + + + // @sect3{Equation data} + + // As usual, we have to define our + // initial values, boundary + // conditions, and right hand side + // functions. Except things are a bit + // simpler this time: we are to + // consider a problem that is driven + // by initial conditions, so there is + // no right hand side function + // (though you could look up in + // step-23 to see how this can be + // done. Secondly, there are no + // boundary conditions: the entire + // boundary of the domain consists of + // absorbing boundary + // conditions. That only leaves + // initial conditions, and there + // things are simple too since for + // this particular application only + // nonzero initial conditions for the + // pressure are prescribed, not for + // the velocity (which is zero at the + // initial time). // - // The locations of the detectors are then - // calculated in clockwise order. Note that - // the following of course only works if we - // are computing in 2d, a condition that we - // guard with an assertion. If we later - // wanted to run the same program in 3d, we - // would have to add code here for the - // initialization of detector locations in - // 3d. Due to the assertion, there is no - // way we can forget to do this. - Assert (dim == 2, ExcNotImplemented()); - - const double detector_step_angle = 2.25; - const double detector_radius = 0.5; - - for (double detector_angle = 2*numbers::PI; - detector_angle >= 0; - detector_angle -= detector_step_angle/360*2*numbers::PI) - detector_locations.push_back (Point (std::cos(detector_angle), - std::sin(detector_angle)) * - detector_radius); -} + // So this is all we need: a class that + // specifies initial conditions for the + // pressure. In the physical setting + // considered in this program, these are + // small absorbers, which we model as a + // series of little circles where we assume + // that the pressure surplus is one, whereas + // no absorption and therefore no pressure + // surplus is anywhere else. This is how we + // do things (note that if we wanted to + // expand this program to not only compile + // but also to run, we would have to + // initialize the sources with + // three-dimensional source locations): + template + class InitialValuesP : public Function + { + public: + InitialValuesP () + : + Function() + {} + + virtual double value (const Point &p, + const unsigned int component = 0) const; + + private: + struct Source + { + Source (const Point &l, + const double r) + : + location (l), + radius (r) + {} + + const Point location; + const double radius; + }; + }; + + + template + double InitialValuesP::value (const Point &p, + const unsigned int /*component*/) const + { + static const Source sources[] = {Source (Point (0, 0), 0.025), + Source (Point (-0.135, 0), 0.05), + Source (Point (0.17, 0), 0.03), + Source (Point (-0.25, 0), 0.02), + Source (Point (-0.05, -0.15), 0.015)}; + static const unsigned int n_sources = sizeof(sources)/sizeof(sources[0]); + + for (unsigned int i=0; iTATForwardProblem class} + + // Let's start again with the + // constructor. Setting the member variables + // is straightforward. We use the acoustic + // wave speed of mineral oil (in millimeters + // per microsecond, a common unit in + // experimental biomedical imaging) since + // this is where many of the experiments we + // want to compare the output with are made + // in. 
The Crank-Nicolson scheme is used + // again, i.e. theta is set to 0.5. The time + // step is later selected to satisfy $k = + // \frac hc$ + template + TATForwardProblem::TATForwardProblem () + : + fe (1), + dof_handler (triangulation), + theta (0.5), + wave_speed (1.437) + { + // The second task in the constructor is to + // initialize the array that holds the + // detector locations. The results of this + // program were compared with experiments + // in which the step size of the detector + // spacing is 2.25 degree, corresponding to + // 160 detector locations. The radius of + // the scanning circle is selected to be + // half way between the center and the + // boundary to avoid that the remaining + // reflections from the imperfect boundary + // condition spoils our numerical results. + // + // The locations of the detectors are then + // calculated in clockwise order. Note that + // the following of course only works if we + // are computing in 2d, a condition that we + // guard with an assertion. If we later + // wanted to run the same program in 3d, we + // would have to add code here for the + // initialization of detector locations in + // 3d. Due to the assertion, there is no + // way we can forget to do this. + Assert (dim == 2, ExcNotImplemented()); + + const double detector_step_angle = 2.25; + const double detector_radius = 0.5; + + for (double detector_angle = 2*numbers::PI; + detector_angle >= 0; + detector_angle -= detector_step_angle/360*2*numbers::PI) + detector_locations.push_back (Point (std::cos(detector_angle), + std::sin(detector_angle)) * + detector_radius); + } - // @sect4{TATForwardProblem::setup_system} - - // The following system is pretty much what - // we've already done in - // step-23, but with two important - // differences. First, we have to create a - // circular (or spherical) mesh around the - // origin, with a radius of 1. This nothing - // new: we've done so before in - // step-6, step-10, and - // step-11, where we also explain - // how to attach a boundary object to a - // triangulation to be used whenever the - // triangulation needs to know where new - // boundary points lie when a cell is - // refined. Following this, the mesh is - // refined a number of times. - // - // One thing we had to make sure is that the - // time step satisfies the CFL condition - // discussed in the introduction of - // step-23. Back in that program, - // we ensured this by hand by setting a - // timestep that matches the mesh width, but - // that was error prone because if we refined - // the mesh once more we would also have to - // make sure the time step is changed. Here, - // we do that automatically: we ask a library - // function for the minimal diameter of any - // cell. Then we set $k=\frac h{c_0}$. The - // only problem is: what exactly is $h$? The - // point is that there is really no good - // theory on this question for the wave - // equation. It is known that for uniformly - // refined meshes consisting of rectangles, - // $h$ is the minimal edge length. But for - // meshes on general quadrilaterals, the - // exact relationship appears to be unknown, - // i.e. it is unknown what properties of - // cells are relevant for the CFL - // condition. The problem is that the CFL - // condition follows from knowledge of the - // smallest eigenvalue of the Laplace matrix, - // and that can only be computed analytically - // for simply structured meshes. - // - // The upshot of all this is that we're not - // quite sure what exactly we should take for - // $h$. 
The function - // GridTools::minimal_cell_diameter computes - // the minimal diameter of all cells. If the - // cells were all squares or cubes, then the - // minimal edge length would be the minimal - // diameter divided by - // std::sqrt(dim). We simply - // generalize this, without theoretical - // justification, to the case of non-uniform - // meshes. - // - // The only other significant change is that - // we need to build the boundary mass - // matrix. We will comment on this further - // down below. -template -void TATForwardProblem::setup_system () -{ - const Point center; - GridGenerator::hyper_ball (triangulation, center, 1.); - static const HyperBallBoundary boundary_description (center, 1.); - triangulation.set_boundary (0,boundary_description); - triangulation.refine_global (7); - - time_step = GridTools::minimal_cell_diameter(triangulation) / - wave_speed / - std::sqrt (1.*dim); - - std::cout << "Number of active cells: " - << triangulation.n_active_cells() - << std::endl; - - dof_handler.distribute_dofs (fe); - - std::cout << "Number of degrees of freedom: " - << dof_handler.n_dofs() - << std::endl - << std::endl; - - sparsity_pattern.reinit (dof_handler.n_dofs(), - dof_handler.n_dofs(), - dof_handler.max_couplings_between_dofs()); - DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); - sparsity_pattern.compress(); - - system_matrix.reinit (sparsity_pattern); - mass_matrix.reinit (sparsity_pattern); - laplace_matrix.reinit (sparsity_pattern); - - MatrixCreator::create_mass_matrix (dof_handler, QGauss(3), - mass_matrix); - MatrixCreator::create_laplace_matrix (dof_handler, QGauss(3), - laplace_matrix); - - // The second difference, as mentioned, to - // step-23 is that we need - // to build the boundary mass matrix that - // grew out of the absorbing boundary - // conditions. + + + // @sect4{TATForwardProblem::setup_system} + + // The following system is pretty much what + // we've already done in + // step-23, but with two important + // differences. First, we have to create a + // circular (or spherical) mesh around the + // origin, with a radius of 1. This nothing + // new: we've done so before in + // step-6, step-10, and + // step-11, where we also explain + // how to attach a boundary object to a + // triangulation to be used whenever the + // triangulation needs to know where new + // boundary points lie when a cell is + // refined. Following this, the mesh is + // refined a number of times. // - // A first observation would be that this - // matrix is much sparser than the regular - // mass matrix, since none of the shape - // functions with purely interior support - // contributes to this matrix. We could - // therefore optimize the storage pattern - // to this situation and build up a second - // sparsity pattern that only contains the - // nonzero entries that we need. There is a - // trade-off to make here: first, we would - // have to have a second sparsity pattern - // object, so that costs memory. Secondly, - // the matrix attached to this sparsity - // pattern is going to be smaller and - // therefore requires less memory; it would - // also be faster to perform matrix-vector - // multiplications with it. 
The final - // argument, however, is the one that tips - // the scale: we are not primarily - // interested in performing matrix-vector - // with the boundary matrix alone (though - // we need to do that for the right hand - // side vector once per time step), but - // mostly wish to add it up to the other - // matrices used in the first of the two - // equations since this is the one that is - // going to be multiplied with once per - // iteration of the CG method, - // i.e. significantly more often. It is now - // the case that the SparseMatrix::add - // class allows to add one matrix to - // another, but only if they use the same - // sparsity pattern (the reason being that - // we can't add nonzero entries to a matrix - // after the sparsity pattern has been - // created, so we simply require that the - // two matrices have the same sparsity - // pattern). + // One thing we had to make sure is that the + // time step satisfies the CFL condition + // discussed in the introduction of + // step-23. Back in that program, + // we ensured this by hand by setting a + // timestep that matches the mesh width, but + // that was error prone because if we refined + // the mesh once more we would also have to + // make sure the time step is changed. Here, + // we do that automatically: we ask a library + // function for the minimal diameter of any + // cell. Then we set $k=\frac h{c_0}$. The + // only problem is: what exactly is $h$? The + // point is that there is really no good + // theory on this question for the wave + // equation. It is known that for uniformly + // refined meshes consisting of rectangles, + // $h$ is the minimal edge length. But for + // meshes on general quadrilaterals, the + // exact relationship appears to be unknown, + // i.e. it is unknown what properties of + // cells are relevant for the CFL + // condition. The problem is that the CFL + // condition follows from knowledge of the + // smallest eigenvalue of the Laplace matrix, + // and that can only be computed analytically + // for simply structured meshes. // - // So let's go with that: - boundary_matrix.reinit (sparsity_pattern); - - // The second thing to do is to actually - // build the matrix. Here, we need to - // integrate over faces of cells, so first - // we need a quadrature object that works - // on dim-1 dimensional - // objects. Secondly, the FEFaceValues - // variant of FEValues that works on faces, - // as its name suggest. And finally, the - // other variables that are part of the - // assembly machinery. All of this we put - // between curly braces to limit the scope - // of these variables to where we actually - // need them. + // The upshot of all this is that we're not + // quite sure what exactly we should take for + // $h$. The function + // GridTools::minimal_cell_diameter computes + // the minimal diameter of all cells. If the + // cells were all squares or cubes, then the + // minimal edge length would be the minimal + // diameter divided by + // std::sqrt(dim). We simply + // generalize this, without theoretical + // justification, to the case of non-uniform + // meshes. // - // The actual act of assembling the matrix - // is then fairly straightforward: we loop - // over all cells, over all faces of each - // of these cells, and then do something - // only if that particular face is at the - // boundary of the domain. Like this: + // The only other significant change is that + // we need to build the boundary mass + // matrix. We will comment on this further + // down below. 
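Two brief clarifications of the preceding paragraphs. First, the heuristic for
$h$: for a square cell of edge length $a$ the diameter is the diagonal
$a\sqrt{2}$, so dividing the minimal diameter by the square root of the space
dimension recovers exactly the edge length $a$ (and likewise for a cube, whose
diameter is $a\sqrt{3}$). Second, written out, the boundary matrix assembled in
the function below has the entries
$B_{ij} = \int_{\partial\Omega} \varphi_i \varphi_j \, ds$,
i.e. it is a mass matrix restricted to the boundary, to which only shape
functions with support on $\partial\Omega$ contribute.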
+ template + void TATForwardProblem::setup_system () { - const QGauss quadrature_formula(3); - FEFaceValues fe_values (fe, quadrature_formula, - update_values | update_JxW_values); + const Point center; + GridGenerator::hyper_ball (triangulation, center, 1.); + static const HyperBallBoundary boundary_description (center, 1.); + triangulation.set_boundary (0,boundary_description); + triangulation.refine_global (7); + + time_step = GridTools::minimal_cell_diameter(triangulation) / + wave_speed / + std::sqrt (1.*dim); + + std::cout << "Number of active cells: " + << triangulation.n_active_cells() + << std::endl; + + dof_handler.distribute_dofs (fe); + + std::cout << "Number of degrees of freedom: " + << dof_handler.n_dofs() + << std::endl + << std::endl; + + sparsity_pattern.reinit (dof_handler.n_dofs(), + dof_handler.n_dofs(), + dof_handler.max_couplings_between_dofs()); + DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); + sparsity_pattern.compress(); + + system_matrix.reinit (sparsity_pattern); + mass_matrix.reinit (sparsity_pattern); + laplace_matrix.reinit (sparsity_pattern); + + MatrixCreator::create_mass_matrix (dof_handler, QGauss(3), + mass_matrix); + MatrixCreator::create_laplace_matrix (dof_handler, QGauss(3), + laplace_matrix); + + // The second difference, as mentioned, to + // step-23 is that we need + // to build the boundary mass matrix that + // grew out of the absorbing boundary + // conditions. + // + // A first observation would be that this + // matrix is much sparser than the regular + // mass matrix, since none of the shape + // functions with purely interior support + // contributes to this matrix. We could + // therefore optimize the storage pattern + // to this situation and build up a second + // sparsity pattern that only contains the + // nonzero entries that we need. There is a + // trade-off to make here: first, we would + // have to have a second sparsity pattern + // object, so that costs memory. Secondly, + // the matrix attached to this sparsity + // pattern is going to be smaller and + // therefore requires less memory; it would + // also be faster to perform matrix-vector + // multiplications with it. The final + // argument, however, is the one that tips + // the scale: we are not primarily + // interested in performing matrix-vector + // with the boundary matrix alone (though + // we need to do that for the right hand + // side vector once per time step), but + // mostly wish to add it up to the other + // matrices used in the first of the two + // equations since this is the one that is + // going to be multiplied with once per + // iteration of the CG method, + // i.e. significantly more often. It is now + // the case that the SparseMatrix::add + // class allows to add one matrix to + // another, but only if they use the same + // sparsity pattern (the reason being that + // we can't add nonzero entries to a matrix + // after the sparsity pattern has been + // created, so we simply require that the + // two matrices have the same sparsity + // pattern). + // + // So let's go with that: + boundary_matrix.reinit (sparsity_pattern); + + // The second thing to do is to actually + // build the matrix. Here, we need to + // integrate over faces of cells, so first + // we need a quadrature object that works + // on dim-1 dimensional + // objects. Secondly, the FEFaceValues + // variant of FEValues that works on faces, + // as its name suggest. And finally, the + // other variables that are part of the + // assembly machinery. 
All of this we put + // between curly braces to limit the scope + // of these variables to where we actually + // need them. + // + // The actual act of assembling the matrix + // is then fairly straightforward: we loop + // over all cells, over all faces of each + // of these cells, and then do something + // only if that particular face is at the + // boundary of the domain. Like this: + { + const QGauss quadrature_formula(3); + FEFaceValues fe_values (fe, quadrature_formula, + update_values | update_JxW_values); + + const unsigned int dofs_per_cell = fe.dofs_per_cell; + const unsigned int n_q_points = quadrature_formula.size(); + + FullMatrix cell_matrix (dofs_per_cell, dofs_per_cell); + + std::vector local_dof_indices (dofs_per_cell); - const unsigned int dofs_per_cell = fe.dofs_per_cell; - const unsigned int n_q_points = quadrature_formula.size(); - FullMatrix cell_matrix (dofs_per_cell, dofs_per_cell); - std::vector local_dof_indices (dofs_per_cell); + typename DoFHandler::active_cell_iterator + cell = dof_handler.begin_active(), + endc = dof_handler.end(); + for (; cell!=endc; ++cell) + for (unsigned int f=0; f::faces_per_cell; ++f) + if (cell->at_boundary(f)) + { + cell_matrix = 0; - - - typename DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(), - endc = dof_handler.end(); - for (; cell!=endc; ++cell) - for (unsigned int f=0; f::faces_per_cell; ++f) - if (cell->at_boundary(f)) - { - cell_matrix = 0; + fe_values.reinit (cell, f); - fe_values.reinit (cell, f); + for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); for (unsigned int i=0; iget_dof_indices (local_dof_indices); - for (unsigned int i=0; i + void TATForwardProblem::solve_p () + { + SolverControl solver_control (1000, 1e-8*system_rhs_p.l2_norm()); + SolverCG<> cg (solver_control); - constraints.close (); -} + cg.solve (system_matrix, solution_p, system_rhs_p, + PreconditionIdentity()); + std::cout << " p-equation: " << solver_control.last_step() + << " CG iterations." + << std::endl; + } - // @sect4{TATForwardProblem::solve_p and TATForwardProblem::solve_v} - // The following two functions, solving the - // linear systems for the pressure and the - // velocity variable, are taken pretty much - // verbatim (with the exception of the change - // of name from $u$ to $p$ of the primary - // variable) from step-23: -template -void TATForwardProblem::solve_p () -{ - SolverControl solver_control (1000, 1e-8*system_rhs_p.l2_norm()); - SolverCG<> cg (solver_control); + template + void TATForwardProblem::solve_v () + { + SolverControl solver_control (1000, 1e-8*system_rhs_v.l2_norm()); + SolverCG<> cg (solver_control); - cg.solve (system_matrix, solution_p, system_rhs_p, - PreconditionIdentity()); + cg.solve (mass_matrix, solution_v, system_rhs_v, + PreconditionIdentity()); - std::cout << " p-equation: " << solver_control.last_step() - << " CG iterations." - << std::endl; -} + std::cout << " v-equation: " << solver_control.last_step() + << " CG iterations." + << std::endl; + } -template -void TATForwardProblem::solve_v () -{ - SolverControl solver_control (1000, 1e-8*system_rhs_v.l2_norm()); - SolverCG<> cg (solver_control); - cg.solve (mass_matrix, solution_v, system_rhs_v, - PreconditionIdentity()); + // @sect4{TATForwardProblem::output_results} - std::cout << " v-equation: " << solver_control.last_step() - << " CG iterations." - << std::endl; -} + // The same holds here: the function is from + // step-23. 
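As a hypothetical variation, not something this patch does: since the data have
already been handed to a DataOut object, switching to another graphics format
only means changing the last lines of the function below, for example

    const std::string filename = "solution-" +
                                 Utilities::int_to_string (timestep_number, 3) +
                                 ".vtk";
    std::ofstream output (filename.c_str());
    data_out.write_vtk (output);

to obtain VTK files instead of gnuplot input.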
+ template + void TATForwardProblem::output_results () const + { + DataOut data_out; + data_out.attach_dof_handler (dof_handler); + data_out.add_data_vector (solution_p, "P"); + data_out.add_data_vector (solution_v, "V"); + data_out.build_patches (); - // @sect4{TATForwardProblem::output_results} + const std::string filename = "solution-" + + Utilities::int_to_string (timestep_number, 3) + + ".gnuplot"; + std::ofstream output (filename.c_str()); + data_out.write_gnuplot (output); + } - // The same holds here: the function is from - // step-23. -template -void TATForwardProblem::output_results () const -{ - DataOut data_out; - data_out.attach_dof_handler (dof_handler); - data_out.add_data_vector (solution_p, "P"); - data_out.add_data_vector (solution_v, "V"); - data_out.build_patches (); + // @sect4{TATForwardProblem::run} - const std::string filename = "solution-" + - Utilities::int_to_string (timestep_number, 3) + - ".gnuplot"; - std::ofstream output (filename.c_str()); - data_out.write_gnuplot (output); -} + // This function that does most of the work + // is pretty much again like in step-23, + // though we make things a bit clearer by + // using the vectors G1 and G2 mentioned in + // the introduction. Compared to the overall + // memory consumption of the program, the + // introduction of a few temporary vectors + // isn't doing much harm. + // + // The only changes to this function are: + // First, that we do not have to project + // initial values for the velocity $v$, since + // we know that it is zero. And second that + // we evaluate the solution at the detector + // locations computed in the + // constructor. This is done using the + // VectorTools::point_value function. These + // values are then written to a file that we + // open at the beginning of the function. + template + void TATForwardProblem::run () + { + setup_system(); + VectorTools::project (dof_handler, constraints, + QGauss(3), InitialValuesP(), + old_solution_p); + old_solution_v = 0; - // @sect4{TATForwardProblem::run} - - // This function that does most of the work - // is pretty much again like in step-23, - // though we make things a bit clearer by - // using the vectors G1 and G2 mentioned in - // the introduction. Compared to the overall - // memory consumption of the program, the - // introduction of a few temporary vectors - // isn't doing much harm. - // - // The only changes to this function are: - // First, that we do not have to project - // initial values for the velocity $v$, since - // we know that it is zero. And second that - // we evaluate the solution at the detector - // locations computed in the - // constructor. This is done using the - // VectorTools::point_value function. These - // values are then written to a file that we - // open at the beginning of the function. 
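In outline, the evaluation at the detector points mentioned here amounts to a
loop of the following kind (a sketch using this program's variable names; the
exact output formatting in the actual function may differ):

    detector_data << time;
    for (unsigned int i=0; i<detector_locations.size(); ++i)
      detector_data << ' '
                    << VectorTools::point_value (dof_handler,
                                                 solution_p,
                                                 detector_locations[i]);
    detector_data << std::endl;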
-template -void TATForwardProblem::run () -{ - setup_system(); + std::ofstream detector_data("detectors.dat"); - VectorTools::project (dof_handler, constraints, - QGauss(3), InitialValuesP(), - old_solution_p); - old_solution_v = 0; + Vector tmp (solution_p.size()); + Vector G1 (solution_p.size()); + Vector G2 (solution_v.size()); + const double end_time = 0.7; + for (timestep_number=1, time=time_step; + time<=end_time; + time+=time_step, ++timestep_number) + { + std::cout << std::endl; + std::cout<< "time_step " << timestep_number << " @ t=" << time << std::endl; - std::ofstream detector_data("detectors.dat"); + mass_matrix.vmult (G1, old_solution_p); + mass_matrix.vmult (tmp, old_solution_v); + G1.add(time_step * (1-theta), tmp); - Vector tmp (solution_p.size()); - Vector G1 (solution_p.size()); - Vector G2 (solution_v.size()); + mass_matrix.vmult (G2, old_solution_v); + laplace_matrix.vmult (tmp, old_solution_p); + G2.add (-wave_speed * wave_speed * time_step * (1-theta), tmp); - const double end_time = 0.7; - for (timestep_number=1, time=time_step; - time<=end_time; - time+=time_step, ++timestep_number) - { - std::cout << std::endl; - std::cout<< "time_step " << timestep_number << " @ t=" << time << std::endl; - - mass_matrix.vmult (G1, old_solution_p); - mass_matrix.vmult (tmp, old_solution_v); - G1.add(time_step * (1-theta), tmp); - - mass_matrix.vmult (G2, old_solution_v); - laplace_matrix.vmult (tmp, old_solution_p); - G2.add (-wave_speed * wave_speed * time_step * (1-theta), tmp); - - boundary_matrix.vmult (tmp, old_solution_p); - G2.add (wave_speed, tmp); - - system_rhs_p = G1; - system_rhs_p.add(time_step * theta , G2); - - solve_p (); - - - system_rhs_v = G2; - laplace_matrix.vmult (tmp, solution_p); - system_rhs_v.add (-time_step * theta * wave_speed * wave_speed, tmp); - - boundary_matrix.vmult (tmp, solution_p); - system_rhs_v.add (-wave_speed, tmp); - - solve_v (); - - output_results (); - - - detector_data << time; - for (unsigned int i=0 ; i::run () // program. There is nothing here that hasn't // been shown in several of the previous // programs: -int main () +int main () { try { + using namespace dealii; + using namespace Step24; + deallog.depth_console (0); + TATForwardProblem<2> forward_problem_solver; forward_problem_solver.run (); } @@ -675,7 +682,7 @@ int main () return 1; } - catch (...) + catch (...) { std::cerr << std::endl << std::endl << "----------------------------------------------------" @@ -686,6 +693,6 @@ int main () << std::endl; return 1; } - + return 0; } diff --git a/deal.II/examples/step-25/step-25.cc b/deal.II/examples/step-25/step-25.cc index f3fd473191..b274cd967f 100644 --- a/deal.II/examples/step-25/step-25.cc +++ b/deal.II/examples/step-25/step-25.cc @@ -1,5 +1,5 @@ /* $Id$ */ -/* Copyright (C) 2006, 2007, 2008, 2009 by the deal.II authors */ +/* Copyright (C) 2006, 2007, 2008, 2009, 2011 by the deal.II authors */ /* Author: Ivan Christov, Wolfgang Bangerth, Texas A&M University, 2006 */ /* */ /* This file is subject to QPL and may not be distributed */ @@ -54,705 +54,707 @@ // The last step is as in all // previous programs: -using namespace dealii; - - - // @sect3{The SineGordonProblem class template} - - // The entire algorithm for solving the - // problem is encapsulated in this class. As - // in previous example programs, the class is - // declared with a template parameter, which - // is the spatial dimension, so that we can - // solve the sine-Gordon equation in one, two - // or three spatial dimensions. 
For more on - // the dimension-independent - // class-encapsulation of the problem, the - // reader should consult step-3 and step-4. - // - // Compared to step-23 and step-24, there - // isn't anything newsworthy in the general - // structure of the program (though there is - // of course in the inner workings of the - // various functions!). The most notable - // difference is the presence of the two new - // functions compute_nl_term and - // compute_nl_matrix that - // compute the nonlinear contributions to the - // system matrix and right-hand side of the first - // equation, as discussed in the - // Introduction. In addition, we have to have - // a vector solution_update that - // contains the nonlinear update to the - // solution vector in each Newton step. - // - // As also mentioned in the introduction, we - // do not store the velocity variable in this - // program, but the mass matrix times the - // velocity. This is done in the - // M_x_velocity variable (the - // "x" is intended to stand for - // "times"). - // - // Finally, the - // output_timestep_skip - // variable stores the number of time - // steps to be taken each time before - // graphical output is to be - // generated. This is of importance - // when using fine meshes (and - // consequently small time steps) - // where we would run lots of time - // steps and create lots of output - // files of solutions that look - // almost the same in subsequent - // files. This only clogs up our - // visualization procedures and we - // should avoid creating more output - // than we are really interested - // in. Therefore, if this variable is - // set to a value $n$ bigger than one, - // output is generated only every - // $n$th time step. -template -class SineGordonProblem +namespace Step25 { - public: - SineGordonProblem (); - void run (); - - private: - void make_grid_and_dofs (); - void assemble_system (); - void compute_nl_term (const Vector &old_data, - const Vector &new_data, - Vector &nl_term) const; - void compute_nl_matrix (const Vector &old_data, - const Vector &new_data, - SparseMatrix &nl_matrix) const; - unsigned int solve (); - void output_results (const unsigned int timestep_number) const; - - Triangulation triangulation; - FE_Q fe; - DoFHandler dof_handler; - - SparsityPattern sparsity_pattern; - SparseMatrix system_matrix; - SparseMatrix mass_matrix; - SparseMatrix laplace_matrix; - - const unsigned int n_global_refinements; - - double time; - const double final_time, time_step; - const double theta; - - Vector solution, solution_update, old_solution; - Vector M_x_velocity; - Vector system_rhs; - - const unsigned int output_timestep_skip; -}; - - - // @sect3{Initial conditions} - - // In the following two classes, we first - // implement the exact solution for 1D, 2D, - // and 3D mentioned in the introduction to - // this program. This space-time solution may - // be of independent interest if one wanted - // to test the accuracy of the program by - // comparing the numerical against the - // analytic solution (note however that the - // program uses a finite domain, whereas - // these are analytic solutions for an - // unbounded domain). This may, for example, - // be done using the - // VectorTools::integrate_difference - // function. 
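A hypothetical sketch of such an accuracy check, assuming the solution vector
and the current time of this program (none of this appears in the patch):

    ExactSolution<dim> exact_solution (1, time);
    Vector<double> difference_per_cell (triangulation.n_active_cells());
    VectorTools::integrate_difference (dof_handler, solution, exact_solution,
                                       difference_per_cell,
                                       QGauss<dim>(3),
                                       VectorTools::L2_norm);
    const double L2_error = difference_per_cell.l2_norm();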
Note, again (as was already - // discussed in step-23), how we describe - // space-time functions as spatial functions - // that depend on a time variable that can be - // set and queried using the - // FunctionTime::set_time() and - // FunctionTime::get_time() member functions - // of the FunctionTime base class of the - // Function class. -template -class ExactSolution : public Function -{ - public: - ExactSolution (const unsigned int n_components = 1, - const double time = 0.) : Function(n_components, time) {} - virtual double value (const Point &p, - const unsigned int component = 0) const; -}; - -template -double ExactSolution::value (const Point &p, - const unsigned int /*component*/) const -{ - double t = this->get_time (); + using namespace dealii; - switch (dim) - { - case 1: + + // @sect3{The SineGordonProblem class template} + + // The entire algorithm for solving the + // problem is encapsulated in this class. As + // in previous example programs, the class is + // declared with a template parameter, which + // is the spatial dimension, so that we can + // solve the sine-Gordon equation in one, two + // or three spatial dimensions. For more on + // the dimension-independent + // class-encapsulation of the problem, the + // reader should consult step-3 and step-4. + // + // Compared to step-23 and step-24, there + // isn't anything newsworthy in the general + // structure of the program (though there is + // of course in the inner workings of the + // various functions!). The most notable + // difference is the presence of the two new + // functions compute_nl_term and + // compute_nl_matrix that + // compute the nonlinear contributions to the + // system matrix and right-hand side of the first + // equation, as discussed in the + // Introduction. In addition, we have to have + // a vector solution_update that + // contains the nonlinear update to the + // solution vector in each Newton step. + // + // As also mentioned in the introduction, we + // do not store the velocity variable in this + // program, but the mass matrix times the + // velocity. This is done in the + // M_x_velocity variable (the + // "x" is intended to stand for + // "times"). + // + // Finally, the + // output_timestep_skip + // variable stores the number of time + // steps to be taken each time before + // graphical output is to be + // generated. This is of importance + // when using fine meshes (and + // consequently small time steps) + // where we would run lots of time + // steps and create lots of output + // files of solutions that look + // almost the same in subsequent + // files. This only clogs up our + // visualization procedures and we + // should avoid creating more output + // than we are really interested + // in. Therefore, if this variable is + // set to a value $n$ bigger than one, + // output is generated only every + // $n$th time step. 
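In code, the effect described in the last paragraph comes down to a guard of
the following kind around the output call in the time loop (a sketch, not a
quotation from the file):

    // write graphical output only every output_timestep_skip-th step
    if (timestep_number % output_timestep_skip == 0)
      output_results (timestep_number);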
+ template + class SineGordonProblem + { + public: + SineGordonProblem (); + void run (); + + private: + void make_grid_and_dofs (); + void assemble_system (); + void compute_nl_term (const Vector &old_data, + const Vector &new_data, + Vector &nl_term) const; + void compute_nl_matrix (const Vector &old_data, + const Vector &new_data, + SparseMatrix &nl_matrix) const; + unsigned int solve (); + void output_results (const unsigned int timestep_number) const; + + Triangulation triangulation; + FE_Q fe; + DoFHandler dof_handler; + + SparsityPattern sparsity_pattern; + SparseMatrix system_matrix; + SparseMatrix mass_matrix; + SparseMatrix laplace_matrix; + + const unsigned int n_global_refinements; + + double time; + const double final_time, time_step; + const double theta; + + Vector solution, solution_update, old_solution; + Vector M_x_velocity; + Vector system_rhs; + + const unsigned int output_timestep_skip; + }; + + + // @sect3{Initial conditions} + + // In the following two classes, we first + // implement the exact solution for 1D, 2D, + // and 3D mentioned in the introduction to + // this program. This space-time solution may + // be of independent interest if one wanted + // to test the accuracy of the program by + // comparing the numerical against the + // analytic solution (note however that the + // program uses a finite domain, whereas + // these are analytic solutions for an + // unbounded domain). This may, for example, + // be done using the + // VectorTools::integrate_difference + // function. Note, again (as was already + // discussed in step-23), how we describe + // space-time functions as spatial functions + // that depend on a time variable that can be + // set and queried using the + // FunctionTime::set_time() and + // FunctionTime::get_time() member functions + // of the FunctionTime base class of the + // Function class. + template + class ExactSolution : public Function + { + public: + ExactSolution (const unsigned int n_components = 1, + const double time = 0.) : Function(n_components, time) {} + virtual double value (const Point &p, + const unsigned int component = 0) const; + }; + + template + double ExactSolution::value (const Point &p, + const unsigned int /*component*/) const + { + double t = this->get_time (); + + switch (dim) { - const double m = 0.5; - const double c1 = 0.; - const double c2 = 0.; - return -4.*std::atan (m / - std::sqrt(1.-m*m) * - std::sin(std::sqrt(1.-m*m)*t+c2) / - std::cosh(m*p[0]+c1)); + case 1: + { + const double m = 0.5; + const double c1 = 0.; + const double c2 = 0.; + return -4.*std::atan (m / + std::sqrt(1.-m*m) * + std::sin(std::sqrt(1.-m*m)*t+c2) / + std::cosh(m*p[0]+c1)); + } + + case 2: + { + const double theta = numbers::PI/4.; + const double lambda = 1.; + const double a0 = 1.; + const double s = 1.; + const double arg = p[0] * std::cos(theta) + + std::sin(theta) * + (p[1] * std::cosh(lambda) + + t * std::sinh(lambda)); + return 4.*std::atan(a0*std::exp(s*arg)); + } + + case 3: + { + double theta = numbers::PI/4; + double phi = numbers::PI/4; + double tau = 1.; + double c0 = 1.; + double s = 1.; + double arg = p[0]*std::cos(theta) + + p[1]*std::sin(theta) * std::cos(phi) + + std::sin(theta) * std::sin(phi) * + (p[2]*std::cosh(tau)+t*std::sinh(tau)); + return 4.*std::atan(c0*std::exp(s*arg)); + } + + default: + Assert (false, ExcNotImplemented()); + return -1e8; } - - case 2: + } + + // In the second part of this section, we + // provide the initial conditions. 
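// As a quick standalone cross-check of the closed-form solutions just
// shown, restricted to the 1d "breather" branch and evaluated at sample
// points chosen purely for illustration, the formula can be tabulated
// with nothing but the C++ standard library:

#include <cmath>
#include <cstdio>

// Plain-C++ transcription of the dim==1 branch of ExactSolution::value()
// above; x plays the role of p[0] and t of this->get_time().
double breather (const double x, const double t)
{
  const double m  = 0.5;
  const double c1 = 0.;
  const double c2 = 0.;
  return -4. * std::atan (m / std::sqrt(1.-m*m)
                          * std::sin (std::sqrt(1.-m*m)*t + c2)
                          / std::cosh (m*x + c1));
}

int main ()
{
  // Sample the solution at the start time used by this program, t=-5.4414,
  // at a few points of the interval [-10,10] on which it will be solved.
  for (double x = -10.; x <= 10.; x += 5.)
    std::printf ("u(%5.1f, -5.4414) = % .6f\n", x, breather (x, -5.4414));
}

// These are the values the exact-solution object provides when
// VectorTools::integrate_difference is used to measure the error of the
// discrete solution.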
We are lazy + // (and cautious) and don't want to implement + // the same functions as above a second + // time. Rather, if we are queried for + // initial conditions, we create an object + // ExactSolution, set it to the + // correct time, and let it compute whatever + // values the exact solution has at that + // time: + template + class InitialValues : public Function + { + public: + InitialValues (const unsigned int n_components = 1, + const double time = 0.) + : + Function(n_components, time) + {} + + virtual double value (const Point &p, + const unsigned int component = 0) const; + }; + + template + double InitialValues::value (const Point &p, + const unsigned int component) const + { + return ExactSolution(1, this->get_time()).value (p, component); + } + + + + // @sect3{Implementation of the SineGordonProblem class} + + // Let's move on to the implementation of the + // main class, as it implements the algorithm + // outlined in the introduction. + + // @sect4{SineGordonProblem::SineGordonProblem} + + // This is the constructor of the + // SineGordonProblem class. It + // specifies the desired polynomial degree of + // the finite elements, associates a + // DoFHandler to the + // triangulation object (just as + // in the example programs step-3 and + // step-4), initializes the current or + // initial time, the final time, the time + // step size, and the value of $\theta$ for + // the time stepping scheme. Since the + // solutions we compute here are + // time-periodic, the actual value of the + // start-time doesn't matter, and we choose + // it so that we start at an interesting + // time. + // + // Note that if we were to chose the explicit + // Euler time stepping scheme ($\theta = 0$), + // then we must pick a time step $k \le h$, + // otherwise the scheme is not stable and + // oscillations might arise in the + // solution. The Crank-Nicolson scheme + // ($\theta = \frac{1}{2}$) and the implicit + // Euler scheme ($\theta=1$) do not suffer + // from this deficiency, since they are + // unconditionally stable. However, even then + // the time step should be chosen to be on + // the order of $h$ in order to obtain a good + // solution. Since we know that our mesh + // results from the uniform subdivision of a + // rectangle, we can compute that time step + // easily; if we had a different domain, the + // technique in step-24 using + // GridTools::minimal_cell_diameter would + // work as well. + template + SineGordonProblem::SineGordonProblem () + : + fe (1), + dof_handler (triangulation), + n_global_refinements (6), + time (-5.4414), + final_time (2.7207), + time_step (10*1./std::pow(2.,1.*n_global_refinements)), + theta (0.5), + output_timestep_skip (1) + {} + + // @sect4{SineGordonProblem::make_grid_and_dofs} + + // This function creates a rectangular grid + // in dim dimensions and refines + // it several times. Also, all matrix and + // vector members of the + // SineGordonProblem class are + // initialized to their appropriate sizes + // once the degrees of freedom have been + // assembled. Like step-24, we use the + // MatrixCreator class to + // generate a mass matrix $M$ and a Laplace + // matrix $A$ and store them in the + // appropriate variables for the remainder of + // the program's life. 
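// Before the implementation, a short back-of-the-envelope check of the
// numbers chosen in the constructor above. It assumes, as the mesh
// created below does, a cube of side length 20 refined
// n_global_refinements times; the point is only that the time step ends
// up on the order of the mesh size, as the discussion of the
// Crank-Nicolson scheme above suggests it should:

#include <cmath>
#include <cstdio>

int main ()
{
  // The same numbers as in the constructor: six global refinements and
  // time_step = 10*1./std::pow(2.,1.*n_global_refinements).
  const unsigned int n_global_refinements = 6;
  const double side_length = 20.;

  const double h = side_length / std::pow (2., 1.*n_global_refinements);
  const double k = 10. * 1. / std::pow (2., 1.*n_global_refinements);

  std::printf ("mesh size h = %g\n", h);          // 0.3125
  std::printf ("time step k = %g\n", k);          // 0.15625
  std::printf ("k <= h      : %s\n", (k <= h ? "yes" : "no"));
}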
+ template + void SineGordonProblem::make_grid_and_dofs () + { + GridGenerator::hyper_cube (triangulation, -10, 10); + triangulation.refine_global (n_global_refinements); + + std::cout << " Number of active cells: " + << triangulation.n_active_cells() + << std::endl + << " Total number of cells: " + << triangulation.n_cells() + << std::endl; + + dof_handler.distribute_dofs (fe); + + std::cout << " Number of degrees of freedom: " + << dof_handler.n_dofs() + << std::endl; + + sparsity_pattern.reinit (dof_handler.n_dofs(), + dof_handler.n_dofs(), + dof_handler.max_couplings_between_dofs()); + DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); + sparsity_pattern.compress (); + + system_matrix.reinit (sparsity_pattern); + mass_matrix.reinit (sparsity_pattern); + laplace_matrix.reinit (sparsity_pattern); + + MatrixCreator::create_mass_matrix (dof_handler, + QGauss(3), + mass_matrix); + MatrixCreator::create_laplace_matrix (dof_handler, + QGauss(3), + laplace_matrix); + + solution.reinit (dof_handler.n_dofs()); + solution_update.reinit (dof_handler.n_dofs()); + old_solution.reinit (dof_handler.n_dofs()); + M_x_velocity.reinit (dof_handler.n_dofs()); + system_rhs.reinit (dof_handler.n_dofs()); + } + + // @sect4{SineGordonProblem::assemble_system} + + // This functions assembles the system matrix + // and right-hand side vector for each + // iteration of Newton's method. The reader + // should refer to the Introduction for the + // explicit formulas for the system matrix + // and right-hand side. + // + // Note that during each time step, we have to + // add up the various contributions to the + // matrix and right hand sides. In contrast + // to step-23 and step-24, this requires + // assembling a few more terms, since they + // depend on the solution of the previous + // time step or previous nonlinear step. We + // use the functions + // compute_nl_matrix and + // compute_nl_term to do this, + // while the present function provides the + // top-level logic. + template + void SineGordonProblem::assemble_system () + { + // First we assemble the Jacobian + // matrix $F'_h(U^{n,l})$, where + // $U^{n,l}$ is stored in the vector + // solution for + // convenience. + system_matrix = 0; + system_matrix.copy_from (mass_matrix); + system_matrix.add (std::pow(time_step*theta,2), laplace_matrix); + + SparseMatrix tmp_matrix (sparsity_pattern); + compute_nl_matrix (old_solution, solution, tmp_matrix); + system_matrix.add (-std::pow(time_step*theta,2), tmp_matrix); + + // Then, we compute the right-hand + // side vector $-F_h(U^{n,l})$. + system_rhs = 0; + + tmp_matrix = 0; + tmp_matrix.copy_from (mass_matrix); + tmp_matrix.add (std::pow(time_step*theta,2), laplace_matrix); + + Vector tmp_vector (solution.size()); + tmp_matrix.vmult (tmp_vector, solution); + system_rhs += tmp_vector; + + tmp_matrix = 0; + tmp_matrix.copy_from (mass_matrix); + tmp_matrix.add (-std::pow(time_step,2)*theta*(1-theta), laplace_matrix); + + tmp_vector = 0; + tmp_matrix.vmult (tmp_vector, old_solution); + system_rhs -= tmp_vector; + + system_rhs.add (-time_step, M_x_velocity); + + tmp_vector = 0; + compute_nl_term (old_solution, solution, tmp_vector); + system_rhs.add (std::pow(time_step,2)*theta, tmp_vector); + + system_rhs *= -1; + } + + // @sect4{SineGordonProblem::compute_nl_term} + + // This function computes the vector + // $S(\cdot,\cdot)$, which appears in the + // nonlinear term in the both equations of + // the split formulation. 
This function not + // only simplifies the repeated computation + // of this term, but it is also a fundamental + // part of the nonlinear iterative solver + // that we use when the time stepping is + // implicit (i.e. $\theta\ne 0$). Moreover, + // we must allow the function to receive as + // input an "old" and a "new" solution. These + // may not be the actual solutions of the + // problem stored in + // old_solution and + // solution, but are simply the + // two functions we linearize about. For the + // purposes of this function, let us call the + // first two arguments $w_{\mathrm{old}}$ and + // $w_{\mathrm{new}}$ in the documentation of + // this class below, respectively. + // + // As a side-note, it is perhaps worth + // investigating what order quadrature + // formula is best suited for this type of + // integration. Since $\sin(\cdot)$ is not a + // polynomial, there are probably no + // quadrature formulas that can integrate + // these terms exactly. It is usually + // sufficient to just make sure that the + // right hand side is integrated up to the + // same order of accuracy as the + // discretization scheme is, but it may be + // possible to improve on the constant in the + // asympotitic statement of convergence by + // choosing a more accurate quadrature + // formula. + template + void SineGordonProblem::compute_nl_term (const Vector &old_data, + const Vector &new_data, + Vector &nl_term) const + { + const QGauss quadrature_formula (3); + FEValues fe_values (fe, quadrature_formula, + update_values | + update_JxW_values | + update_quadrature_points); + + const unsigned int dofs_per_cell = fe.dofs_per_cell; + const unsigned int n_q_points = quadrature_formula.size(); + + Vector local_nl_term (dofs_per_cell); + std::vector local_dof_indices (dofs_per_cell); + std::vector old_data_values (n_q_points); + std::vector new_data_values (n_q_points); + + typename DoFHandler::active_cell_iterator + cell = dof_handler.begin_active(), + endc = dof_handler.end(); + + for (; cell!=endc; ++cell) { - const double theta = numbers::PI/4.; - const double lambda = 1.; - const double a0 = 1.; - const double s = 1.; - const double arg = p[0] * std::cos(theta) + - std::sin(theta) * - (p[1] * std::cosh(lambda) + - t * std::sinh(lambda)); - return 4.*std::atan(a0*std::exp(s*arg)); + // Once we re-initialize our + // FEValues instantiation + // to the current cell, we make use of + // the get_function_values + // routine to get the values of the + // "old" data (presumably at + // $t=t_{n-1}$) and the "new" data + // (presumably at $t=t_n$) at the nodes + // of the chosen quadrature formula. + fe_values.reinit (cell); + fe_values.get_function_values (old_data, old_data_values); + fe_values.get_function_values (new_data, new_data_values); + + // Now, we can evaluate $\int_K + // \sin\left[\theta w_{\mathrm{new}} + + // (1-\theta) w_{\mathrm{old}}\right] + // \,\varphi_j\,\mathrm{d}x$ using the + // desired quadrature formula. + for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); + + for (unsigned int i=0; icompute_nl_term, we must + // allow this function to receive as input an + // "old" and a "new" solution, which we again + // call $w_{\mathrm{old}}$ and + // $w_{\mathrm{new}}$ below, respectively. 
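// The quadrature remark above can be made concrete with a small
// standalone experiment that uses the usual two- and three-point Gauss
// rules, hard-coded by hand: integrate sin(x) over a single "cell" and
// compare against the exact antiderivative. The interval length below is
// chosen only for illustration, roughly matching the mesh width of this
// program:

#include <cmath>
#include <cstdio>

// Integrate sin(x) over [a,b] with a Gauss rule given on the reference
// interval [-1,1] by points xi and weights w.
double gauss_integrate_sin (const double a, const double b,
                            const double *xi, const double *w,
                            const unsigned int n_points)
{
  double sum = 0.;
  for (unsigned int q=0; q<n_points; ++q)
    sum += w[q] * std::sin ((a+b)/2. + (b-a)/2.*xi[q]);
  return (b-a)/2. * sum;
}

int main ()
{
  const double a = 0., b = 0.3125;
  const double exact = std::cos(a) - std::cos(b);

  const double xi2[2] = {-1./std::sqrt(3.), 1./std::sqrt(3.)};
  const double w2[2]  = {1., 1.};
  const double xi3[3] = {-std::sqrt(3./5.), 0., std::sqrt(3./5.)};
  const double w3[3]  = {5./9., 8./9., 5./9.};

  std::printf ("2-point Gauss error: %g\n",
               std::fabs (gauss_integrate_sin (a, b, xi2, w2, 2) - exact));
  std::printf ("3-point Gauss error: %g\n",
               std::fabs (gauss_integrate_sin (a, b, xi3, w3, 3) - exact));
}

// Both errors turn out to be far below the discretization error of linear
// elements on a cell of this size, which is why the three-point rule used
// above is more than sufficient here.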
+ template + void SineGordonProblem::compute_nl_matrix (const Vector &old_data, + const Vector &new_data, + SparseMatrix &nl_matrix) const + { + QGauss quadrature_formula (3); + FEValues fe_values (fe, quadrature_formula, + update_values | update_JxW_values | update_quadrature_points); + + const unsigned int dofs_per_cell = fe.dofs_per_cell; + const unsigned int n_q_points = quadrature_formula.size(); + + FullMatrix local_nl_matrix (dofs_per_cell, dofs_per_cell); + std::vector local_dof_indices (dofs_per_cell); + std::vector old_data_values (n_q_points); + std::vector new_data_values (n_q_points); + + typename DoFHandler::active_cell_iterator + cell = dof_handler.begin_active(), + endc = dof_handler.end(); + + for (; cell!=endc; ++cell) { - double theta = numbers::PI/4; - double phi = numbers::PI/4; - double tau = 1.; - double c0 = 1.; - double s = 1.; - double arg = p[0]*std::cos(theta) + - p[1]*std::sin(theta) * std::cos(phi) + - std::sin(theta) * std::sin(phi) * - (p[2]*std::cosh(tau)+t*std::sinh(tau)); - return 4.*std::atan(c0*std::exp(s*arg)); + // Again, first we + // re-initialize our + // FEValues + // instantiation to the current + // cell. + fe_values.reinit (cell); + fe_values.get_function_values (old_data, old_data_values); + fe_values.get_function_values (new_data, new_data_values); + + // Then, we evaluate $\int_K + // \cos\left[\theta + // w_{\mathrm{new}} + + // (1-\theta) + // w_{\mathrm{old}}\right]\, + // \varphi_i\, + // \varphi_j\,\mathrm{d}x$ + // using the desired quadrature + // formula. + for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); + + for (unsigned int i=0; iExactSolution, set it to the - // correct time, and let it compute whatever - // values the exact solution has at that - // time: -template -class InitialValues : public Function -{ - public: - InitialValues (const unsigned int n_components = 1, - const double time = 0.) - : - Function(n_components, time) - {} - - virtual double value (const Point &p, - const unsigned int component = 0) const; -}; - -template -double InitialValues::value (const Point &p, - const unsigned int component) const -{ - return ExactSolution(1, this->get_time()).value (p, component); -} - - - - // @sect3{Implementation of the SineGordonProblem class} - - // Let's move on to the implementation of the - // main class, as it implements the algorithm - // outlined in the introduction. - - // @sect4{SineGordonProblem::SineGordonProblem} - - // This is the constructor of the - // SineGordonProblem class. It - // specifies the desired polynomial degree of - // the finite elements, associates a - // DoFHandler to the - // triangulation object (just as - // in the example programs step-3 and - // step-4), initializes the current or - // initial time, the final time, the time - // step size, and the value of $\theta$ for - // the time stepping scheme. Since the - // solutions we compute here are - // time-periodic, the actual value of the - // start-time doesn't matter, and we choose - // it so that we start at an interesting - // time. - // - // Note that if we were to chose the explicit - // Euler time stepping scheme ($\theta = 0$), - // then we must pick a time step $k \le h$, - // otherwise the scheme is not stable and - // oscillations might arise in the - // solution. The Crank-Nicolson scheme - // ($\theta = \frac{1}{2}$) and the implicit - // Euler scheme ($\theta=1$) do not suffer - // from this deficiency, since they are - // unconditionally stable. 
However, even then - // the time step should be chosen to be on - // the order of $h$ in order to obtain a good - // solution. Since we know that our mesh - // results from the uniform subdivision of a - // rectangle, we can compute that time step - // easily; if we had a different domain, the - // technique in step-24 using - // GridTools::minimal_cell_diameter would - // work as well. -template -SineGordonProblem::SineGordonProblem () - : - fe (1), - dof_handler (triangulation), - n_global_refinements (6), - time (-5.4414), - final_time (2.7207), - time_step (10*1./std::pow(2.,1.*n_global_refinements)), - theta (0.5), - output_timestep_skip (1) -{} - - // @sect4{SineGordonProblem::make_grid_and_dofs} - - // This function creates a rectangular grid - // in dim dimensions and refines - // it several times. Also, all matrix and - // vector members of the - // SineGordonProblem class are - // initialized to their appropriate sizes - // once the degrees of freedom have been - // assembled. Like step-24, we use the - // MatrixCreator class to - // generate a mass matrix $M$ and a Laplace - // matrix $A$ and store them in the - // appropriate variables for the remainder of - // the program's life. -template -void SineGordonProblem::make_grid_and_dofs () -{ - GridGenerator::hyper_cube (triangulation, -10, 10); - triangulation.refine_global (n_global_refinements); - - std::cout << " Number of active cells: " - << triangulation.n_active_cells() - << std::endl - << " Total number of cells: " - << triangulation.n_cells() - << std::endl; - - dof_handler.distribute_dofs (fe); - - std::cout << " Number of degrees of freedom: " - << dof_handler.n_dofs() - << std::endl; - - sparsity_pattern.reinit (dof_handler.n_dofs(), - dof_handler.n_dofs(), - dof_handler.max_couplings_between_dofs()); - DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); - sparsity_pattern.compress (); - - system_matrix.reinit (sparsity_pattern); - mass_matrix.reinit (sparsity_pattern); - laplace_matrix.reinit (sparsity_pattern); - - MatrixCreator::create_mass_matrix (dof_handler, - QGauss(3), - mass_matrix); - MatrixCreator::create_laplace_matrix (dof_handler, - QGauss(3), - laplace_matrix); - - solution.reinit (dof_handler.n_dofs()); - solution_update.reinit (dof_handler.n_dofs()); - old_solution.reinit (dof_handler.n_dofs()); - M_x_velocity.reinit (dof_handler.n_dofs()); - system_rhs.reinit (dof_handler.n_dofs()); -} - - // @sect4{SineGordonProblem::assemble_system} - - // This functions assembles the system matrix - // and right-hand side vector for each - // iteration of Newton's method. The reader - // should refer to the Introduction for the - // explicit formulas for the system matrix - // and right-hand side. - // - // Note that during each time step, we have to - // add up the various contributions to the - // matrix and right hand sides. In contrast - // to step-23 and step-24, this requires - // assembling a few more terms, since they - // depend on the solution of the previous - // time step or previous nonlinear step. We - // use the functions - // compute_nl_matrix and - // compute_nl_term to do this, - // while the present function provides the - // top-level logic. -template -void SineGordonProblem::assemble_system () -{ - // First we assemble the Jacobian - // matrix $F'_h(U^{n,l})$, where - // $U^{n,l}$ is stored in the vector - // solution for - // convenience. 
- system_matrix = 0; - system_matrix.copy_from (mass_matrix); - system_matrix.add (std::pow(time_step*theta,2), laplace_matrix); - - SparseMatrix tmp_matrix (sparsity_pattern); - compute_nl_matrix (old_solution, solution, tmp_matrix); - system_matrix.add (-std::pow(time_step*theta,2), tmp_matrix); - - // Then, we compute the right-hand - // side vector $-F_h(U^{n,l})$. - system_rhs = 0; - - tmp_matrix = 0; - tmp_matrix.copy_from (mass_matrix); - tmp_matrix.add (std::pow(time_step*theta,2), laplace_matrix); - - Vector tmp_vector (solution.size()); - tmp_matrix.vmult (tmp_vector, solution); - system_rhs += tmp_vector; - - tmp_matrix = 0; - tmp_matrix.copy_from (mass_matrix); - tmp_matrix.add (-std::pow(time_step,2)*theta*(1-theta), laplace_matrix); - - tmp_vector = 0; - tmp_matrix.vmult (tmp_vector, old_solution); - system_rhs -= tmp_vector; - - system_rhs.add (-time_step, M_x_velocity); - - tmp_vector = 0; - compute_nl_term (old_solution, solution, tmp_vector); - system_rhs.add (std::pow(time_step,2)*theta, tmp_vector); - - system_rhs *= -1; -} - - // @sect4{SineGordonProblem::compute_nl_term} - - // This function computes the vector - // $S(\cdot,\cdot)$, which appears in the - // nonlinear term in the both equations of - // the split formulation. This function not - // only simplifies the repeated computation - // of this term, but it is also a fundamental - // part of the nonlinear iterative solver - // that we use when the time stepping is - // implicit (i.e. $\theta\ne 0$). Moreover, - // we must allow the function to receive as - // input an "old" and a "new" solution. These - // may not be the actual solutions of the - // problem stored in - // old_solution and - // solution, but are simply the - // two functions we linearize about. For the - // purposes of this function, let us call the - // first two arguments $w_{\mathrm{old}}$ and - // $w_{\mathrm{new}}$ in the documentation of - // this class below, respectively. - // - // As a side-note, it is perhaps worth - // investigating what order quadrature - // formula is best suited for this type of - // integration. Since $\sin(\cdot)$ is not a - // polynomial, there are probably no - // quadrature formulas that can integrate - // these terms exactly. It is usually - // sufficient to just make sure that the - // right hand side is integrated up to the - // same order of accuracy as the - // discretization scheme is, but it may be - // possible to improve on the constant in the - // asympotitic statement of convergence by - // choosing a more accurate quadrature - // formula. 
-template -void SineGordonProblem::compute_nl_term (const Vector &old_data, - const Vector &new_data, - Vector &nl_term) const -{ - const QGauss quadrature_formula (3); - FEValues fe_values (fe, quadrature_formula, - update_values | - update_JxW_values | - update_quadrature_points); - - const unsigned int dofs_per_cell = fe.dofs_per_cell; - const unsigned int n_q_points = quadrature_formula.size(); - - Vector local_nl_term (dofs_per_cell); - std::vector local_dof_indices (dofs_per_cell); - std::vector old_data_values (n_q_points); - std::vector new_data_values (n_q_points); - - typename DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(), - endc = dof_handler.end(); - - for (; cell!=endc; ++cell) - { - // Once we re-initialize our - // FEValues instantiation - // to the current cell, we make use of - // the get_function_values - // routine to get the values of the - // "old" data (presumably at - // $t=t_{n-1}$) and the "new" data - // (presumably at $t=t_n$) at the nodes - // of the chosen quadrature formula. - fe_values.reinit (cell); - fe_values.get_function_values (old_data, old_data_values); - fe_values.get_function_values (new_data, new_data_values); - - // Now, we can evaluate $\int_K - // \sin\left[\theta w_{\mathrm{new}} + - // (1-\theta) w_{\mathrm{old}}\right] - // \,\varphi_j\,\mathrm{d}x$ using the - // desired quadrature formula. - for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); - - for (unsigned int i=0; icompute_nl_term, we must - // allow this function to receive as input an - // "old" and a "new" solution, which we again - // call $w_{\mathrm{old}}$ and - // $w_{\mathrm{new}}$ below, respectively. -template -void SineGordonProblem::compute_nl_matrix (const Vector &old_data, - const Vector &new_data, - SparseMatrix &nl_matrix) const -{ - QGauss quadrature_formula (3); - FEValues fe_values (fe, quadrature_formula, - update_values | update_JxW_values | update_quadrature_points); - - const unsigned int dofs_per_cell = fe.dofs_per_cell; - const unsigned int n_q_points = quadrature_formula.size(); - - FullMatrix local_nl_matrix (dofs_per_cell, dofs_per_cell); - std::vector local_dof_indices (dofs_per_cell); - std::vector old_data_values (n_q_points); - std::vector new_data_values (n_q_points); - - typename DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(), - endc = dof_handler.end(); - - for (; cell!=endc; ++cell) - { - // Again, first we - // re-initialize our - // FEValues - // instantiation to the current - // cell. - fe_values.reinit (cell); - fe_values.get_function_values (old_data, old_data_values); - fe_values.get_function_values (new_data, new_data_values); - - // Then, we evaluate $\int_K - // \cos\left[\theta - // w_{\mathrm{new}} + - // (1-\theta) - // w_{\mathrm{old}}\right]\, - // \varphi_i\, - // \varphi_j\,\mathrm{d}x$ - // using the desired quadrature - // formula. - for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); - - for (unsigned int i=0; isolution_update and used to update - // solution in the - // run function. - // - // Note that we re-set the solution update to - // zero before solving for it. This is not - // necessary: iterative solvers can start - // from any point and converge to the correct - // solution. 
If one has a good estimate about - // the solution of a linear system, it may be - // worthwhile to start from that vector, but - // as a general observation it is a fact that - // the starting point doesn't matter very - // much: it has to be a very, very good guess - // to reduce the number of iterations by more - // than a few. It turns out that for this problem, - // using the previous nonlinear update as a - // starting point actually hurts convergence and - // increases the number of iterations needed, - // so we simply set it to zero. - // - // The function returns the number of - // iterations it took to converge to a - // solution. This number will later be used - // to generate output on the screen showing - // how many iterations were needed in each - // nonlinear iteration. -template -unsigned int -SineGordonProblem::solve () -{ - SolverControl solver_control (1000, 1e-12*system_rhs.l2_norm()); - SolverCG<> cg (solver_control); - - PreconditionSSOR<> preconditioner; - preconditioner.initialize(system_matrix, 1.2); - - solution_update = 0; - cg.solve (system_matrix, solution_update, - system_rhs, - preconditioner); - - return solver_control.last_step(); -} - - // @sect4{SineGordonProblem::output_results} - // This function outputs the results to a - // file. It is pretty much identical to the - // respective functions in step-23 and - // step-24: -template -void -SineGordonProblem::output_results (const unsigned int timestep_number) const -{ - DataOut data_out; - data_out.attach_dof_handler (dof_handler); - data_out.add_data_vector (solution, "u"); - data_out.build_patches (); + // @sect4{SineGordonProblem::solve} - const std::string filename = "solution-" + - Utilities::int_to_string (timestep_number, 3) + - ".vtk"; - - std::ofstream output (filename.c_str()); - data_out.write_vtk (output); -} - - // @sect4{SineGordonProblem::run} - - // This function has the top-level - // control over everything: it runs - // the (outer) time-stepping loop, - // the (inner) nonlinear-solver loop, - // and outputs the solution after each - // time step. -template -void SineGordonProblem::run () -{ - make_grid_and_dofs (); - - // To aknowledge the initial - // condition, we must use the - // function $u_0(x)$ to compute - // $U^0$. To this end, below we - // will create an object of type - // InitialValues; note - // that when we create this object - // (which is derived from the - // Function class), we - // set its internal time variable - // to $t_0$, to indicate that the - // initial condition is a function - // of space and time evaluated at - // $t=t_0$. + // As discussed in the Introduction, this + // function uses the CG iterative solver on + // the linear system of equations resulting + // from the finite element spatial + // discretization of each iteration of + // Newton's method for the (nonlinear) first + // equation of the split formulation. The + // solution to the system is, in fact, + // $\delta U^{n,l}$ so it is stored in + // solution_update and used to update + // solution in the + // run function. + // + // Note that we re-set the solution update to + // zero before solving for it. This is not + // necessary: iterative solvers can start + // from any point and converge to the correct + // solution. 
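// To illustrate the two points made here, namely that the CG iteration
// may start from an arbitrary vector and that the stopping tolerance is
// chosen relative to the right hand side norm (1e-12 times
// system_rhs.l2_norm() in the code below), here is a minimal,
// self-contained conjugate gradient sketch on a tiny symmetric positive
// definite system. It is purely illustrative and not the library's
// implementation:

#include <cmath>
#include <cstdio>

int main ()
{
  // A small symmetric positive definite system A x = b.
  const unsigned int n = 3;
  const double A[3][3] = {{4., 1., 0.},
                          {1., 3., 1.},
                          {0., 1., 2.}};
  const double b[3] = {1., 2., 3.};

  // Start from the zero vector, just as solution_update is zeroed below.
  double x[3] = {0., 0., 0.};
  double r[3], p[3];
  double rr = 0., b_norm = 0.;
  for (unsigned int i=0; i<n; ++i)
    {
      r[i] = b[i];                     // r = b - A*0 = b
      p[i] = r[i];
      rr     += r[i]*r[i];
      b_norm += b[i]*b[i];
    }
  b_norm = std::sqrt (b_norm);

  const double tolerance = 1e-12 * b_norm;   // relative stopping criterion
  unsigned int iteration = 0;
  while (std::sqrt (rr) > tolerance)
    {
      double Ap[3] = {0., 0., 0.};
      for (unsigned int i=0; i<n; ++i)
        for (unsigned int j=0; j<n; ++j)
          Ap[i] += A[i][j] * p[j];

      double pAp = 0.;
      for (unsigned int i=0; i<n; ++i)
        pAp += p[i] * Ap[i];

      const double alpha = rr / pAp;
      double rr_new = 0.;
      for (unsigned int i=0; i<n; ++i)
        {
          x[i] += alpha * p[i];
          r[i] -= alpha * Ap[i];
          rr_new += r[i]*r[i];
        }

      for (unsigned int i=0; i<n; ++i)
        p[i] = r[i] + (rr_new/rr) * p[i];
      rr = rr_new;
      ++iteration;
    }

  std::printf ("converged in %u iterations, ||r|| = %g\n",
               iteration, std::sqrt (rr));
}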
If one has a good estimate about + // the solution of a linear system, it may be + // worthwhile to start from that vector, but + // as a general observation it is a fact that + // the starting point doesn't matter very + // much: it has to be a very, very good guess + // to reduce the number of iterations by more + // than a few. It turns out that for this problem, + // using the previous nonlinear update as a + // starting point actually hurts convergence and + // increases the number of iterations needed, + // so we simply set it to zero. // - // Then we produce $U^0$ by projecting - // $u_0(x)$ onto the grid using - // VectorTools::project. We - // have to use the same construct using - // hanging node constraints as in step-21: - // the VectorTools::project function - // requires a hanging node constraints - // object, but to be used we first need to - // close it: + // The function returns the number of + // iterations it took to converge to a + // solution. This number will later be used + // to generate output on the screen showing + // how many iterations were needed in each + // nonlinear iteration. + template + unsigned int + SineGordonProblem::solve () + { + SolverControl solver_control (1000, 1e-12*system_rhs.l2_norm()); + SolverCG<> cg (solver_control); + + PreconditionSSOR<> preconditioner; + preconditioner.initialize(system_matrix, 1.2); + + solution_update = 0; + cg.solve (system_matrix, solution_update, + system_rhs, + preconditioner); + + return solver_control.last_step(); + } + + // @sect4{SineGordonProblem::output_results} + + // This function outputs the results to a + // file. It is pretty much identical to the + // respective functions in step-23 and + // step-24: + template + void + SineGordonProblem::output_results (const unsigned int timestep_number) const + { + DataOut data_out; + + data_out.attach_dof_handler (dof_handler); + data_out.add_data_vector (solution, "u"); + data_out.build_patches (); + + const std::string filename = "solution-" + + Utilities::int_to_string (timestep_number, 3) + + ".vtk"; + + std::ofstream output (filename.c_str()); + data_out.write_vtk (output); + } + + // @sect4{SineGordonProblem::run} + + // This function has the top-level + // control over everything: it runs + // the (outer) time-stepping loop, + // the (inner) nonlinear-solver loop, + // and outputs the solution after each + // time step. + template + void SineGordonProblem::run () + { + make_grid_and_dofs (); + + // To aknowledge the initial + // condition, we must use the + // function $u_0(x)$ to compute + // $U^0$. To this end, below we + // will create an object of type + // InitialValues; note + // that when we create this object + // (which is derived from the + // Function class), we + // set its internal time variable + // to $t_0$, to indicate that the + // initial condition is a function + // of space and time evaluated at + // $t=t_0$. + // + // Then we produce $U^0$ by projecting + // $u_0(x)$ onto the grid using + // VectorTools::project. We + // have to use the same construct using + // hanging node constraints as in step-21: + // the VectorTools::project function + // requires a hanging node constraints + // object, but to be used we first need to + // close it: { ConstraintMatrix constraints; constraints.close(); @@ -763,117 +765,118 @@ void SineGordonProblem::run () solution); } - // For completeness, we output the - // zeroth time step to a file just - // like any other other time step. 
- output_results (0); - - // Now we perform the time - // stepping: at every time step we - // solve the matrix equation(s) - // corresponding to the finite - // element discretization of the - // problem, and then advance our - // solution according to the time - // stepping formulas we discussed - // in the Introduction. - unsigned int timestep_number = 1; - for (time+=time_step; time<=final_time; time+=time_step, ++timestep_number) - { - old_solution = solution; - - std::cout << std::endl - << "Time step #" << timestep_number << "; " - << "advancing to t = " << time << "." - << std::endl; - - // At the beginning of each - // time step we must solve the - // nonlinear equation in the - // split formulation via - // Newton's method --- - // i.e. solve for $\delta - // U^{n,l}$ then compute - // $U^{n,l+1}$ and so on. The - // stopping criterion for this - // nonlinear iteration is that - // $\|F_h(U^{n,l})\|_2 \le - // 10^{-6} - // \|F_h(U^{n,0})\|_2$. Consequently, - // we need to record the norm - // of the residual in the first - // iteration. - // - // At the end of each iteration, we - // output to the console how many - // linear solver iterations it took - // us. When the loop below is done, we - // have (an approximation of) $U^n$. - double initial_rhs_norm = 0.; - bool first_iteration = true; - do - { - assemble_system (); - - if (first_iteration == true) - initial_rhs_norm = system_rhs.l2_norm(); - - const unsigned int n_iterations - = solve (); - - solution += solution_update; - - if (first_iteration == true) - std::cout << " " << n_iterations; - else - std::cout << '+' << n_iterations; - first_iteration = false; - } - while (system_rhs.l2_norm() > 1e-6 * initial_rhs_norm); - - std::cout << " CG iterations per nonlinear step." - << std::endl; - - // Upon obtaining the solution to the - // first equation of the problem at - // $t=t_n$, we must update the - // auxiliary velocity variable - // $V^n$. However, we do not compute - // and store $V^n$ since it is not a - // quantity we use directly in the - // problem. Hence, for simplicity, we - // update $MV^n$ directly: - Vector tmp_vector (solution.size()); - laplace_matrix.vmult (tmp_vector, solution); - M_x_velocity.add (-time_step*theta, tmp_vector); - - tmp_vector = 0; - laplace_matrix.vmult (tmp_vector, old_solution); - M_x_velocity.add (-time_step*(1-theta), tmp_vector); - - tmp_vector = 0; - compute_nl_term (old_solution, solution, tmp_vector); - M_x_velocity.add (-time_step, tmp_vector); - - // Oftentimes, in particular - // for fine meshes, we must - // pick the time step to be - // quite small in order for the - // scheme to be - // stable. Therefore, there are - // a lot of time steps during - // which "nothing interesting - // happens" in the solution. To - // improve overall efficiency - // -- in particular, speed up - // the program and save disk - // space -- we only output the - // solution every - // output_timestep_skip - // time steps: - if (timestep_number % output_timestep_skip == 0) - output_results (timestep_number); - } + // For completeness, we output the + // zeroth time step to a file just + // like any other other time step. + output_results (0); + + // Now we perform the time + // stepping: at every time step we + // solve the matrix equation(s) + // corresponding to the finite + // element discretization of the + // problem, and then advance our + // solution according to the time + // stepping formulas we discussed + // in the Introduction. 
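// Stripped of all finite element machinery, the nonlinear iteration in
// the time loop that follows is an ordinary Newton loop with a
// residual-based stopping criterion. The scalar caricature below mirrors
// that assemble/solve/update cycle and the relative tolerance of 1e-6;
// its coefficients are made up and merely stand in for the matrices
// assembled above, so it illustrates the control flow only:

#include <cmath>
#include <cstdio>

int main ()
{
  // Scalar stand-in for F_h(U)=0: F(u) = u + c*sin(u) - r, so that the
  // Jacobian F'(u) = 1 + c*cos(u) contains the cosine term just as
  // compute_nl_matrix() does.
  const double c = 0.3, r = 1.2;

  double u = 0.;                       // the current iterate
  double initial_residual_norm = 0.;
  double residual_norm = 0.;
  bool   first_iteration = true;

  do
    {
      // "assemble_system()": residual and Jacobian at the current iterate
      const double residual = u + c*std::sin(u) - r;
      const double jacobian = 1. + c*std::cos(u);
      residual_norm = std::fabs (residual);

      if (first_iteration == true)
        initial_residual_norm = residual_norm;

      // "solve()" and update: jacobian * du = -residual, then u += du
      const double du = -residual / jacobian;
      u += du;

      std::printf ("  |F| = %g\n", residual_norm);
      first_iteration = false;
    }
  while (residual_norm > 1e-6 * initial_residual_norm);

  std::printf ("converged: u = %.12f\n", u);
}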
+ unsigned int timestep_number = 1; + for (time+=time_step; time<=final_time; time+=time_step, ++timestep_number) + { + old_solution = solution; + + std::cout << std::endl + << "Time step #" << timestep_number << "; " + << "advancing to t = " << time << "." + << std::endl; + + // At the beginning of each + // time step we must solve the + // nonlinear equation in the + // split formulation via + // Newton's method --- + // i.e. solve for $\delta + // U^{n,l}$ then compute + // $U^{n,l+1}$ and so on. The + // stopping criterion for this + // nonlinear iteration is that + // $\|F_h(U^{n,l})\|_2 \le + // 10^{-6} + // \|F_h(U^{n,0})\|_2$. Consequently, + // we need to record the norm + // of the residual in the first + // iteration. + // + // At the end of each iteration, we + // output to the console how many + // linear solver iterations it took + // us. When the loop below is done, we + // have (an approximation of) $U^n$. + double initial_rhs_norm = 0.; + bool first_iteration = true; + do + { + assemble_system (); + + if (first_iteration == true) + initial_rhs_norm = system_rhs.l2_norm(); + + const unsigned int n_iterations + = solve (); + + solution += solution_update; + + if (first_iteration == true) + std::cout << " " << n_iterations; + else + std::cout << '+' << n_iterations; + first_iteration = false; + } + while (system_rhs.l2_norm() > 1e-6 * initial_rhs_norm); + + std::cout << " CG iterations per nonlinear step." + << std::endl; + + // Upon obtaining the solution to the + // first equation of the problem at + // $t=t_n$, we must update the + // auxiliary velocity variable + // $V^n$. However, we do not compute + // and store $V^n$ since it is not a + // quantity we use directly in the + // problem. Hence, for simplicity, we + // update $MV^n$ directly: + Vector tmp_vector (solution.size()); + laplace_matrix.vmult (tmp_vector, solution); + M_x_velocity.add (-time_step*theta, tmp_vector); + + tmp_vector = 0; + laplace_matrix.vmult (tmp_vector, old_solution); + M_x_velocity.add (-time_step*(1-theta), tmp_vector); + + tmp_vector = 0; + compute_nl_term (old_solution, solution, tmp_vector); + M_x_velocity.add (-time_step, tmp_vector); + + // Oftentimes, in particular + // for fine meshes, we must + // pick the time step to be + // quite small in order for the + // scheme to be + // stable. Therefore, there are + // a lot of time steps during + // which "nothing interesting + // happens" in the solution. To + // improve overall efficiency + // -- in particular, speed up + // the program and save disk + // space -- we only output the + // solution every + // output_timestep_skip + // time steps: + if (timestep_number % output_timestep_skip == 0) + output_results (timestep_number); + } + } } // @sect3{The main function} @@ -893,10 +896,13 @@ void SineGordonProblem::run () // here. For more information about // exceptions the reader should // consult step-6. -int main () +int main () { try { + using namespace dealii; + using namespace Step25; + deallog.depth_console (0); SineGordonProblem<1> sg_problem; @@ -912,10 +918,10 @@ int main () << "Aborting!" << std::endl << "----------------------------------------------------" << std::endl; - + return 1; } - catch (...) + catch (...) 
{ std::cerr << std::endl << std::endl << "----------------------------------------------------" @@ -926,6 +932,6 @@ int main () << std::endl; return 1; } - + return 0; } diff --git a/deal.II/examples/step-26/step-26.cc b/deal.II/examples/step-26/step-26.cc index cc070a5147..aca2b5c47a 100644 --- a/deal.II/examples/step-26/step-26.cc +++ b/deal.II/examples/step-26/step-26.cc @@ -55,267 +55,269 @@ // The last step is as in all // previous programs: -using namespace dealii; - -class PointCloudSurface : public StraightBoundary<3> -{ - public: - /** - * Constructor. - */ - PointCloudSurface (const std::string &filename); - - /** - * Let the new point be the - * arithmetic mean of the two - * vertices of the line. - * - * Refer to the general - * documentation of this class - * and the documentation of the - * base class for more - * information. - */ - virtual Point<3> - get_new_point_on_line (const Triangulation<3>::line_iterator &line) const; - - /** - * Let the new point be the - * arithmetic mean of the four - * vertices of this quad and the - * four midpoints of the lines, - * which are already created at - * the time of calling this - * function. - * - * Refer to the general - * documentation of this class - * and the documentation of the - * base class for more - * information. - */ - virtual Point<3> - get_new_point_on_quad (const Triangulation<3>::quad_iterator &quad) const; - - /** - * Gives n=points.size() - * points that splits the - * StraightBoundary line into - * $n+1$ partitions of equal - * lengths. - * - * Refer to the general - * documentation of this class - * and the documentation of the - * base class. - */ - virtual void - get_intermediate_points_on_line (const Triangulation<3>::line_iterator &line, - std::vector > &points) const; - - /** - * Gives n=points.size()=m*m - * points that splits the - * p{StraightBoundary} quad into - * (m+1)(m+1) subquads of equal - * size. - * - * Refer to the general - * documentation of this class - * and the documentation of the - * base class. - */ - virtual void - get_intermediate_points_on_quad (const Triangulation<3>::quad_iterator &quad, - std::vector > &points) const; - - /** - * A function that, given a point - * p, returns the closest - * point on the surface defined by the - * input file. For the time being, we - * simply return the closest point in the - * point cloud, rather than doing any - * sort of interpolation. - */ - Point<3> closest_point (const Point<3> &p) const; - private: - std::vector > point_list; -}; - - -PointCloudSurface::PointCloudSurface (const std::string &filename) +namespace Step26 { - // first read in all the points + using namespace dealii; + + class PointCloudSurface : public StraightBoundary<3> + { + public: + /** + * Constructor. + */ + PointCloudSurface (const std::string &filename); + + /** + * Let the new point be the + * arithmetic mean of the two + * vertices of the line. + * + * Refer to the general + * documentation of this class + * and the documentation of the + * base class for more + * information. + */ + virtual Point<3> + get_new_point_on_line (const Triangulation<3>::line_iterator &line) const; + + /** + * Let the new point be the + * arithmetic mean of the four + * vertices of this quad and the + * four midpoints of the lines, + * which are already created at + * the time of calling this + * function. + * + * Refer to the general + * documentation of this class + * and the documentation of the + * base class for more + * information. 
+ */ + virtual Point<3> + get_new_point_on_quad (const Triangulation<3>::quad_iterator &quad) const; + + /** + * Gives n=points.size() + * points that splits the + * StraightBoundary line into + * $n+1$ partitions of equal + * lengths. + * + * Refer to the general + * documentation of this class + * and the documentation of the + * base class. + */ + virtual void + get_intermediate_points_on_line (const Triangulation<3>::line_iterator &line, + std::vector > &points) const; + + /** + * Gives n=points.size()=m*m + * points that splits the + * p{StraightBoundary} quad into + * (m+1)(m+1) subquads of equal + * size. + * + * Refer to the general + * documentation of this class + * and the documentation of the + * base class. + */ + virtual void + get_intermediate_points_on_quad (const Triangulation<3>::quad_iterator &quad, + std::vector > &points) const; + + /** + * A function that, given a point + * p, returns the closest + * point on the surface defined by the + * input file. For the time being, we + * simply return the closest point in the + * point cloud, rather than doing any + * sort of interpolation. + */ + Point<3> closest_point (const Point<3> &p) const; + private: + std::vector > point_list; + }; + + + PointCloudSurface::PointCloudSurface (const std::string &filename) { - std::ifstream in (filename.c_str()); - AssertThrow (in, ExcIO()); + // first read in all the points + { + std::ifstream in (filename.c_str()); + AssertThrow (in, ExcIO()); - while (in) - { - Point<3> p; - in >> p; - point_list.push_back (p); - } + while (in) + { + Point<3> p; + in >> p; + point_list.push_back (p); + } - AssertThrow (point_list.size() > 1, ExcIO()); - } + AssertThrow (point_list.size() > 1, ExcIO()); + } - // next fit a linear model through the data - // cloud to rectify it in a local - // coordinate system - // - // the first step is to move the center of - // mass of the points to the origin - { - const Point<3> c_o_m = std::accumulate (point_list.begin(), - point_list.end(), - Point<3>()) / + // next fit a linear model through the data + // cloud to rectify it in a local + // coordinate system + // + // the first step is to move the center of + // mass of the points to the origin + { + const Point<3> c_o_m = std::accumulate (point_list.begin(), + point_list.end(), + Point<3>()) / point_list.size(); - for (unsigned int i=0; i gradient_direction - = Point<2>(a,b) / std::sqrt(a*a+b*b); - const Point<2> orthogonal_direction - = Point<2>(-b,a) / std::sqrt(a*a+b*b); + // with this information, we can rotate + // the points so that the corresponding + // least-squares fit would be the x-y + // plane + const Point<2> gradient_direction + = Point<2>(a,b) / std::sqrt(a*a+b*b); + const Point<2> orthogonal_direction + = Point<2>(-b,a) / std::sqrt(a*a+b*b); - const double stretch_factor = std::sqrt(1.+a*a+b*b); + const double stretch_factor = std::sqrt(1.+a*a+b*b); - for (unsigned int i=0; i xy (point_list[i][0], - point_list[i][1]); - const double grad_distance = xy * gradient_direction; - const double orth_distance = xy * orthogonal_direction; - - // we then have to stretch the points - // in the gradient direction. 
the - // stretch factor is defined above - // (zero if the original plane was - // already the xy plane, infinity if - // it was vertical) - const Point<2> new_xy - = (grad_distance * stretch_factor * gradient_direction + - orth_distance * orthogonal_direction); - point_list[i][0] = new_xy[0]; - point_list[i][1] = new_xy[1]; - } + for (unsigned int i=0; i xy (point_list[i][0], + point_list[i][1]); + const double grad_distance = xy * gradient_direction; + const double orth_distance = xy * orthogonal_direction; + + // we then have to stretch the points + // in the gradient direction. the + // stretch factor is defined above + // (zero if the original plane was + // already the xy plane, infinity if + // it was vertical) + const Point<2> new_xy + = (grad_distance * stretch_factor * gradient_direction + + orth_distance * orthogonal_direction); + point_list[i][0] = new_xy[0]; + point_list[i][1] = new_xy[1]; + } + } } -} -Point<3> -PointCloudSurface::closest_point (const Point<3> &p) const -{ - double distance = p.distance (point_list[0]); - Point<3> point = point_list[0]; + Point<3> + PointCloudSurface::closest_point (const Point<3> &p) const + { + double distance = p.distance (point_list[0]); + Point<3> point = point_list[0]; - for (std::vector >::const_iterator i=point_list.begin(); - i != point_list.end(); ++i) - { - const double d = p.distance (*i); - if (d < distance) - { - distance = d; - point = *i; - } - } + for (std::vector >::const_iterator i=point_list.begin(); + i != point_list.end(); ++i) + { + const double d = p.distance (*i); + if (d < distance) + { + distance = d; + point = *i; + } + } - return point; -} + return point; + } -Point<3> -PointCloudSurface:: -get_new_point_on_line (const Triangulation<3>::line_iterator &line) const -{ - return closest_point (StraightBoundary<3>::get_new_point_on_line (line)); -} + Point<3> + PointCloudSurface:: + get_new_point_on_line (const Triangulation<3>::line_iterator &line) const + { + return closest_point (StraightBoundary<3>::get_new_point_on_line (line)); + } -Point<3> -PointCloudSurface:: -get_new_point_on_quad (const Triangulation<3>::quad_iterator &quad) const -{ - return closest_point (StraightBoundary<3>::get_new_point_on_quad (quad)); -} + Point<3> + PointCloudSurface:: + get_new_point_on_quad (const Triangulation<3>::quad_iterator &quad) const + { + return closest_point (StraightBoundary<3>::get_new_point_on_quad (quad)); + } -void -PointCloudSurface:: -get_intermediate_points_on_line (const Triangulation<3>::line_iterator &line, - std::vector > &points) const -{ - StraightBoundary<3>::get_intermediate_points_on_line (line, - points); - for (unsigned int i=0; i::line_iterator &line, + std::vector > &points) const + { + StraightBoundary<3>::get_intermediate_points_on_line (line, + points); + for (unsigned int i=0; i::quad_iterator &quad, - std::vector > &points) const -{ - StraightBoundary<3>::get_intermediate_points_on_quad (quad, - points); - for (unsigned int i=0; i::quad_iterator &quad, + std::vector > &points) const + { + StraightBoundary<3>::get_intermediate_points_on_quad (quad, + points); + for (unsigned int i=0; iLaplaceProblem class template} + // @sect3{The LaplaceProblem class template} - // This is again the same - // LaplaceProblem class as in the - // previous example. The only - // difference is that we have now - // declared it as a class with a - // template parameter, and the - // template parameter is of course - // the spatial dimension in which we - // would like to solve the Laplace - // equation. 
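// A side note on the closest_point() search shown above before we turn to
// the Laplace solver: it is a plain linear scan over the point cloud. A
// minimal standalone version, with a hypothetical point type and made-up
// sample data and using only the standard library, looks as follows; for
// large clouds one would typically switch to a spatial data structure
// such as a k-d tree:

#include <cmath>
#include <cstdio>
#include <vector>

// A bare-bones stand-in for Point<3>, for illustration only.
struct Point3
{
  double x[3];
};

double distance (const Point3 &a, const Point3 &b)
{
  double d = 0.;
  for (unsigned int i=0; i<3; ++i)
    d += (a.x[i]-b.x[i]) * (a.x[i]-b.x[i]);
  return std::sqrt (d);
}

// Same logic as PointCloudSurface::closest_point(): remember the best
// candidate seen so far while scanning the whole list once.
Point3 closest_point (const Point3 &p, const std::vector<Point3> &cloud)
{
  Point3 best  = cloud[0];
  double d_min = distance (p, cloud[0]);
  for (unsigned int i=1; i<cloud.size(); ++i)
    {
      const double d = distance (p, cloud[i]);
      if (d < d_min)
        {
          d_min = d;
          best  = cloud[i];
        }
    }
  return best;
}

int main ()
{
  // A tiny made-up cloud, just to exercise the function.
  std::vector<Point3> cloud;
  const Point3 a = {{0., 0., 0.}};
  const Point3 b = {{1., 0., 0.}};
  const Point3 c = {{0., 2., 1.}};
  cloud.push_back (a);
  cloud.push_back (b);
  cloud.push_back (c);

  const Point3 p = {{0.8, 0.1, 0.}};
  const Point3 q = closest_point (p, cloud);
  std::printf ("closest point: (%g, %g, %g)\n", q.x[0], q.x[1], q.x[2]);
}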
Of course, several of - // the member variables depend on - // this dimension as well, in - // particular the Triangulation - // class, which has to represent - // quadrilaterals or hexahedra, - // respectively. Apart from this, - // everything is as before. -template -class LaplaceProblem -{ - public: - LaplaceProblem (); - void run (); + // This is again the same + // LaplaceProblem class as in the + // previous example. The only + // difference is that we have now + // declared it as a class with a + // template parameter, and the + // template parameter is of course + // the spatial dimension in which we + // would like to solve the Laplace + // equation. Of course, several of + // the member variables depend on + // this dimension as well, in + // particular the Triangulation + // class, which has to represent + // quadrilaterals or hexahedra, + // respectively. Apart from this, + // everything is as before. + template + class LaplaceProblem + { + public: + LaplaceProblem (); + void run (); - private: - void make_grid_and_dofs (); - void assemble_system (); - void solve (); - void output_results () const; + private: + void make_grid_and_dofs (); + void assemble_system (); + void solve (); + void output_results () const; - Triangulation triangulation; - FE_Q fe; - DoFHandler dof_handler; + Triangulation triangulation; + FE_Q fe; + DoFHandler dof_handler; - SparsityPattern sparsity_pattern; - SparseMatrix system_matrix; + SparsityPattern sparsity_pattern; + SparseMatrix system_matrix; - Vector solution; - Vector system_rhs; -}; + Vector solution; + Vector system_rhs; + }; - // @sect3{Right hand side and boundary values} + // @sect3{Right hand side and boundary values} -template -class BoundaryValues : public Function -{ - public: - BoundaryValues () : Function() {} + template + class BoundaryValues : public Function + { + public: + BoundaryValues () : Function() {} - virtual double value (const Point &p, - const unsigned int component = 0) const; -}; + virtual double value (const Point &p, + const unsigned int component = 0) const; + }; -template -double BoundaryValues::value (const Point &p, - const unsigned int /*component*/) const -{ - return std::max(p[dim-1], -5.); -} + template + double BoundaryValues::value (const Point &p, + const unsigned int /*component*/) const + { + return std::max(p[dim-1], -5.); + } - // @sect3{Implementation of the LaplaceProblem class} - - // Next for the implementation of the class - // template that makes use of the functions - // above. As before, we will write everything - // as templates that have a formal parameter - // dim that we assume unknown at the time - // we define the template functions. Only - // later, the compiler will find a - // declaration of LaplaceProblem@<2@> (in - // the main function, actually) and - // compile the entire class with dim - // replaced by 2, a process referred to as - // `instantiation of a template'. When doing - // so, it will also replace instances of - // RightHandSide@ by - // RightHandSide@<2@> and instantiate the - // latter class from the class template. - // - // In fact, the compiler will also find a - // declaration LaplaceProblem@<3@> in - // main(). This will cause it to again go - // back to the general - // LaplaceProblem@ template, replace - // all occurrences of dim, this time by - // 3, and compile the class a second - // time. 
Note that the two instantiations - // LaplaceProblem@<2@> and - // LaplaceProblem@<3@> are completely - // independent classes; their only common - // feature is that they are both instantiated - // from the same general template, but they - // are not convertible into each other, for - // example, and share no code (both - // instantiations are compiled completely - // independently). - - - // @sect4{LaplaceProblem::LaplaceProblem} - - // After this introduction, here is the - // constructor of the LaplaceProblem - // class. It specifies the desired polynomial - // degree of the finite elements and - // associates the DoFHandler to the - // triangulation just as in the previous - // example program, step-3: -template -LaplaceProblem::LaplaceProblem () : - fe (1), - dof_handler (triangulation) -{} - - - // @sect4{LaplaceProblem::make_grid_and_dofs} - - // Grid creation is something - // inherently dimension - // dependent. However, as long as the - // domains are sufficiently similar - // in 2D or 3D, the library can - // abstract for you. In our case, we - // would like to again solve on the - // square [-1,1]x[-1,1] in 2D, or on - // the cube [-1,1]x[-1,1]x[-1,1] in - // 3D; both can be termed - // hyper_cube, so we may use the - // same function in whatever - // dimension we are. Of course, the - // functions that create a hypercube - // in two and three dimensions are - // very much different, but that is - // something you need not care - // about. Let the library handle the - // difficult things. - // - // Likewise, associating a degree of freedom - // with each vertex is something which - // certainly looks different in 2D and 3D, - // but that does not need to bother you - // either. This function therefore looks - // exactly like in the previous example, - // although it performs actions that in their - // details are quite different if dim - // happens to be 3. The only significant - // difference from a user's perspective is - // the number of cells resulting, which is - // much higher in three than in two space - // dimensions! -template -void LaplaceProblem::make_grid_and_dofs () -{ - GridGenerator::hyper_cube (triangulation, -30, 30); + // @sect3{Implementation of the LaplaceProblem class} + + // Next for the implementation of the class + // template that makes use of the functions + // above. As before, we will write everything + // as templates that have a formal parameter + // dim that we assume unknown at the time + // we define the template functions. Only + // later, the compiler will find a + // declaration of LaplaceProblem@<2@> (in + // the main function, actually) and + // compile the entire class with dim + // replaced by 2, a process referred to as + // `instantiation of a template'. When doing + // so, it will also replace instances of + // RightHandSide@ by + // RightHandSide@<2@> and instantiate the + // latter class from the class template. + // + // In fact, the compiler will also find a + // declaration LaplaceProblem@<3@> in + // main(). This will cause it to again go + // back to the general + // LaplaceProblem@ template, replace + // all occurrences of dim, this time by + // 3, and compile the class a second + // time. 
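// The instantiation mechanism described here can be seen in a few lines
// of standalone code. The class name below is made up and not part of the
// library; the point is only that the compiler generates code once per
// dimension that is actually used:

#include <cstdio>

// A stand-in for the dimension-templated classes of the library.
template <int dim>
class Widget
{
  public:
    int n_vertices_per_cell () const
    {
      return 1 << dim;                 // 4 in 2d, 8 in 3d
    }
};

int main ()
{
  // Only here does the compiler instantiate Widget<2> and Widget<3>.
  Widget<2> widget_2d;
  Widget<3> widget_3d;

  std::printf ("vertices per cell in 2d: %d\n",
               widget_2d.n_vertices_per_cell ());
  std::printf ("vertices per cell in 3d: %d\n",
               widget_3d.n_vertices_per_cell ());

  // widget_2d = widget_3d;            // would not compile: unrelated types
}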
Note that the two instantiations + // LaplaceProblem@<2@> and + // LaplaceProblem@<3@> are completely + // independent classes; their only common + // feature is that they are both instantiated + // from the same general template, but they + // are not convertible into each other, for + // example, and share no code (both + // instantiations are compiled completely + // independently). + + + // @sect4{LaplaceProblem::LaplaceProblem} + + // After this introduction, here is the + // constructor of the LaplaceProblem + // class. It specifies the desired polynomial + // degree of the finite elements and + // associates the DoFHandler to the + // triangulation just as in the previous + // example program, step-3: + template + LaplaceProblem::LaplaceProblem () : + fe (1), + dof_handler (triangulation) + {} + + + // @sect4{LaplaceProblem::make_grid_and_dofs} + + // Grid creation is something + // inherently dimension + // dependent. However, as long as the + // domains are sufficiently similar + // in 2D or 3D, the library can + // abstract for you. In our case, we + // would like to again solve on the + // square [-1,1]x[-1,1] in 2D, or on + // the cube [-1,1]x[-1,1]x[-1,1] in + // 3D; both can be termed + // hyper_cube, so we may use the + // same function in whatever + // dimension we are. Of course, the + // functions that create a hypercube + // in two and three dimensions are + // very much different, but that is + // something you need not care + // about. Let the library handle the + // difficult things. + // + // Likewise, associating a degree of freedom + // with each vertex is something which + // certainly looks different in 2D and 3D, + // but that does not need to bother you + // either. This function therefore looks + // exactly like in the previous example, + // although it performs actions that in their + // details are quite different if dim + // happens to be 3. The only significant + // difference from a user's perspective is + // the number of cells resulting, which is + // much higher in three than in two space + // dimensions! 
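// To put a number on the last remark, a quick back-of-the-envelope
// computation. It assumes uniform global refinement of a single hypercube
// cell, which is a simplification of the adaptive refinement performed
// below, but it shows how quickly the cell count grows with the space
// dimension:

#include <cstdio>

int main ()
{
  // Each refinement step splits every cell into 2^dim children, so after
  // n uniform refinements of one cell there are (2^n)^dim active cells.
  const unsigned int n_refinements = 4;
  for (unsigned int dim = 2; dim <= 3; ++dim)
    {
      unsigned long n_cells = 1;
      for (unsigned int r = 0; r < n_refinements; ++r)
        n_cells *= (1ul << dim);
      std::printf ("dim = %u : %lu active cells\n", dim, n_cells);
    }
}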
+ template + void LaplaceProblem::make_grid_and_dofs () + { + GridGenerator::hyper_cube (triangulation, -30, 30); - for (unsigned int f=0; f::faces_per_cell; ++f) - if (triangulation.begin()->face(f)->center()[2] > 15) - { - triangulation.begin()->face(f)->set_boundary_indicator (1); - for (unsigned int i=0; i::lines_per_face; ++i) - triangulation.begin()->face(f)->line(i)->set_boundary_indicator (1); - break; - } - triangulation.set_boundary (1, pds); + for (unsigned int f=0; f::faces_per_cell; ++f) + if (triangulation.begin()->face(f)->center()[2] > 15) + { + triangulation.begin()->face(f)->set_boundary_indicator (1); + for (unsigned int i=0; i::lines_per_face; ++i) + triangulation.begin()->face(f)->line(i)->set_boundary_indicator (1); + break; + } + triangulation.set_boundary (1, pds); - for (unsigned int v=0; v::vertices_per_cell; ++v) - if (triangulation.begin()->vertex(v)[2] > 0) - triangulation.begin()->vertex(v) - = pds.closest_point (Point<3>(triangulation.begin()->vertex(v)[0], - triangulation.begin()->vertex(v)[1], - 0)); + for (unsigned int v=0; v::vertices_per_cell; ++v) + if (triangulation.begin()->vertex(v)[2] > 0) + triangulation.begin()->vertex(v) + = pds.closest_point (Point<3>(triangulation.begin()->vertex(v)[0], + triangulation.begin()->vertex(v)[1], + 0)); - for (unsigned int i=0; i<4; ++i) - { - for (typename Triangulation::active_cell_iterator - cell = triangulation.begin_active(); - cell != triangulation.end(); ++cell) - for (unsigned int f=0; f::faces_per_cell; ++f) - if (cell->face(f)->boundary_indicator() == 1) - cell->set_refine_flag (); - - triangulation.execute_coarsening_and_refinement (); - - std::cout << "Refinement cycle " << i << std::endl - << " Number of active cells: " - << triangulation.n_active_cells() - << std::endl - << " Total number of cells: " - << triangulation.n_cells() - << std::endl; + for (unsigned int i=0; i<4; ++i) + { + for (typename Triangulation::active_cell_iterator + cell = triangulation.begin_active(); + cell != triangulation.end(); ++cell) + for (unsigned int f=0; f::faces_per_cell; ++f) + if (cell->face(f)->boundary_indicator() == 1) + cell->set_refine_flag (); + + triangulation.execute_coarsening_and_refinement (); + + std::cout << "Refinement cycle " << i << std::endl + << " Number of active cells: " + << triangulation.n_active_cells() + << std::endl + << " Total number of cells: " + << triangulation.n_cells() + << std::endl; - } + } - dof_handler.distribute_dofs (fe); + dof_handler.distribute_dofs (fe); - std::cout << " Number of degrees of freedom: " - << dof_handler.n_dofs() - << std::endl; + std::cout << " Number of degrees of freedom: " + << dof_handler.n_dofs() + << std::endl; - sparsity_pattern.reinit (dof_handler.n_dofs(), - dof_handler.n_dofs(), - dof_handler.max_couplings_between_dofs()); - DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); - sparsity_pattern.compress(); + sparsity_pattern.reinit (dof_handler.n_dofs(), + dof_handler.n_dofs(), + dof_handler.max_couplings_between_dofs()); + DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); + sparsity_pattern.compress(); - system_matrix.reinit (sparsity_pattern); + system_matrix.reinit (sparsity_pattern); - solution.reinit (dof_handler.n_dofs()); - system_rhs.reinit (dof_handler.n_dofs()); -} + solution.reinit (dof_handler.n_dofs()); + system_rhs.reinit (dof_handler.n_dofs()); + } - // @sect4{LaplaceProblem::assemble_system} + // @sect4{LaplaceProblem::assemble_system} - // Unlike in the previous example, we - // would now like to use a - // 
non-constant right hand side - // function and non-zero boundary - // values. Both are tasks that are - // readily achieved with a only a few - // new lines of code in the - // assemblage of the matrix and right - // hand side. - // - // More interesting, though, is the - // way we assemble matrix and right - // hand side vector dimension - // independently: there is simply no - // difference to the - // two-dimensional case. Since the - // important objects used in this - // function (quadrature formula, - // FEValues) depend on the dimension - // by way of a template parameter as - // well, they can take care of - // setting up properly everything for - // the dimension for which this - // function is compiled. By declaring - // all classes which might depend on - // the dimension using a template - // parameter, the library can make - // nearly all work for you and you - // don't have to care about most - // things. -template -void LaplaceProblem::assemble_system () -{ - MatrixTools::create_laplace_matrix (dof_handler, - QGauss(2), - system_matrix); - system_rhs = 0; - - std::map boundary_values; - VectorTools::interpolate_boundary_values (dof_handler, - 0, - BoundaryValues(), - boundary_values); - MatrixTools::apply_boundary_values (boundary_values, - system_matrix, - solution, - system_rhs); -} + // Unlike in the previous example, we + // would now like to use a + // non-constant right hand side + // function and non-zero boundary + // values. Both are tasks that are + // readily achieved with a only a few + // new lines of code in the + // assemblage of the matrix and right + // hand side. + // + // More interesting, though, is the + // way we assemble matrix and right + // hand side vector dimension + // independently: there is simply no + // difference to the + // two-dimensional case. Since the + // important objects used in this + // function (quadrature formula, + // FEValues) depend on the dimension + // by way of a template parameter as + // well, they can take care of + // setting up properly everything for + // the dimension for which this + // function is compiled. By declaring + // all classes which might depend on + // the dimension using a template + // parameter, the library can make + // nearly all work for you and you + // don't have to care about most + // things. + template + void LaplaceProblem::assemble_system () + { + MatrixTools::create_laplace_matrix (dof_handler, + QGauss(2), + system_matrix); + system_rhs = 0; + + std::map boundary_values; + VectorTools::interpolate_boundary_values (dof_handler, + 0, + BoundaryValues(), + boundary_values); + MatrixTools::apply_boundary_values (boundary_values, + system_matrix, + solution, + system_rhs); + } - // @sect4{LaplaceProblem::solve} + // @sect4{LaplaceProblem::solve} - // Solving the linear system of - // equations is something that looks - // almost identical in most - // programs. In particular, it is - // dimension independent, so this - // function is copied verbatim from the - // previous example. -template -void LaplaceProblem::solve () -{ - // NEW - SolverControl solver_control (dof_handler.n_dofs(), - 1e-12*system_rhs.l2_norm()); - SolverCG<> cg (solver_control); + // Solving the linear system of + // equations is something that looks + // almost identical in most + // programs. In particular, it is + // dimension independent, so this + // function is copied verbatim from the + // previous example. 
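(Spelled out as a formula, the stopping criterion set up below -- note the `// NEW' marker -- declares the conjugate gradient iteration converged as soon as $\|b-Ax_k\|_2 \le 10^{-12}\,\|b\|_2$, i.e. once the residual has been reduced to a fixed fraction of the norm of the right hand side, and it allows at most as many iterations as there are degrees of freedom.)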
+ template + void LaplaceProblem::solve () + { + // NEW + SolverControl solver_control (dof_handler.n_dofs(), + 1e-12*system_rhs.l2_norm()); + SolverCG<> cg (solver_control); - PreconditionSSOR<> preconditioner; - preconditioner.initialize(system_matrix, 1.2); + PreconditionSSOR<> preconditioner; + preconditioner.initialize(system_matrix, 1.2); - cg.solve (system_matrix, solution, system_rhs, - preconditioner); -} + cg.solve (system_matrix, solution, system_rhs, + preconditioner); + } - // @sect4{LaplaceProblem::output_results} + // @sect4{LaplaceProblem::output_results} - // This function also does what the - // respective one did in step-3. No changes - // here for dimension independence either. - // - // The only difference to the previous - // example is that we want to write output in - // GMV format, rather than for gnuplot (GMV - // is another graphics program that, contrary - // to gnuplot, shows data in nice colors, - // allows rotation of geometries with the - // mouse, and generates reasonable - // representations of 3d data; for ways to - // obtain it see the ReadMe file of - // deal.II). To write data in this format, we - // simply replace the - // data_out.write_gnuplot call by - // data_out.write_gmv. - // - // Since the program will run both 2d and 3d - // versions of the laplace solver, we use the - // dimension in the filename to generate - // distinct filenames for each run (in a - // better program, one would check whether - // `dim' can have other values than 2 or 3, - // but we neglect this here for the sake of - // brevity). -template -void LaplaceProblem::output_results () const -{ - DataOut data_out; + // This function also does what the + // respective one did in step-3. No changes + // here for dimension independence either. + // + // The only difference to the previous + // example is that we want to write output in + // GMV format, rather than for gnuplot (GMV + // is another graphics program that, contrary + // to gnuplot, shows data in nice colors, + // allows rotation of geometries with the + // mouse, and generates reasonable + // representations of 3d data; for ways to + // obtain it see the ReadMe file of + // deal.II). To write data in this format, we + // simply replace the + // data_out.write_gnuplot call by + // data_out.write_gmv. + // + // Since the program will run both 2d and 3d + // versions of the laplace solver, we use the + // dimension in the filename to generate + // distinct filenames for each run (in a + // better program, one would check whether + // `dim' can have other values than 2 or 3, + // but we neglect this here for the sake of + // brevity). + template + void LaplaceProblem::output_results () const + { + DataOut data_out; - data_out.attach_dof_handler (dof_handler); - data_out.add_data_vector (solution, "solution"); + data_out.attach_dof_handler (dof_handler); + data_out.add_data_vector (solution, "solution"); - data_out.build_patches (); + data_out.build_patches (); - std::ofstream output (dim == 2 ? - "solution-2d.gmv" : - "solution-3d.gmv"); - data_out.write_gmv (output); -} + std::ofstream output (dim == 2 ? + "solution-2d.gmv" : + "solution-3d.gmv"); + data_out.write_gmv (output); + } - // @sect4{LaplaceProblem::run} + // @sect4{LaplaceProblem::run} - // This is the function which has the - // top-level control over - // everything. Apart from one line of - // additional output, it is the same - // as for the previous example. 
-template -void LaplaceProblem::run () -{ - std::cout << "Solving problem in " << dim << " space dimensions." << std::endl; + // This is the function which has the + // top-level control over + // everything. Apart from one line of + // additional output, it is the same + // as for the previous example. + template + void LaplaceProblem::run () + { + std::cout << "Solving problem in " << dim << " space dimensions." << std::endl; - make_grid_and_dofs(); - assemble_system (); - solve (); - output_results (); + make_grid_and_dofs(); + assemble_system (); + solve (); + output_results (); + } } @@ -765,7 +768,11 @@ void LaplaceProblem::run () // library. int main () { + using namespace dealii; + using namespace Step26; + deallog.depth_console (0); + { LaplaceProblem<3> laplace_problem_3d; laplace_problem_3d.run (); diff --git a/deal.II/examples/step-27/step-27.cc b/deal.II/examples/step-27/step-27.cc index 47c764fe92..a3c091558d 100644 --- a/deal.II/examples/step-27/step-27.cc +++ b/deal.II/examples/step-27/step-27.cc @@ -3,7 +3,7 @@ /* $Id$ */ /* */ -/* Copyright (C) 2006, 2007, 2008, 2009, 2010 by the deal.II authors */ +/* Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 by the deal.II authors */ /* */ /* This file is subject to QPL and may not be distributed */ /* without copyright and license information. Please refer */ @@ -63,978 +63,981 @@ // Finally, this is as in previous // programs: -using namespace dealii; - - - // @sect3{The main class} - - // The main class of this program looks very - // much like the one already used in the - // first few tutorial programs, for example - // the one in step-6. The main difference is - // that we have merged the refine_grid and - // output_results functions into one since we - // will also want to output some of the - // quantities used in deciding how to refine - // the mesh (in particular the estimated - // smoothness of the solution). There is also - // a function that computes this estimated - // smoothness, as discussed in the - // introduction. - // - // As far as member variables are concerned, - // we use the same structure as already used - // in step-6, but instead of a regular - // DoFHandler we use an object of type - // hp::DoFHandler, and we need collections - // instead of individual finite element, - // quadrature, and face quadrature - // objects. We will fill these collections in - // the constructor of the class. The last - // variable, max_degree, - // indicates the maximal polynomial degree of - // shape functions used. -template -class LaplaceProblem +namespace Step27 { - public: - LaplaceProblem (); - ~LaplaceProblem (); + using namespace dealii; + + + // @sect3{The main class} + + // The main class of this program looks very + // much like the one already used in the + // first few tutorial programs, for example + // the one in step-6. The main difference is + // that we have merged the refine_grid and + // output_results functions into one since we + // will also want to output some of the + // quantities used in deciding how to refine + // the mesh (in particular the estimated + // smoothness of the solution). There is also + // a function that computes this estimated + // smoothness, as discussed in the + // introduction. 
+ // + // As far as member variables are concerned, + // we use the same structure as already used + // in step-6, but instead of a regular + // DoFHandler we use an object of type + // hp::DoFHandler, and we need collections + // instead of individual finite element, + // quadrature, and face quadrature + // objects. We will fill these collections in + // the constructor of the class. The last + // variable, max_degree, + // indicates the maximal polynomial degree of + // shape functions used. + template + class LaplaceProblem + { + public: + LaplaceProblem (); + ~LaplaceProblem (); - void run (); - - private: - void setup_system (); - void assemble_system (); - void solve (); - void create_coarse_grid (); - void estimate_smoothness (Vector &smoothness_indicators) const; - void postprocess (const unsigned int cycle); + void run (); - Triangulation triangulation; + private: + void setup_system (); + void assemble_system (); + void solve (); + void create_coarse_grid (); + void estimate_smoothness (Vector &smoothness_indicators) const; + void postprocess (const unsigned int cycle); - hp::DoFHandler dof_handler; - hp::FECollection fe_collection; - hp::QCollection quadrature_collection; - hp::QCollection face_quadrature_collection; + Triangulation triangulation; - ConstraintMatrix constraints; + hp::DoFHandler dof_handler; + hp::FECollection fe_collection; + hp::QCollection quadrature_collection; + hp::QCollection face_quadrature_collection; - SparsityPattern sparsity_pattern; - SparseMatrix system_matrix; + ConstraintMatrix constraints; - Vector solution; - Vector system_rhs; + SparsityPattern sparsity_pattern; + SparseMatrix system_matrix; - const unsigned int max_degree; -}; + Vector solution; + Vector system_rhs; + const unsigned int max_degree; + }; - // @sect3{Equation data} - // - // Next, let us define the right hand side - // function for this problem. It is $x+1$ in - // 1d, $(x+1)(y+1)$ in 2d, and so on. -template -class RightHandSide : public Function -{ - public: - RightHandSide () : Function () {} - - virtual double value (const Point &p, - const unsigned int component) const; -}; - - -template -double -RightHandSide::value (const Point &p, - const unsigned int /*component*/) const -{ - double product = 1; - for (unsigned int d=0; d + class RightHandSide : public Function + { + public: + RightHandSide () : Function () {} + virtual double value (const Point &p, + const unsigned int component) const; + }; - // @sect3{Implementation of the main class} - - // @sect4{LaplaceProblem::LaplaceProblem} - - // The constructor of this class is fairly - // straightforward. It associates the - // hp::DoFHandler object with the - // triangulation, and then sets the maximal - // polynomial degree to 7 (in 1d and 2d) or 5 - // (in 3d and higher). We do so because using - // higher order polynomial degrees becomes - // prohibitively expensive, especially in - // higher space dimensions. - // - // Following this, we fill the collections of - // finite element, and cell and face - // quadrature objects. We start with - // quadratic elements, and each quadrature - // formula is chosen so that it is - // appropriate for the matching finite - // element in the hp::FECollection object. -template -LaplaceProblem::LaplaceProblem () - : - dof_handler (triangulation), - max_degree (dim <= 2 ? 
7 : 5) -{ - for (unsigned int degree=2; degree<=max_degree; ++degree) - { - fe_collection.push_back (FE_Q(degree)); - quadrature_collection.push_back (QGauss(degree+1)); - face_quadrature_collection.push_back (QGauss(degree+1)); - } -} + template + double + RightHandSide::value (const Point &p, + const unsigned int /*component*/) const + { + double product = 1; + for (unsigned int d=0; d -LaplaceProblem::~LaplaceProblem () -{ - dof_handler.clear (); -} + // @sect3{Implementation of the main class} - // @sect4{LaplaceProblem::setup_system} - // - // This function is again an almost - // verbatim copy of what we already did in - // step-6. The first change is that we - // append the Dirichlet boundary conditions - // to the ConstraintMatrix object, which we - // consequently call just - // constraints instead of - // hanging_node_constraints. The - // second difference is that we don't - // directly build the sparsity pattern, but - // first create an intermediate object that - // we later copy into the usual - // SparsityPattern data structure, since - // this is more efficient for the problem - // with many entries per row (and different - // number of entries in different rows). In - // another slight deviation, we do not - // first build the sparsity pattern and - // then condense away constrained degrees - // of freedom, but pass the constraint - // matrix object directly to the function - // that builds the sparsity pattern. We - // disable the insertion of constrained - // entries with false as fourth - // argument in the - // DoFTools::make_sparsity_pattern - // function. All of these changes are - // explained in the introduction of this - // program. - // - // The last change, maybe hidden in plain - // sight, is that the dof_handler variable - // here is an hp object -- nevertheless all - // the function calls we had before still - // work in exactly the same way as they - // always did. -template -void LaplaceProblem::setup_system () -{ - dof_handler.distribute_dofs (fe_collection); - - solution.reinit (dof_handler.n_dofs()); - system_rhs.reinit (dof_handler.n_dofs()); - - constraints.clear (); - DoFTools::make_hanging_node_constraints (dof_handler, - constraints); - VectorTools::interpolate_boundary_values (dof_handler, - 0, - ZeroFunction(), - constraints); - constraints.close (); - - CompressedSetSparsityPattern csp (dof_handler.n_dofs(), - dof_handler.n_dofs()); - DoFTools::make_sparsity_pattern (dof_handler, csp, constraints, false); - sparsity_pattern.copy_from (csp); - - system_matrix.reinit (sparsity_pattern); -} + // @sect4{LaplaceProblem::LaplaceProblem} + // The constructor of this class is fairly + // straightforward. It associates the + // hp::DoFHandler object with the + // triangulation, and then sets the maximal + // polynomial degree to 7 (in 1d and 2d) or 5 + // (in 3d and higher). We do so because using + // higher order polynomial degrees becomes + // prohibitively expensive, especially in + // higher space dimensions. + // + // Following this, we fill the collections of + // finite element, and cell and face + // quadrature objects. We start with + // quadratic elements, and each quadrature + // formula is chosen so that it is + // appropriate for the matching finite + // element in the hp::FECollection object. + template + LaplaceProblem::LaplaceProblem () + : + dof_handler (triangulation), + max_degree (dim <= 2 ? 
7 : 5) + { + for (unsigned int degree=2; degree<=max_degree; ++degree) + { + fe_collection.push_back (FE_Q(degree)); + quadrature_collection.push_back (QGauss(degree+1)); + face_quadrature_collection.push_back (QGauss(degree+1)); + } + } - // @sect4{LaplaceProblem::assemble_system} - - // This is the function that assembles the - // global matrix and right hand side vector - // from the local contributions of each - // cell. Its main working is as has been - // described in many of the tutorial programs - // before. The significant deviations are the - // ones necessary for hp finite element - // methods. In particular, that we need to - // use a collection of FEValues object - // (implemented through the hp::FEValues - // class), and that we have to eliminate - // constrained degrees of freedom already - // when copying local contributions into - // global objects. Both of these are - // explained in detail in the introduction of - // this program. - // - // One other slight complication is the fact - // that because we use different polynomial - // degrees on different cells, the matrices - // and vectors holding local contributions do - // not have the same size on all cells. At - // the beginning of the loop over all cells, - // we therefore each time have to resize them - // to the correct size (given by - // dofs_per_cell). Because these - // classes are implement in such a way that - // reducing the size of a matrix or vector - // does not release the currently allocated - // memory (unless the new size is zero), the - // process of resizing at the beginning of - // the loop will only require re-allocation - // of memory during the first few - // iterations. Once we have found in a cell - // with the maximal finite element degree, no - // more re-allocations will happen because - // all subsequent reinit calls - // will only set the size to something that - // fits the currently allocated memory. This - // is important since allocating memory is - // expensive, and doing so every time we - // visit a new cell would take significant - // compute time. -template -void LaplaceProblem::assemble_system () -{ - hp::FEValues hp_fe_values (fe_collection, - quadrature_collection, - update_values | update_gradients | - update_quadrature_points | update_JxW_values); - - const RightHandSide rhs_function; - - FullMatrix cell_matrix; - Vector cell_rhs; - - std::vector local_dof_indices; - - typename hp::DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(), - endc = dof_handler.end(); - for (; cell!=endc; ++cell) - { - const unsigned int dofs_per_cell = cell->get_fe().dofs_per_cell; - - cell_matrix.reinit (dofs_per_cell, dofs_per_cell); - cell_matrix = 0; - - cell_rhs.reinit (dofs_per_cell); - cell_rhs = 0; - - hp_fe_values.reinit (cell); - - const FEValues &fe_values = hp_fe_values.get_present_fe_values (); - - std::vector rhs_values (fe_values.n_quadrature_points); - rhs_function.value_list (fe_values.get_quadrature_points(), - rhs_values); - - for (unsigned int q_point=0; - q_pointget_dof_indices (local_dof_indices); + // The destructor is unchanged from what we + // already did in step-6: + template + LaplaceProblem::~LaplaceProblem () + { + dof_handler.clear (); + } - constraints.distribute_local_to_global (cell_matrix, cell_rhs, - local_dof_indices, - system_matrix, system_rhs); - } - // Now with the loop over all cells - // finished, we are done for this - // function. 
The steps we still had to do - // at this point in earlier tutorial - // programs, namely condensing hanging - // node constraints and applying - // Dirichlet boundary conditions, have - // been taken care of by the - // ConstraintMatrix object - // constraints on the fly. -} + // @sect4{LaplaceProblem::setup_system} + // + // This function is again an almost + // verbatim copy of what we already did in + // step-6. The first change is that we + // append the Dirichlet boundary conditions + // to the ConstraintMatrix object, which we + // consequently call just + // constraints instead of + // hanging_node_constraints. The + // second difference is that we don't + // directly build the sparsity pattern, but + // first create an intermediate object that + // we later copy into the usual + // SparsityPattern data structure, since + // this is more efficient for the problem + // with many entries per row (and different + // number of entries in different rows). In + // another slight deviation, we do not + // first build the sparsity pattern and + // then condense away constrained degrees + // of freedom, but pass the constraint + // matrix object directly to the function + // that builds the sparsity pattern. We + // disable the insertion of constrained + // entries with false as fourth + // argument in the + // DoFTools::make_sparsity_pattern + // function. All of these changes are + // explained in the introduction of this + // program. + // + // The last change, maybe hidden in plain + // sight, is that the dof_handler variable + // here is an hp object -- nevertheless all + // the function calls we had before still + // work in exactly the same way as they + // always did. + template + void LaplaceProblem::setup_system () + { + dof_handler.distribute_dofs (fe_collection); + + solution.reinit (dof_handler.n_dofs()); + system_rhs.reinit (dof_handler.n_dofs()); + + constraints.clear (); + DoFTools::make_hanging_node_constraints (dof_handler, + constraints); + VectorTools::interpolate_boundary_values (dof_handler, + 0, + ZeroFunction(), + constraints); + constraints.close (); + + CompressedSetSparsityPattern csp (dof_handler.n_dofs(), + dof_handler.n_dofs()); + DoFTools::make_sparsity_pattern (dof_handler, csp, constraints, false); + sparsity_pattern.copy_from (csp); + + system_matrix.reinit (sparsity_pattern); + } - // @sect4{LaplaceProblem::solve} + // @sect4{LaplaceProblem::assemble_system} + + // This is the function that assembles the + // global matrix and right hand side vector + // from the local contributions of each + // cell. Its main working is as has been + // described in many of the tutorial programs + // before. The significant deviations are the + // ones necessary for hp finite element + // methods. In particular, that we need to + // use a collection of FEValues object + // (implemented through the hp::FEValues + // class), and that we have to eliminate + // constrained degrees of freedom already + // when copying local contributions into + // global objects. Both of these are + // explained in detail in the introduction of + // this program. + // + // One other slight complication is the fact + // that because we use different polynomial + // degrees on different cells, the matrices + // and vectors holding local contributions do + // not have the same size on all cells. At + // the beginning of the loop over all cells, + // we therefore each time have to resize them + // to the correct size (given by + // dofs_per_cell). 
Because these + // classes are implement in such a way that + // reducing the size of a matrix or vector + // does not release the currently allocated + // memory (unless the new size is zero), the + // process of resizing at the beginning of + // the loop will only require re-allocation + // of memory during the first few + // iterations. Once we have found in a cell + // with the maximal finite element degree, no + // more re-allocations will happen because + // all subsequent reinit calls + // will only set the size to something that + // fits the currently allocated memory. This + // is important since allocating memory is + // expensive, and doing so every time we + // visit a new cell would take significant + // compute time. + template + void LaplaceProblem::assemble_system () + { + hp::FEValues hp_fe_values (fe_collection, + quadrature_collection, + update_values | update_gradients | + update_quadrature_points | update_JxW_values); - // The function solving the linear system is - // entirely unchanged from previous - // examples. We simply try to reduce the - // initial residual (which equals the $l_2$ - // norm of the right hand side) by a certain - // factor: -template -void LaplaceProblem::solve () -{ - SolverControl solver_control (system_rhs.size(), - 1e-8*system_rhs.l2_norm()); - SolverCG<> cg (solver_control); + const RightHandSide rhs_function; - PreconditionSSOR<> preconditioner; - preconditioner.initialize(system_matrix, 1.2); + FullMatrix cell_matrix; + Vector cell_rhs; - cg.solve (system_matrix, solution, system_rhs, - preconditioner); + std::vector local_dof_indices; - constraints.distribute (solution); -} + typename hp::DoFHandler::active_cell_iterator + cell = dof_handler.begin_active(), + endc = dof_handler.end(); + for (; cell!=endc; ++cell) + { + const unsigned int dofs_per_cell = cell->get_fe().dofs_per_cell; + cell_matrix.reinit (dofs_per_cell, dofs_per_cell); + cell_matrix = 0; + cell_rhs.reinit (dofs_per_cell); + cell_rhs = 0; - // @sect4{LaplaceProblem::postprocess} - - // After solving the linear system, we will - // want to postprocess the solution. Here, - // all we do is to estimate the error, - // estimate the local smoothness of the - // solution as described in the introduction, - // then write graphical output, and finally - // refine the mesh in both $h$ and $p$ - // according to the indicators computed - // before. We do all this in the same - // function because we want the estimated - // error and smoothness indicators not only - // for refinement, but also include them in - // the graphical output. -template -void LaplaceProblem::postprocess (const unsigned int cycle) -{ - // Let us start with computing estimated - // error and smoothness indicators, which - // each are one number for each active cell - // of our triangulation. For the error - // indicator, we use the - // KellyErrorEstimator class as - // always. Estimating the smoothness is - // done in the respective function of this - // class; that function is discussed - // further down below: - Vector estimated_error_per_cell (triangulation.n_active_cells()); - KellyErrorEstimator::estimate (dof_handler, - face_quadrature_collection, - typename FunctionMap::type(), - solution, - estimated_error_per_cell); - - Vector smoothness_indicators (triangulation.n_active_cells()); - estimate_smoothness (smoothness_indicators); - - // Next we want to generate graphical - // output. 
In addition to the two estimated - // quantities derived above, we would also - // like to output the polynomial degree of - // the finite elements used on each of the - // elements on the mesh. - // - // The way to do that requires that we loop - // over all cells and poll the active - // finite element index of them using - // cell-@>active_fe_index(). We - // then use the result of this operation - // and query the finite element collection - // for the finite element with that index, - // and finally determine the polynomial - // degree of that element. The result we - // put into a vector with one element per - // cell. The DataOut class requires this to - // be a vector of float or - // double, even though our - // values are all integers, so that it what - // we use: - { - Vector fe_degrees (triangulation.n_active_cells()); - { - typename hp::DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(), - endc = dof_handler.end(); - for (unsigned int index=0; cell!=endc; ++cell, ++index) - fe_degrees(index) - = fe_collection[cell->active_fe_index()].degree; - } + hp_fe_values.reinit (cell); - // With now all data vectors available -- - // solution, estimated errors and - // smoothness indicators, and finite - // element degrees --, we create a - // DataOut object for graphical output - // and attach all data. Note that the - // DataOut class has a second template - // argument (which defaults to - // DoFHandler@, which is why we - // have never seen it in previous - // tutorial programs) that indicates the - // type of DoF handler to be used. Here, - // we have to use the hp::DoFHandler - // class: - DataOut > data_out; - - data_out.attach_dof_handler (dof_handler); - data_out.add_data_vector (solution, "solution"); - data_out.add_data_vector (estimated_error_per_cell, "error"); - data_out.add_data_vector (smoothness_indicators, "smoothness"); - data_out.add_data_vector (fe_degrees, "fe_degree"); - data_out.build_patches (); - - // The final step in generating - // output is to determine a file - // name, open the file, and write - // the data into it (here, we use - // VTK format): - const std::string filename = "solution-" + - Utilities::int_to_string (cycle, 2) + - ".vtk"; - std::ofstream output (filename.c_str()); - data_out.write_vtk (output); + const FEValues &fe_values = hp_fe_values.get_present_fe_values (); + + std::vector rhs_values (fe_values.n_quadrature_points); + rhs_function.value_list (fe_values.get_quadrature_points(), + rhs_values); + + for (unsigned int q_point=0; + q_pointget_dof_indices (local_dof_indices); + + constraints.distribute_local_to_global (cell_matrix, cell_rhs, + local_dof_indices, + system_matrix, system_rhs); + } + + // Now with the loop over all cells + // finished, we are done for this + // function. The steps we still had to do + // at this point in earlier tutorial + // programs, namely condensing hanging + // node constraints and applying + // Dirichlet boundary conditions, have + // been taken care of by the + // ConstraintMatrix object + // constraints on the fly. } - // After this, we would like to actually - // refine the mesh, in both $h$ and - // $p$. The way we are going to do this is - // as follows: first, we use the estimated - // error to flag those cells for refinement - // that have the largest error. This is - // what we have always done: + + + // @sect4{LaplaceProblem::solve} + + // The function solving the linear system is + // entirely unchanged from previous + // examples. 
We simply try to reduce the + // initial residual (which equals the $l_2$ + // norm of the right hand side) by a certain + // factor: + template + void LaplaceProblem::solve () { - GridRefinement::refine_and_coarsen_fixed_number (triangulation, - estimated_error_per_cell, - 0.3, 0.03); - - // Next we would like to figure out which - // of the cells that have been flagged - // for refinement should actually have - // $p$ increased instead of $h$ - // decreased. The strategy we choose here - // is that we look at the smoothness - // indicators of those cells that are - // flagged for refinement, and increase - // $p$ for those with a smoothness larger - // than a certain threshold. For this, we - // first have to determine the maximal - // and minimal values of the smoothness - // indicators of all flagged cells, which - // we do using a loop over all cells and - // comparing current minimal and maximal - // values. (We start with the minimal and - // maximal values of all cells, a - // range within which the minimal and - // maximal values on cells flagged for - // refinement must surely lie.) Absent - // any better strategies, we will then - // set the threshold above which will - // increase $p$ instead of reducing $h$ - // as the mean value between minimal and - // maximal smoothness indicators on cells - // flagged for refinement: - float max_smoothness = *std::min_element (smoothness_indicators.begin(), - smoothness_indicators.end()), - min_smoothness = *std::max_element (smoothness_indicators.begin(), - smoothness_indicators.end()); - { - typename hp::DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(), - endc = dof_handler.end(); - for (unsigned int index=0; cell!=endc; ++cell, ++index) - if (cell->refine_flag_set()) - { - max_smoothness = std::max (max_smoothness, - smoothness_indicators(index)); - min_smoothness = std::min (min_smoothness, - smoothness_indicators(index)); - } - } - const float threshold_smoothness = (max_smoothness + min_smoothness) / 2; - - // With this, we can go back, loop over - // all cells again, and for those cells - // for which (i) the refinement flag is - // set, (ii) the smoothness indicator is - // larger than the threshold, and (iii) - // we still have a finite element with a - // polynomial degree higher than the - // current one in the finite element - // collection, we then increase the - // polynomial degree and in return remove - // the flag indicating that the cell - // should undergo bisection. For all - // other cells, the refinement flags - // remain untouched: - { - typename hp::DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(), - endc = dof_handler.end(); - for (unsigned int index=0; cell!=endc; ++cell, ++index) - if (cell->refine_flag_set() - && - (smoothness_indicators(index) > threshold_smoothness) - && - (cell->active_fe_index()+1 < fe_collection.size())) - { - cell->clear_refine_flag(); - cell->set_active_fe_index (cell->active_fe_index() + 1); - } - } - - // At the end of this procedure, we then - // refine the mesh. 
During this process, - // children of cells undergoing bisection - // inherit their mother cell's finite - // element index: - triangulation.execute_coarsening_and_refinement (); - } -} + SolverControl solver_control (system_rhs.size(), + 1e-8*system_rhs.l2_norm()); + SolverCG<> cg (solver_control); + PreconditionSSOR<> preconditioner; + preconditioner.initialize(system_matrix, 1.2); - // @sect4{LaplaceProblem::create_coarse_grid} - - // The following function is used when - // creating the initial grid. It is a - // specialization for the 2d case, i.e. a - // corresponding function needs to be - // implemented if the program is run in - // anything other then 2d. The function is - // actually stolen from step-14 and generates - // the same mesh used already there, i.e. the - // square domain with the square hole in the - // middle. The meaning of the different parts - // of this function are explained in the - // documentation of step-14: -template <> -void LaplaceProblem<2>::create_coarse_grid () -{ - const unsigned int dim = 2; - - static const Point<2> vertices_1[] - = { Point<2> (-1., -1.), - Point<2> (-1./2, -1.), - Point<2> (0., -1.), - Point<2> (+1./2, -1.), - Point<2> (+1, -1.), - - Point<2> (-1., -1./2.), - Point<2> (-1./2, -1./2.), - Point<2> (0., -1./2.), - Point<2> (+1./2, -1./2.), - Point<2> (+1, -1./2.), - - Point<2> (-1., 0.), - Point<2> (-1./2, 0.), - Point<2> (+1./2, 0.), - Point<2> (+1, 0.), - - Point<2> (-1., 1./2.), - Point<2> (-1./2, 1./2.), - Point<2> (0., 1./2.), - Point<2> (+1./2, 1./2.), - Point<2> (+1, 1./2.), - - Point<2> (-1., 1.), - Point<2> (-1./2, 1.), - Point<2> (0., 1.), - Point<2> (+1./2, 1.), - Point<2> (+1, 1.) }; - const unsigned int - n_vertices = sizeof(vertices_1) / sizeof(vertices_1[0]); - const std::vector > vertices (&vertices_1[0], - &vertices_1[n_vertices]); - static const int cell_vertices[][GeometryInfo::vertices_per_cell] - = {{0, 1, 5, 6}, - {1, 2, 6, 7}, - {2, 3, 7, 8}, - {3, 4, 8, 9}, - {5, 6, 10, 11}, - {8, 9, 12, 13}, - {10, 11, 14, 15}, - {12, 13, 17, 18}, - {14, 15, 19, 20}, - {15, 16, 20, 21}, - {16, 17, 21, 22}, - {17, 18, 22, 23}}; - const unsigned int - n_cells = sizeof(cell_vertices) / sizeof(cell_vertices[0]); - - std::vector > cells (n_cells, CellData()); - for (unsigned int i=0; i::vertices_per_cell; - ++j) - cells[i].vertices[j] = cell_vertices[i][j]; - cells[i].material_id = 0; - } + cg.solve (system_matrix, solution, system_rhs, + preconditioner); - triangulation.create_triangulation (vertices, - cells, - SubCellData()); - triangulation.refine_global (3); -} + constraints.distribute (solution); + } + // @sect4{LaplaceProblem::postprocess} + + // After solving the linear system, we will + // want to postprocess the solution. Here, + // all we do is to estimate the error, + // estimate the local smoothness of the + // solution as described in the introduction, + // then write graphical output, and finally + // refine the mesh in both $h$ and $p$ + // according to the indicators computed + // before. We do all this in the same + // function because we want the estimated + // error and smoothness indicators not only + // for refinement, but also include them in + // the graphical output. + template + void LaplaceProblem::postprocess (const unsigned int cycle) + { + // Let us start with computing estimated + // error and smoothness indicators, which + // each are one number for each active cell + // of our triangulation. For the error + // indicator, we use the + // KellyErrorEstimator class as + // always. 
Estimating the smoothness is + // done in the respective function of this + // class; that function is discussed + // further down below: + Vector estimated_error_per_cell (triangulation.n_active_cells()); + KellyErrorEstimator::estimate (dof_handler, + face_quadrature_collection, + typename FunctionMap::type(), + solution, + estimated_error_per_cell); + + Vector smoothness_indicators (triangulation.n_active_cells()); + estimate_smoothness (smoothness_indicators); + + // Next we want to generate graphical + // output. In addition to the two estimated + // quantities derived above, we would also + // like to output the polynomial degree of + // the finite elements used on each of the + // elements on the mesh. + // + // The way to do that requires that we loop + // over all cells and poll the active + // finite element index of them using + // cell-@>active_fe_index(). We + // then use the result of this operation + // and query the finite element collection + // for the finite element with that index, + // and finally determine the polynomial + // degree of that element. The result we + // put into a vector with one element per + // cell. The DataOut class requires this to + // be a vector of float or + // double, even though our + // values are all integers, so that it what + // we use: + { + Vector fe_degrees (triangulation.n_active_cells()); + { + typename hp::DoFHandler::active_cell_iterator + cell = dof_handler.begin_active(), + endc = dof_handler.end(); + for (unsigned int index=0; cell!=endc; ++cell, ++index) + fe_degrees(index) + = fe_collection[cell->active_fe_index()].degree; + } - // @sect4{LaplaceProblem::run} + // With now all data vectors available -- + // solution, estimated errors and + // smoothness indicators, and finite + // element degrees --, we create a + // DataOut object for graphical output + // and attach all data. Note that the + // DataOut class has a second template + // argument (which defaults to + // DoFHandler@, which is why we + // have never seen it in previous + // tutorial programs) that indicates the + // type of DoF handler to be used. Here, + // we have to use the hp::DoFHandler + // class: + DataOut > data_out; + + data_out.attach_dof_handler (dof_handler); + data_out.add_data_vector (solution, "solution"); + data_out.add_data_vector (estimated_error_per_cell, "error"); + data_out.add_data_vector (smoothness_indicators, "smoothness"); + data_out.add_data_vector (fe_degrees, "fe_degree"); + data_out.build_patches (); + + // The final step in generating + // output is to determine a file + // name, open the file, and write + // the data into it (here, we use + // VTK format): + const std::string filename = "solution-" + + Utilities::int_to_string (cycle, 2) + + ".vtk"; + std::ofstream output (filename.c_str()); + data_out.write_vtk (output); + } - // This function implements the logic of the - // program, as did the respective function in - // most of the previous programs already, see - // for example step-6. - // - // Basically, it contains the adaptive loop: - // in the first iteration create a coarse - // grid, and then set up the linear system, - // assemble it, solve, and postprocess the - // solution including mesh refinement. Then - // start over again. 
In the meantime, also - // output some information for those staring - // at the screen trying to figure out what - // the program does: -template -void LaplaceProblem::run () -{ - for (unsigned int cycle=0; cycle<6; ++cycle) + // After this, we would like to actually + // refine the mesh, in both $h$ and + // $p$. The way we are going to do this is + // as follows: first, we use the estimated + // error to flag those cells for refinement + // that have the largest error. This is + // what we have always done: { - std::cout << "Cycle " << cycle << ':' << std::endl; + GridRefinement::refine_and_coarsen_fixed_number (triangulation, + estimated_error_per_cell, + 0.3, 0.03); + + // Next we would like to figure out which + // of the cells that have been flagged + // for refinement should actually have + // $p$ increased instead of $h$ + // decreased. The strategy we choose here + // is that we look at the smoothness + // indicators of those cells that are + // flagged for refinement, and increase + // $p$ for those with a smoothness larger + // than a certain threshold. For this, we + // first have to determine the maximal + // and minimal values of the smoothness + // indicators of all flagged cells, which + // we do using a loop over all cells and + // comparing current minimal and maximal + // values. (We start with the minimal and + // maximal values of all cells, a + // range within which the minimal and + // maximal values on cells flagged for + // refinement must surely lie.) Absent + // any better strategies, we will then + // set the threshold above which will + // increase $p$ instead of reducing $h$ + // as the mean value between minimal and + // maximal smoothness indicators on cells + // flagged for refinement: + float max_smoothness = *std::min_element (smoothness_indicators.begin(), + smoothness_indicators.end()), + min_smoothness = *std::max_element (smoothness_indicators.begin(), + smoothness_indicators.end()); + { + typename hp::DoFHandler::active_cell_iterator + cell = dof_handler.begin_active(), + endc = dof_handler.end(); + for (unsigned int index=0; cell!=endc; ++cell, ++index) + if (cell->refine_flag_set()) + { + max_smoothness = std::max (max_smoothness, + smoothness_indicators(index)); + min_smoothness = std::min (min_smoothness, + smoothness_indicators(index)); + } + } + const float threshold_smoothness = (max_smoothness + min_smoothness) / 2; + + // With this, we can go back, loop over + // all cells again, and for those cells + // for which (i) the refinement flag is + // set, (ii) the smoothness indicator is + // larger than the threshold, and (iii) + // we still have a finite element with a + // polynomial degree higher than the + // current one in the finite element + // collection, we then increase the + // polynomial degree and in return remove + // the flag indicating that the cell + // should undergo bisection. For all + // other cells, the refinement flags + // remain untouched: + { + typename hp::DoFHandler::active_cell_iterator + cell = dof_handler.begin_active(), + endc = dof_handler.end(); + for (unsigned int index=0; cell!=endc; ++cell, ++index) + if (cell->refine_flag_set() + && + (smoothness_indicators(index) > threshold_smoothness) + && + (cell->active_fe_index()+1 < fe_collection.size())) + { + cell->clear_refine_flag(); + cell->set_active_fe_index (cell->active_fe_index() + 1); + } + } - if (cycle == 0) - create_coarse_grid (); + // At the end of this procedure, we then + // refine the mesh. 
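(In compact form, using the quantities described above: a cell $K$ that has been flagged for refinement is switched from bisection to a higher polynomial degree exactly if its smoothness indicator satisfies $s_K > \frac{1}{2}(\min_{K'} s_{K'} + \max_{K'} s_{K'})$, where the minimum and maximum run over all flagged cells, and if the finite element collection still contains an element of higher degree than the one currently used on $K$; every other flagged cell keeps its flag and is bisected as usual.)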
During this process, + // children of cells undergoing bisection + // inherit their mother cell's finite + // element index: + triangulation.execute_coarsening_and_refinement (); + } + } - setup_system (); - std::cout << " Number of active cells: " - << triangulation.n_active_cells() - << std::endl - << " Number of degrees of freedom: " - << dof_handler.n_dofs() - << std::endl - << " Number of constraints : " - << constraints.n_constraints() - << std::endl; + // @sect4{LaplaceProblem::create_coarse_grid} + + // The following function is used when + // creating the initial grid. It is a + // specialization for the 2d case, i.e. a + // corresponding function needs to be + // implemented if the program is run in + // anything other then 2d. The function is + // actually stolen from step-14 and generates + // the same mesh used already there, i.e. the + // square domain with the square hole in the + // middle. The meaning of the different parts + // of this function are explained in the + // documentation of step-14: + template <> + void LaplaceProblem<2>::create_coarse_grid () + { + const unsigned int dim = 2; + + static const Point<2> vertices_1[] + = { Point<2> (-1., -1.), + Point<2> (-1./2, -1.), + Point<2> (0., -1.), + Point<2> (+1./2, -1.), + Point<2> (+1, -1.), + + Point<2> (-1., -1./2.), + Point<2> (-1./2, -1./2.), + Point<2> (0., -1./2.), + Point<2> (+1./2, -1./2.), + Point<2> (+1, -1./2.), + + Point<2> (-1., 0.), + Point<2> (-1./2, 0.), + Point<2> (+1./2, 0.), + Point<2> (+1, 0.), + + Point<2> (-1., 1./2.), + Point<2> (-1./2, 1./2.), + Point<2> (0., 1./2.), + Point<2> (+1./2, 1./2.), + Point<2> (+1, 1./2.), + + Point<2> (-1., 1.), + Point<2> (-1./2, 1.), + Point<2> (0., 1.), + Point<2> (+1./2, 1.), + Point<2> (+1, 1.) }; + const unsigned int + n_vertices = sizeof(vertices_1) / sizeof(vertices_1[0]); + const std::vector > vertices (&vertices_1[0], + &vertices_1[n_vertices]); + static const int cell_vertices[][GeometryInfo::vertices_per_cell] + = {{0, 1, 5, 6}, + {1, 2, 6, 7}, + {2, 3, 7, 8}, + {3, 4, 8, 9}, + {5, 6, 10, 11}, + {8, 9, 12, 13}, + {10, 11, 14, 15}, + {12, 13, 17, 18}, + {14, 15, 19, 20}, + {15, 16, 20, 21}, + {16, 17, 21, 22}, + {17, 18, 22, 23}}; + const unsigned int + n_cells = sizeof(cell_vertices) / sizeof(cell_vertices[0]); + + std::vector > cells (n_cells, CellData()); + for (unsigned int i=0; i::vertices_per_cell; + ++j) + cells[i].vertices[j] = cell_vertices[i][j]; + cells[i].material_id = 0; + } - assemble_system (); - solve (); - postprocess (cycle); - } -} + triangulation.create_triangulation (vertices, + cells, + SubCellData()); + triangulation.refine_global (3); + } - // @sect4{LaplaceProblem::estimate_smoothness} - // This last function of significance - // implements the algorithm to estimate the - // smoothness exponent using the algorithms - // explained in detail in the - // introduction. We will therefore only - // comment on those points that are of - // implementational importance. -template -void -LaplaceProblem:: -estimate_smoothness (Vector &smoothness_indicators) const -{ - // The first thing we need to do is - // to define the Fourier vectors - // ${\bf k}$ for which we want to - // compute Fourier coefficients of - // the solution on each cell. In - // 2d, we pick those vectors ${\bf - // k}=(\pi i, \pi j)^T$ for which - // $\sqrt{i^2+j^2}\le N$, with - // $i,j$ integers and $N$ being the - // maximal polynomial degree we use - // for the finite elements in this - // program. The 3d case is handled - // analogously. 
1d and dimensions - // higher than 3 are not - // implemented, and we guard our - // implementation by making sure - // that we receive an exception in - // case someone tries to compile - // the program for any of these - // dimensions. + + // @sect4{LaplaceProblem::run} + + // This function implements the logic of the + // program, as did the respective function in + // most of the previous programs already, see + // for example step-6. // - // We exclude ${\bf k}=0$ to avoid problems - // computing $|{\bf k}|^{-mu}$ and $\ln - // |{\bf k}|$. The other vectors are stored - // in the field k_vectors. In - // addition, we store the square of the - // magnitude of each of these vectors (up - // to a factor $\pi^2$) in the - // k_vectors_magnitude array - // -- we will need that when we attempt to - // find out which of those Fourier - // coefficients corresponding to Fourier - // vectors of the same magnitude is the - // largest: - const unsigned int N = max_degree; - - std::vector > k_vectors; - std::vector k_vectors_magnitude; - switch (dim) - { - case 2: + // Basically, it contains the adaptive loop: + // in the first iteration create a coarse + // grid, and then set up the linear system, + // assemble it, solve, and postprocess the + // solution including mesh refinement. Then + // start over again. In the meantime, also + // output some information for those staring + // at the screen trying to figure out what + // the program does: + template + void LaplaceProblem::run () + { + for (unsigned int cycle=0; cycle<6; ++cycle) { - for (unsigned int i=0; i(numbers::PI * i, - numbers::PI * j)); - k_vectors_magnitude.push_back (i*i+j*j); - } - - break; + std::cout << "Cycle " << cycle << ':' << std::endl; + + if (cycle == 0) + create_coarse_grid (); + + setup_system (); + + std::cout << " Number of active cells: " + << triangulation.n_active_cells() + << std::endl + << " Number of degrees of freedom: " + << dof_handler.n_dofs() + << std::endl + << " Number of constraints : " + << constraints.n_constraints() + << std::endl; + + assemble_system (); + solve (); + postprocess (cycle); } + } - case 3: + + // @sect4{LaplaceProblem::estimate_smoothness} + + // This last function of significance + // implements the algorithm to estimate the + // smoothness exponent using the algorithms + // explained in detail in the + // introduction. We will therefore only + // comment on those points that are of + // implementational importance. + template + void + LaplaceProblem:: + estimate_smoothness (Vector &smoothness_indicators) const + { + // The first thing we need to do is + // to define the Fourier vectors + // ${\bf k}$ for which we want to + // compute Fourier coefficients of + // the solution on each cell. In + // 2d, we pick those vectors ${\bf + // k}=(\pi i, \pi j)^T$ for which + // $\sqrt{i^2+j^2}\le N$, with + // $i,j$ integers and $N$ being the + // maximal polynomial degree we use + // for the finite elements in this + // program. The 3d case is handled + // analogously. 1d and dimensions + // higher than 3 are not + // implemented, and we guard our + // implementation by making sure + // that we receive an exception in + // case someone tries to compile + // the program for any of these + // dimensions. + // + // We exclude ${\bf k}=0$ to avoid problems + // computing $|{\bf k}|^{-mu}$ and $\ln + // |{\bf k}|$. The other vectors are stored + // in the field k_vectors. 
In + // addition, we store the square of the + // magnitude of each of these vectors (up + // to a factor $\pi^2$) in the + // k_vectors_magnitude array + // -- we will need that when we attempt to + // find out which of those Fourier + // coefficients corresponding to Fourier + // vectors of the same magnitude is the + // largest: + const unsigned int N = max_degree; + + std::vector > k_vectors; + std::vector k_vectors_magnitude; + switch (dim) { - for (unsigned int i=0; i(numbers::PI * i, - numbers::PI * j, - numbers::PI * k)); - k_vectors_magnitude.push_back (i*i+j*j+k*k); - } - - break; - } - - default: - Assert (false, ExcNotImplemented()); - } - - // After we have set up the Fourier - // vectors, we also store their total - // number for simplicity, and compute the - // logarithm of the magnitude of each of - // these vectors since we will need it many - // times over further down below: - const unsigned n_fourier_modes = k_vectors.size(); - std::vector ln_k (n_fourier_modes); - for (unsigned int i=0; i > > - fourier_transform_matrices (fe_collection.size()); - - // In order to compute them, we of - // course can't perform the Fourier - // transform analytically, but have - // to approximate it using - // quadrature. To this end, we use - // a quadrature formula that is - // obtained by iterating a 2-point - // Gauss formula as many times as - // the maximal exponent we use for - // the term $e^{i{\bf k}\cdot{\bf - // x}}$: - QGauss<1> base_quadrature (2); - QIterated quadrature (base_quadrature, N); - - // With this, we then loop over all finite - // elements in use, reinitialize the - // respective matrix ${\cal F}$ to the - // right size, and integrate each entry of - // the matrix numerically as ${\cal - // F}_{{\bf k},j}=\sum_q e^{i{\bf k}\cdot - // {\bf x}}\varphi_j({\bf x}_q) - // w_q$, where $x_q$ - // are the quadrature points and $w_q$ are - // the quadrature weights. Note that the - // imaginary unit $i=\sqrt{-1}$ is obtained - // from the standard C++ classes using - // std::complex@(0,1). - - // Because we work on the unit cell, we can - // do all this work without a mapping from - // reference to real cell and consequently - // do not need the FEValues class. - for (unsigned int fe=0; fe sum = 0; - for (unsigned int q=0; q x_q = quadrature.point(q); - sum += std::exp(std::complex(0,1) * - (k_vectors[k] * x_q)) * - fe_collection[fe].shape_value(j,x_q) * - quadrature.weight(q); - } - fourier_transform_matrices[fe](k,j) - = sum / std::pow(2*numbers::PI, 1.*dim/2); - } - } + break; + } - // The next thing is to loop over all cells - // and do our work there, i.e. to locally - // do the Fourier transform and estimate - // the decay coefficient. We will use the - // following two arrays as scratch arrays - // in the loop and allocate them here to - // avoid repeated memory allocations: - std::vector > fourier_coefficients (n_fourier_modes); - Vector local_dof_values; - - // Then here is the loop: - typename hp::DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(), - endc = dof_handler.end(); - for (unsigned int index=0; cell!=endc; ++cell, ++index) - { - // Inside the loop, we first need to - // get the values of the local degrees - // of freedom (which we put into the - // local_dof_values array - // after setting it to the right size) - // and then need to compute the Fourier - // transform by multiplying this vector - // with the matrix ${\cal F}$ - // corresponding to this finite - // element. 
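(Written as a formula, the per-cell operation is simply $\hat U_{\bf k} = \sum_j {\cal F}_{{\bf k},j}\, U_j$, where the $U_j$ are the values of the cell's local degrees of freedom just extracted and ${\cal F}$ is the matrix assembled above for the cell's finite element.)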
We need to write out the - // multiplication by hand because the - // objects holding the data do not have - // vmult-like functions - // declared: - local_dof_values.reinit (cell->get_fe().dofs_per_cell); - cell->get_dof_values (solution, local_dof_values); - - for (unsigned int f=0; fget_fe().dofs_per_cell; ++i) - fourier_coefficients[f] += - fourier_transform_matrices[cell->active_fe_index()](f,i) - * - local_dof_values(i); + for (unsigned int i=0; i(numbers::PI * i, + numbers::PI * j, + numbers::PI * k)); + k_vectors_magnitude.push_back (i*i+j*j+k*k); + } + + break; } - // The next thing, as explained in the - // introduction, is that we wanted to - // only fit our exponential decay of - // Fourier coefficients to the largest - // coefficients for each possible value - // of $|{\bf k}|$. To this end, we - // create a map that for each magnitude - // $|{\bf k}|$ stores the largest $|\hat - // U_{{\bf k}}|$ found so far, i.e. we - // overwrite the existing value (or add - // it to the map) if no value for the - // current $|{\bf k}|$ exists yet, or if - // the current value is larger than the - // previously stored one: - std::map k_to_max_U_map; - for (unsigned int f=0; f ln_k (n_fourier_modes); + for (unsigned int i=0; i > > + fourier_transform_matrices (fe_collection.size()); + + // In order to compute them, we of + // course can't perform the Fourier + // transform analytically, but have + // to approximate it using + // quadrature. To this end, we use + // a quadrature formula that is + // obtained by iterating a 2-point + // Gauss formula as many times as + // the maximal exponent we use for + // the term $e^{i{\bf k}\cdot{\bf + // x}}$: + QGauss<1> base_quadrature (2); + QIterated quadrature (base_quadrature, N); + + // With this, we then loop over all finite + // elements in use, reinitialize the + // respective matrix ${\cal F}$ to the + // right size, and integrate each entry of + // the matrix numerically as ${\cal + // F}_{{\bf k},j}=\sum_q e^{i{\bf k}\cdot + // {\bf x}}\varphi_j({\bf x}_q) + // w_q$, where $x_q$ + // are the quadrature points and $w_q$ are + // the quadrature weights. Note that the + // imaginary unit $i=\sqrt{-1}$ is obtained + // from the standard C++ classes using + // std::complex@(0,1). + + // Because we work on the unit cell, we can + // do all this work without a mapping from + // reference to real cell and consequently + // do not need the FEValues class. + for (unsigned int fe=0; fe sum = 0; + for (unsigned int q=0; q x_q = quadrature.point(q); + sum += std::exp(std::complex(0,1) * + (k_vectors[k] * x_q)) * + fe_collection[fe].shape_value(j,x_q) * + quadrature.weight(q); + } + fourier_transform_matrices[fe](k,j) + = sum / std::pow(2*numbers::PI, 1.*dim/2); + } + } + + // The next thing is to loop over all cells + // and do our work there, i.e. to locally + // do the Fourier transform and estimate + // the decay coefficient. 
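  // For later reference, the least squares fit
  // performed at the end of the loop below fits
  // $\ln |\hat U_{\bf k}| \approx C-\mu\ln |{\bf k}|$
  // to the retained coefficients. Writing out the
  // normal equations of this fit yields the slope
  // @f{eqnarray*}
  //    \mu =
  //    \frac{\left(\sum_{\bf k} \ln |{\bf k}|\right)
  //          \left(\sum_{\bf k} \ln |\hat U_{\bf k}|\right)
  //          - n \sum_{\bf k} \ln |{\bf k}| \ln |\hat U_{\bf k}|}
  //         {n \sum_{\bf k} \left(\ln |{\bf k}|\right)^2
  //          - \left(\sum_{\bf k} \ln |{\bf k}|\right)^2},
  // @f}
  // where $n$ denotes the number of retained
  // Fourier modes; this is exactly the expression
  // that the variables sum_1, sum_ln_k,
  // sum_ln_k_square, sum_ln_U and sum_ln_U_ln_k
  // are used to evaluate further down.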
We will use the + // following two arrays as scratch arrays + // in the loop and allocate them here to + // avoid repeated memory allocations: + std::vector > fourier_coefficients (n_fourier_modes); + Vector local_dof_values; + + // Then here is the loop: + typename hp::DoFHandler::active_cell_iterator + cell = dof_handler.begin_active(), + endc = dof_handler.end(); + for (unsigned int index=0; cell!=endc; ++cell, ++index) + { + // Inside the loop, we first need to + // get the values of the local degrees + // of freedom (which we put into the + // local_dof_values array + // after setting it to the right size) + // and then need to compute the Fourier + // transform by multiplying this vector + // with the matrix ${\cal F}$ + // corresponding to this finite + // element. We need to write out the + // multiplication by hand because the + // objects holding the data do not have + // vmult-like functions + // declared: + local_dof_values.reinit (cell->get_fe().dofs_per_cell); + cell->get_dof_values (solution, local_dof_values); + + for (unsigned int f=0; fget_fe().dofs_per_cell; ++i) + fourier_coefficients[f] += + fourier_transform_matrices[cell->active_fe_index()](f,i) + * + local_dof_values(i); } - // With these so-computed sums, we can - // now evaluate the formula for $\mu$ - // derived in the introduction: - const double mu - = (1./(sum_1*sum_ln_k_square - sum_ln_k*sum_ln_k) - * - (sum_ln_k*sum_ln_U - sum_1*sum_ln_U_ln_k)); - - // The final step is to compute the - // Sobolev index $s=\mu-\frac d2$ and - // store it in the vector of estimated - // values for each cell: - smoothness_indicators(index) = mu - 1.*dim/2; - } + // The next thing, as explained in the + // introduction, is that we wanted to + // only fit our exponential decay of + // Fourier coefficients to the largest + // coefficients for each possible value + // of $|{\bf k}|$. To this end, we + // create a map that for each magnitude + // $|{\bf k}|$ stores the largest $|\hat + // U_{{\bf k}}|$ found so far, i.e. we + // overwrite the existing value (or add + // it to the map) if no value for the + // current $|{\bf k}|$ exists yet, or if + // the current value is larger than the + // previously stored one: + std::map k_to_max_U_map; + for (unsigned int f=0; f &smoothness_indicators) const // exceptions are thrown, thereby producing // meaningful output if anything should go // wrong: -int main () +int main () { try { + using namespace dealii; + using namespace Step27; + deallog.depth_console (0); LaplaceProblem<2> laplace_problem; @@ -1069,7 +1075,7 @@ int main () return 1; } - catch (...) + catch (...) { std::cerr << std::endl << std::endl << "----------------------------------------------------" diff --git a/deal.II/examples/step-28/step-28.cc b/deal.II/examples/step-28/step-28.cc index 7074671ccb..912b89c5d6 100644 --- a/deal.II/examples/step-28/step-28.cc +++ b/deal.II/examples/step-28/step-28.cc @@ -1,7 +1,7 @@ /* $Id$ */ /* Version: $Name: $ */ /* */ -/* Copyright (C) 2006, 2007, 2008, 2009, 2010 by the deal.II authors and Yaqi Wang */ +/* Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 by the deal.II authors and Yaqi Wang */ /* */ /* This file is subject to QPL and may not be distributed */ /* without copyright and license information. 
Please refer */ @@ -83,1793 +83,1795 @@ // The last step is as in all // previous programs: -using namespace dealii; +namespace Step28 +{ + using namespace dealii; - // @sect3{Material data} + // @sect3{Material data} - // First up, we need to define a - // class that provides material data - // (including diffusion coefficients, - // removal cross sections, scattering - // cross sections, fission cross - // sections and fission spectra) to - // the main class. - // - // The parameter to the constructor - // determines for how many energy - // groups we set up the relevant - // tables. At present, this program - // only includes data for 2 energy - // groups, but a more sophisticated - // program may be able to initialize - // the data structures for more - // groups as well, depending on how - // many energy groups are selected in - // the parameter file. - // - // For each of the different - // coefficient types, there is one - // function that returns the value of - // this coefficient for a particular - // energy group (or combination of - // energy groups, as for the - // distribution cross section - // $\chi_g\nu\Sigma_{f,g'}$ or - // scattering cross section - // $\Sigma_{s,g'\to g}$). In addition - // to the energy group or groups, - // these coefficients depend on the - // type of fuel or control rod, as - // explained in the introduction. The - // functions therefore take an - // additional parameter, @p - // material_id, that identifies the - // particular kind of rod. Within - // this program, we use - // n_materials=8 - // different kinds of rods. - // - // Except for the scattering cross - // section, each of the coefficients - // therefore can be represented as an - // entry in a two-dimensional array - // of floating point values indexed - // by the energy group number as well - // as the material ID. The Table - // class template is the ideal way to - // store such data. Finally, the - // scattering coefficient depends on - // both two energy group indices and - // therefore needs to be stored in a - // three-dimensional array, for which - // we again use the Table class, - // where this time the first template - // argument (denoting the - // dimensionality of the array) of - // course needs to be three: -class MaterialData -{ - public: - MaterialData (const unsigned int n_groups); - - double get_diffusion_coefficient (const unsigned int group, - const unsigned int material_id) const; - double get_removal_XS (const unsigned int group, - const unsigned int material_id) const; - double get_fission_XS (const unsigned int group, - const unsigned int material_id) const; - double get_fission_dist_XS (const unsigned int group_1, + // First up, we need to define a + // class that provides material data + // (including diffusion coefficients, + // removal cross sections, scattering + // cross sections, fission cross + // sections and fission spectra) to + // the main class. + // + // The parameter to the constructor + // determines for how many energy + // groups we set up the relevant + // tables. At present, this program + // only includes data for 2 energy + // groups, but a more sophisticated + // program may be able to initialize + // the data structures for more + // groups as well, depending on how + // many energy groups are selected in + // the parameter file. 
+ // + // For each of the different + // coefficient types, there is one + // function that returns the value of + // this coefficient for a particular + // energy group (or combination of + // energy groups, as for the + // distribution cross section + // $\chi_g\nu\Sigma_{f,g'}$ or + // scattering cross section + // $\Sigma_{s,g'\to g}$). In addition + // to the energy group or groups, + // these coefficients depend on the + // type of fuel or control rod, as + // explained in the introduction. The + // functions therefore take an + // additional parameter, @p + // material_id, that identifies the + // particular kind of rod. Within + // this program, we use + // n_materials=8 + // different kinds of rods. + // + // Except for the scattering cross + // section, each of the coefficients + // therefore can be represented as an + // entry in a two-dimensional array + // of floating point values indexed + // by the energy group number as well + // as the material ID. The Table + // class template is the ideal way to + // store such data. Finally, the + // scattering coefficient depends on + // both two energy group indices and + // therefore needs to be stored in a + // three-dimensional array, for which + // we again use the Table class, + // where this time the first template + // argument (denoting the + // dimensionality of the array) of + // course needs to be three: + class MaterialData + { + public: + MaterialData (const unsigned int n_groups); + + double get_diffusion_coefficient (const unsigned int group, + const unsigned int material_id) const; + double get_removal_XS (const unsigned int group, + const unsigned int material_id) const; + double get_fission_XS (const unsigned int group, + const unsigned int material_id) const; + double get_fission_dist_XS (const unsigned int group_1, + const unsigned int group_2, + const unsigned int material_id) const; + double get_scattering_XS (const unsigned int group_1, const unsigned int group_2, const unsigned int material_id) const; - double get_scattering_XS (const unsigned int group_1, - const unsigned int group_2, - const unsigned int material_id) const; - double get_fission_spectrum (const unsigned int group, - const unsigned int material_id) const; - - private: - const unsigned int n_groups; - const unsigned int n_materials; - - Table<2,double> diffusion; - Table<2,double> sigma_r; - Table<2,double> nu_sigma_f; - Table<3,double> sigma_s; - Table<2,double> chi; -}; - - // The constructor of the class is - // used to initialize all the - // material data arrays. It takes the - // number of energy groups as an - // argument (an throws an error if - // that value is not equal to two, - // since at presently only data for - // two energy groups is implemented; - // however, using this, the function - // remains flexible and extendible - // into the future). In the member - // initialization part at the - // beginning, it also resizes the - // arrays to their correct sizes. - // - // At present, material data is - // stored for 8 different types of - // material. This, as well, may - // easily be extended in the future. 
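  // As an aside, the simple access functions
  // declared above (with the exception of
  // get_fission_dist_XS, which is discussed below)
  // do nothing but check that the requested energy
  // group and material indices are valid and then
  // look the value up in the corresponding table.
  // A minimal sketch of what such a lookup could
  // look like -- this is only an illustration, not
  // code taken from the program -- is
  //
  //   double
  //   MaterialData::get_diffusion_coefficient (const unsigned int group,
  //                                            const unsigned int material_id) const
  //   {
  //     Assert (group < n_groups,
  //             ExcIndexRange (group, 0, n_groups));
  //     Assert (material_id < n_materials,
  //             ExcIndexRange (material_id, 0, n_materials));
  //
  //     return diffusion[material_id][group];
  //   }
  //
  // and correspondingly for the other tables.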
-MaterialData::MaterialData (const unsigned int n_groups) - : - n_groups (n_groups), - n_materials (8), - diffusion (n_materials, n_groups), - sigma_r (n_materials, n_groups), - nu_sigma_f (n_materials, n_groups), - sigma_s (n_materials, n_groups, n_groups), - chi (n_materials, n_groups) -{ - switch (n_groups) - { - case 2: + double get_fission_spectrum (const unsigned int group, + const unsigned int material_id) const; + + private: + const unsigned int n_groups; + const unsigned int n_materials; + + Table<2,double> diffusion; + Table<2,double> sigma_r; + Table<2,double> nu_sigma_f; + Table<3,double> sigma_s; + Table<2,double> chi; + }; + + // The constructor of the class is + // used to initialize all the + // material data arrays. It takes the + // number of energy groups as an + // argument (an throws an error if + // that value is not equal to two, + // since at presently only data for + // two energy groups is implemented; + // however, using this, the function + // remains flexible and extendible + // into the future). In the member + // initialization part at the + // beginning, it also resizes the + // arrays to their correct sizes. + // + // At present, material data is + // stored for 8 different types of + // material. This, as well, may + // easily be extended in the future. + MaterialData::MaterialData (const unsigned int n_groups) + : + n_groups (n_groups), + n_materials (8), + diffusion (n_materials, n_groups), + sigma_r (n_materials, n_groups), + nu_sigma_f (n_materials, n_groups), + sigma_s (n_materials, n_groups, n_groups), + chi (n_materials, n_groups) + { + switch (n_groups) { - for (unsigned int m=0; mEnergyGroup class} + // The function computing the fission + // distribution cross section is + // slightly different, since it + // computes its value as the product + // of two other coefficients. We + // don't need to check arguments + // here, since this already happens + // when we call the two other + // functions involved, even though it + // would probably not hurt either: + double + MaterialData::get_fission_dist_XS (const unsigned int group_1, + const unsigned int group_2, + const unsigned int material_id) const + { + return (get_fission_spectrum(group_1, material_id) * + get_fission_XS(group_2, material_id)); + } - // The first interesting class is the - // one that contains everything that - // is specific to a single energy - // group. To group things that belong - // together into individual objects, - // we declare a structure that holds - // the Triangulation and DoFHandler - // objects for the mesh used for a - // single energy group, and a number - // of other objects and member - // functions that we will discuss in - // the following sections. - // - // The main reason for this class is - // as follows: for both the forward - // problem (with a specified right - // hand side) as well as for the - // eigenvalue problem, one typically - // solves a sequence of problems for - // a single energy group each, rather - // than the fully coupled - // problem. This becomes - // understandable once one realizes - // that the system matrix for a - // single energy group is symmetric - // and positive definite (it is - // simply a diffusion operator), - // whereas the matrix for the fully - // coupled problem is generally - // nonsymmetric and not definite. It - // is also very large and quite full - // if more than a few energy groups - // are involved. 
- // - // Let us first look at the equation - // to solve in the case of an - // external right hand side (for the time - // independent case): - // @f{eqnarray*} - // -\nabla \cdot(D_g(x) \nabla \phi_g(x)) - // + - // \Sigma_{r,g}(x)\phi_g(x) - // = - // \chi_g\sum_{g'=1}^G\nu\Sigma_{f,g'}(x)\phi_{g'}(x) - // + - // \sum_{g'\ne g}\Sigma_{s,g'\to g}(x)\phi_{g'}(x) - // + - // s_{\mathrm{ext},g}(x) - // @f} - // - // We would typically solve this - // equation by moving all the terms - // on the right hand side with $g'=g$ - // to the left hand side, and solving - // for $\phi_g$. Of course, we don't - // know $\phi_{g'}$ yet, since the - // equations for those variables - // include right hand side terms - // involving $\phi_g$. What one - // typically does in such situations - // is to iterate: compute - // @f{eqnarray*} - // -\nabla \cdot(D_g(x) \nabla \phi^{(n)}_g(x)) - // &+& - // \Sigma_{r,g}(x)\phi^{(n)}_g(x) - // \\ &=& - // \chi_g\sum_{g'=1}^{g-1}\nu\Sigma_{f,g'}(x)\phi^{(n)}_{g'}(x) - // + - // \chi_g\sum_{g'=g}^G\nu\Sigma_{f,g'}(x)\phi^{(n-1)}_{g'}(x) - // + - // \sum_{g'\ne g, g'g}\Sigma_{s,g'\to g}(x)\phi^{(n-1)}_{g'}(x) - // + - // s_{\mathrm{ext},g}(x) - // @f} - // - // In other words, we solve the - // equation one by one, using values - // for $\phi_{g'}$ from the previous - // iteration $n-1$ if $g'\ge g$ and - // already computed values for - // $\phi_{g'}$ from the present - // iteration if $g' -class EnergyGroup -{ - public: - // @sect5{Public member functions} - // - // The class has a good number of - // public member functions, since - // its the way it operates is - // controlled from the outside, - // and therefore all functions - // that do something significant - // need to be called from another - // class. Let's start off with - // book-keeping: the class - // obviously needs to know which - // energy group it represents, - // which material data to use, - // and from what coarse grid to - // start. The constructor takes - // this information and - // initializes the relevant - // member variables with that - // (see below). - // - // Then we also need functions - // that set up the linear system, - // i.e. correctly size the matrix - // and its sparsity pattern, etc, - // given a finite element object - // to use. The - // setup_linear_system - // function does that. Finally, - // for this initial block, there - // are two functions that return - // the number of active cells and - // degrees of freedom used in - // this object -- using this, we - // can make the triangulation and - // DoF handler member variables - // private, and do not have to - // grant external use to it, - // enhancing encapsulation: - EnergyGroup (const unsigned int group, - const MaterialData &material_data, - const Triangulation &coarse_grid, - const FiniteElement &fe); - - void setup_linear_system (); - - unsigned int n_active_cells () const; - unsigned int n_dofs () const; - - // Then there are functions that - // assemble the linear system for - // each iteration and the present - // energy group. Note that the - // matrix is independent of the - // iteration number, so only has - // to be computed once for each - // refinement cycle. The - // situation is a bit more - // involved for the right hand - // side that has to be updated in - // each inverse power iteration, - // and that is further - // complicated by the fact that - // computing it may involve - // several different meshes as - // explained in the - // introduction. 
To make things - // more flexible with regard to - // solving the forward or the - // eigenvalue problem, we split - // the computation of the right - // hand side into a function that - // assembles the extraneous - // source and in-group - // contributions (which we will - // call with a zero function as - // source terms for the - // eigenvalue problem) and one - // that computes contributions to - // the right hand side from - // another energy group: - void assemble_system_matrix (); - void assemble_ingroup_rhs (const Function &extraneous_source); - void assemble_cross_group_rhs (const EnergyGroup &g_prime); - - // Next we need a set of - // functions that actually - // compute the solution of a - // linear system, and do - // something with it (such as - // computing the fission source - // contribution mentioned in the - // introduction, writing - // graphical information to an - // output file, computing error - // indicators, or actually - // refining the grid based on - // these criteria and thresholds - // for refinement and - // coarsening). All these - // functions will later be called - // from the driver class - // NeutronDiffusionProblem, - // or any other class you may - // want to implement to solve a - // problem involving the neutron - // flux equations: - void solve (); - - double get_fission_source () const; - - void output_results (const unsigned int cycle) const; - - void estimate_errors (Vector &error_indicators) const; - - void refine_grid (const Vector &error_indicators, - const double refine_threshold, - const double coarsen_threshold); - - // @sect5{Public data members} - // - // As is good practice in object - // oriented programming, we hide - // most data members by making - // them private. However, we have - // to grant the class that drives - // the process access to the - // solution vector as well as the - // solution of the previous - // iteration, since in the power - // iteration, the solution vector - // is scaled in every iteration - // by the present guess of the - // eigenvalue we are looking for: - public: - - Vector solution; - Vector solution_old; - - - // @sect5{Private data members} - // - // The rest of the data members - // are private. Compared to all - // the previous tutorial - // programs, the only new data - // members are an integer storing - // which energy group this object - // represents, and a reference to - // the material data object that - // this object's constructor gets - // passed from the driver - // class. Likewise, the - // constructor gets a reference - // to the finite element object - // we are to use. - // - // Finally, we have to apply - // boundary values to the linear - // system in each iteration, - // i.e. quite frequently. Rather - // than interpolating them every - // time, we interpolate them once - // on each new mesh and then - // store them along with all the - // other data of this class: - private: - - const unsigned int group; - const MaterialData &material_data; - - Triangulation triangulation; - const FiniteElement &fe; - DoFHandler dof_handler; - - SparsityPattern sparsity_pattern; - SparseMatrix system_matrix; - - Vector system_rhs; - - std::map boundary_values; - ConstraintMatrix hanging_node_constraints; - - - // @sect5{Private member functionss} - // - // There is one private member - // function in this class. It - // recursively walks over cells - // of two meshes to compute the - // cross-group right hand side - // terms. 
The algorithm for this - // is explained in the - // introduction to this - // program. The arguments to this - // function are a reference to an - // object representing the energy - // group against which we want to - // integrate a right hand side - // term, an iterator to a cell of - // the mesh used for the present - // energy group, an iterator to a - // corresponding cell on the - // other mesh, and the matrix - // that interpolates the degrees - // of freedom from the coarser of - // the two cells to the finer - // one: - private: - - void - assemble_cross_group_rhs_recursive (const EnergyGroup &g_prime, - const typename DoFHandler::cell_iterator &cell_g, - const typename DoFHandler::cell_iterator &cell_g_prime, - const FullMatrix prolongation_matrix); -}; - - - // @sect4{Implementation of the EnergyGroup class} - - // The first few functions of this - // class are mostly - // self-explanatory. The constructor - // only sets a few data members and - // creates a copy of the given - // triangulation as the base for the - // triangulation used for this energy - // group. The next two functions - // simply return data from private - // data members, thereby enabling us - // to make these data members - // private. -template -EnergyGroup::EnergyGroup (const unsigned int group, - const MaterialData &material_data, - const Triangulation &coarse_grid, - const FiniteElement &fe) - : - group (group), - material_data (material_data), - fe (fe), - dof_handler (triangulation) -{ - triangulation.copy_triangulation (coarse_grid); - dof_handler.distribute_dofs (fe); -} + // @sect3{The EnergyGroup class} + // The first interesting class is the + // one that contains everything that + // is specific to a single energy + // group. To group things that belong + // together into individual objects, + // we declare a structure that holds + // the Triangulation and DoFHandler + // objects for the mesh used for a + // single energy group, and a number + // of other objects and member + // functions that we will discuss in + // the following sections. + // + // The main reason for this class is + // as follows: for both the forward + // problem (with a specified right + // hand side) as well as for the + // eigenvalue problem, one typically + // solves a sequence of problems for + // a single energy group each, rather + // than the fully coupled + // problem. This becomes + // understandable once one realizes + // that the system matrix for a + // single energy group is symmetric + // and positive definite (it is + // simply a diffusion operator), + // whereas the matrix for the fully + // coupled problem is generally + // nonsymmetric and not definite. It + // is also very large and quite full + // if more than a few energy groups + // are involved. + // + // Let us first look at the equation + // to solve in the case of an + // external right hand side (for the time + // independent case): + // @f{eqnarray*} + // -\nabla \cdot(D_g(x) \nabla \phi_g(x)) + // + + // \Sigma_{r,g}(x)\phi_g(x) + // = + // \chi_g\sum_{g'=1}^G\nu\Sigma_{f,g'}(x)\phi_{g'}(x) + // + + // \sum_{g'\ne g}\Sigma_{s,g'\to g}(x)\phi_{g'}(x) + // + + // s_{\mathrm{ext},g}(x) + // @f} + // + // We would typically solve this + // equation by moving all the terms + // on the right hand side with $g'=g$ + // to the left hand side, and solving + // for $\phi_g$. Of course, we don't + // know $\phi_{g'}$ yet, since the + // equations for those variables + // include right hand side terms + // involving $\phi_g$. 
What one + // typically does in such situations + // is to iterate: compute + // @f{eqnarray*} + // -\nabla \cdot(D_g(x) \nabla \phi^{(n)}_g(x)) + // &+& + // \Sigma_{r,g}(x)\phi^{(n)}_g(x) + // \\ &=& + // \chi_g\sum_{g'=1}^{g-1}\nu\Sigma_{f,g'}(x)\phi^{(n)}_{g'}(x) + // + + // \chi_g\sum_{g'=g}^G\nu\Sigma_{f,g'}(x)\phi^{(n-1)}_{g'}(x) + // + + // \sum_{g'\ne g, g'g}\Sigma_{s,g'\to g}(x)\phi^{(n-1)}_{g'}(x) + // + + // s_{\mathrm{ext},g}(x) + // @f} + // + // In other words, we solve the + // equation one by one, using values + // for $\phi_{g'}$ from the previous + // iteration $n-1$ if $g'\ge g$ and + // already computed values for + // $\phi_{g'}$ from the present + // iteration if $g' + class EnergyGroup + { + public: + + // @sect5{Public member functions} + // + // The class has a good number of + // public member functions, since + // its the way it operates is + // controlled from the outside, + // and therefore all functions + // that do something significant + // need to be called from another + // class. Let's start off with + // book-keeping: the class + // obviously needs to know which + // energy group it represents, + // which material data to use, + // and from what coarse grid to + // start. The constructor takes + // this information and + // initializes the relevant + // member variables with that + // (see below). + // + // Then we also need functions + // that set up the linear system, + // i.e. correctly size the matrix + // and its sparsity pattern, etc, + // given a finite element object + // to use. The + // setup_linear_system + // function does that. Finally, + // for this initial block, there + // are two functions that return + // the number of active cells and + // degrees of freedom used in + // this object -- using this, we + // can make the triangulation and + // DoF handler member variables + // private, and do not have to + // grant external use to it, + // enhancing encapsulation: + EnergyGroup (const unsigned int group, + const MaterialData &material_data, + const Triangulation &coarse_grid, + const FiniteElement &fe); + + void setup_linear_system (); + + unsigned int n_active_cells () const; + unsigned int n_dofs () const; + + // Then there are functions that + // assemble the linear system for + // each iteration and the present + // energy group. Note that the + // matrix is independent of the + // iteration number, so only has + // to be computed once for each + // refinement cycle. The + // situation is a bit more + // involved for the right hand + // side that has to be updated in + // each inverse power iteration, + // and that is further + // complicated by the fact that + // computing it may involve + // several different meshes as + // explained in the + // introduction. 
To make things + // more flexible with regard to + // solving the forward or the + // eigenvalue problem, we split + // the computation of the right + // hand side into a function that + // assembles the extraneous + // source and in-group + // contributions (which we will + // call with a zero function as + // source terms for the + // eigenvalue problem) and one + // that computes contributions to + // the right hand side from + // another energy group: + void assemble_system_matrix (); + void assemble_ingroup_rhs (const Function &extraneous_source); + void assemble_cross_group_rhs (const EnergyGroup &g_prime); + + // Next we need a set of + // functions that actually + // compute the solution of a + // linear system, and do + // something with it (such as + // computing the fission source + // contribution mentioned in the + // introduction, writing + // graphical information to an + // output file, computing error + // indicators, or actually + // refining the grid based on + // these criteria and thresholds + // for refinement and + // coarsening). All these + // functions will later be called + // from the driver class + // NeutronDiffusionProblem, + // or any other class you may + // want to implement to solve a + // problem involving the neutron + // flux equations: + void solve (); + + double get_fission_source () const; + + void output_results (const unsigned int cycle) const; + + void estimate_errors (Vector &error_indicators) const; + + void refine_grid (const Vector &error_indicators, + const double refine_threshold, + const double coarsen_threshold); + + // @sect5{Public data members} + // + // As is good practice in object + // oriented programming, we hide + // most data members by making + // them private. However, we have + // to grant the class that drives + // the process access to the + // solution vector as well as the + // solution of the previous + // iteration, since in the power + // iteration, the solution vector + // is scaled in every iteration + // by the present guess of the + // eigenvalue we are looking for: + public: + + Vector solution; + Vector solution_old; + + + // @sect5{Private data members} + // + // The rest of the data members + // are private. Compared to all + // the previous tutorial + // programs, the only new data + // members are an integer storing + // which energy group this object + // represents, and a reference to + // the material data object that + // this object's constructor gets + // passed from the driver + // class. Likewise, the + // constructor gets a reference + // to the finite element object + // we are to use. + // + // Finally, we have to apply + // boundary values to the linear + // system in each iteration, + // i.e. quite frequently. Rather + // than interpolating them every + // time, we interpolate them once + // on each new mesh and then + // store them along with all the + // other data of this class: + private: + + const unsigned int group; + const MaterialData &material_data; + + Triangulation triangulation; + const FiniteElement &fe; + DoFHandler dof_handler; + + SparsityPattern sparsity_pattern; + SparseMatrix system_matrix; + + Vector system_rhs; + + std::map boundary_values; + ConstraintMatrix hanging_node_constraints; + + + // @sect5{Private member functionss} + // + // There is one private member + // function in this class. It + // recursively walks over cells + // of two meshes to compute the + // cross-group right hand side + // terms. 
The algorithm for this + // is explained in the + // introduction to this + // program. The arguments to this + // function are a reference to an + // object representing the energy + // group against which we want to + // integrate a right hand side + // term, an iterator to a cell of + // the mesh used for the present + // energy group, an iterator to a + // corresponding cell on the + // other mesh, and the matrix + // that interpolates the degrees + // of freedom from the coarser of + // the two cells to the finer + // one: + private: + + void + assemble_cross_group_rhs_recursive (const EnergyGroup &g_prime, + const typename DoFHandler::cell_iterator &cell_g, + const typename DoFHandler::cell_iterator &cell_g_prime, + const FullMatrix prolongation_matrix); + }; + + + // @sect4{Implementation of the EnergyGroup class} + + // The first few functions of this + // class are mostly + // self-explanatory. The constructor + // only sets a few data members and + // creates a copy of the given + // triangulation as the base for the + // triangulation used for this energy + // group. The next two functions + // simply return data from private + // data members, thereby enabling us + // to make these data members + // private. + template + EnergyGroup::EnergyGroup (const unsigned int group, + const MaterialData &material_data, + const Triangulation &coarse_grid, + const FiniteElement &fe) + : + group (group), + material_data (material_data), + fe (fe), + dof_handler (triangulation) + { + triangulation.copy_triangulation (coarse_grid); + dof_handler.distribute_dofs (fe); + } -template -unsigned int -EnergyGroup::n_active_cells () const -{ - return triangulation.n_active_cells (); -} + template + unsigned int + EnergyGroup::n_active_cells () const + { + return triangulation.n_active_cells (); + } -template -unsigned int -EnergyGroup::n_dofs () const -{ - return dof_handler.n_dofs (); -} + template + unsigned int + EnergyGroup::n_dofs () const + { + return dof_handler.n_dofs (); + } - // @sect5{EnergyGroup::setup_linear_system} - // - // The first "real" function is the - // one that sets up the mesh, - // matrices, etc, on the new mesh or - // after mesh refinement. We use this - // function to initialize sparse - // system matrices, and the right - // hand side vector. If the solution - // vector has never been set before - // (as indicated by a zero size), we - // also initialize it and set it to a - // default value. We don't do that if - // it already has a non-zero size - // (i.e. this function is called - // after mesh refinement) since in - // that case we want to preserve the - // solution across mesh refinement - // (something we do in the - // EnergyGroup::refine_grid - // function). -template -void -EnergyGroup::setup_linear_system () -{ - const unsigned int n_dofs = dof_handler.n_dofs(); - hanging_node_constraints.clear (); - DoFTools::make_hanging_node_constraints (dof_handler, - hanging_node_constraints); - hanging_node_constraints.close (); - system_matrix.clear (); + // @sect5{EnergyGroup::setup_linear_system} + // + // The first "real" function is the + // one that sets up the mesh, + // matrices, etc, on the new mesh or + // after mesh refinement. We use this + // function to initialize sparse + // system matrices, and the right + // hand side vector. If the solution + // vector has never been set before + // (as indicated by a zero size), we + // also initialize it and set it to a + // default value. We don't do that if + // it already has a non-zero size + // (i.e. 
this function is called + // after mesh refinement) since in + // that case we want to preserve the + // solution across mesh refinement + // (something we do in the + // EnergyGroup::refine_grid + // function). + template + void + EnergyGroup::setup_linear_system () + { + const unsigned int n_dofs = dof_handler.n_dofs(); - sparsity_pattern.reinit (n_dofs, n_dofs, - dof_handler.max_couplings_between_dofs()); - DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); - hanging_node_constraints.condense (sparsity_pattern); - sparsity_pattern.compress (); + hanging_node_constraints.clear (); + DoFTools::make_hanging_node_constraints (dof_handler, + hanging_node_constraints); + hanging_node_constraints.close (); - system_matrix.reinit (sparsity_pattern); + system_matrix.clear (); - system_rhs.reinit (n_dofs); + sparsity_pattern.reinit (n_dofs, n_dofs, + dof_handler.max_couplings_between_dofs()); + DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); + hanging_node_constraints.condense (sparsity_pattern); + sparsity_pattern.compress (); - if (solution.size() == 0) - { - solution.reinit (n_dofs); - solution_old.reinit(n_dofs); - solution_old = 1.0; - solution = solution_old; - } + system_matrix.reinit (sparsity_pattern); + system_rhs.reinit (n_dofs); - // At the end of this function, we - // update the list of boundary - // nodes and their values, by first - // clearing this list and the - // re-interpolating boundary values - // (remember that this function is - // called after first setting up - // the mesh, and each time after - // mesh refinement). - // - // To understand the code, it is - // necessary to realize that we - // create the mesh using the - // GridGenerator::subdivided_hyper_rectangle - // function (in - // NeutronDiffusionProblem::initialize_problem) - // where we set the last parameter - // to true. This means that - // boundaries of the domain are - // "colored", i.e. the four (or - // six, in 3d) sides of the domain - // are assigned different boundary - // indicators. As it turns out, the - // bottom boundary gets indicator - // zero, the top one boundary - // indicator one, and left and - // right boundaries get indicators - // two and three, respectively. - // - // In this program, we simulate - // only one, namely the top right, - // quarter of a reactor. That is, - // we want to interpolate boundary - // conditions only on the top and - // right boundaries, while do - // nothing on the bottom and left - // boundaries (i.e. impose natural, - // no-flux Neumann boundary - // conditions). This is most easily - // generalized to arbitrary - // dimension by saying that we want - // to interpolate on those - // boundaries with indicators 1, 3, - // ..., which we do in the - // following loop (note that calls - // to - // VectorTools::interpolate_boundary_values - // are additive, i.e. they do not - // first clear the boundary value - // map): - boundary_values.clear(); - - for (unsigned int i=0; i(), - boundary_values); -} + if (solution.size() == 0) + { + solution.reinit (n_dofs); + solution_old.reinit(n_dofs); + solution_old = 1.0; + solution = solution_old; + } + // At the end of this function, we + // update the list of boundary + // nodes and their values, by first + // clearing this list and the + // re-interpolating boundary values + // (remember that this function is + // called after first setting up + // the mesh, and each time after + // mesh refinement). 
+ // + // To understand the code, it is + // necessary to realize that we + // create the mesh using the + // GridGenerator::subdivided_hyper_rectangle + // function (in + // NeutronDiffusionProblem::initialize_problem) + // where we set the last parameter + // to true. This means that + // boundaries of the domain are + // "colored", i.e. the four (or + // six, in 3d) sides of the domain + // are assigned different boundary + // indicators. As it turns out, the + // bottom boundary gets indicator + // zero, the top one boundary + // indicator one, and left and + // right boundaries get indicators + // two and three, respectively. + // + // In this program, we simulate + // only one, namely the top right, + // quarter of a reactor. That is, + // we want to interpolate boundary + // conditions only on the top and + // right boundaries, while do + // nothing on the bottom and left + // boundaries (i.e. impose natural, + // no-flux Neumann boundary + // conditions). This is most easily + // generalized to arbitrary + // dimension by saying that we want + // to interpolate on those + // boundaries with indicators 1, 3, + // ..., which we do in the + // following loop (note that calls + // to + // VectorTools::interpolate_boundary_values + // are additive, i.e. they do not + // first clear the boundary value + // map): + boundary_values.clear(); + + for (unsigned int i=0; i(), + boundary_values); + } - // @sect5{EnergyGroup::assemble_system_matrix} - // - // Next we need functions assembling - // the system matrix and right hand - // sides. Assembling the matrix is - // straightforward given the - // equations outlined in the - // introduction as well as what we've - // seen in previous example - // programs. Note the use of - // cell->material_id() to get at - // the kind of material from which a - // cell is made up of. Note also how - // we set the order of the quadrature - // formula so that it is always - // appropriate for the finite element - // in use. - // - // Finally, note that since we only - // assemble the system matrix here, - // we can't yet eliminate boundary - // values (we need the right hand - // side vector for this). We defer - // this to the EnergyGroup::solve - // function, at which point all the - // information is available. -template -void -EnergyGroup::assemble_system_matrix () -{ - const QGauss quadrature_formula(fe.degree + 1); - FEValues fe_values (fe, quadrature_formula, - update_values | update_gradients | - update_JxW_values); - const unsigned int dofs_per_cell = fe.dofs_per_cell; - const unsigned int n_q_points = quadrature_formula.size(); + // @sect5{EnergyGroup::assemble_system_matrix} + // + // Next we need functions assembling + // the system matrix and right hand + // sides. Assembling the matrix is + // straightforward given the + // equations outlined in the + // introduction as well as what we've + // seen in previous example + // programs. Note the use of + // cell->material_id() to get at + // the kind of material from which a + // cell is made up of. Note also how + // we set the order of the quadrature + // formula so that it is always + // appropriate for the finite element + // in use. + // + // Finally, note that since we only + // assemble the system matrix here, + // we can't yet eliminate boundary + // values (we need the right hand + // side vector for this). We defer + // this to the EnergyGroup::solve + // function, at which point all the + // information is available. 
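  // Stated as a formula, the local matrix computed
  // in the following function has the entries
  // @f{eqnarray*}
  //    A^K_{ij} = \sum_q \left( D_g \,
  //    \nabla \varphi_i(x_q) \cdot \nabla \varphi_j(x_q)
  //    + \Sigma_{r,g} \, \varphi_i(x_q) \varphi_j(x_q)
  //    \right) w_q,
  // @f}
  // where $x_q$ and $w_q$ are the quadrature points
  // and JxW values, and $D_g$ and $\Sigma_{r,g}$
  // are the diffusion coefficient and removal cross
  // section of the material the present cell is
  // made of.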
+ template + void + EnergyGroup::assemble_system_matrix () + { + const QGauss quadrature_formula(fe.degree + 1); - FullMatrix cell_matrix (dofs_per_cell, dofs_per_cell); - Vector cell_rhs (dofs_per_cell); + FEValues fe_values (fe, quadrature_formula, + update_values | update_gradients | + update_JxW_values); - std::vector local_dof_indices (dofs_per_cell); + const unsigned int dofs_per_cell = fe.dofs_per_cell; + const unsigned int n_q_points = quadrature_formula.size(); - typename DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(), - endc = dof_handler.end(); + FullMatrix cell_matrix (dofs_per_cell, dofs_per_cell); + Vector cell_rhs (dofs_per_cell); - for (; cell!=endc; ++cell) - { - cell_matrix = 0; + std::vector local_dof_indices (dofs_per_cell); - fe_values.reinit (cell); + typename DoFHandler::active_cell_iterator + cell = dof_handler.begin_active(), + endc = dof_handler.end(); - const double diffusion_coefficient - = material_data.get_diffusion_coefficient (group, cell->material_id()); - const double removal_XS - = material_data.get_removal_XS (group,cell->material_id()); + for (; cell!=endc; ++cell) + { + cell_matrix = 0; + + fe_values.reinit (cell); + + const double diffusion_coefficient + = material_data.get_diffusion_coefficient (group, cell->material_id()); + const double removal_XS + = material_data.get_removal_XS (group,cell->material_id()); + + for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); - for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); - - for (unsigned int i=0; iEnergyGroup::assemble_ingroup_rhs} - // - // As explained in the documentation - // of the EnergyGroup class, we - // split assembling the right hand - // side into two parts: the ingroup - // and the cross-group - // couplings. First, we need a - // function to assemble the right - // hand side of one specific group - // here, i.e. including an extraneous - // source (that we will set to zero - // for the eigenvalue problem) as - // well as the ingroup fission - // contributions. (In-group - // scattering has already been - // accounted for with the definition - // of removal cross section.) The - // function's workings are pretty - // standard as far as assembling - // right hand sides go, and therefore - // does not require more comments - // except that we mention that the - // right hand side vector is set to - // zero at the beginning of the - // function -- something we are not - // going to do for the cross-group - // terms that simply add to the right - // hand side vector. -template -void EnergyGroup::assemble_ingroup_rhs (const Function &extraneous_source) -{ - system_rhs.reinit (dof_handler.n_dofs()); + // @sect5{EnergyGroup::assemble_ingroup_rhs} + // + // As explained in the documentation + // of the EnergyGroup class, we + // split assembling the right hand + // side into two parts: the ingroup + // and the cross-group + // couplings. First, we need a + // function to assemble the right + // hand side of one specific group + // here, i.e. including an extraneous + // source (that we will set to zero + // for the eigenvalue problem) as + // well as the ingroup fission + // contributions. (In-group + // scattering has already been + // accounted for with the definition + // of removal cross section.) 
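  // In formulas, the local contribution that is
  // accumulated on each cell $K$ in this function
  // reads
  // @f{eqnarray*}
  //    F^K_i = \sum_q \left( s_{\mathrm{ext},g}(x_q)
  //    + \chi_g\nu\Sigma_{f,g} \,
  //    \phi_g^{(n-1)}(x_q) \right)
  //    \varphi_i(x_q) \, w_q,
  // @f}
  // with $\phi_g^{(n-1)}$ the previous iterate that
  // is stored in solution_old.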
The + // function's workings are pretty + // standard as far as assembling + // right hand sides go, and therefore + // does not require more comments + // except that we mention that the + // right hand side vector is set to + // zero at the beginning of the + // function -- something we are not + // going to do for the cross-group + // terms that simply add to the right + // hand side vector. + template + void EnergyGroup::assemble_ingroup_rhs (const Function &extraneous_source) + { + system_rhs.reinit (dof_handler.n_dofs()); - const QGauss quadrature_formula (fe.degree + 1); + const QGauss quadrature_formula (fe.degree + 1); - const unsigned int dofs_per_cell = fe.dofs_per_cell; - const unsigned int n_q_points = quadrature_formula.size(); + const unsigned int dofs_per_cell = fe.dofs_per_cell; + const unsigned int n_q_points = quadrature_formula.size(); - FEValues fe_values (fe, quadrature_formula, - update_values | update_quadrature_points | - update_JxW_values); + FEValues fe_values (fe, quadrature_formula, + update_values | update_quadrature_points | + update_JxW_values); - Vector cell_rhs (dofs_per_cell); - std::vector extraneous_source_values (n_q_points); - std::vector solution_old_values (n_q_points); + Vector cell_rhs (dofs_per_cell); + std::vector extraneous_source_values (n_q_points); + std::vector solution_old_values (n_q_points); - std::vector local_dof_indices (dofs_per_cell); + std::vector local_dof_indices (dofs_per_cell); - typename DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(), - endc = dof_handler.end(); + typename DoFHandler::active_cell_iterator + cell = dof_handler.begin_active(), + endc = dof_handler.end(); - for (; cell!=endc; ++cell) - { - cell_rhs = 0; + for (; cell!=endc; ++cell) + { + cell_rhs = 0; + + fe_values.reinit (cell); - fe_values.reinit (cell); + const double fission_dist_XS + = material_data.get_fission_dist_XS (group, group, cell->material_id()); - const double fission_dist_XS - = material_data.get_fission_dist_XS (group, group, cell->material_id()); + extraneous_source.value_list (fe_values.get_quadrature_points(), + extraneous_source_values); - extraneous_source.value_list (fe_values.get_quadrature_points(), - extraneous_source_values); + fe_values.get_function_values (solution_old, solution_old_values); - fe_values.get_function_values (solution_old, solution_old_values); + cell->get_dof_indices (local_dof_indices); - cell->get_dof_indices (local_dof_indices); + for (unsigned int q_point=0; q_pointEnergyGroup::assemble_cross_group_rhs} - // - // The more interesting function for - // assembling the right hand side - // vector for the equation of a - // single energy group is the one - // that couples energy group $g$ and - // $g'$. As explained in the - // introduction, we first have to - // find the set of cells common to - // the meshes of the two energy - // groups. First we call - // get_finest_common_cells to - // obtain this list of pairs of - // common cells from both - // meshes. Both cells in a pair may - // not be active but at least one of - // them is. We then hand each of - // these cell pairs off to a function - // tha computes the right hand side - // terms recursively. - // - // Note that ingroup coupling is - // handled already before, so we exit - // the function early if $g=g'$. 
-template -void EnergyGroup::assemble_cross_group_rhs (const EnergyGroup &g_prime) -{ - if (group == g_prime.group) - return; + // @sect5{EnergyGroup::assemble_cross_group_rhs} + // + // The more interesting function for + // assembling the right hand side + // vector for the equation of a + // single energy group is the one + // that couples energy group $g$ and + // $g'$. As explained in the + // introduction, we first have to + // find the set of cells common to + // the meshes of the two energy + // groups. First we call + // get_finest_common_cells to + // obtain this list of pairs of + // common cells from both + // meshes. Both cells in a pair may + // not be active but at least one of + // them is. We then hand each of + // these cell pairs off to a function + // tha computes the right hand side + // terms recursively. + // + // Note that ingroup coupling is + // handled already before, so we exit + // the function early if $g=g'$. + template + void EnergyGroup::assemble_cross_group_rhs (const EnergyGroup &g_prime) + { + if (group == g_prime.group) + return; - const std::list::cell_iterator, - typename DoFHandler::cell_iterator> > - cell_list - = GridTools::get_finest_common_cells (dof_handler, - g_prime.dof_handler); + const std::list::cell_iterator, + typename DoFHandler::cell_iterator> > + cell_list + = GridTools::get_finest_common_cells (dof_handler, + g_prime.dof_handler); - typename std::list::cell_iterator, - typename DoFHandler::cell_iterator> > - ::const_iterator - cell_iter = cell_list.begin(); + typename std::list::cell_iterator, + typename DoFHandler::cell_iterator> > + ::const_iterator + cell_iter = cell_list.begin(); - for (; cell_iter!=cell_list.end(); ++cell_iter) - { - FullMatrix unit_matrix (fe.dofs_per_cell); - for (unsigned int i=0; ifirst, - cell_iter->second, - unit_matrix); - } -} + for (; cell_iter!=cell_list.end(); ++cell_iter) + { + FullMatrix unit_matrix (fe.dofs_per_cell); + for (unsigned int i=0; ifirst, + cell_iter->second, + unit_matrix); + } + } - // @sect5{EnergyGroup::assemble_cross_group_rhs_recursive} - // - // This is finally the function that - // handles assembling right hand side - // terms on potentially different - // meshes recursively, using the - // algorithm described in the - // introduction. The function takes a - // reference to the object - // representing energy group $g'$, as - // well as iterators to corresponding - // cells in the meshes for energy - // groups $g$ and $g'$. At first, - // i.e. when this function is called - // from the one above, these two - // cells will be matching cells on - // two meshes; however, one of the - // two may be further refined, and we - // will call the function recursively - // with one of the two iterators - // replaced by one of the children of - // the original cell. - // - // The last argument is the matrix - // product matrix $B_{c^{(k)}}^T - // \cdots B_{c'}^T B_c^T$ from the - // introduction that interpolates - // from the coarser of the two cells - // to the finer one. If the two cells - // match, then this is the identity - // matrix -- exactly what we pass to - // this function initially. - // - // The function has to consider two - // cases: that both of the two cells - // are not further refined, i.e. 
have - // no children, in which case we can - // finally assemble the right hand - // side contributions of this pair of - // cells; and that one of the two - // cells is further refined, in which - // case we have to keep recursing by - // looping over the children of the - // one cell that is not active. These - // two cases will be discussed below: -template -void -EnergyGroup:: -assemble_cross_group_rhs_recursive (const EnergyGroup &g_prime, - const typename DoFHandler::cell_iterator &cell_g, - const typename DoFHandler::cell_iterator &cell_g_prime, - const FullMatrix prolongation_matrix) -{ - // The first case is that both - // cells are no further refined. In - // that case, we can assemble the - // relevant terms (see the - // introduction). This involves - // assembling the mass matrix on - // the finer of the two cells (in - // fact there are two mass matrices - // with different coefficients, one - // for the fission distribution - // cross section - // $\chi_g\nu\Sigma_{f,g'}$ and one - // for the scattering cross section - // $\Sigma_{s,g'\to g}$). This is - // straight forward, but note how - // we determine which of the two - // cells is ther finer one by - // looking at the refinement level - // of the two cells: - if (!cell_g->has_children() && !cell_g_prime->has_children()) - { - const QGauss quadrature_formula (fe.degree+1); - const unsigned int n_q_points = quadrature_formula.size(); + // @sect5{EnergyGroup::assemble_cross_group_rhs_recursive} + // + // This is finally the function that + // handles assembling right hand side + // terms on potentially different + // meshes recursively, using the + // algorithm described in the + // introduction. The function takes a + // reference to the object + // representing energy group $g'$, as + // well as iterators to corresponding + // cells in the meshes for energy + // groups $g$ and $g'$. At first, + // i.e. when this function is called + // from the one above, these two + // cells will be matching cells on + // two meshes; however, one of the + // two may be further refined, and we + // will call the function recursively + // with one of the two iterators + // replaced by one of the children of + // the original cell. + // + // The last argument is the matrix + // product matrix $B_{c^{(k)}}^T + // \cdots B_{c'}^T B_c^T$ from the + // introduction that interpolates + // from the coarser of the two cells + // to the finer one. If the two cells + // match, then this is the identity + // matrix -- exactly what we pass to + // this function initially. + // + // The function has to consider two + // cases: that both of the two cells + // are not further refined, i.e. have + // no children, in which case we can + // finally assemble the right hand + // side contributions of this pair of + // cells; and that one of the two + // cells is further refined, in which + // case we have to keep recursing by + // looping over the children of the + // one cell that is not active. These + // two cases will be discussed below: + template + void + EnergyGroup:: + assemble_cross_group_rhs_recursive (const EnergyGroup &g_prime, + const typename DoFHandler::cell_iterator &cell_g, + const typename DoFHandler::cell_iterator &cell_g_prime, + const FullMatrix prolongation_matrix) + { + // The first case is that both + // cells are no further refined. In + // that case, we can assemble the + // relevant terms (see the + // introduction). 
This involves + // assembling the mass matrix on + // the finer of the two cells (in + // fact there are two mass matrices + // with different coefficients, one + // for the fission distribution + // cross section + // $\chi_g\nu\Sigma_{f,g'}$ and one + // for the scattering cross section + // $\Sigma_{s,g'\to g}$). This is + // straight forward, but note how + // we determine which of the two + // cells is ther finer one by + // looking at the refinement level + // of the two cells: + if (!cell_g->has_children() && !cell_g_prime->has_children()) + { + const QGauss quadrature_formula (fe.degree+1); + const unsigned int n_q_points = quadrature_formula.size(); - FEValues fe_values (fe, quadrature_formula, - update_values | update_JxW_values); + FEValues fe_values (fe, quadrature_formula, + update_values | update_JxW_values); - if (cell_g->level() > cell_g_prime->level()) - fe_values.reinit (cell_g); - else - fe_values.reinit (cell_g_prime); + if (cell_g->level() > cell_g_prime->level()) + fe_values.reinit (cell_g); + else + fe_values.reinit (cell_g_prime); + + const double fission_dist_XS + = material_data.get_fission_dist_XS (group, g_prime.group, + cell_g_prime->material_id()); + + const double scattering_XS + = material_data.get_scattering_XS (g_prime.group, group, + cell_g_prime->material_id()); + + FullMatrix local_mass_matrix_f (fe.dofs_per_cell, + fe.dofs_per_cell); + FullMatrix local_mass_matrix_g (fe.dofs_per_cell, + fe.dofs_per_cell); + + for (unsigned int q_point=0; q_pointvmult + // function, or the product with the + // transpose matrix using Tvmult. + // After doing so, we transfer the + // result into the global right hand + // side vector of energy group $g$. + Vector g_prime_new_values (fe.dofs_per_cell); + Vector g_prime_old_values (fe.dofs_per_cell); + cell_g_prime->get_dof_values (g_prime.solution_old, g_prime_old_values); + cell_g_prime->get_dof_values (g_prime.solution, g_prime_new_values); + + Vector cell_rhs (fe.dofs_per_cell); + Vector tmp (fe.dofs_per_cell); + + if (cell_g->level() > cell_g_prime->level()) + { + prolongation_matrix.vmult (tmp, g_prime_old_values); + local_mass_matrix_f.vmult (cell_rhs, tmp); - const double fission_dist_XS - = material_data.get_fission_dist_XS (group, g_prime.group, - cell_g_prime->material_id()); + prolongation_matrix.vmult (tmp, g_prime_new_values); + local_mass_matrix_g.vmult_add (cell_rhs, tmp); + } + else + { + local_mass_matrix_f.vmult (tmp, g_prime_old_values); + prolongation_matrix.Tvmult (cell_rhs, tmp); - const double scattering_XS - = material_data.get_scattering_XS (g_prime.group, group, - cell_g_prime->material_id()); + local_mass_matrix_g.vmult (tmp, g_prime_new_values); + prolongation_matrix.Tvmult_add (cell_rhs, tmp); + } - FullMatrix local_mass_matrix_f (fe.dofs_per_cell, - fe.dofs_per_cell); - FullMatrix local_mass_matrix_g (fe.dofs_per_cell, - fe.dofs_per_cell); + std::vector local_dof_indices (fe.dofs_per_cell); + cell_g->get_dof_indices (local_dof_indices); - for (unsigned int q_point=0; q_pointvmult - // function, or the product with the - // transpose matrix using Tvmult. - // After doing so, we transfer the - // result into the global right hand - // side vector of energy group $g$. 
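  // In matrix notation, with $B$ the accumulated
  // prolongation matrix, $M_f$ and $M_g$ the two
  // local mass matrices just described (carrying
  // the fission distribution and scattering cross
  // sections, respectively), and
  // $U'_{\mathrm{old}}$, $U'_{\mathrm{new}}$ the
  // nodal values of g_prime.solution_old and
  // g_prime.solution on the cell of energy group
  // $g'$, the two branches of the code compute
  // either
  // @f{eqnarray*}
  //    F^K = M_f B U'_{\mathrm{old}} + M_g B U'_{\mathrm{new}}
  // @f}
  // if the cell of group $g$ is the finer one, or
  // @f{eqnarray*}
  //    F^K = B^T \left( M_f U'_{\mathrm{old}} + M_g U'_{\mathrm{new}} \right)
  // @f}
  // otherwise; the result is then added into the
  // global right hand side vector of group $g$.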
- Vector g_prime_new_values (fe.dofs_per_cell); - Vector g_prime_old_values (fe.dofs_per_cell); - cell_g_prime->get_dof_values (g_prime.solution_old, g_prime_old_values); - cell_g_prime->get_dof_values (g_prime.solution, g_prime_new_values); - - Vector cell_rhs (fe.dofs_per_cell); - Vector tmp (fe.dofs_per_cell); - - if (cell_g->level() > cell_g_prime->level()) - { - prolongation_matrix.vmult (tmp, g_prime_old_values); - local_mass_matrix_f.vmult (cell_rhs, tmp); + system_rhs(local_dof_indices[i]) += cell_rhs(i); + } - prolongation_matrix.vmult (tmp, g_prime_new_values); - local_mass_matrix_g.vmult_add (cell_rhs, tmp); - } - else + // The alternative is that one of + // the two cells is further + // refined. In that case, we have + // to loop over all the children, + // multiply the existing + // interpolation (prolongation) + // product of matrices from the + // left with the interpolation from + // the present cell to its child + // (using the matrix-matrix + // multiplication function + // mmult), and then hand the + // result off to this very same + // function again, but with the + // cell that has children replaced + // by one of its children: + else + for (unsigned int child=0; child::max_children_per_cell;++child) { - local_mass_matrix_f.vmult (tmp, g_prime_old_values); - prolongation_matrix.Tvmult (cell_rhs, tmp); - - local_mass_matrix_g.vmult (tmp, g_prime_new_values); - prolongation_matrix.Tvmult_add (cell_rhs, tmp); + FullMatrix new_matrix (fe.dofs_per_cell, fe.dofs_per_cell); + fe.get_prolongation_matrix(child).mmult (new_matrix, + prolongation_matrix); + + if (cell_g->has_children()) + assemble_cross_group_rhs_recursive (g_prime, + cell_g->child(child), cell_g_prime, + new_matrix); + else + assemble_cross_group_rhs_recursive (g_prime, + cell_g, cell_g_prime->child(child), + new_matrix); } + } - std::vector local_dof_indices (fe.dofs_per_cell); - cell_g->get_dof_indices (local_dof_indices); - - for (unsigned int i=0; immult), and then hand the - // result off to this very same - // function again, but with the - // cell that has children replaced - // by one of its children: - else - for (unsigned int child=0; child::max_children_per_cell;++child) - { - FullMatrix new_matrix (fe.dofs_per_cell, fe.dofs_per_cell); - fe.get_prolongation_matrix(child).mmult (new_matrix, - prolongation_matrix); - - if (cell_g->has_children()) - assemble_cross_group_rhs_recursive (g_prime, - cell_g->child(child), cell_g_prime, - new_matrix); - else - assemble_cross_group_rhs_recursive (g_prime, - cell_g, cell_g_prime->child(child), - new_matrix); - } -} - - - // @sect5{EnergyGroup::get_fission_source} - // - // In the (inverse) power iteration, - // we use the integrated fission - // source to update the - // $k$-eigenvalue. 
Given its - // definition, the following function - // is essentially self-explanatory: -template -double EnergyGroup::get_fission_source () const -{ - const QGauss quadrature_formula (fe.degree + 1); - const unsigned int n_q_points = quadrature_formula.size(); - - FEValues fe_values (fe, quadrature_formula, - update_values | update_JxW_values); - - std::vector solution_values (n_q_points); - - double fission_source = 0; - - typename DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(), - endc = dof_handler.end(); - for (; cell!=endc; ++cell) - { - fe_values.reinit (cell); - - const double fission_XS - = material_data.get_fission_XS(group, cell->material_id()); - fe_values.get_function_values (solution, solution_values); + // @sect5{EnergyGroup::get_fission_source} + // + // In the (inverse) power iteration, + // we use the integrated fission + // source to update the + // $k$-eigenvalue. Given its + // definition, the following function + // is essentially self-explanatory: + template + double EnergyGroup::get_fission_source () const + { + const QGauss quadrature_formula (fe.degree + 1); + const unsigned int n_q_points = quadrature_formula.size(); - for (unsigned int q_point=0; q_point fe_values (fe, quadrature_formula, + update_values | update_JxW_values); - return fission_source; -} + std::vector solution_values (n_q_points); + double fission_source = 0; - // @sect5{EnergyGroup::solve} - // - // Next a function that solves the - // linear system assembled - // before. Things are pretty much - // standard, except that we delayed - // applying boundary values until we - // get here, since in all the - // previous functions we were still - // adding up contributions the right - // hand side vector. -template -void -EnergyGroup::solve () -{ - hanging_node_constraints.condense (system_rhs); - MatrixTools::apply_boundary_values (boundary_values, - system_matrix, - solution, - system_rhs); + typename DoFHandler::active_cell_iterator + cell = dof_handler.begin_active(), + endc = dof_handler.end(); + for (; cell!=endc; ++cell) + { + fe_values.reinit (cell); - SolverControl solver_control (system_matrix.m(), - 1e-12*system_rhs.l2_norm()); - SolverCG<> cg (solver_control); + const double fission_XS + = material_data.get_fission_XS(group, cell->material_id()); - PreconditionSSOR<> preconditioner; - preconditioner.initialize(system_matrix, 1.2); + fe_values.get_function_values (solution, solution_values); - cg.solve (system_matrix, solution, system_rhs, preconditioner); + for (unsigned int q_point=0; q_pointEnergyGroup::solve} + // + // Next a function that solves the + // linear system assembled + // before. Things are pretty much + // standard, except that we delayed + // applying boundary values until we + // get here, since in all the + // previous functions we were still + // adding up contributions the right + // hand side vector. + template + void + EnergyGroup::solve () + { + hanging_node_constraints.condense (system_rhs); + MatrixTools::apply_boundary_values (boundary_values, + system_matrix, + solution, + system_rhs); - // @sect5{EnergyGroup::estimate_errors} - // - // Mesh refinement is split into two - // functions. The first estimates the - // error for each cell, normalizes it - // by the magnitude of the solution, - // and returns it in the vector given - // as an argument. The calling - // function collects all error - // indicators from all energy groups, - // and computes thresholds for - // refining and coarsening cells. 
-template -void EnergyGroup::estimate_errors (Vector &error_indicators) const -{ - KellyErrorEstimator::estimate (dof_handler, - QGauss (fe.degree + 1), - typename FunctionMap::type(), - solution, - error_indicators); - error_indicators /= solution.linfty_norm(); -} + SolverControl solver_control (system_matrix.m(), + 1e-12*system_rhs.l2_norm()); + SolverCG<> cg (solver_control); + PreconditionSSOR<> preconditioner; + preconditioner.initialize(system_matrix, 1.2); + cg.solve (system_matrix, solution, system_rhs, preconditioner); - // @sect5{EnergyGroup::refine_grid} - // - // The second part is to refine the - // grid given the error indicators - // compute in the previous function - // and error thresholds above which - // cells shall be refined or below - // which cells shall be - // coarsened. Note that we do not use - // any of the functions in - // GridRefinement here, - // but rather set refinement flags - // ourselves. - // - // After setting these flags, we use - // the SolutionTransfer class to move - // the solution vector from the old - // to the new mesh. The procedure - // used here is described in detail - // in the documentation of that - // class: -template -void EnergyGroup::refine_grid (const Vector &error_indicators, - const double refine_threshold, - const double coarsen_threshold) -{ - typename Triangulation::active_cell_iterator - cell = triangulation.begin_active(), - endc = triangulation.end(); + hanging_node_constraints.distribute (solution); + } - for (unsigned int cell_index=0; cell!=endc; ++cell, ++cell_index) - if (error_indicators(cell_index) > refine_threshold) - cell->set_refine_flag (); - else if (error_indicators(cell_index) < coarsen_threshold) - cell->set_coarsen_flag (); - SolutionTransfer soltrans(dof_handler); - triangulation.prepare_coarsening_and_refinement(); - soltrans.prepare_for_coarsening_and_refinement(solution); + // @sect5{EnergyGroup::estimate_errors} + // + // Mesh refinement is split into two + // functions. The first estimates the + // error for each cell, normalizes it + // by the magnitude of the solution, + // and returns it in the vector given + // as an argument. The calling + // function collects all error + // indicators from all energy groups, + // and computes thresholds for + // refining and coarsening cells. + template + void EnergyGroup::estimate_errors (Vector &error_indicators) const + { + KellyErrorEstimator::estimate (dof_handler, + QGauss (fe.degree + 1), + typename FunctionMap::type(), + solution, + error_indicators); + error_indicators /= solution.linfty_norm(); + } - triangulation.execute_coarsening_and_refinement (); - dof_handler.distribute_dofs (fe); - solution.reinit (dof_handler.n_dofs()); - soltrans.interpolate(solution_old, solution); - solution_old.reinit (dof_handler.n_dofs()); - solution_old = solution; -} + // @sect5{EnergyGroup::refine_grid} + // + // The second part is to refine the + // grid given the error indicators + // compute in the previous function + // and error thresholds above which + // cells shall be refined or below + // which cells shall be + // coarsened. Note that we do not use + // any of the functions in + // GridRefinement here, + // but rather set refinement flags + // ourselves. + // + // After setting these flags, we use + // the SolutionTransfer class to move + // the solution vector from the old + // to the new mesh. 
The procedure + // used here is described in detail + // in the documentation of that + // class: + template + void EnergyGroup::refine_grid (const Vector &error_indicators, + const double refine_threshold, + const double coarsen_threshold) + { + typename Triangulation::active_cell_iterator + cell = triangulation.begin_active(), + endc = triangulation.end(); + for (unsigned int cell_index=0; cell!=endc; ++cell, ++cell_index) + if (error_indicators(cell_index) > refine_threshold) + cell->set_refine_flag (); + else if (error_indicators(cell_index) < coarsen_threshold) + cell->set_coarsen_flag (); - // @sect5{EnergyGroup::output_results} - // - // The last function of this class - // outputs meshes and solutions after - // each mesh iteration. This has been - // shown many times before. The only - // thing worth pointing out is the - // use of the - // Utilities::int_to_string - // function to convert an integer - // into its string - // representation. The second - // argument of that function denotes - // how many digits we shall use -- if - // this value was larger than one, - // then the number would be padded by - // leading zeros. -template -void -EnergyGroup::output_results (const unsigned int cycle) const -{ - { - const std::string filename = std::string("grid-") + - Utilities::int_to_string(group,1) + - "." + - Utilities::int_to_string(cycle,1) + - ".eps"; - std::ofstream output (filename.c_str()); - - GridOut grid_out; - grid_out.write_eps (triangulation, output); - } + SolutionTransfer soltrans(dof_handler); - { - const std::string filename = std::string("solution-") + - Utilities::int_to_string(group,1) + - "." + - Utilities::int_to_string(cycle,1) + - ".gmv"; + triangulation.prepare_coarsening_and_refinement(); + soltrans.prepare_for_coarsening_and_refinement(solution); - DataOut data_out; + triangulation.execute_coarsening_and_refinement (); + dof_handler.distribute_dofs (fe); - data_out.attach_dof_handler (dof_handler); - data_out.add_data_vector (solution, "solution"); - data_out.build_patches (); + solution.reinit (dof_handler.n_dofs()); + soltrans.interpolate(solution_old, solution); - std::ofstream output (filename.c_str()); - data_out.write_gmv (output); + solution_old.reinit (dof_handler.n_dofs()); + solution_old = solution; } -} - - // @sect3{The NeutronDiffusionProblem class template} - - // This is the main class of the - // program, not because it implements - // all the functionality (in fact, - // most of it is implemented in the - // EnergyGroup class) - // but because it contains the - // driving algorithm that determines - // what to compute and when. It is - // mostly as shown in many of the - // other tutorial programs in that it - // has a public run - // function and private functions - // doing all the rest. In several - // places, we have to do something - // for all energy groups, in which - // case we will start threads for - // each group to let these things run - // in parallel if deal.II was - // configured for multithreading. - // For strategies of parallelization, - // take a look at the @ref threads module. - // - // The biggest difference to previous - // example programs is that we also - // declare a nested class that has - // member variables for all the - // run-time parameters that can be - // passed to the program in an input - // file. 
Right now, these are the - // number of energy groups, the - // number of refinement cycles, the - // polynomial degree of the finite - // element to be used, and the - // tolerance used to determine when - // convergence of the inverse power - // iteration has occurred. In - // addition, we have a constructor of - // this class that sets all these - // values to their default values, a - // function - // declare_parameters - // that described to the - // ParameterHandler class already - // used in step-19 - // what parameters are accepted in - // the input file, and a function - // get_parameters that - // can extract the values of these - // parameters from a ParameterHandler - // object. -template -class NeutronDiffusionProblem -{ - public: - class Parameters + // @sect5{EnergyGroup::output_results} + // + // The last function of this class + // outputs meshes and solutions after + // each mesh iteration. This has been + // shown many times before. The only + // thing worth pointing out is the + // use of the + // Utilities::int_to_string + // function to convert an integer + // into its string + // representation. The second + // argument of that function denotes + // how many digits we shall use -- if + // this value was larger than one, + // then the number would be padded by + // leading zeros. + template + void + EnergyGroup::output_results (const unsigned int cycle) const + { { - public: - Parameters (); + const std::string filename = std::string("grid-") + + Utilities::int_to_string(group,1) + + "." + + Utilities::int_to_string(cycle,1) + + ".eps"; + std::ofstream output (filename.c_str()); + + GridOut grid_out; + grid_out.write_eps (triangulation, output); + } - static void declare_parameters (ParameterHandler &prm); - void get_parameters (ParameterHandler &prm); + { + const std::string filename = std::string("solution-") + + Utilities::int_to_string(group,1) + + "." + + Utilities::int_to_string(cycle,1) + + ".gmv"; - unsigned int n_groups; - unsigned int n_refinement_cycles; + DataOut data_out; - unsigned int fe_degree; + data_out.attach_dof_handler (dof_handler); + data_out.add_data_vector (solution, "solution"); + data_out.build_patches (); - double convergence_tolerance; - }; + std::ofstream output (filename.c_str()); + data_out.write_gmv (output); + } + } - NeutronDiffusionProblem (const Parameters ¶meters); - ~NeutronDiffusionProblem (); - - void run (); - - private: - // @sect5{Private member functions} - - // There are not that many member - // functions in this class since - // most of the functionality has - // been moved into the - // EnergyGroup class - // and is simply called from the - // run() member - // function of this class. The - // ones that remain have - // self-explanatory names: - void initialize_problem(); - - void refine_grid (); - - double get_total_fission_source () const; - - - // @sect5{Private member variables} - - // Next, we have a few member - // variables. In particular, - // these are (i) a reference to - // the parameter object (owned by - // the main function of this - // program, and passed to the - // constructor of this class), - // (ii) an object describing the - // material parameters for the - // number of energy groups - // requested in the input file, - // and (iii) the finite element - // to be used by all energy - // groups: - const Parameters ¶meters; - const MaterialData material_data; - FE_Q fe; - - // Furthermore, we have (iv) the - // value of the computed - // eigenvalue at the present - // iteration. 
This is, in fact, - // the only part of the solution - // that is shared between all - // energy groups -- all other - // parts of the solution, such as - // neutron fluxes are particular - // to one or the other energy - // group, and are therefore - // stored in objects that - // describe a single energy - // group: - double k_eff; - - // Finally, (v), we have an array - // of pointers to the energy - // group objects. The length of - // this array is, of course, - // equal to the number of energy - // groups specified in the - // parameter file. - std::vector*> energy_groups; -}; - - - // @sect4{Implementation of the NeutronDiffusionProblem::Parameters class} - - // Before going on to the - // implementation of the outer class, - // we have to implement the functions - // of the parameters structure. This - // is pretty straightforward and, in - // fact, looks pretty much the same - // for all such parameters classes - // using the ParameterHandler - // capabilities. We will therefore - // not comment further on this: -template -NeutronDiffusionProblem::Parameters::Parameters () - : - n_groups (2), - n_refinement_cycles (5), - fe_degree (2), - convergence_tolerance (1e-12) -{} - - - -template -void -NeutronDiffusionProblem::Parameters:: -declare_parameters (ParameterHandler &prm) -{ - prm.declare_entry ("Number of energy groups", "2", - Patterns::Integer (), - "The number of energy different groups considered"); - prm.declare_entry ("Refinement cycles", "5", - Patterns::Integer (), - "Number of refinement cycles to be performed"); - prm.declare_entry ("Finite element degree", "2", - Patterns::Integer (), - "Polynomial degree of the finite element to be used"); - prm.declare_entry ("Power iteration tolerance", "1e-12", - Patterns::Double (), - "Inner power iterations are stopped when the change in k_eff falls " - "below this tolerance"); -} + // @sect3{The NeutronDiffusionProblem class template} + + // This is the main class of the + // program, not because it implements + // all the functionality (in fact, + // most of it is implemented in the + // EnergyGroup class) + // but because it contains the + // driving algorithm that determines + // what to compute and when. It is + // mostly as shown in many of the + // other tutorial programs in that it + // has a public run + // function and private functions + // doing all the rest. In several + // places, we have to do something + // for all energy groups, in which + // case we will start threads for + // each group to let these things run + // in parallel if deal.II was + // configured for multithreading. + // For strategies of parallelization, + // take a look at the @ref threads module. + // + // The biggest difference to previous + // example programs is that we also + // declare a nested class that has + // member variables for all the + // run-time parameters that can be + // passed to the program in an input + // file. Right now, these are the + // number of energy groups, the + // number of refinement cycles, the + // polynomial degree of the finite + // element to be used, and the + // tolerance used to determine when + // convergence of the inverse power + // iteration has occurred. 
In + // addition, we have a constructor of + // this class that sets all these + // values to their default values, a + // function + // declare_parameters + // that described to the + // ParameterHandler class already + // used in step-19 + // what parameters are accepted in + // the input file, and a function + // get_parameters that + // can extract the values of these + // parameters from a ParameterHandler + // object. + template + class NeutronDiffusionProblem + { + public: + class Parameters + { + public: + Parameters (); + + static void declare_parameters (ParameterHandler &prm); + void get_parameters (ParameterHandler &prm); + + unsigned int n_groups; + unsigned int n_refinement_cycles; + + unsigned int fe_degree; + + double convergence_tolerance; + }; + + + + NeutronDiffusionProblem (const Parameters ¶meters); + ~NeutronDiffusionProblem (); + + void run (); + + private: + // @sect5{Private member functions} + + // There are not that many member + // functions in this class since + // most of the functionality has + // been moved into the + // EnergyGroup class + // and is simply called from the + // run() member + // function of this class. The + // ones that remain have + // self-explanatory names: + void initialize_problem(); + + void refine_grid (); + + double get_total_fission_source () const; + + + // @sect5{Private member variables} + + // Next, we have a few member + // variables. In particular, + // these are (i) a reference to + // the parameter object (owned by + // the main function of this + // program, and passed to the + // constructor of this class), + // (ii) an object describing the + // material parameters for the + // number of energy groups + // requested in the input file, + // and (iii) the finite element + // to be used by all energy + // groups: + const Parameters ¶meters; + const MaterialData material_data; + FE_Q fe; + + // Furthermore, we have (iv) the + // value of the computed + // eigenvalue at the present + // iteration. This is, in fact, + // the only part of the solution + // that is shared between all + // energy groups -- all other + // parts of the solution, such as + // neutron fluxes are particular + // to one or the other energy + // group, and are therefore + // stored in objects that + // describe a single energy + // group: + double k_eff; + + // Finally, (v), we have an array + // of pointers to the energy + // group objects. The length of + // this array is, of course, + // equal to the number of energy + // groups specified in the + // parameter file. + std::vector*> energy_groups; + }; + + + // @sect4{Implementation of the NeutronDiffusionProblem::Parameters class} + + // Before going on to the + // implementation of the outer class, + // we have to implement the functions + // of the parameters structure. This + // is pretty straightforward and, in + // fact, looks pretty much the same + // for all such parameters classes + // using the ParameterHandler + // capabilities. 
We will therefore + // not comment further on this: + template + NeutronDiffusionProblem::Parameters::Parameters () + : + n_groups (2), + n_refinement_cycles (5), + fe_degree (2), + convergence_tolerance (1e-12) + {} + + + + template + void + NeutronDiffusionProblem::Parameters:: + declare_parameters (ParameterHandler &prm) + { + prm.declare_entry ("Number of energy groups", "2", + Patterns::Integer (), + "The number of energy different groups considered"); + prm.declare_entry ("Refinement cycles", "5", + Patterns::Integer (), + "Number of refinement cycles to be performed"); + prm.declare_entry ("Finite element degree", "2", + Patterns::Integer (), + "Polynomial degree of the finite element to be used"); + prm.declare_entry ("Power iteration tolerance", "1e-12", + Patterns::Double (), + "Inner power iterations are stopped when the change in k_eff falls " + "below this tolerance"); + } -template -void -NeutronDiffusionProblem::Parameters:: -get_parameters (ParameterHandler &prm) -{ - n_groups = prm.get_integer ("Number of energy groups"); - n_refinement_cycles = prm.get_integer ("Refinement cycles"); - fe_degree = prm.get_integer ("Finite element degree"); - convergence_tolerance = prm.get_double ("Power iteration tolerance"); -} + template + void + NeutronDiffusionProblem::Parameters:: + get_parameters (ParameterHandler &prm) + { + n_groups = prm.get_integer ("Number of energy groups"); + n_refinement_cycles = prm.get_integer ("Refinement cycles"); + fe_degree = prm.get_integer ("Finite element degree"); + convergence_tolerance = prm.get_double ("Power iteration tolerance"); + } - // @sect4{Implementation of the NeutronDiffusionProblem class} + // @sect4{Implementation of the NeutronDiffusionProblem class} - // Now for the - // NeutronDiffusionProblem - // class. The constructor and - // destructor have nothing of much - // interest: -template -NeutronDiffusionProblem:: -NeutronDiffusionProblem (const Parameters ¶meters) - : - parameters (parameters), - material_data (parameters.n_groups), - fe (parameters.fe_degree) -{} + // Now for the + // NeutronDiffusionProblem + // class. The constructor and + // destructor have nothing of much + // interest: + template + NeutronDiffusionProblem:: + NeutronDiffusionProblem (const Parameters ¶meters) + : + parameters (parameters), + material_data (parameters.n_groups), + fe (parameters.fe_degree) + {} -template -NeutronDiffusionProblem::~NeutronDiffusionProblem () -{ - for (unsigned int group=0; group + NeutronDiffusionProblem::~NeutronDiffusionProblem () + { + for (unsigned int group=0; groupNeutronDiffusionProblem::initialize_problem} - // - // The first function of interest is - // the one that sets up the geometry - // of the reactor core. This is - // described in more detail in the - // introduction. - // - // The first part of the function - // defines geometry data, and then - // creates a coarse mesh that has as - // many cells as there are fuel rods - // (or pin cells, for that matter) in - // that part of the reactor core that - // we simulate. As mentioned when - // interpolating boundary values - // above, the last parameter to the - // GridGenerator::subdivided_hyper_rectangle - // function specifies that sides of - // the domain shall have unique - // boundary indicators that will - // later allow us to determine in a - // simple way which of the boundaries - // have Neumann and which have - // Dirichlet conditions attached to - // them. 
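  // (As a short, purely illustrative aside that is not part of the benchmark
  // code: with that last argument set to 'true',
  // GridGenerator::subdivided_hyper_rectangle gives each pair of opposite
  // sides of the box its own boundary indicators -- 0 and 1 for the two
  // faces in x-direction, 2 and 3 in y-direction, and so on. One could then
  // select boundary faces by indicator along the following lines; which
  // indicator ends up carrying which boundary condition is of course a
  // choice made elsewhere in the program:
  //
  //   for (typename Triangulation<dim>::active_cell_iterator
  //          cell = coarse_grid.begin_active();
  //        cell != coarse_grid.end(); ++cell)
  //     for (unsigned int f=0; f<GeometryInfo<dim>::faces_per_cell; ++f)
  //       if (cell->face(f)->at_boundary()
  //           &&
  //           (cell->face(f)->boundary_indicator() == 0))
  //         {
  //           // e.g. treat this side as a reflective (Neumann) boundary
  //         }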
-template -void NeutronDiffusionProblem::initialize_problem() -{ - const unsigned int rods_per_assembly_x = 17, - rods_per_assembly_y = 17; - const double pin_pitch_x = 1.26, - pin_pitch_y = 1.26; - const double assembly_height = 200; - - const unsigned int assemblies_x = 2, - assemblies_y = 2, - assemblies_z = 1; - - const Point bottom_left = Point(); - const Point upper_right = (dim == 2 - ? - Point (assemblies_x*rods_per_assembly_x*pin_pitch_x, - assemblies_y*rods_per_assembly_y*pin_pitch_y) - : - Point (assemblies_x*rods_per_assembly_x*pin_pitch_x, - assemblies_y*rods_per_assembly_y*pin_pitch_y, - assemblies_z*assembly_height)); - - std::vector n_subdivisions; - n_subdivisions.push_back (assemblies_x*rods_per_assembly_x); - if (dim >= 2) - n_subdivisions.push_back (assemblies_y*rods_per_assembly_y); - if (dim >= 3) - n_subdivisions.push_back (assemblies_z); - - Triangulation coarse_grid; - GridGenerator::subdivided_hyper_rectangle (coarse_grid, - n_subdivisions, - bottom_left, - upper_right, - true); - - - // The second part of the function - // deals with material numbers of - // pin cells of each type of - // assembly. Here, we define four - // different types of assembly, for - // which we describe the - // arrangement of fuel rods in the - // following tables. + // @sect5{NeutronDiffusionProblem::initialize_problem} // - // The assemblies described here - // are taken from the benchmark - // mentioned in the introduction - // and are (in this order): - //
    - //
  1. 'UX' Assembly: UO2 fuel assembly - // with 24 guide tubes and a central - // Moveable Fission Chamber - //
  2. 'UA' Assembly: UO2 fuel assembly - // with 24 AIC and a central - // Moveable Fission Chamber - //
  3. 'PX' Assembly: MOX fuel assembly - // with 24 guide tubes and a central - // Moveable Fission Chamber - //
  4. 'R' Assembly: a reflector. - //
+ // The first function of interest is + // the one that sets up the geometry + // of the reactor core. This is + // described in more detail in the + // introduction. // - // Note that the numbers listed - // here and taken from the - // benchmark description are, in - // good old Fortran fashion, - // one-based. We will later - // subtract one from each number - // when assigning materials to - // individual cells to convert - // things into the C-style - // zero-based indexing. - const unsigned int n_assemblies=4; - const unsigned int - assembly_materials[n_assemblies][rods_per_assembly_x][rods_per_assembly_y] - = { + // The first part of the function + // defines geometry data, and then + // creates a coarse mesh that has as + // many cells as there are fuel rods + // (or pin cells, for that matter) in + // that part of the reactor core that + // we simulate. As mentioned when + // interpolating boundary values + // above, the last parameter to the + // GridGenerator::subdivided_hyper_rectangle + // function specifies that sides of + // the domain shall have unique + // boundary indicators that will + // later allow us to determine in a + // simple way which of the boundaries + // have Neumann and which have + // Dirichlet conditions attached to + // them. + template + void NeutronDiffusionProblem::initialize_problem() + { + const unsigned int rods_per_assembly_x = 17, + rods_per_assembly_y = 17; + const double pin_pitch_x = 1.26, + pin_pitch_y = 1.26; + const double assembly_height = 200; + + const unsigned int assemblies_x = 2, + assemblies_y = 2, + assemblies_z = 1; + + const Point bottom_left = Point(); + const Point upper_right = (dim == 2 + ? + Point (assemblies_x*rods_per_assembly_x*pin_pitch_x, + assemblies_y*rods_per_assembly_y*pin_pitch_y) + : + Point (assemblies_x*rods_per_assembly_x*pin_pitch_x, + assemblies_y*rods_per_assembly_y*pin_pitch_y, + assemblies_z*assembly_height)); + + std::vector n_subdivisions; + n_subdivisions.push_back (assemblies_x*rods_per_assembly_x); + if (dim >= 2) + n_subdivisions.push_back (assemblies_y*rods_per_assembly_y); + if (dim >= 3) + n_subdivisions.push_back (assemblies_z); + + Triangulation coarse_grid; + GridGenerator::subdivided_hyper_rectangle (coarse_grid, + n_subdivisions, + bottom_left, + upper_right, + true); + + + // The second part of the function + // deals with material numbers of + // pin cells of each type of + // assembly. Here, we define four + // different types of assembly, for + // which we describe the + // arrangement of fuel rods in the + // following tables. + // + // The assemblies described here + // are taken from the benchmark + // mentioned in the introduction + // and are (in this order): + //
    + //
  1. 'UX' Assembly: UO2 fuel assembly + // with 24 guide tubes and a central + // Moveable Fission Chamber + //
  2. 'UA' Assembly: UO2 fuel assembly + // with 24 AIC and a central + // Moveable Fission Chamber + //
  3. 'PX' Assembly: MOX fuel assembly + // with 24 guide tubes and a central + // Moveable Fission Chamber + //
  4. 'R' Assembly: a reflector. + //
+ // + // Note that the numbers listed + // here and taken from the + // benchmark description are, in + // good old Fortran fashion, + // one-based. We will later + // subtract one from each number + // when assigning materials to + // individual cells to convert + // things into the C-style + // zero-based indexing. + const unsigned int n_assemblies=4; + const unsigned int + assembly_materials[n_assemblies][rods_per_assembly_x][rods_per_assembly_y] + = { { { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, @@ -1948,286 +1950,287 @@ void NeutronDiffusionProblem::initialize_problem() } }; - // After the description of the - // materials that make up an - // assembly, we have to specify the - // arrangement of assemblies within - // the core. We use a symmetric - // pattern that in fact only uses - // the 'UX' and 'PX' assemblies: - const unsigned int core[assemblies_x][assemblies_y][assemblies_z] - = {{{0}, {2}}, {{2}, {0}}}; - - // We are now in a position to - // actually set material IDs for - // each cell. To this end, we loop - // over all cells, look at the - // location of the cell's center, - // and determine which assembly and - // fuel rod this would be in. (We - // add a few checks to see that the - // locations we compute are within - // the bounds of the arrays in - // which we have to look up - // materials.) At the end of the - // loop, we set material - // identifiers accordingly: - for (typename Triangulation::active_cell_iterator - cell = coarse_grid.begin_active(); - cell!=coarse_grid.end(); - ++cell) - { - const Point cell_center = cell->center(); - - const unsigned int tmp_x = int(cell_center[0]/pin_pitch_x); - const unsigned int ax = tmp_x/rods_per_assembly_x; - const unsigned int cx = tmp_x - ax * rods_per_assembly_x; - - const unsigned tmp_y = int(cell_center[1]/pin_pitch_y); - const unsigned int ay = tmp_y/rods_per_assembly_y; - const unsigned int cy = tmp_y - ay * rods_per_assembly_y; - - const unsigned int az = (dim == 2 - ? - 0 - : - int (cell_center[dim-1]/assembly_height)); - - Assert (ax < assemblies_x, ExcInternalError()); - Assert (ay < assemblies_y, ExcInternalError()); - Assert (az < assemblies_z, ExcInternalError()); - - Assert (core[ax][ay][az] < n_assemblies, ExcInternalError()); - - Assert (cx < rods_per_assembly_x, ExcInternalError()); - Assert (cy < rods_per_assembly_y, ExcInternalError()); - - cell->set_material_id(assembly_materials[core[ax][ay][az]][cx][cy] - 1); - } - - // With the coarse mesh so - // initialized, we create the - // appropriate number of energy - // group objects and let them - // initialize their individual - // meshes with the coarse mesh - // generated above: - energy_groups.resize (parameters.n_groups); - for (unsigned int group=0; group (group, material_data, - coarse_grid, fe); -} - - - // @sect5{NeutronDiffusionProblem::get_total_fission_source} - // - // In the eigenvalue computation, we - // need to calculate total fission - // neutron source after each power - // iteration. The total power then is - // used to renew k-effective. - // - // Since the total fission source is a sum - // over all the energy groups, and since each - // of these sums can be computed - // independently, we actually do this in - // parallel. One of the problems is that the - // function in the EnergyGroup - // class that computes the fission source - // returns a value. 
If we now simply spin off - // a new thread, we have to later capture the - // return value of the function run on that - // thread. The way this can be done is to use - // the return value of the - // Threads::new_thread function, which - // returns an object of type - // Threads::Thread@ if the function - // spawned returns a double. We can then later - // ask this object for the returned value - // (when doing so, the - // Threads::Thread::return_value - // function first waits for the thread to - // finish if it hasn't done so already). - // - // The way this function then works - // is to first spawn one thread for - // each energy group we work with, - // then one-by-one collecting the - // returned values of each thread and - // return the sum. -template -double NeutronDiffusionProblem::get_total_fission_source () const -{ - std::vector > threads; - for (unsigned int group=0; group::get_fission_source, - *energy_groups[group])); + // After the description of the + // materials that make up an + // assembly, we have to specify the + // arrangement of assemblies within + // the core. We use a symmetric + // pattern that in fact only uses + // the 'UX' and 'PX' assemblies: + const unsigned int core[assemblies_x][assemblies_y][assemblies_z] + = {{{0}, {2}}, {{2}, {0}}}; + + // We are now in a position to + // actually set material IDs for + // each cell. To this end, we loop + // over all cells, look at the + // location of the cell's center, + // and determine which assembly and + // fuel rod this would be in. (We + // add a few checks to see that the + // locations we compute are within + // the bounds of the arrays in + // which we have to look up + // materials.) At the end of the + // loop, we set material + // identifiers accordingly: + for (typename Triangulation::active_cell_iterator + cell = coarse_grid.begin_active(); + cell!=coarse_grid.end(); + ++cell) + { + const Point cell_center = cell->center(); - double fission_source = 0; - for (unsigned int group=0; groupNeutronDiffusionProblem::refine_grid} - // - // The next function lets the - // individual energy group objects - // refine their meshes. Much of this, - // again, is a task that can be done - // independently in parallel: first, - // let all the energy group objects - // calculate their error indicators - // in parallel, then compute the - // maximum error indicator over all - // energy groups and determine - // thresholds for refinement and - // coarsening of cells, and then ask - // all the energy groups to refine - // their meshes accordingly, again in - // parallel. 
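  // (To make the Threads::new_thread return-value mechanism described in
  // connection with get_total_fission_source() above a little more concrete,
  // here is a minimal, hypothetical sketch assuming some free function
  // 'double f ()' exists:
  //
  //   Threads::Thread<double> t = Threads::new_thread (&f);
  //   const double result = t.return_value ();
  //
  // The call to return_value() first waits for the thread to finish, then
  // hands back whatever f() returned.)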
-template -void NeutronDiffusionProblem::refine_grid () -{ - std::vector n_cells (parameters.n_groups); - for (unsigned int group=0; groupn_active_cells(); + Assert (cx < rods_per_assembly_x, ExcInternalError()); + Assert (cy < rods_per_assembly_y, ExcInternalError()); - BlockVector group_error_indicators(n_cells); + cell->set_material_id(assembly_materials[core[ax][ay][az]][cx][cy] - 1); + } - { - Threads::ThreadGroup<> threads; + // With the coarse mesh so + // initialized, we create the + // appropriate number of energy + // group objects and let them + // initialize their individual + // meshes with the coarse mesh + // generated above: + energy_groups.resize (parameters.n_groups); for (unsigned int group=0; group::estimate_errors, - *energy_groups[group], - group_error_indicators.block(group)); - threads.join_all (); + energy_groups[group] = new EnergyGroup (group, material_data, + coarse_grid, fe); } - const float max_error = group_error_indicators.linfty_norm(); - const float refine_threshold = 0.3*max_error; - const float coarsen_threshold = 0.01*max_error; + // @sect5{NeutronDiffusionProblem::get_total_fission_source} + // + // In the eigenvalue computation, we + // need to calculate total fission + // neutron source after each power + // iteration. The total power then is + // used to renew k-effective. + // + // Since the total fission source is a sum + // over all the energy groups, and since each + // of these sums can be computed + // independently, we actually do this in + // parallel. One of the problems is that the + // function in the EnergyGroup + // class that computes the fission source + // returns a value. If we now simply spin off + // a new thread, we have to later capture the + // return value of the function run on that + // thread. The way this can be done is to use + // the return value of the + // Threads::new_thread function, which + // returns an object of type + // Threads::Thread@ if the function + // spawned returns a double. We can then later + // ask this object for the returned value + // (when doing so, the + // Threads::Thread::return_value + // function first waits for the thread to + // finish if it hasn't done so already). + // + // The way this function then works + // is to first spawn one thread for + // each energy group we work with, + // then one-by-one collecting the + // returned values of each thread and + // return the sum. + template + double NeutronDiffusionProblem::get_total_fission_source () const { - Threads::ThreadGroup<> threads; + std::vector > threads; for (unsigned int group=0; group::refine_grid, - *energy_groups[group], - group_error_indicators.block(group), - refine_threshold, - coarsen_threshold); - threads.join_all (); - } -} + threads.push_back (Threads::new_thread (&EnergyGroup::get_fission_source, + *energy_groups[group])); + double fission_source = 0; + for (unsigned int group=0; groupNeutronDiffusionProblem::run} - // - // Finally, this is the function - // where the meat is: iterate on a - // sequence of meshes, and on each of - // them do a power iteration to - // compute the eigenvalue. 
- // - // Given the description of the - // algorithm in the introduction, - // there is actually not much to - // comment on: -template -void NeutronDiffusionProblem::run () -{ - std::cout << std::setprecision (12) << std::fixed; + return fission_source; + } - double k_eff_old = k_eff; - Timer timer; - timer.start (); - for (unsigned int cycle=0; cyclesolution *= k_eff; - } + // @sect5{NeutronDiffusionProblem::refine_grid} + // + // The next function lets the + // individual energy group objects + // refine their meshes. Much of this, + // again, is a task that can be done + // independently in parallel: first, + // let all the energy group objects + // calculate their error indicators + // in parallel, then compute the + // maximum error indicator over all + // energy groups and determine + // thresholds for refinement and + // coarsening of cells, and then ask + // all the energy groups to refine + // their meshes accordingly, again in + // parallel. + template + void NeutronDiffusionProblem::refine_grid () + { + std::vector n_cells (parameters.n_groups); + for (unsigned int group=0; groupn_active_cells(); - for (unsigned int group=0; groupsetup_linear_system (); + BlockVector group_error_indicators(n_cells); - std::cout << " Numbers of active cells: "; - for (unsigned int group=0; groupn_active_cells() - << ' '; - std::cout << std::endl; - std::cout << " Numbers of degrees of freedom: "; + { + Threads::ThreadGroup<> threads; for (unsigned int group=0; groupn_dofs() - << ' '; - std::cout << std::endl << std::endl; + threads += Threads::new_thread (&EnergyGroup::estimate_errors, + *energy_groups[group], + group_error_indicators.block(group)); + threads.join_all (); + } + const float max_error = group_error_indicators.linfty_norm(); + const float refine_threshold = 0.3*max_error; + const float coarsen_threshold = 0.01*max_error; + { Threads::ThreadGroup<> threads; for (unsigned int group=0; group::assemble_system_matrix, - *energy_groups[group]); + threads += Threads::new_thread (&EnergyGroup::refine_grid, + *energy_groups[group], + group_error_indicators.block(group), + refine_threshold, + coarsen_threshold); threads.join_all (); + } + } - double error; - unsigned int iteration = 1; - do - { - for (unsigned int group=0; groupassemble_ingroup_rhs (ZeroFunction()); - for (unsigned int bgroup=0; bgroupassemble_cross_group_rhs (*energy_groups[bgroup]); + // @sect5{NeutronDiffusionProblem::run} + // + // Finally, this is the function + // where the meat is: iterate on a + // sequence of meshes, and on each of + // them do a power iteration to + // compute the eigenvalue. 
+ // + // Given the description of the + // algorithm in the introduction, + // there is actually not much to + // comment on: + template + void NeutronDiffusionProblem::run () + { + std::cout << std::setprecision (12) << std::fixed; + + double k_eff_old = k_eff; - energy_groups[group]->solve (); - } + Timer timer; + timer.start (); - k_eff = get_total_fission_source(); - error = fabs(k_eff-k_eff_old)/fabs(k_eff); - std::cout << " Iteration " << iteration - << ": k_eff=" << k_eff - << std::endl; - k_eff_old=k_eff; + for (unsigned int cycle=0; cyclesolution_old = energy_groups[group]->solution; - energy_groups[group]->solution_old /= k_eff; - } + if (cycle == 0) + initialize_problem(); + else + { + refine_grid (); + for (unsigned int group=0; groupsolution *= k_eff; + } - ++iteration; - } - while((error > parameters.convergence_tolerance) - && - (iteration < 500)); + for (unsigned int group=0; groupsetup_linear_system (); + + std::cout << " Numbers of active cells: "; + for (unsigned int group=0; groupn_active_cells() + << ' '; + std::cout << std::endl; + std::cout << " Numbers of degrees of freedom: "; + for (unsigned int group=0; groupn_dofs() + << ' '; + std::cout << std::endl << std::endl; + + + Threads::ThreadGroup<> threads; + for (unsigned int group=0; group::assemble_system_matrix, + *energy_groups[group]); + threads.join_all (); + + double error; + unsigned int iteration = 1; + do + { + for (unsigned int group=0; groupassemble_ingroup_rhs (ZeroFunction()); + + for (unsigned int bgroup=0; bgroupassemble_cross_group_rhs (*energy_groups[bgroup]); + + energy_groups[group]->solve (); + } + + k_eff = get_total_fission_source(); + error = fabs(k_eff-k_eff_old)/fabs(k_eff); + std::cout << " Iteration " << iteration + << ": k_eff=" << k_eff + << std::endl; + k_eff_old=k_eff; + + for (unsigned int group=0; groupsolution_old = energy_groups[group]->solution; + energy_groups[group]->solution_old /= k_eff; + } + + ++iteration; + } + while((error > parameters.convergence_tolerance) + && + (iteration < 500)); - for (unsigned int group=0; groupoutput_results (cycle); + for (unsigned int group=0; groupoutput_results (cycle); - std::cout << std::endl; - std::cout << " Cycle=" << cycle - << ", n_dofs=" << energy_groups[0]->n_dofs() + energy_groups[1]->n_dofs() - << ", k_eff=" << k_eff - << ", time=" << timer() - << std::endl; + std::cout << std::endl; + std::cout << " Cycle=" << cycle + << ", n_dofs=" << energy_groups[0]->n_dofs() + energy_groups[1]->n_dofs() + << ", k_eff=" << k_eff + << ", time=" << timer() + << std::endl; - std::cout << std::endl << std::endl; - } + std::cout << std::endl << std::endl; + } + } } @@ -2265,10 +2268,11 @@ void NeutronDiffusionProblem::run () // for computation of the eigenvalue: int main (int argc, char ** argv) { - const unsigned int dim = 2; - try { + using namespace dealii; + using namespace Step28; + deallog.depth_console (0); std::string filename; @@ -2278,6 +2282,8 @@ int main (int argc, char ** argv) filename = argv[1]; + const unsigned int dim = 2; + ParameterHandler parameter_handler; NeutronDiffusionProblem::Parameters parameters; diff --git a/deal.II/examples/step-29/step-29.cc b/deal.II/examples/step-29/step-29.cc index c866b3dee2..bfaf1adca6 100644 --- a/deal.II/examples/step-29/step-29.cc +++ b/deal.II/examples/step-29/step-29.cc @@ -2,7 +2,7 @@ /* Author: Moritz Allmaras, Texas A&M University, 2007 */ /* */ -/* Copyright (C) 2007, 2008, 2010 by the deal.II authors and M. Allmaras */ +/* Copyright (C) 2007, 2008, 2010, 2011 by the deal.II authors and M. 
Allmaras */ /* */ /* This file is subject to QPL and may not be distributed */ /* without copyright and license information. Please refer */ @@ -13,7 +13,7 @@ // @sect3{Include files} - // The following header files are unchanged + // The following header files are unchanged // from step-7 and have been discussed before: #include @@ -80,1349 +80,1352 @@ // program takes: #include - // As the last step at the beginning - // of this program, we make - // everything that is in the deal.II - // namespace globally available, - // without the need to prefix - // everything with + // As the last step at the beginning of this + // program, we put everything that is in this + // program into its namespace and, within it, + // make everything that is in the deal.II + // namespace globally available, without the + // need to prefix everything with // dealii::: -using namespace dealii; - - - // @sect3{The DirichletBoundaryValues class} - - // First we define a class for the - // function representing the - // Dirichlet boundary values. This - // has been done many times before - // and therefore does not need much - // explanation. - // - // Since there are two values $v$ and - // $w$ that need to be prescribed at - // the boundary, we have to tell the - // base class that this is a - // vector-valued function with two - // components, and the - // vector_value function - // and its cousin - // vector_value_list must - // return vectors with two entries. In - // our case the function is very - // simple, it just returns 1 for the - // real part $v$ and 0 for the - // imaginary part $w$ regardless of - // the point where it is evaluated. -template -class DirichletBoundaryValues : public Function +namespace Step29 { - public: - DirichletBoundaryValues() : Function (2) {}; + using namespace dealii; + + + // @sect3{The DirichletBoundaryValues class} + + // First we define a class for the + // function representing the + // Dirichlet boundary values. This + // has been done many times before + // and therefore does not need much + // explanation. + // + // Since there are two values $v$ and + // $w$ that need to be prescribed at + // the boundary, we have to tell the + // base class that this is a + // vector-valued function with two + // components, and the + // vector_value function + // and its cousin + // vector_value_list must + // return vectors with two entries. In + // our case the function is very + // simple, it just returns 1 for the + // real part $v$ and 0 for the + // imaginary part $w$ regardless of + // the point where it is evaluated. 
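  // (Purely for orientation, and not something this program does at this
  // point: a vector-valued boundary function like the one below is typically
  // handed to VectorTools::interpolate_boundary_values, e.g.
  //
  //   std::map<unsigned int,double> boundary_values;
  //   VectorTools::interpolate_boundary_values (dof_handler,
  //                                             1,
  //                                             DirichletBoundaryValues<dim>(),
  //                                             boundary_values);
  //
  // where the boundary indicator '1' is merely a placeholder; the indicator
  // to use depends on how the mesh marks the transducer part of the
  // boundary.)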
+ template + class DirichletBoundaryValues : public Function + { + public: + DirichletBoundaryValues() : Function (2) {}; - virtual void vector_value (const Point &p, - Vector &values) const; + virtual void vector_value (const Point &p, + Vector &values) const; - virtual void vector_value_list (const std::vector > &points, - std::vector > &value_list) const; -}; + virtual void vector_value_list (const std::vector > &points, + std::vector > &value_list) const; + }; -template -inline -void DirichletBoundaryValues::vector_value (const Point &/*p*/, - Vector &values) const -{ - Assert (values.size() == 2, ExcDimensionMismatch (values.size(), 2)); + template + inline + void DirichletBoundaryValues::vector_value (const Point &/*p*/, + Vector &values) const + { + Assert (values.size() == 2, ExcDimensionMismatch (values.size(), 2)); - values(0) = 1; - values(1) = 0; -} + values(0) = 1; + values(1) = 0; + } -template -void DirichletBoundaryValues::vector_value_list (const std::vector > &points, - std::vector > &value_list) const -{ - Assert (value_list.size() == points.size(), - ExcDimensionMismatch (value_list.size(), points.size())); + template + void DirichletBoundaryValues::vector_value_list (const std::vector > &points, + std::vector > &value_list) const + { + Assert (value_list.size() == points.size(), + ExcDimensionMismatch (value_list.size(), points.size())); - for (unsigned int p=0; p::vector_value (points[p], value_list[p]); -} + for (unsigned int p=0; p::vector_value (points[p], value_list[p]); + } - // @sect3{The ParameterReader class} - - // The next class is responsible for - // preparing the ParameterHandler - // object and reading parameters from - // an input file. It includes a - // function - // declare_parameters - // that declares all the necessary - // parameters and a - // read_parameters - // function that is called from - // outside to initiate the parameter - // reading process. -class ParameterReader : public Subscriptor -{ - public: - ParameterReader(ParameterHandler &); - void read_parameters(const std::string); - - private: - void declare_parameters(); - ParameterHandler &prm; -}; - - // The constructor stores a reference to - // the ParameterHandler object that is passed to it: -ParameterReader::ParameterReader(ParameterHandler ¶mhandler) - : - prm(paramhandler) -{} - - // @sect4{ParameterReader::declare_parameters} - - // The declare_parameters - // function declares all the - // parameters that our - // ParameterHandler object will be - // able to read from input files, - // along with their types, range - // conditions and the subsections they - // appear in. We will wrap all the - // entries that go into a section in a - // pair of braces to force the editor - // to indent them by one level, making - // it simpler to read which entries - // together form a section: -void ParameterReader::declare_parameters() -{ - // Parameters for mesh and geometry - // include the number of global - // refinement steps that are applied - // to the initial coarse mesh and the - // focal distance $d$ of the - // transducer lens. For the number of - // refinement steps, we allow integer - // values in the range $[0,\infty)$, - // where the omitted second argument - // to the Patterns::Integer object - // denotes the half-open interval. 
- // For the focal distance any number - // greater than zero is accepted: - prm.enter_subsection ("Mesh & geometry parameters"); + // @sect3{The ParameterReader class} + + // The next class is responsible for + // preparing the ParameterHandler + // object and reading parameters from + // an input file. It includes a + // function + // declare_parameters + // that declares all the necessary + // parameters and a + // read_parameters + // function that is called from + // outside to initiate the parameter + // reading process. + class ParameterReader : public Subscriptor { - prm.declare_entry("Number of refinements", "6", - Patterns::Integer(0), - "Number of global mesh refinement steps " - "applied to initial coarse grid"); - - prm.declare_entry("Focal distance", "0.3", - Patterns::Double(0), - "Distance of the focal point of the lens " - "to the x-axis"); - } - prm.leave_subsection (); - - // The next subsection is devoted to - // the physical parameters appearing - // in the equation, which are the - // frequency $\omega$ and wave speed - // $c$. Again, both need to lie in the - // half-open interval $[0,\infty)$ - // represented by calling the - // Patterns::Double class with only - // the left end-point as argument: - prm.enter_subsection ("Physical constants"); + public: + ParameterReader(ParameterHandler &); + void read_parameters(const std::string); + + private: + void declare_parameters(); + ParameterHandler &prm; + }; + + // The constructor stores a reference to + // the ParameterHandler object that is passed to it: + ParameterReader::ParameterReader(ParameterHandler ¶mhandler) + : + prm(paramhandler) + {} + + // @sect4{ParameterReader::declare_parameters} + + // The declare_parameters + // function declares all the + // parameters that our + // ParameterHandler object will be + // able to read from input files, + // along with their types, range + // conditions and the subsections they + // appear in. We will wrap all the + // entries that go into a section in a + // pair of braces to force the editor + // to indent them by one level, making + // it simpler to read which entries + // together form a section: + void ParameterReader::declare_parameters() { - prm.declare_entry("c", "1.5e5", - Patterns::Double(0), - "Wave speed"); + // Parameters for mesh and geometry + // include the number of global + // refinement steps that are applied + // to the initial coarse mesh and the + // focal distance $d$ of the + // transducer lens. For the number of + // refinement steps, we allow integer + // values in the range $[0,\infty)$, + // where the omitted second argument + // to the Patterns::Integer object + // denotes the half-open interval. + // For the focal distance any number + // greater than zero is accepted: + prm.enter_subsection ("Mesh & geometry parameters"); + { + prm.declare_entry("Number of refinements", "6", + Patterns::Integer(0), + "Number of global mesh refinement steps " + "applied to initial coarse grid"); + + prm.declare_entry("Focal distance", "0.3", + Patterns::Double(0), + "Distance of the focal point of the lens " + "to the x-axis"); + } + prm.leave_subsection (); + + // The next subsection is devoted to + // the physical parameters appearing + // in the equation, which are the + // frequency $\omega$ and wave speed + // $c$. 
Again, both need to lie in the + // half-open interval $[0,\infty)$ + // represented by calling the + // Patterns::Double class with only + // the left end-point as argument: + prm.enter_subsection ("Physical constants"); + { + prm.declare_entry("c", "1.5e5", + Patterns::Double(0), + "Wave speed"); - prm.declare_entry("omega", "5.0e7", - Patterns::Double(0), - "Frequency"); - } - prm.leave_subsection (); + prm.declare_entry("omega", "5.0e7", + Patterns::Double(0), + "Frequency"); + } + prm.leave_subsection (); - // Last but not least we would like - // to be able to change some - // properties of the output, like - // filename and format, through - // entries in the configuration - // file, which is the purpose of - // the last subsection: - prm.enter_subsection ("Output parameters"); - { - prm.declare_entry("Output file", "solution", - Patterns::Anything(), - "Name of the output file (without extension)"); - - // Since different output formats - // may require different - // parameters for generating - // output (like for example, - // postscript output needs - // viewpoint angles, line widths, - // colors etc), it would be - // cumbersome if we had to - // declare all these parameters - // by hand for every possible - // output format supported in the - // library. Instead, each output - // format has a - // FormatFlags::declare_parameters - // function, which declares all - // the parameters specific to - // that format in an own - // subsection. The following call - // of - // DataOutInterface<1>::declare_parameters - // executes - // declare_parameters - // for all available output - // formats, so that for each - // format an own subsection will - // be created with parameters - // declared for that particular - // output format. (The actual - // value of the template - // parameter in the call, - // @<1@> above, does - // not matter here: the function - // does the same work independent - // of the dimension, but happens - // to be in a - // template-parameter-dependent - // class.) To find out what - // parameters there are for which - // output format, you can either - // consult the documentation of - // the DataOutBase class, or - // simply run this program - // without a parameter file - // present. It will then create a - // file with all declared - // parameters set to their - // default values, which can - // conveniently serve as a - // starting point for setting the - // parameters to the values you - // desire. - DataOutInterface<1>::declare_parameters (prm); + // Last but not least we would like + // to be able to change some + // properties of the output, like + // filename and format, through + // entries in the configuration + // file, which is the purpose of + // the last subsection: + prm.enter_subsection ("Output parameters"); + { + prm.declare_entry("Output file", "solution", + Patterns::Anything(), + "Name of the output file (without extension)"); + + // Since different output formats + // may require different + // parameters for generating + // output (like for example, + // postscript output needs + // viewpoint angles, line widths, + // colors etc), it would be + // cumbersome if we had to + // declare all these parameters + // by hand for every possible + // output format supported in the + // library. Instead, each output + // format has a + // FormatFlags::declare_parameters + // function, which declares all + // the parameters specific to + // that format in an own + // subsection. 
The following call + // of + // DataOutInterface<1>::declare_parameters + // executes + // declare_parameters + // for all available output + // formats, so that for each + // format an own subsection will + // be created with parameters + // declared for that particular + // output format. (The actual + // value of the template + // parameter in the call, + // @<1@> above, does + // not matter here: the function + // does the same work independent + // of the dimension, but happens + // to be in a + // template-parameter-dependent + // class.) To find out what + // parameters there are for which + // output format, you can either + // consult the documentation of + // the DataOutBase class, or + // simply run this program + // without a parameter file + // present. It will then create a + // file with all declared + // parameters set to their + // default values, which can + // conveniently serve as a + // starting point for setting the + // parameters to the values you + // desire. + DataOutInterface<1>::declare_parameters (prm); + } + prm.leave_subsection (); } - prm.leave_subsection (); -} - - // @sect4{ParameterReader::read_parameters} - - // This is the main function in the - // ParameterReader class. It gets - // called from outside, first - // declares all the parameters, and - // then reads them from the input - // file whose filename is provided by - // the caller. After the call to this - // function is complete, the - // prm object can be - // used to retrieve the values of the - // parameters read in from the file: -void ParameterReader::read_parameters (const std::string parameter_file) -{ - declare_parameters(); - prm.read_input (parameter_file); -} + // @sect4{ParameterReader::read_parameters} + + // This is the main function in the + // ParameterReader class. It gets + // called from outside, first + // declares all the parameters, and + // then reads them from the input + // file whose filename is provided by + // the caller. After the call to this + // function is complete, the + // prm object can be + // used to retrieve the values of the + // parameters read in from the file: + void ParameterReader::read_parameters (const std::string parameter_file) + { + declare_parameters(); + prm.read_input (parameter_file); + } - // @sect3{The ComputeIntensity class} - - // As mentioned in the introduction, - // the quantitiy that we are really - // after is the spatial distribution - // of the intensity of the ultrasound - // wave, which corresponds to - // $|u|=\sqrt{v^2+w^2}$. Now we could - // just be content with having $v$ - // and $w$ in our output, and use a - // suitable visualization or - // postprocessing tool to derive - // $|u|$ from the solution we - // computed. However, there is also a - // way to output data derived from - // the solution in deal.II, and we - // are going to make use of this - // mechanism here. - - // So far we have always used the - // DataOut::add_data_vector function - // to add vectors containing output - // data to a DataOut object. There - // is a special version of this - // function that in addition to the - // data vector has an additional - // argument of type - // DataPostprocessor. 
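  // (Schematically, both variants of the call
  // will appear side by side in the
  // output_results() function further down:
  // @code
  //   data_out.add_data_vector (solution, solution_names);  // the raw components v and w
  //   data_out.add_data_vector (solution, intensities);     // the derived quantity |u|
  // @endcode
  // where intensities is an object of the
  // postprocessor class we are about to write.)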
What happens - // when this function is used for - // output is that at each point where - // output data is to be generated, - // the compute_derived_quantities - // function of the specified - // DataPostprocessor object is - // invoked to compute the output - // quantities from the values, the - // gradients and the second - // derivatives of the finite element - // function represented by the data - // vector (in the case of face - // related data, normal vectors are - // available as well). Hence, this - // allows us to output any quantity - // that can locally be derived from - // the values of the solution and its - // derivatives. Of course, the - // ultrasound intensity $|u|$ is such - // a quantity and its computation - // doesn't even involve any - // derivatives of $v$ or $w$. - - // In practice, the DataPostprocessor - // class only provides an interface - // to this functionality, and we need - // to derive our own class from it in - // order to implement the functions - // specified by the interface. This - // is what the - // ComputeIntensity - // class is about. Notice that all - // its member functions are - // implementations of virtual - // functions defined by the interface - // class DataPostprocessor. -template -class ComputeIntensity : public DataPostprocessor -{ - public: - - virtual - void - compute_derived_quantities_vector (const std::vector< Vector< double > > &uh, - const std::vector< std::vector< Tensor< 1, dim > > > &duh, - const std::vector< std::vector< Tensor< 2, dim > > > &dduh, - const std::vector< Point< dim > > &normals, - const std::vector > &evaluation_points, - std::vector< Vector< double > > &computed_quantities) const; - - virtual std::vector get_names () const; - virtual UpdateFlags get_needed_update_flags () const; - virtual unsigned int n_output_variables () const; -}; - - // The get_names - // function returns a vector of - // strings representing the names we - // assign to the individual - // quantities that our postprocessor - // outputs. In our case, the - // postprocessor has only $|u|$ as an - // output, so we return a vector with - // a single component named - // "Intensity": -template -std::vector -ComputeIntensity::get_names() const -{ - return std::vector (1, "Intensity"); -} - // The next function returns a set of - // flags that indicate which data is - // needed by the postprocessor in - // order to compute the output - // quantities. This can be any - // subset of update_values, - // update_gradients and - // update_hessians (and, in the case - // of face data, also - // update_normal_vectors), which are - // documented in UpdateFlags. Of - // course, computation of the - // derivatives requires additional - // resources, so only the flags for - // data that is really needed should - // be given here, just as we do when - // we use FEValues objects. In our - // case, only the function values of - // $v$ and $w$ are needed to compute - // $|u|$, so we're good with the - // update_values flag. -template -UpdateFlags -ComputeIntensity::get_needed_update_flags () const -{ - return update_values; -} + // @sect3{The ComputeIntensity class} + + // As mentioned in the introduction, + // the quantitiy that we are really + // after is the spatial distribution + // of the intensity of the ultrasound + // wave, which corresponds to + // $|u|=\sqrt{v^2+w^2}$. 
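  // (Recall that $v$ and $w$ are nothing but the
  // real and imaginary parts of the complex-valued
  // solution, $u=v+\mathrm{i}w$, so that
  // $|u|^2=u\bar{u}=(v+\mathrm{i}w)(v-\mathrm{i}w)=v^2+w^2$,
  // which is where this formula comes from.)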
Now we could + // just be content with having $v$ + // and $w$ in our output, and use a + // suitable visualization or + // postprocessing tool to derive + // $|u|$ from the solution we + // computed. However, there is also a + // way to output data derived from + // the solution in deal.II, and we + // are going to make use of this + // mechanism here. + + // So far we have always used the + // DataOut::add_data_vector function + // to add vectors containing output + // data to a DataOut object. There + // is a special version of this + // function that in addition to the + // data vector has an additional + // argument of type + // DataPostprocessor. What happens + // when this function is used for + // output is that at each point where + // output data is to be generated, + // the compute_derived_quantities + // function of the specified + // DataPostprocessor object is + // invoked to compute the output + // quantities from the values, the + // gradients and the second + // derivatives of the finite element + // function represented by the data + // vector (in the case of face + // related data, normal vectors are + // available as well). Hence, this + // allows us to output any quantity + // that can locally be derived from + // the values of the solution and its + // derivatives. Of course, the + // ultrasound intensity $|u|$ is such + // a quantity and its computation + // doesn't even involve any + // derivatives of $v$ or $w$. + + // In practice, the DataPostprocessor + // class only provides an interface + // to this functionality, and we need + // to derive our own class from it in + // order to implement the functions + // specified by the interface. This + // is what the + // ComputeIntensity + // class is about. Notice that all + // its member functions are + // implementations of virtual + // functions defined by the interface + // class DataPostprocessor. + template + class ComputeIntensity : public DataPostprocessor + { + public: + + virtual + void + compute_derived_quantities_vector (const std::vector< Vector< double > > &uh, + const std::vector< std::vector< Tensor< 1, dim > > > &duh, + const std::vector< std::vector< Tensor< 2, dim > > > &dduh, + const std::vector< Point< dim > > &normals, + const std::vector > &evaluation_points, + std::vector< Vector< double > > &computed_quantities) const; + + virtual std::vector get_names () const; + virtual UpdateFlags get_needed_update_flags () const; + virtual unsigned int n_output_variables () const; + }; + + // The get_names + // function returns a vector of + // strings representing the names we + // assign to the individual + // quantities that our postprocessor + // outputs. In our case, the + // postprocessor has only $|u|$ as an + // output, so we return a vector with + // a single component named + // "Intensity": + template + std::vector + ComputeIntensity::get_names() const + { + return std::vector (1, "Intensity"); + } - // To allow the caller to find out - // how many derived quantities are - // returned by the postprocessor, the - // n_output_variables - // function is used. Since we compute - // only $|u|$, the correct value to - // return in our case is just 1: -template -unsigned int -ComputeIntensity::n_output_variables () const -{ - return 1; -} + // The next function returns a set of + // flags that indicate which data is + // needed by the postprocessor in + // order to compute the output + // quantities. 
This can be any + // subset of update_values, + // update_gradients and + // update_hessians (and, in the case + // of face data, also + // update_normal_vectors), which are + // documented in UpdateFlags. Of + // course, computation of the + // derivatives requires additional + // resources, so only the flags for + // data that is really needed should + // be given here, just as we do when + // we use FEValues objects. In our + // case, only the function values of + // $v$ and $w$ are needed to compute + // $|u|$, so we're good with the + // update_values flag. + template + UpdateFlags + ComputeIntensity::get_needed_update_flags () const + { + return update_values; + } + // To allow the caller to find out + // how many derived quantities are + // returned by the postprocessor, the + // n_output_variables + // function is used. Since we compute + // only $|u|$, the correct value to + // return in our case is just 1: + template + unsigned int + ComputeIntensity::n_output_variables () const + { + return 1; + } - // The actual prostprocessing happens - // in the following function. Its - // inputs are a vector representing - // values of the function (which is - // here vector-valued) representing - // the data vector given to - // DataOut::add_data_vector, - // evaluated at all quadrature points - // where we generate output, and some - // tensor objects representing - // derivatives (that we don't use - // here since $|u|$ is computed from - // just $v$ and $w$, and for which we - // assign no name to the - // corresponding function argument). - // The derived quantities are - // returned in the - // computed_quantities - // vector. Remember that this - // function may only use data for - // which the respective update flag - // is specified by - // get_needed_update_flags. For - // example, we may not use the - // derivatives here, since our - // implementation of - // get_needed_update_flags - // requests that only function values - // are provided. -template -void -ComputeIntensity::compute_derived_quantities_vector ( - const std::vector< Vector< double > > & uh, - const std::vector< std::vector< Tensor< 1, dim > > > & /*duh*/, - const std::vector< std::vector< Tensor< 2, dim > > > & /*dduh*/, - const std::vector< Point< dim > > & /*normals*/, - const std::vector > & /*evaluation_points*/, - std::vector< Vector< double > > & computed_quantities -) const -{ - Assert(computed_quantities.size() == uh.size(), - ExcDimensionMismatch (computed_quantities.size(), uh.size())); - - // The computation itself is - // straightforward: We iterate over - // each entry in the output vector - // and compute $|u|$ from the - // corresponding values of $v$ and - // $w$: - for (unsigned int i=0; icomputed_quantities + // vector. Remember that this + // function may only use data for + // which the respective update flag + // is specified by + // get_needed_update_flags. For + // example, we may not use the + // derivatives here, since our + // implementation of + // get_needed_update_flags + // requests that only function values + // are provided. 
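  // (To illustrate this rule by a hypothetical
  // variation that we do not pursue here: if we
  // wanted to output, say, the magnitude of the
  // gradient of $v$ instead of $|u|$, we would
  // have to return
  // update_values | update_gradients -- or just
  // update_gradients -- from
  // get_needed_update_flags above, and could then
  // use the gradient argument duh along the lines
  // of
  // @code
  //   computed_quantities[i](0) = std::sqrt(duh[i][0] * duh[i][0]);
  // @endcode
  // For the intensity computed in this program,
  // the function values alone are all we need.)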
+ template + void + ComputeIntensity::compute_derived_quantities_vector ( + const std::vector< Vector< double > > & uh, + const std::vector< std::vector< Tensor< 1, dim > > > & /*duh*/, + const std::vector< std::vector< Tensor< 2, dim > > > & /*dduh*/, + const std::vector< Point< dim > > & /*normals*/, + const std::vector > & /*evaluation_points*/, + std::vector< Vector< double > > & computed_quantities + ) const + { + Assert(computed_quantities.size() == uh.size(), + ExcDimensionMismatch (computed_quantities.size(), uh.size())); + + // The computation itself is + // straightforward: We iterate over + // each entry in the output vector + // and compute $|u|$ from the + // corresponding values of $v$ and + // $w$: + for (unsigned int i=0; iUltrasoundProblem class} - - // Finally here is the main class of - // this program. It's member - // functions are very similar to the - // previous examples, in particular - // step-4, and the list of member - // variables does not contain any - // major surprises either. The - // ParameterHandler object that is - // passed to the constructor is - // stored as a reference to allow - // easy access to the parameters from - // all functions of the class. Since - // we are working with vector valued - // finite elements, the FE object we - // are using is of type FESystem. -template -class UltrasoundProblem -{ - public: - UltrasoundProblem (ParameterHandler &); - ~UltrasoundProblem (); - void run (); - - private: - void make_grid (); - void setup_system (); - void assemble_system (); - void solve (); - void output_results () const; - - ParameterHandler &prm; - - Triangulation triangulation; - DoFHandler dof_handler; - FESystem fe; - - SparsityPattern sparsity_pattern; - SparseMatrix system_matrix; - Vector solution, system_rhs; -}; - - - - // The constructor takes the - // ParameterHandler object and stores - // it in a reference. It also - // initializes the DoF-Handler and - // the finite element system, which - // consists of two copies of the - // scalar Q1 field, one for $v$ and - // one for $w$: -template -UltrasoundProblem::UltrasoundProblem (ParameterHandler& param) - : - prm(param), - dof_handler(triangulation), - fe(FE_Q(1), 2) -{} - - -template -UltrasoundProblem::~UltrasoundProblem () -{ - dof_handler.clear(); -} + // @sect3{The UltrasoundProblem class} + + // Finally here is the main class of + // this program. It's member + // functions are very similar to the + // previous examples, in particular + // step-4, and the list of member + // variables does not contain any + // major surprises either. The + // ParameterHandler object that is + // passed to the constructor is + // stored as a reference to allow + // easy access to the parameters from + // all functions of the class. Since + // we are working with vector valued + // finite elements, the FE object we + // are using is of type FESystem. + template + class UltrasoundProblem + { + public: + UltrasoundProblem (ParameterHandler &); + ~UltrasoundProblem (); + void run (); + + private: + void make_grid (); + void setup_system (); + void assemble_system (); + void solve (); + void output_results () const; + + ParameterHandler &prm; + + Triangulation triangulation; + DoFHandler dof_handler; + FESystem fe; + + SparsityPattern sparsity_pattern; + SparseMatrix system_matrix; + Vector solution, system_rhs; + }; + + + + // The constructor takes the + // ParameterHandler object and stores + // it in a reference. 
It also + // initializes the DoF-Handler and + // the finite element system, which + // consists of two copies of the + // scalar Q1 field, one for $v$ and + // one for $w$: + template + UltrasoundProblem::UltrasoundProblem (ParameterHandler& param) + : + prm(param), + dof_handler(triangulation), + fe(FE_Q(1), 2) + {} + + + template + UltrasoundProblem::~UltrasoundProblem () + { + dof_handler.clear(); + } - // @sect4{UltrasoundProblem::make_grid} - - // Here we setup the grid for our - // domain. As mentioned in the - // exposition, the geometry is just a - // unit square (in 2d) with the part - // of the boundary that represents - // the transducer lens replaced by a - // sector of a circle. -template -void UltrasoundProblem::make_grid () -{ - // First we generate some logging - // output and start a timer so we - // can compute execution time when - // this function is done: - deallog << "Generating grid... "; - Timer timer; - timer.start (); - - // Then we query the values for the - // focal distance of the transducer - // lens and the number of mesh - // refinement steps from our - // ParameterHandler object: - prm.enter_subsection ("Mesh & geometry parameters"); - - const double focal_distance = prm.get_double("Focal distance"); - const unsigned int n_refinements = prm.get_integer("Number of refinements"); - - prm.leave_subsection (); - - // Next, two points are defined for - // position and focal point of the - // transducer lens, which is the - // center of the circle whose - // segment will form the transducer - // part of the boundary. We compute - // the radius of this circle in - // such a way that the segment fits - // in the interval [0.4,0.6] on the - // x-axis. Notice that this is the - // only point in the program where - // things are slightly different in - // 2D and 3D. Even though this - // tutorial only deals with the 2D - // case, the necessary additions to - // make this program functional in - // 3D are so minimal that we opt - // for including them: - const Point transducer = (dim == 2) ? + // @sect4{UltrasoundProblem::make_grid} + + // Here we setup the grid for our + // domain. As mentioned in the + // exposition, the geometry is just a + // unit square (in 2d) with the part + // of the boundary that represents + // the transducer lens replaced by a + // sector of a circle. + template + void UltrasoundProblem::make_grid () + { + // First we generate some logging + // output and start a timer so we + // can compute execution time when + // this function is done: + deallog << "Generating grid... "; + Timer timer; + timer.start (); + + // Then we query the values for the + // focal distance of the transducer + // lens and the number of mesh + // refinement steps from our + // ParameterHandler object: + prm.enter_subsection ("Mesh & geometry parameters"); + + const double focal_distance = prm.get_double("Focal distance"); + const unsigned int n_refinements = prm.get_integer("Number of refinements"); + + prm.leave_subsection (); + + // Next, two points are defined for + // position and focal point of the + // transducer lens, which is the + // center of the circle whose + // segment will form the transducer + // part of the boundary. We compute + // the radius of this circle in + // such a way that the segment fits + // in the interval [0.4,0.6] on the + // x-axis. Notice that this is the + // only point in the program where + // things are slightly different in + // 2D and 3D. 
Even though this + // tutorial only deals with the 2D + // case, the necessary additions to + // make this program functional in + // 3D are so minimal that we opt + // for including them: + const Point transducer = (dim == 2) ? Point (0.5, 0.0) : - Point (0.5, 0.5, 0.0), + Point (0.5, 0.5, 0.0), focal_point = (dim == 2) ? Point (0.5, focal_distance) : Point (0.5, 0.5, focal_distance); - const double radius = std::sqrt( (focal_point.distance(transducer) * - focal_point.distance(transducer)) + - ((dim==2) ? 0.01 : 0.02)); - - - // As initial coarse grid we take a - // simple unit square with 5 - // subdivisions in each - // direction. The number of - // subdivisions is chosen so that - // the line segment $[0.4,0.6]$ - // that we want to designate as the - // transducer boundary is spanned - // by a single face. Then we step - // through all cells to find the - // faces where the transducer is to - // be located, which in fact is - // just the single edge from 0.4 to - // 0.6 on the x-axis. This is where - // we want the refinements to be - // made according to a circle - // shaped boundary, so we mark this - // edge with a different boundary - // indicator. - GridGenerator::subdivided_hyper_cube (triangulation, 5, 0, 1); - - typename Triangulation::cell_iterator - cell = triangulation.begin (), - endc = triangulation.end(); - - for (; cell!=endc; ++cell) - for (unsigned int face=0; face::faces_per_cell; ++face) - if ( cell->face(face)->at_boundary() && - ((cell->face(face)->center() - transducer).square() < 0.01) ) - - cell->face(face)->set_boundary_indicator (1); - - // For the circle part of the - // transducer lens, a hyper-ball - // object is used (which, of course, - // in 2D just represents a circle), - // with radius and center as computed - // above. By marking this object as - // static, we ensure that - // it lives until the end of the - // program and thereby longer than the - // triangulation object we will - // associated with it. We then assign - // this boundary-object to the part of - // the boundary with boundary - // indicator 1: - static const HyperBallBoundary boundary(focal_point, radius); - triangulation.set_boundary(1, boundary); - - // Now global refinement is - // executed. Cells near the - // transducer location will be - // automatically refined according - // to the circle shaped boundary of - // the transducer lens: - triangulation.refine_global (n_refinements); - - // Lastly, we generate some more - // logging output. We stop the - // timer and query the number of - // CPU seconds elapsed since the - // beginning of the function: - timer.stop (); - deallog << "done (" - << timer() - << "s)" - << std::endl; - - deallog << " Number of active cells: " - << triangulation.n_active_cells() - << std::endl; -} - - - // @sect4{UltrasoundProblem::setup_system} - // - // Initialization of the system - // matrix, sparsity patterns and - // vectors are the same as in - // previous examples and therefore do - // not need further comment. As in - // the previous function, we also - // output the run time of what we do - // here: -template -void UltrasoundProblem::setup_system () -{ - deallog << "Setting up system... "; - Timer timer; - timer.start(); + const double radius = std::sqrt( (focal_point.distance(transducer) * + focal_point.distance(transducer)) + + ((dim==2) ? 0.01 : 0.02)); + + + // As initial coarse grid we take a + // simple unit square with 5 + // subdivisions in each + // direction. 
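    // (In case the radius formula above looks
    // like magic: the circle is centered at the
    // focal point $(0.5,d)$, with $d$ the focal
    // distance, and has to pass through the end
    // points $(0.4,0)$ and $(0.6,0)$ of the
    // transducer segment, so that
    // $r^2 = d^2 + 0.1^2 = d^2 + 0.01$ in 2d; in
    // 3d the corresponding corner of the
    // transducer patch lies at distance
    // $\sqrt{d^2+0.02}$ from the focal point,
    // which explains the 0.02.)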
The number of + // subdivisions is chosen so that + // the line segment $[0.4,0.6]$ + // that we want to designate as the + // transducer boundary is spanned + // by a single face. Then we step + // through all cells to find the + // faces where the transducer is to + // be located, which in fact is + // just the single edge from 0.4 to + // 0.6 on the x-axis. This is where + // we want the refinements to be + // made according to a circle + // shaped boundary, so we mark this + // edge with a different boundary + // indicator. + GridGenerator::subdivided_hyper_cube (triangulation, 5, 0, 1); + + typename Triangulation::cell_iterator + cell = triangulation.begin (), + endc = triangulation.end(); + + for (; cell!=endc; ++cell) + for (unsigned int face=0; face::faces_per_cell; ++face) + if ( cell->face(face)->at_boundary() && + ((cell->face(face)->center() - transducer).square() < 0.01) ) + + cell->face(face)->set_boundary_indicator (1); + + // For the circle part of the + // transducer lens, a hyper-ball + // object is used (which, of course, + // in 2D just represents a circle), + // with radius and center as computed + // above. By marking this object as + // static, we ensure that + // it lives until the end of the + // program and thereby longer than the + // triangulation object we will + // associated with it. We then assign + // this boundary-object to the part of + // the boundary with boundary + // indicator 1: + static const HyperBallBoundary boundary(focal_point, radius); + triangulation.set_boundary(1, boundary); + + // Now global refinement is + // executed. Cells near the + // transducer location will be + // automatically refined according + // to the circle shaped boundary of + // the transducer lens: + triangulation.refine_global (n_refinements); + + // Lastly, we generate some more + // logging output. We stop the + // timer and query the number of + // CPU seconds elapsed since the + // beginning of the function: + timer.stop (); + deallog << "done (" + << timer() + << "s)" + << std::endl; + + deallog << " Number of active cells: " + << triangulation.n_active_cells() + << std::endl; + } - dof_handler.distribute_dofs (fe); - sparsity_pattern.reinit (dof_handler.n_dofs(), - dof_handler.n_dofs(), - dof_handler.max_couplings_between_dofs()); + // @sect4{UltrasoundProblem::setup_system} + // + // Initialization of the system + // matrix, sparsity patterns and + // vectors are the same as in + // previous examples and therefore do + // not need further comment. As in + // the previous function, we also + // output the run time of what we do + // here: + template + void UltrasoundProblem::setup_system () + { + deallog << "Setting up system... 
"; + Timer timer; + timer.start(); - DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); - sparsity_pattern.compress(); + dof_handler.distribute_dofs (fe); - system_matrix.reinit (sparsity_pattern); - system_rhs.reinit (dof_handler.n_dofs()); - solution.reinit (dof_handler.n_dofs()); + sparsity_pattern.reinit (dof_handler.n_dofs(), + dof_handler.n_dofs(), + dof_handler.max_couplings_between_dofs()); - timer.stop (); - deallog << "done (" - << timer() - << "s)" - << std::endl; + DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); + sparsity_pattern.compress(); - deallog << " Number of degrees of freedom: " - << dof_handler.n_dofs() - << std::endl; -} + system_matrix.reinit (sparsity_pattern); + system_rhs.reinit (dof_handler.n_dofs()); + solution.reinit (dof_handler.n_dofs()); + timer.stop (); + deallog << "done (" + << timer() + << "s)" + << std::endl; - // @sect4{UltrasoundProblem::assemble_system} - // As before, this function takes - // care of assembling the system - // matrix and right hand side vector: -template -void UltrasoundProblem::assemble_system () -{ - deallog << "Assembling system matrix... "; - Timer timer; - timer.start (); - - // First we query wavespeed and - // frequency from the - // ParameterHandler object and - // store them in local variables, - // as they will be used frequently - // throughout this function. - - prm.enter_subsection ("Physical constants"); - - const double omega = prm.get_double("omega"), - c = prm.get_double("c"); - - prm.leave_subsection (); - - // As usual, for computing - // integrals ordinary Gauss - // quadrature rule is used. Since - // our bilinear form involves - // boundary integrals on - // $\Gamma_2$, we also need a - // quadrature rule for surface - // integration on the faces, which - // are $dim-1$ dimensional: - QGauss quadrature_formula(2); - QGauss face_quadrature_formula(2); - - const unsigned int n_q_points = quadrature_formula.size(), - n_face_q_points = face_quadrature_formula.size(), - dofs_per_cell = fe.dofs_per_cell; - - // The FEValues objects will - // evaluate the shape functions for - // us. For the part of the - // bilinear form that involves - // integration on $\Omega$, we'll - // need the values and gradients of - // the shape functions, and of - // course the quadrature weights. - // For the terms involving the - // boundary integrals, only shape - // function values and the - // quadrature weights are - // necessary. - FEValues fe_values (fe, quadrature_formula, - update_values | update_gradients | - update_JxW_values); - - FEFaceValues fe_face_values (fe, face_quadrature_formula, - update_values | update_JxW_values); - - // As usual, the system matrix is - // assembled cell by cell, and we - // need a matrix for storing the - // local cell contributions as well - // as an index vector to transfer - // the cell contributions to the - // appropriate location in the - // global system matrix after. 
- FullMatrix cell_matrix (dofs_per_cell, dofs_per_cell); - std::vector local_dof_indices (dofs_per_cell); - - typename DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(), - endc = dof_handler.end(); - - for (; cell!=endc; ++cell) - { + deallog << " Number of degrees of freedom: " + << dof_handler.n_dofs() + << std::endl; + } - // On each cell, we first need - // to reset the local - // contribution matrix and - // request the FEValues object - // to compute the shape - // functions for the current - // cell: - cell_matrix = 0; - fe_values.reinit (cell); - - for (unsigned int i=0; i::faces_per_cell; ++face) - if (cell->face(face)->at_boundary() && - (cell->face(face)->boundary_indicator() == 0) ) + // @sect4{UltrasoundProblem::assemble_system} + // As before, this function takes + // care of assembling the system + // matrix and right hand side vector: + template + void UltrasoundProblem::assemble_system () + { + deallog << "Assembling system matrix... "; + Timer timer; + timer.start (); + + // First we query wavespeed and + // frequency from the + // ParameterHandler object and + // store them in local variables, + // as they will be used frequently + // throughout this function. + + prm.enter_subsection ("Physical constants"); + + const double omega = prm.get_double("omega"), + c = prm.get_double("c"); + + prm.leave_subsection (); + + // As usual, for computing + // integrals ordinary Gauss + // quadrature rule is used. Since + // our bilinear form involves + // boundary integrals on + // $\Gamma_2$, we also need a + // quadrature rule for surface + // integration on the faces, which + // are $dim-1$ dimensional: + QGauss quadrature_formula(2); + QGauss face_quadrature_formula(2); + + const unsigned int n_q_points = quadrature_formula.size(), + n_face_q_points = face_quadrature_formula.size(), + dofs_per_cell = fe.dofs_per_cell; + + // The FEValues objects will + // evaluate the shape functions for + // us. For the part of the + // bilinear form that involves + // integration on $\Omega$, we'll + // need the values and gradients of + // the shape functions, and of + // course the quadrature weights. + // For the terms involving the + // boundary integrals, only shape + // function values and the + // quadrature weights are + // necessary. + FEValues fe_values (fe, quadrature_formula, + update_values | update_gradients | + update_JxW_values); + + FEFaceValues fe_face_values (fe, face_quadrature_formula, + update_values | update_JxW_values); + + // As usual, the system matrix is + // assembled cell by cell, and we + // need a matrix for storing the + // local cell contributions as well + // as an index vector to transfer + // the cell contributions to the + // appropriate location in the + // global system matrix after. 
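      // (It may help to keep the structure of the
      // vector-valued element in mind here: the
      // FESystem built in the constructor has two
      // components, the first playing the role of
      // $v$ and the second that of $w$, and each
      // shape function is nonzero in exactly one
      // of the two. Which component a given shape
      // function index belongs to can be queried,
      // in a purely illustrative snippet, via
      // @code
      //   const unsigned int component_i = fe.system_to_component_index(i).first;
      // @endcode
      // and the assembly below relies on this kind
      // of information to decide which pairs of
      // shape functions couple at all.)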
+ FullMatrix cell_matrix (dofs_per_cell, dofs_per_cell); + std::vector local_dof_indices (dofs_per_cell); + + typename DoFHandler::active_cell_iterator + cell = dof_handler.begin_active(), + endc = dof_handler.end(); + + for (; cell!=endc; ++cell) + { + + // On each cell, we first need + // to reset the local + // contribution matrix and + // request the FEValues object + // to compute the shape + // functions for the current + // cell: + cell_matrix = 0; + fe_values.reinit (cell); + + for (unsigned int i=0; i::faces_per_cell; ++face) + if (cell->face(face)->at_boundary() && + (cell->face(face)->boundary_indicator() == 0) ) + { - // Now we are done with this - // cell and have to transfer - // its contributions from the - // local to the global system - // matrix. To this end, we - // first get a list of the - // global indices of the this - // cells DoFs... - cell->get_dof_indices (local_dof_indices); - - - // ...and then add the entries to - // the system matrix one by - // one: - for (unsigned int i=0; iDirichletBoundaryValues - // class we defined above: - std::map boundary_values; - VectorTools::interpolate_boundary_values (dof_handler, - 1, - DirichletBoundaryValues(), - boundary_values); - - MatrixTools::apply_boundary_values (boundary_values, - system_matrix, - solution, - system_rhs); - - timer.stop (); - deallog << "done (" - << timer() - << "s)" - << std::endl; -} + // Now we are done with this + // cell and have to transfer + // its contributions from the + // local to the global system + // matrix. To this end, we + // first get a list of the + // global indices of the this + // cells DoFs... + cell->get_dof_indices (local_dof_indices); + + + // ...and then add the entries to + // the system matrix one by + // one: + for (unsigned int i=0; iDirichletBoundaryValues + // class we defined above: + std::map boundary_values; + VectorTools::interpolate_boundary_values (dof_handler, + 1, + DirichletBoundaryValues(), + boundary_values); + + MatrixTools::apply_boundary_values (boundary_values, + system_matrix, + solution, + system_rhs); + + timer.stop (); + deallog << "done (" + << timer() + << "s)" + << std::endl; + } - // @sect4{UltrasoundProblem::solve} - - // As already mentioned in the - // introduction, the system matrix is - // neither symmetric nor definite, - // and so it is not quite obvious how - // to come up with an iterative - // solver and a preconditioner that - // do a good job on this matrix. We - // chose instead to go a different - // way and solve the linear system - // with the sparse LU decomposition - // provided by UMFPACK. This is often - // a good first choice for 2D - // problems and works reasonably well - // even for a large number of DoFs. - // The deal.II interface to UMFPACK - // is given by the - // SparseDirectUMFPACK class, which - // is very easy to use and allows us - // to solve our linear system with - // just 3 lines of code. - - // Note again that for compiling this - // example program, you need to have - // the deal.II library built with - // UMFPACK support, which can be - // achieved by providing the - // --with-umfpack switch to - // the configure script prior to - // compilation of the library. -template -void UltrasoundProblem::solve () -{ - deallog << "Solving linear system... "; - Timer timer; - timer.start (); - - // The code to solve the linear - // system is short: First, we - // allocate an object of the right - // type. 
The following - // initialize call - // provides the matrix that we - // would like to invert to the - // SparseDirectUMFPACK object, and - // at the same time kicks off the - // LU-decomposition. Hence, this is - // also the point where most of the - // computational work in this - // program happens. - SparseDirectUMFPACK A_direct; - A_direct.initialize(system_matrix); - - // After the decomposition, we can - // use A_direct like a - // matrix representing the inverse - // of our system matrix, so to - // compute the solution we just - // have to multiply with the right - // hand side vector: - A_direct.vmult (solution, system_rhs); - - timer.stop (); - deallog << "done (" - << timer () - << "s)" - << std::endl; -} + // @sect4{UltrasoundProblem::solve} + + // As already mentioned in the + // introduction, the system matrix is + // neither symmetric nor definite, + // and so it is not quite obvious how + // to come up with an iterative + // solver and a preconditioner that + // do a good job on this matrix. We + // chose instead to go a different + // way and solve the linear system + // with the sparse LU decomposition + // provided by UMFPACK. This is often + // a good first choice for 2D + // problems and works reasonably well + // even for a large number of DoFs. + // The deal.II interface to UMFPACK + // is given by the + // SparseDirectUMFPACK class, which + // is very easy to use and allows us + // to solve our linear system with + // just 3 lines of code. + + // Note again that for compiling this + // example program, you need to have + // the deal.II library built with + // UMFPACK support, which can be + // achieved by providing the + // --with-umfpack switch to + // the configure script prior to + // compilation of the library. + template + void UltrasoundProblem::solve () + { + deallog << "Solving linear system... "; + Timer timer; + timer.start (); + + // The code to solve the linear + // system is short: First, we + // allocate an object of the right + // type. The following + // initialize call + // provides the matrix that we + // would like to invert to the + // SparseDirectUMFPACK object, and + // at the same time kicks off the + // LU-decomposition. Hence, this is + // also the point where most of the + // computational work in this + // program happens. + SparseDirectUMFPACK A_direct; + A_direct.initialize(system_matrix); + + // After the decomposition, we can + // use A_direct like a + // matrix representing the inverse + // of our system matrix, so to + // compute the solution we just + // have to multiply with the right + // hand side vector: + A_direct.vmult (solution, system_rhs); + + timer.stop (); + deallog << "done (" + << timer () + << "s)" + << std::endl; + } - // @sect4{UltrasoundProblem::output_results} - - // Here we output our solution $v$ - // and $w$ as well as the derived - // quantity $|u|$ in the format - // specified in the parameter - // file. Most of the work for - // deriving $|u|$ from $v$ and $w$ - // was already done in the - // implementation of the - // ComputeIntensity - // class, so that the output routine - // is rather straightforward and very - // similar to what is done in the - // previous tutorials. -template -void UltrasoundProblem::output_results () const -{ - deallog << "Generating output... 
"; - Timer timer; - timer.start (); + // @sect4{UltrasoundProblem::output_results} - // Define objects of our - // ComputeIntensity - // class and a DataOut object: - ComputeIntensity intensities; - DataOut data_out; - - data_out.attach_dof_handler (dof_handler); - - // Next we query the output-related - // parameters from the - // ParameterHandler. The - // DataOut::parse_parameters call - // acts as a counterpart to the - // DataOutInterface<1>::declare_parameters - // call in - // ParameterReader::declare_parameters. It - // collects all the output format - // related parameters from the - // ParameterHandler and sets the - // corresponding properties of the - // DataOut object accordingly. - prm.enter_subsection("Output parameters"); - - const std::string output_file = prm.get("Output file"); - data_out.parse_parameters(prm); - - prm.leave_subsection (); - - // Now we put together the filename from - // the base name provided by the - // ParameterHandler and the suffix which is - // provided by the DataOut class (the - // default suffix is set to the right type - // that matches the one set in the .prm - // file through parse_parameters()): - const std::string filename = output_file + - data_out.default_suffix(); - - std::ofstream output (filename.c_str()); - - // The solution vectors $v$ and $w$ - // are added to the DataOut object - // in the usual way: - std::vector solution_names; - solution_names.push_back ("Re_u"); - solution_names.push_back ("Im_u"); - - data_out.add_data_vector (solution, solution_names); - - // For the intensity, we just call - // add_data_vector - // again, but this with our + // Here we output our solution $v$ + // and $w$ as well as the derived + // quantity $|u|$ in the format + // specified in the parameter + // file. Most of the work for + // deriving $|u|$ from $v$ and $w$ + // was already done in the + // implementation of the // ComputeIntensity - // object as the second argument, - // which effectively adds $|u|$ to - // the output data: - data_out.add_data_vector (solution, intensities); - - // The last steps are as before. Note - // that the actual output format is - // now determined by what is stated in - // the input file, i.e. one can change - // the output format without having to - // re-compile this program: - data_out.build_patches (); - data_out.write (output); - - timer.stop (); - deallog << "done (" - << timer() - << "s)" - << std::endl; -} + // class, so that the output routine + // is rather straightforward and very + // similar to what is done in the + // previous tutorials. + template + void UltrasoundProblem::output_results () const + { + deallog << "Generating output... "; + Timer timer; + timer.start (); + + // Define objects of our + // ComputeIntensity + // class and a DataOut object: + ComputeIntensity intensities; + DataOut data_out; + + data_out.attach_dof_handler (dof_handler); + + // Next we query the output-related + // parameters from the + // ParameterHandler. The + // DataOut::parse_parameters call + // acts as a counterpart to the + // DataOutInterface<1>::declare_parameters + // call in + // ParameterReader::declare_parameters. It + // collects all the output format + // related parameters from the + // ParameterHandler and sets the + // corresponding properties of the + // DataOut object accordingly. 
+ prm.enter_subsection("Output parameters"); + + const std::string output_file = prm.get("Output file"); + data_out.parse_parameters(prm); + + prm.leave_subsection (); + + // Now we put together the filename from + // the base name provided by the + // ParameterHandler and the suffix which is + // provided by the DataOut class (the + // default suffix is set to the right type + // that matches the one set in the .prm + // file through parse_parameters()): + const std::string filename = output_file + + data_out.default_suffix(); + + std::ofstream output (filename.c_str()); + + // The solution vectors $v$ and $w$ + // are added to the DataOut object + // in the usual way: + std::vector solution_names; + solution_names.push_back ("Re_u"); + solution_names.push_back ("Im_u"); + + data_out.add_data_vector (solution, solution_names); + + // For the intensity, we just call + // add_data_vector + // again, but this with our + // ComputeIntensity + // object as the second argument, + // which effectively adds $|u|$ to + // the output data: + data_out.add_data_vector (solution, intensities); + + // The last steps are as before. Note + // that the actual output format is + // now determined by what is stated in + // the input file, i.e. one can change + // the output format without having to + // re-compile this program: + data_out.build_patches (); + data_out.write (output); + + timer.stop (); + deallog << "done (" + << timer() + << "s)" + << std::endl; + } - // @sect4{UltrasoundProblem::run} - // Here we simply execute our - // functions one after the other: -template -void UltrasoundProblem::run () -{ - make_grid (); - setup_system (); - assemble_system (); - solve (); - output_results (); + // @sect4{UltrasoundProblem::run} + // Here we simply execute our + // functions one after the other: + template + void UltrasoundProblem::run () + { + make_grid (); + setup_system (); + assemble_system (); + solve (); + output_results (); + } } @@ -1442,10 +1445,13 @@ void UltrasoundProblem::run () // values so read are then handed over // to an instance of the // UltrasoundProblem class: -int main () +int main () { try { + using namespace dealii; + using namespace Step29; + ParameterHandler prm; ParameterReader param(prm); param.read_parameters("step-29.prm"); @@ -1465,7 +1471,7 @@ int main () << std::endl; return 1; } - catch (...) + catch (...) { std::cerr << std::endl << std::endl << "----------------------------------------------------" -- 2.39.5