From 0edffb59f5a138b0d5dc80c019accab6b2c6cf43 Mon Sep 17 00:00:00 2001 From: bangerth Date: Tue, 20 Mar 2012 18:56:09 +0000 Subject: [PATCH] Convert tabs into spaces so that editors with different notions how many spaces a tab corresponds to still all show our carefully indented code correctly. git-svn-id: https://svn.dealii.org/trunk@25311 0785d39b-7218-0410-832d-ea1e28bc413d --- .../examples/doxygen/block_matrix_array.cc | 30 +- .../examples/doxygen/theta_timestepping.cc | 28 +- deal.II/examples/step-1/step-1.cc | 76 +- deal.II/examples/step-10/step-10.cc | 1168 ++-- deal.II/examples/step-11/step-11.cc | 1018 +-- deal.II/examples/step-12/step-12.cc | 1112 ++-- deal.II/examples/step-13/step-13.cc | 3128 ++++----- deal.II/examples/step-14/step-14.cc | 5850 ++++++++--------- deal.II/examples/step-15/step-15.cc | 2022 +++--- deal.II/examples/step-16/step-16.cc | 1318 ++-- deal.II/examples/step-17/step-17.cc | 1788 ++--- deal.II/examples/step-19/step-19.cc | 1004 +-- deal.II/examples/step-2/step-2.cc | 504 +- deal.II/examples/step-20/step-20.cc | 1602 ++--- deal.II/examples/step-21/step-21.cc | 1446 ++-- deal.II/examples/step-22/step-22.cc | 1676 ++--- deal.II/examples/step-23/step-23.cc | 972 +-- deal.II/examples/step-24/step-24.cc | 778 +-- deal.II/examples/step-25/step-25.cc | 1228 ++-- deal.II/examples/step-26/step-26.cc | 864 +-- deal.II/examples/step-27/step-27.cc | 1474 ++--- deal.II/examples/step-28/step-28.cc | 3146 ++++----- deal.II/examples/step-29/step-29.cc | 2110 +++--- deal.II/examples/step-3/step-3.cc | 1362 ++-- deal.II/examples/step-30/step-30.cc | 1546 ++--- deal.II/examples/step-31/step-31.cc | 2696 ++++---- deal.II/examples/step-32/step-32.cc | 2304 +++---- deal.II/examples/step-33/step-33.cc | 4664 ++++++------- deal.II/examples/step-34/step-34.cc | 1962 +++--- deal.II/examples/step-35/step-35.cc | 1272 ++-- deal.II/examples/step-36/step-36.cc | 784 +-- deal.II/examples/step-37/step-37.cc | 1714 ++--- deal.II/examples/step-38/step-38.cc | 304 +- deal.II/examples/step-39/step-39.cc | 1064 +-- deal.II/examples/step-4/step-4.cc | 712 +- deal.II/examples/step-40/step-40.cc | 1350 ++-- deal.II/examples/step-41/step-41.cc | 1004 +-- deal.II/examples/step-42/step-42.cc | 600 +- deal.II/examples/step-43/step-43.cc | 3010 ++++----- deal.II/examples/step-44/step-44.cc | 3688 +++++------ deal.II/examples/step-45/step-45.cc | 598 +- deal.II/examples/step-46/step-46.cc | 1578 ++--- deal.II/examples/step-47/step-47.cc | 1180 ++-- deal.II/examples/step-5/step-5.cc | 1406 ++-- deal.II/examples/step-6/step-6.cc | 1782 ++--- deal.II/examples/step-7/step-7.cc | 3140 ++++----- deal.II/examples/step-8/step-8.cc | 1760 ++--- deal.II/examples/step-9/step-9.cc | 3280 ++++----- 48 files changed, 39551 insertions(+), 39551 deletions(-) diff --git a/deal.II/examples/doxygen/block_matrix_array.cc b/deal.II/examples/doxygen/block_matrix_array.cc index 9300535f80..f24b85e8bb 100644 --- a/deal.II/examples/doxygen/block_matrix_array.cc +++ b/deal.II/examples/doxygen/block_matrix_array.cc @@ -1,7 +1,7 @@ //--------------------------------------------------------------------------- // $Id$ // -// Copyright (C) 2005, 2006 by the deal.II authors +// Copyright (C) 2005, 2006, 2012 by the deal.II authors // // This file is subject to QPL and may not be distributed // without copyright and license information. Please refer @@ -54,7 +54,7 @@ double Cdata[] = 1., 8. 
}; -int main () +int main () { FullMatrix A(4,4); FullMatrix B1(4,2); @@ -65,11 +65,11 @@ int main () B1.fill(B1data); B2.fill(B2data); C.fill(Cdata); - + GrowingVectorMemory > simple_mem; - + BlockMatrixArray matrix(2, 2, simple_mem); - + matrix.enter(A,0,0,2.); matrix.enter(B1,0,1,-1.); matrix.enter(B2,0,1,1., true); @@ -77,11 +77,11 @@ int main () matrix.enter(B1,1,0,-1., true); matrix.enter(C,1,1); matrix.print_latex(deallog); - + std::vector block_sizes(2); block_sizes[0] = 4; block_sizes[1] = 2; - + BlockVector result(block_sizes); BlockVector x(block_sizes); BlockVector y(block_sizes); @@ -98,16 +98,16 @@ int main () cg.solve(matrix, x, y, id); x.add(-1., result); deallog << "Error " << x.l2_norm() << std::endl; - + deallog << "Error A-norm " - << std::sqrt(matrix.matrix_norm_square(x)) - << std::endl; - + << std::sqrt(matrix.matrix_norm_square(x)) + << std::endl; + FullMatrix Ainv(4,4); Ainv.invert(A); FullMatrix Cinv(2,2); Cinv.invert(C); - + BlockTrianglePrecondition precondition(2, simple_mem); precondition.enter(Ainv,0,0,.5); @@ -116,14 +116,14 @@ int main () cg.solve(matrix, x, y, precondition); x.add(-1., result); deallog << "Error " << x.l2_norm() << std::endl; - + precondition.enter(B1,1,0,-1., true); precondition.enter(B2,1,0,1.); - + SolverGMRES > gmres(control, mem); gmres.solve(matrix, x, y, precondition); x.add(-1., result); deallog << "Error " << x.l2_norm() << std::endl; - + return 0; } diff --git a/deal.II/examples/doxygen/theta_timestepping.cc b/deal.II/examples/doxygen/theta_timestepping.cc index 488d026778..bf7525dc28 100644 --- a/deal.II/examples/doxygen/theta_timestepping.cc +++ b/deal.II/examples/doxygen/theta_timestepping.cc @@ -1,7 +1,7 @@ //--------------------------------------------------------------------------- // $Id$ // -// Copyright (C) 2005, 2006, 2010 by the deal.II authors +// Copyright (C) 2005, 2006, 2010, 2012 by the deal.II authors // // This file is subject to QPL and may not be distributed // without copyright and license information. 
Please refer @@ -32,7 +32,7 @@ class Explicit public: Explicit(const FullMatrix& matrix); void operator() (NamedData*>& out, - const NamedData*>& in); + const NamedData*>& in); void initialize_timestep_data(const TimestepData&); private: @@ -41,14 +41,14 @@ class Explicit FullMatrix m; }; - + class Implicit : public Operator > { public: Implicit(const FullMatrix& matrix); void operator() (NamedData*>& out, - const NamedData*>& in); + const NamedData*>& in); void initialize_timestep_data(const TimestepData&); private: @@ -69,29 +69,29 @@ int main() OutputOperator > out; out.initialize_stream(std::cout); - + Explicit op_explicit(matrix); Implicit op_implicit(matrix); ThetaTimestepping > solver(op_explicit, op_implicit); op_explicit.initialize_timestep_data(solver.explicit_data()); op_implicit.initialize_timestep_data(solver.implicit_data()); solver.set_output(out); - + Vector value(2); value(0) = 1.; NamedData*> indata; NamedData*> outdata; Vector* p = &value; outdata.add(p, "value"); - + solver.notify(Events::initial); solver(outdata, indata); } Explicit::Explicit(const FullMatrix& M) - : - matrix(&M) + : + matrix(&M) { m.reinit(M.m(), M.n()); } @@ -111,7 +111,7 @@ Explicit::operator() (NamedData*>& out, const NamedDatastep, *matrix); for (unsigned int i=0;inotifications.clear(); unsigned int i = in.find("Previous iterate"); @@ -120,8 +120,8 @@ Explicit::operator() (NamedData*>& out, const NamedData& M) - : - matrix(&M) + : + matrix(&M) { m.reinit(M.m(), M.n()); } @@ -141,11 +141,11 @@ Implicit::operator() (NamedData*>& out, const NamedDatastep, *matrix); for (unsigned int i=0;inotifications.clear(); - + unsigned int i = in.find("Previous time"); m.vmult(*out(0), *in(i)); } diff --git a/deal.II/examples/step-1/step-1.cc b/deal.II/examples/step-1/step-1.cc index 50bf7111e8..2d82dc2346 100644 --- a/deal.II/examples/step-1/step-1.cc +++ b/deal.II/examples/step-1/step-1.cc @@ -1,6 +1,6 @@ /* $Id$ * - * Copyright (C) 1999, 2000, 2001, 2002, 2003, 2005, 2006, 2007, 2009, 2011 by the deal.II authors + * Copyright (C) 1999, 2000, 2001, 2002, 2003, 2005, 2006, 2007, 2009, 2011, 2012 by the deal.II authors * * This file is subject to QPL and may not be distributed * without copyright and license information. Please refer @@ -8,7 +8,7 @@ * further information on this license. */ - // @sect3{Include files} + // @sect3{Include files} // The most fundamental class in the // library is the Triangulation @@ -33,28 +33,28 @@ // This is needed for C++ output: #include - // And this for the declarations of the - // `sqrt' and `fabs' functions: + // And this for the declarations of the + // `sqrt' and `fabs' functions: #include - // The final step in importing - // deal.II is this: All deal.II - // functions and classes are in a - // namespace dealii, to - // make sure they don't clash with - // symbols from other libraries you - // may want to use in conjunction - // with deal.II. One could use these - // functions and classes by prefixing - // every use of these names by - // dealii::, but that - // would quickly become cumbersome - // and annoying. Rather, we simply - // import the entire deal.II - // namespace for general use: + // The final step in importing + // deal.II is this: All deal.II + // functions and classes are in a + // namespace dealii, to + // make sure they don't clash with + // symbols from other libraries you + // may want to use in conjunction + // with deal.II. 
One could use these + // functions and classes by prefixing + // every use of these names by + // dealii::, but that + // would quickly become cumbersome + // and annoying. Rather, we simply + // import the entire deal.II + // namespace for general use: using namespace dealii; - // @sect3{Creating the first mesh} + // @sect3{Creating the first mesh} // In the following, first function, we // simply use the unit square as @@ -67,7 +67,7 @@ void first_grid () // triangulation of a // two-dimensional domain: Triangulation<2> triangulation; - // Here and in many following + // Here and in many following // cases, the string "<2>" after a // class name indicates that this // is an object that shall work in @@ -108,7 +108,7 @@ void first_grid () - // @sect3{Creating the second mesh} + // @sect3{Creating the second mesh} // The grid in the following, second // function is slightly more @@ -137,7 +137,7 @@ void second_grid () outer_radius = 1.0; GridGenerator::hyper_shell (triangulation, center, inner_radius, outer_radius, - 10); + 10); // By default, the triangulation // assumes that all boundaries are // straight and given by the cells @@ -150,8 +150,8 @@ void second_grid () // assumed to be straight, then new // points will simply be in the // middle of the surrounding ones. - // - // Here, however, we would like to + // + // Here, however, we would like to // have a curved // boundary. Fortunately, some good // soul implemented an object which @@ -208,8 +208,8 @@ void second_grid () // one-past-the-end // iterator: Triangulation<2>::active_cell_iterator - cell = triangulation.begin_active(), - endc = triangulation.end(); + cell = triangulation.begin_active(), + endc = triangulation.end(); // The loop over all cells is // then rather trivial, and @@ -321,21 +321,21 @@ void second_grid () // default object, over which the // triangulation has full control. triangulation.set_boundary (0); - // An alternative to doing so, and - // one that is frequently more - // convenient, would have been to - // declare the boundary object - // before the triangulation - // object. In that case, the - // triangulation would have let - // lose of the boundary object upon - // its destruction, and everything - // would have been fine. + // An alternative to doing so, and + // one that is frequently more + // convenient, would have been to + // declare the boundary object + // before the triangulation + // object. In that case, the + // triangulation would have let + // lose of the boundary object upon + // its destruction, and everything + // would have been fine. } - // @sect3{The main function} + // @sect3{The main function} // Finally, the main function. There // isn't much to do here, only to diff --git a/deal.II/examples/step-10/step-10.cc b/deal.II/examples/step-10/step-10.cc index 3df41fc02b..5548d521a8 100644 --- a/deal.II/examples/step-10/step-10.cc +++ b/deal.II/examples/step-10/step-10.cc @@ -9,10 +9,10 @@ /* to the file deal.II/doc/license.html for the text and */ /* further information on this license. */ - // The first of the following include - // files are probably well-known by - // now and need no further - // explanation. + // The first of the following include + // files are probably well-known by + // now and need no further + // explanation. 
#include #include #include @@ -26,628 +26,628 @@ #include #include - // This is the only new one: in it, - // we declare the MappingQ class - // which we will use for polynomial - // mappings of arbitrary order: + // This is the only new one: in it, + // we declare the MappingQ class + // which we will use for polynomial + // mappings of arbitrary order: #include - // And this again is C++: + // And this again is C++: #include #include #include - // The last step is as in previous - // programs: + // The last step is as in previous + // programs: namespace Step10 { using namespace dealii; - // Now, as we want to compute the - // value of $\pi$, we have to compare to - // somewhat. These are the first few - // digits of $\pi$, which we define - // beforehand for later use. Since we - // would like to compute the - // difference between two numbers - // which are quite accurate, with the - // accuracy of the computed - // approximation to $\pi$ being in the - // range of the number of digits - // which a double variable can hold, - // we rather declare the reference - // value as a long double and - // give it a number of extra digits: + // Now, as we want to compute the + // value of $\pi$, we have to compare to + // somewhat. These are the first few + // digits of $\pi$, which we define + // beforehand for later use. Since we + // would like to compute the + // difference between two numbers + // which are quite accurate, with the + // accuracy of the computed + // approximation to $\pi$ being in the + // range of the number of digits + // which a double variable can hold, + // we rather declare the reference + // value as a long double and + // give it a number of extra digits: const long double pi = 3.141592653589793238462643; - // Then, the first task will be to - // generate some output. Since this - // program is so small, we do not - // employ object oriented techniques - // in it and do not declare classes - // (although, of course, we use the - // object oriented features of the - // library). Rather, we just pack the - // functionality into separate - // functions. We make these functions - // templates on the number of space - // dimensions to conform to usual - // practice when using deal.II, - // although we will only use them for - // two space dimensions. - // - // The first of these functions just - // generates a triangulation of a - // circle (hyperball) and outputs the - // Qp mapping of its cells for - // different values of p. Then, - // we refine the grid once and do so - // again. + // Then, the first task will be to + // generate some output. Since this + // program is so small, we do not + // employ object oriented techniques + // in it and do not declare classes + // (although, of course, we use the + // object oriented features of the + // library). Rather, we just pack the + // functionality into separate + // functions. We make these functions + // templates on the number of space + // dimensions to conform to usual + // practice when using deal.II, + // although we will only use them for + // two space dimensions. + // + // The first of these functions just + // generates a triangulation of a + // circle (hyperball) and outputs the + // Qp mapping of its cells for + // different values of p. Then, + // we refine the grid once and do so + // again. 
template void gnuplot_output() { std::cout << "Output of grids into gnuplot files:" << std::endl - << "===================================" << std::endl; - - // So first generate a coarse - // triangulation of the circle and - // associate a suitable boundary - // description to it. Note that the - // default values of the - // HyperBallBoundary constructor - // are a center at the origin and a - // radius equals one. + << "===================================" << std::endl; + + // So first generate a coarse + // triangulation of the circle and + // associate a suitable boundary + // description to it. Note that the + // default values of the + // HyperBallBoundary constructor + // are a center at the origin and a + // radius equals one. Triangulation triangulation; GridGenerator::hyper_ball (triangulation); static const HyperBallBoundary boundary; triangulation.set_boundary (0, boundary); - // Next generate output for this - // grid and for a once refined - // grid. Note that we have hidden - // the mesh refinement in the loop - // header, which might be uncommon - // but nevertheless works. Also it - // is strangely consistent with - // incrementing the loop index - // denoting the refinement level. + // Next generate output for this + // grid and for a once refined + // grid. Note that we have hidden + // the mesh refinement in the loop + // header, which might be uncommon + // but nevertheless works. Also it + // is strangely consistent with + // incrementing the loop index + // denoting the refinement level. for (unsigned int refinement=0; refinement<2; - ++refinement, triangulation.refine_global(1)) + ++refinement, triangulation.refine_global(1)) { - std::cout << "Refinement level: " << refinement << std::endl; - - // Then have a string which - // denotes the base part of the - // names of the files into - // which we write the - // output. Note that in the - // parentheses in the - // initializer we do arithmetic - // on characters, which assumes - // that first the characters - // denoting numbers are placed - // consecutively (which is - // probably true for all - // reasonable character sets - // nowadays), but also assumes - // that the increment - // refinement is less than - // ten. This is therefore more - // a quick hack if we know - // exactly the values which the - // increment can assume. A - // better implementation would - // use the - // std::istringstream - // class to generate a name. - std::string filename_base = "ball"; - filename_base += '0'+refinement; - - // Then output the present grid - // for Q1, Q2, and Q3 mappings: - for (unsigned int degree=1; degree<4; ++degree) - { - std::cout << "Degree = " << degree << std::endl; - - // For this, first set up - // an object describing the - // mapping. This is done - // using the MappingQ - // class, which takes as - // argument to the - // constructor the - // polynomial degree which - // it shall use. - const MappingQ mapping (degree); - // We note one interesting - // fact: if you want a - // piecewise linear - // mapping, then you could - // give a value of 1 to - // the - // constructor. However, - // for linear mappings, so - // many things can be - // generated simpler that - // there is another class, - // called MappingQ1 - // which does exactly the - // same is if you gave an - // degree of 1 to the - // MappingQ class, but - // does so significantly - // faster. 
MappingQ1 is - // also the class that is - // implicitly used - // throughout the library - // in many functions and - // classes if you do not - // specify another mapping - // explicitly. - - - // In degree to actually - // write out the present - // grid with this mapping, - // we set up an object - // which we will use for - // output. We will generate - // Gnuplot output, which - // consists of a set of - // lines describing the - // mapped triangulation. By - // default, only one line - // is drawn for each face - // of the triangulation, - // but since we want to - // explicitely see the - // effect of the mapping, - // we want to have the - // faces in more - // detail. This can be done - // by passing the output - // object a structure which - // contains some flags. In - // the present case, since - // Gnuplot can only draw - // straight lines, we - // output a number of - // additional points on the - // faces so that each face - // is drawn by 30 small - // lines instead of only - // one. This is sufficient - // to give us the - // impression of seeing a - // curved line, rather than - // a set of straight lines. - GridOut grid_out; - GridOutFlags::Gnuplot gnuplot_flags(false, 30); - grid_out.set_flags(gnuplot_flags); - - // Finally, generate a - // filename and a file for - // output using the same - // evil hack as above: - std::string filename = filename_base+"_mapping_q"; - filename += ('0'+degree); - filename += ".dat"; - std::ofstream gnuplot_file (filename.c_str()); - - // Then write out the - // triangulation to this - // file. The last argument - // of the function is a - // pointer to a mapping - // object. This argument - // has a default value, and - // if no value is given a - // simple MappingQ1 - // object is taken, which - // we briefly described - // above. This would then - // result in a piecewise - // linear approximation of - // the true boundary in the - // output. - grid_out.write_gnuplot (triangulation, gnuplot_file, &mapping); - } - std::cout << std::endl; + std::cout << "Refinement level: " << refinement << std::endl; + + // Then have a string which + // denotes the base part of the + // names of the files into + // which we write the + // output. Note that in the + // parentheses in the + // initializer we do arithmetic + // on characters, which assumes + // that first the characters + // denoting numbers are placed + // consecutively (which is + // probably true for all + // reasonable character sets + // nowadays), but also assumes + // that the increment + // refinement is less than + // ten. This is therefore more + // a quick hack if we know + // exactly the values which the + // increment can assume. A + // better implementation would + // use the + // std::istringstream + // class to generate a name. + std::string filename_base = "ball"; + filename_base += '0'+refinement; + + // Then output the present grid + // for Q1, Q2, and Q3 mappings: + for (unsigned int degree=1; degree<4; ++degree) + { + std::cout << "Degree = " << degree << std::endl; + + // For this, first set up + // an object describing the + // mapping. This is done + // using the MappingQ + // class, which takes as + // argument to the + // constructor the + // polynomial degree which + // it shall use. + const MappingQ mapping (degree); + // We note one interesting + // fact: if you want a + // piecewise linear + // mapping, then you could + // give a value of 1 to + // the + // constructor. 
However, + // for linear mappings, so + // many things can be + // generated simpler that + // there is another class, + // called MappingQ1 + // which does exactly the + // same is if you gave an + // degree of 1 to the + // MappingQ class, but + // does so significantly + // faster. MappingQ1 is + // also the class that is + // implicitly used + // throughout the library + // in many functions and + // classes if you do not + // specify another mapping + // explicitly. + + + // In degree to actually + // write out the present + // grid with this mapping, + // we set up an object + // which we will use for + // output. We will generate + // Gnuplot output, which + // consists of a set of + // lines describing the + // mapped triangulation. By + // default, only one line + // is drawn for each face + // of the triangulation, + // but since we want to + // explicitely see the + // effect of the mapping, + // we want to have the + // faces in more + // detail. This can be done + // by passing the output + // object a structure which + // contains some flags. In + // the present case, since + // Gnuplot can only draw + // straight lines, we + // output a number of + // additional points on the + // faces so that each face + // is drawn by 30 small + // lines instead of only + // one. This is sufficient + // to give us the + // impression of seeing a + // curved line, rather than + // a set of straight lines. + GridOut grid_out; + GridOutFlags::Gnuplot gnuplot_flags(false, 30); + grid_out.set_flags(gnuplot_flags); + + // Finally, generate a + // filename and a file for + // output using the same + // evil hack as above: + std::string filename = filename_base+"_mapping_q"; + filename += ('0'+degree); + filename += ".dat"; + std::ofstream gnuplot_file (filename.c_str()); + + // Then write out the + // triangulation to this + // file. The last argument + // of the function is a + // pointer to a mapping + // object. This argument + // has a default value, and + // if no value is given a + // simple MappingQ1 + // object is taken, which + // we briefly described + // above. This would then + // result in a piecewise + // linear approximation of + // the true boundary in the + // output. + grid_out.write_gnuplot (triangulation, gnuplot_file, &mapping); + } + std::cout << std::endl; } } - // Now we proceed with the main part - // of the code, the approximation of - // $\pi$. The area of a circle is of - // course given by $\pi r^2$, so - // having a circle of radius 1, the - // area represents just the number - // that is searched for. The - // numerical computation of the area - // is performed by integrating the - // constant function of value 1 over - // the whole computational domain, - // i.e. by computing the areas - // $\int_K 1 dx=\int_{\hat K} 1 - // \ \textrm{det}\ J(\hat x) d\hat x - // \approx \sum_i \textrm{det} - // \ J(\hat x_i)w(\hat x_i)$, where the - // sum extends over all quadrature - // points on all active cells in the - // triangulation, with $w(x_i)$ being - // the weight of quadrature point - // $x_i$. The integrals on each cell - // are approximated by numerical - // quadrature, hence the only - // additional ingredient we need is - // to set up a FEValues object that - // provides the corresponding `JxW' - // values of each cell. 
(Note that - // `JxW' is meant to abbreviate - // Jacobian determinant times - // weight; since in numerical - // quadrature the two factors always - // occur at the same places, we only - // offer the combined quantity, - // rather than two separate ones.) We - // note that here we won't use the - // FEValues object in its original - // purpose, i.e. for the computation - // of values of basis functions of a - // specific finite element at certain - // quadrature points. Rather, we use - // it only to gain the `JxW' at the - // quadrature points, irrespective of - // the (dummy) finite element we will - // give to the constructor of the - // FEValues object. The actual finite - // element given to the FEValues - // object is not used at all, so we - // could give any. + // Now we proceed with the main part + // of the code, the approximation of + // $\pi$. The area of a circle is of + // course given by $\pi r^2$, so + // having a circle of radius 1, the + // area represents just the number + // that is searched for. The + // numerical computation of the area + // is performed by integrating the + // constant function of value 1 over + // the whole computational domain, + // i.e. by computing the areas + // $\int_K 1 dx=\int_{\hat K} 1 + // \ \textrm{det}\ J(\hat x) d\hat x + // \approx \sum_i \textrm{det} + // \ J(\hat x_i)w(\hat x_i)$, where the + // sum extends over all quadrature + // points on all active cells in the + // triangulation, with $w(x_i)$ being + // the weight of quadrature point + // $x_i$. The integrals on each cell + // are approximated by numerical + // quadrature, hence the only + // additional ingredient we need is + // to set up a FEValues object that + // provides the corresponding `JxW' + // values of each cell. (Note that + // `JxW' is meant to abbreviate + // Jacobian determinant times + // weight; since in numerical + // quadrature the two factors always + // occur at the same places, we only + // offer the combined quantity, + // rather than two separate ones.) We + // note that here we won't use the + // FEValues object in its original + // purpose, i.e. for the computation + // of values of basis functions of a + // specific finite element at certain + // quadrature points. Rather, we use + // it only to gain the `JxW' at the + // quadrature points, irrespective of + // the (dummy) finite element we will + // give to the constructor of the + // FEValues object. The actual finite + // element given to the FEValues + // object is not used at all, so we + // could give any. template void compute_pi_by_area () { std::cout << "Computation of Pi by the area:" << std::endl - << "==============================" << std::endl; - - // For the numerical quadrature on - // all cells we employ a quadrature - // rule of sufficiently high - // degree. We choose QGauss that - // is of order 8 (4 points), to be sure that - // the errors due to numerical - // quadrature are of higher order - // than the order (maximal 6) that - // will occur due to the order of - // the approximation of the - // boundary, i.e. the order of the - // mappings employed. Note that the - // integrand, the Jacobian - // determinant, is not a polynomial - // function (rather, it is a - // rational one), so we do not use - // Gauss quadrature in order to get - // the exact value of the integral - // as done often in finite element - // computations, but could as well - // have used any quadrature formula - // of like order instead. 
+ << "==============================" << std::endl; + + // For the numerical quadrature on + // all cells we employ a quadrature + // rule of sufficiently high + // degree. We choose QGauss that + // is of order 8 (4 points), to be sure that + // the errors due to numerical + // quadrature are of higher order + // than the order (maximal 6) that + // will occur due to the order of + // the approximation of the + // boundary, i.e. the order of the + // mappings employed. Note that the + // integrand, the Jacobian + // determinant, is not a polynomial + // function (rather, it is a + // rational one), so we do not use + // Gauss quadrature in order to get + // the exact value of the integral + // as done often in finite element + // computations, but could as well + // have used any quadrature formula + // of like order instead. const QGauss quadrature(4); - // Now start by looping over - // polynomial mapping degrees=1..4: + // Now start by looping over + // polynomial mapping degrees=1..4: for (unsigned int degree=1; degree<5; ++degree) { - std::cout << "Degree = " << degree << std::endl; - - // First generate the - // triangulation, the boundary - // and the mapping object as - // already seen. - Triangulation triangulation; - GridGenerator::hyper_ball (triangulation); - - static const HyperBallBoundary boundary; - triangulation.set_boundary (0, boundary); - - const MappingQ mapping (degree); - - // We now create a dummy finite - // element. Here we could - // choose any finite element, - // as we are only interested in - // the `JxW' values provided by - // the FEValues object - // below. Nevertheless, we have - // to provide a finite element - // since in this example we - // abuse the FEValues class a - // little in that we only ask - // it to provide us with the - // weights of certain - // quadrature points, in - // contrast to the usual - // purpose (and name) of the - // FEValues class which is to - // provide the values of finite - // elements at these points. - const FE_Q dummy_fe (1); - - // Likewise, we need to create - // a DoFHandler object. We do - // not actually use it, but it - // will provide us with - // `active_cell_iterators' that - // are needed to reinitialize - // the FEValues object on each - // cell of the triangulation. - DoFHandler dof_handler (triangulation); - - // Now we set up the FEValues - // object, giving the Mapping, - // the dummy finite element and - // the quadrature object to the - // constructor, together with - // the update flags asking for - // the `JxW' values at the - // quadrature points only. This - // tells the FEValues object - // that it needs not compute - // other quantities upon - // calling the reinit - // function, thus saving - // computation time. - // - // The most important - // difference in the - // construction of the FEValues - // object compared to previous - // example programs is that we - // pass a mapping object as - // first argument, which is to - // be used in the computation - // of the mapping from unit to - // real cell. In previous - // examples, this argument was - // omitted, resulting in the - // implicit use of an object of - // type MappingQ1. - FEValues fe_values (mapping, dummy_fe, quadrature, - update_JxW_values); - - // We employ an object of the - // ConvergenceTable class to - // store all important data - // like the approximated values - // for $\pi$ and the error with - // respect to the true value of - // $\pi$. 
We will also use - // functions provided by the - // ConvergenceTable class to - // compute convergence rates of - // the approximations to $\pi$. - ConvergenceTable table; - - // Now we loop over several - // refinement steps of the - // triangulation. - for (unsigned int refinement=0; refinement<6; - ++refinement, triangulation.refine_global (1)) - { - // In this loop we first - // add the number of active - // cells of the current - // triangulation to the - // table. This function - // automatically creates a - // table column with - // superscription `cells', - // in case this column was - // not created before. - table.add_value("cells", triangulation.n_active_cells()); - - // Then we distribute the - // degrees of freedom for - // the dummy finite - // element. Strictly - // speaking we do not need - // this function call in - // our special case but we - // call it to make the - // DoFHandler happy -- - // otherwise it would throw - // an assertion in the - // FEValues::reinit - // function below. - dof_handler.distribute_dofs (dummy_fe); - - // We define the variable - // area as `long double' - // like we did for the pi - // variable before. - long double area = 0; - - // Now we loop over all - // cells, reinitialize the - // FEValues object for each - // cell, and add up all the - // `JxW' values for this - // cell to `area'... - typename DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(), - endc = dof_handler.end(); - for (; cell!=endc; ++cell) - { - fe_values.reinit (cell); - for (unsigned int i=0; ifabs - // function in the std - // namespace is overloaded on - // its argument types, so there - // exists a version taking - // and returning a long double, - // in contrast to the global - // namespace where only one such - // function is declared (which - // takes and returns a double). - table.add_value("eval.pi", static_cast (area)); - table.add_value("error", static_cast (std::fabs(area-pi))); - }; - - // We want to compute - // the convergence rates of the - // `error' column. Therefore we - // need to omit the other - // columns from the convergence - // rate evaluation before - // calling - // `evaluate_all_convergence_rates' - table.omit_column_from_convergence_rate_evaluation("cells"); - table.omit_column_from_convergence_rate_evaluation("eval.pi"); - table.evaluate_all_convergence_rates(ConvergenceTable::reduction_rate_log2); - - // Finally we set the precision - // and scientific mode for - // output of some of the - // quantities... - table.set_precision("eval.pi", 16); - table.set_scientific("error", true); - - // ...and write the whole table - // to std::cout. - table.write_text(std::cout); - - std::cout << std::endl; + std::cout << "Degree = " << degree << std::endl; + + // First generate the + // triangulation, the boundary + // and the mapping object as + // already seen. + Triangulation triangulation; + GridGenerator::hyper_ball (triangulation); + + static const HyperBallBoundary boundary; + triangulation.set_boundary (0, boundary); + + const MappingQ mapping (degree); + + // We now create a dummy finite + // element. Here we could + // choose any finite element, + // as we are only interested in + // the `JxW' values provided by + // the FEValues object + // below. 
Nevertheless, we have + // to provide a finite element + // since in this example we + // abuse the FEValues class a + // little in that we only ask + // it to provide us with the + // weights of certain + // quadrature points, in + // contrast to the usual + // purpose (and name) of the + // FEValues class which is to + // provide the values of finite + // elements at these points. + const FE_Q dummy_fe (1); + + // Likewise, we need to create + // a DoFHandler object. We do + // not actually use it, but it + // will provide us with + // `active_cell_iterators' that + // are needed to reinitialize + // the FEValues object on each + // cell of the triangulation. + DoFHandler dof_handler (triangulation); + + // Now we set up the FEValues + // object, giving the Mapping, + // the dummy finite element and + // the quadrature object to the + // constructor, together with + // the update flags asking for + // the `JxW' values at the + // quadrature points only. This + // tells the FEValues object + // that it needs not compute + // other quantities upon + // calling the reinit + // function, thus saving + // computation time. + // + // The most important + // difference in the + // construction of the FEValues + // object compared to previous + // example programs is that we + // pass a mapping object as + // first argument, which is to + // be used in the computation + // of the mapping from unit to + // real cell. In previous + // examples, this argument was + // omitted, resulting in the + // implicit use of an object of + // type MappingQ1. + FEValues fe_values (mapping, dummy_fe, quadrature, + update_JxW_values); + + // We employ an object of the + // ConvergenceTable class to + // store all important data + // like the approximated values + // for $\pi$ and the error with + // respect to the true value of + // $\pi$. We will also use + // functions provided by the + // ConvergenceTable class to + // compute convergence rates of + // the approximations to $\pi$. + ConvergenceTable table; + + // Now we loop over several + // refinement steps of the + // triangulation. + for (unsigned int refinement=0; refinement<6; + ++refinement, triangulation.refine_global (1)) + { + // In this loop we first + // add the number of active + // cells of the current + // triangulation to the + // table. This function + // automatically creates a + // table column with + // superscription `cells', + // in case this column was + // not created before. + table.add_value("cells", triangulation.n_active_cells()); + + // Then we distribute the + // degrees of freedom for + // the dummy finite + // element. Strictly + // speaking we do not need + // this function call in + // our special case but we + // call it to make the + // DoFHandler happy -- + // otherwise it would throw + // an assertion in the + // FEValues::reinit + // function below. + dof_handler.distribute_dofs (dummy_fe); + + // We define the variable + // area as `long double' + // like we did for the pi + // variable before. + long double area = 0; + + // Now we loop over all + // cells, reinitialize the + // FEValues object for each + // cell, and add up all the + // `JxW' values for this + // cell to `area'... 
+ typename DoFHandler::active_cell_iterator + cell = dof_handler.begin_active(), + endc = dof_handler.end(); + for (; cell!=endc; ++cell) + { + fe_values.reinit (cell); + for (unsigned int i=0; ifabs + // function in the std + // namespace is overloaded on + // its argument types, so there + // exists a version taking + // and returning a long double, + // in contrast to the global + // namespace where only one such + // function is declared (which + // takes and returns a double). + table.add_value("eval.pi", static_cast (area)); + table.add_value("error", static_cast (std::fabs(area-pi))); + }; + + // We want to compute + // the convergence rates of the + // `error' column. Therefore we + // need to omit the other + // columns from the convergence + // rate evaluation before + // calling + // `evaluate_all_convergence_rates' + table.omit_column_from_convergence_rate_evaluation("cells"); + table.omit_column_from_convergence_rate_evaluation("eval.pi"); + table.evaluate_all_convergence_rates(ConvergenceTable::reduction_rate_log2); + + // Finally we set the precision + // and scientific mode for + // output of some of the + // quantities... + table.set_precision("eval.pi", 16); + table.set_scientific("error", true); + + // ...and write the whole table + // to std::cout. + table.write_text(std::cout); + + std::cout << std::endl; }; } - // The following, second function also - // computes an approximation of $\pi$ - // but this time via the perimeter - // $2\pi r$ of the domain instead - // of the area. This function is only - // a variation of the previous - // function. So we will mainly give - // documentation for the differences. + // The following, second function also + // computes an approximation of $\pi$ + // but this time via the perimeter + // $2\pi r$ of the domain instead + // of the area. This function is only + // a variation of the previous + // function. So we will mainly give + // documentation for the differences. template void compute_pi_by_perimeter () { std::cout << "Computation of Pi by the perimeter:" << std::endl - << "===================================" << std::endl; - - // We take the same order of - // quadrature but this time a - // `dim-1' dimensional quadrature - // as we will integrate over - // (boundary) lines rather than - // over cells. + << "===================================" << std::endl; + + // We take the same order of + // quadrature but this time a + // `dim-1' dimensional quadrature + // as we will integrate over + // (boundary) lines rather than + // over cells. const QGauss quadrature(4); - // We loop over all degrees, create - // the triangulation, the boundary, - // the mapping, the dummy - // finite element and the DoFHandler - // object as seen before. + // We loop over all degrees, create + // the triangulation, the boundary, + // the mapping, the dummy + // finite element and the DoFHandler + // object as seen before. for (unsigned int degree=1; degree<5; ++degree) { - std::cout << "Degree = " << degree << std::endl; - Triangulation triangulation; - GridGenerator::hyper_ball (triangulation); - - static const HyperBallBoundary boundary; - triangulation.set_boundary (0, boundary); - - const MappingQ mapping (degree); - const FE_Q fe (1); - - DoFHandler dof_handler (triangulation); - - // Then we create a - // FEFaceValues object instead - // of a FEValues object as in - // the previous - // function. Again, we pass a - // mapping as first argument. 
- FEFaceValues fe_face_values (mapping, fe, quadrature, - update_JxW_values); - ConvergenceTable table; - - for (unsigned int refinement=0; refinement<6; - ++refinement, triangulation.refine_global (1)) - { - table.add_value("cells", triangulation.n_active_cells()); - - dof_handler.distribute_dofs (fe); - - // Now we run over all - // cells and over all faces - // of each cell. Only the - // contributions of the - // `JxW' values on boundary - // faces are added to the - // long double variable - // `perimeter'. - typename DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(), - endc = dof_handler.end(); - long double perimeter = 0; - for (; cell!=endc; ++cell) - for (unsigned int face_no=0; face_no::faces_per_cell; ++face_no) - if (cell->face(face_no)->at_boundary()) - { - // We reinit the - // FEFaceValues - // object with the - // cell iterator - // and the number - // of the face. - fe_face_values.reinit (cell, face_no); - for (unsigned int i=0; i (perimeter/2.)); - table.add_value("error", static_cast (std::fabs(perimeter/2.-pi))); - }; - - // ...and end this function as - // we did in the previous one: - table.omit_column_from_convergence_rate_evaluation("cells"); - table.omit_column_from_convergence_rate_evaluation("eval.pi"); - table.evaluate_all_convergence_rates(ConvergenceTable::reduction_rate_log2); - - table.set_precision("eval.pi", 16); - table.set_scientific("error", true); - - table.write_text(std::cout); - - std::cout << std::endl; + std::cout << "Degree = " << degree << std::endl; + Triangulation triangulation; + GridGenerator::hyper_ball (triangulation); + + static const HyperBallBoundary boundary; + triangulation.set_boundary (0, boundary); + + const MappingQ mapping (degree); + const FE_Q fe (1); + + DoFHandler dof_handler (triangulation); + + // Then we create a + // FEFaceValues object instead + // of a FEValues object as in + // the previous + // function. Again, we pass a + // mapping as first argument. + FEFaceValues fe_face_values (mapping, fe, quadrature, + update_JxW_values); + ConvergenceTable table; + + for (unsigned int refinement=0; refinement<6; + ++refinement, triangulation.refine_global (1)) + { + table.add_value("cells", triangulation.n_active_cells()); + + dof_handler.distribute_dofs (fe); + + // Now we run over all + // cells and over all faces + // of each cell. Only the + // contributions of the + // `JxW' values on boundary + // faces are added to the + // long double variable + // `perimeter'. + typename DoFHandler::active_cell_iterator + cell = dof_handler.begin_active(), + endc = dof_handler.end(); + long double perimeter = 0; + for (; cell!=endc; ++cell) + for (unsigned int face_no=0; face_no::faces_per_cell; ++face_no) + if (cell->face(face_no)->at_boundary()) + { + // We reinit the + // FEFaceValues + // object with the + // cell iterator + // and the number + // of the face. 
+ fe_face_values.reinit (cell, face_no); + for (unsigned int i=0; i (perimeter/2.)); + table.add_value("error", static_cast (std::fabs(perimeter/2.-pi))); + }; + + // ...and end this function as + // we did in the previous one: + table.omit_column_from_convergence_rate_evaluation("cells"); + table.omit_column_from_convergence_rate_evaluation("eval.pi"); + table.evaluate_all_convergence_rates(ConvergenceTable::reduction_rate_log2); + + table.set_precision("eval.pi", 16); + table.set_scientific("error", true); + + table.write_text(std::cout); + + std::cout << std::endl; }; } } - // The following main function just calls the - // above functions in the order of their - // appearance. Apart from this, it looks just - // like the main functions of previous - // tutorial programs. + // The following main function just calls the + // above functions in the order of their + // appearance. Apart from this, it looks just + // like the main functions of previous + // tutorial programs. int main () { try @@ -662,25 +662,25 @@ int main () catch (std::exception &exc) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Exception on processing: " << std::endl - << exc.what() << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << exc.what() << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } catch (...) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Unknown exception!" << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } diff --git a/deal.II/examples/step-11/step-11.cc b/deal.II/examples/step-11/step-11.cc index 0d3c2301cf..55615a90ee 100644 --- a/deal.II/examples/step-11/step-11.cc +++ b/deal.II/examples/step-11/step-11.cc @@ -9,10 +9,10 @@ /* to the file deal.II/doc/license.html for the text and */ /* further information on this license. */ - // As usual, the program starts with - // a rather long list of include - // files which you are probably - // already used to by now: + // As usual, the program starts with + // a rather long list of include + // files which you are probably + // already used to by now: #include #include #include @@ -36,55 +36,55 @@ #include #include - // Just this one is new: it declares - // a class - // CompressedSparsityPattern, - // which we will use and explain - // further down below. + // Just this one is new: it declares + // a class + // CompressedSparsityPattern, + // which we will use and explain + // further down below. 
#include - // We will make use of the std::find - // algorithm of the C++ standard - // library, so we have to include the - // following file for its - // declaration: + // We will make use of the std::find + // algorithm of the C++ standard + // library, so we have to include the + // following file for its + // declaration: #include #include #include #include - // The last step is as in all - // previous programs: + // The last step is as in all + // previous programs: namespace Step11 { using namespace dealii; - // Then we declare a class which - // represents the solution of a - // Laplace problem. As this example - // program is based on step-5, the - // class looks rather the same, with - // the sole structural difference - // that the functions - // assemble_system now calls - // solve itself, and is thus - // called assemble_and_solve, and - // that the output function was - // dropped since the solution - // function is so boring that it is - // not worth being viewed. - // - // The only other noteworthy change - // is that the constructor takes a - // value representing the polynomial - // degree of the mapping to be used - // later on, and that it has another - // member variable representing - // exactly this mapping. In general, - // this variable will occur in real - // applications at the same places - // where the finite element is - // declared or used. + // Then we declare a class which + // represents the solution of a + // Laplace problem. As this example + // program is based on step-5, the + // class looks rather the same, with + // the sole structural difference + // that the functions + // assemble_system now calls + // solve itself, and is thus + // called assemble_and_solve, and + // that the output function was + // dropped since the solution + // function is so boring that it is + // not worth being viewed. + // + // The only other noteworthy change + // is that the constructor takes a + // value representing the polynomial + // degree of the mapping to be used + // later on, and that it has another + // member variable representing + // exactly this mapping. In general, + // this variable will occur in real + // applications at the same places + // where the finite element is + // declared or used. template class LaplaceProblem { @@ -114,462 +114,462 @@ namespace Step11 - // Construct such an object, by - // initializing the variables. Here, - // we use linear finite elements (the - // argument to the fe variable - // denotes the polynomial degree), - // and mappings of given order. Print - // to screen what we are about to do. + // Construct such an object, by + // initializing the variables. Here, + // we use linear finite elements (the + // argument to the fe variable + // denotes the polynomial degree), + // and mappings of given order. Print + // to screen what we are about to do. template LaplaceProblem::LaplaceProblem (const unsigned int mapping_degree) : - fe (1), - dof_handler (triangulation), - mapping (mapping_degree) + fe (1), + dof_handler (triangulation), + mapping (mapping_degree) { std::cout << "Using mapping with degree " << mapping_degree << ":" - << std::endl - << "============================" - << std::endl; + << std::endl + << "============================" + << std::endl; } - // The first task is to set up the - // variables for this problem. 
This - // includes generating a valid - // DoFHandler object, as well as - // the sparsity patterns for the - // matrix, and the object - // representing the constraints that - // the mean value of the degrees of - // freedom on the boundary be zero. + // The first task is to set up the + // variables for this problem. This + // includes generating a valid + // DoFHandler object, as well as + // the sparsity patterns for the + // matrix, and the object + // representing the constraints that + // the mean value of the degrees of + // freedom on the boundary be zero. template void LaplaceProblem::setup_system () { - // The first task is trivial: - // generate an enumeration of the - // degrees of freedom, and - // initialize solution and right - // hand side vector to their - // correct sizes: + // The first task is trivial: + // generate an enumeration of the + // degrees of freedom, and + // initialize solution and right + // hand side vector to their + // correct sizes: dof_handler.distribute_dofs (fe); solution.reinit (dof_handler.n_dofs()); system_rhs.reinit (dof_handler.n_dofs()); - // Next task is to construct the - // object representing the - // constraint that the mean value - // of the degrees of freedom on the - // boundary shall be zero. For - // this, we first want a list of - // those nodes which are actually - // at the boundary. The - // DoFTools class has a - // function that returns an array - // of boolean values where true - // indicates that the node is at - // the boundary. The second - // argument denotes a mask - // selecting which components of - // vector valued finite elements we - // want to be considered. Since we - // have a scalar finite element - // anyway, this mask consists of - // only one entry, and its value - // must be true. + // Next task is to construct the + // object representing the + // constraint that the mean value + // of the degrees of freedom on the + // boundary shall be zero. For + // this, we first want a list of + // those nodes which are actually + // at the boundary. The + // DoFTools class has a + // function that returns an array + // of boolean values where true + // indicates that the node is at + // the boundary. The second + // argument denotes a mask + // selecting which components of + // vector valued finite elements we + // want to be considered. Since we + // have a scalar finite element + // anyway, this mask consists of + // only one entry, and its value + // must be true. std::vector boundary_dofs (dof_handler.n_dofs(), false); DoFTools::extract_boundary_dofs (dof_handler, std::vector(1,true), - boundary_dofs); - - // Now first for the generation of - // the constraints: as mentioned in - // the introduction, we constrain - // one of the nodes on the boundary - // by the values of all other DoFs - // on the boundary. So, let us - // first pick out the first - // boundary node from this list. We - // do that by searching for the - // first true value in the - // array (note that std::find - // returns an iterator to this - // element), and computing its - // distance to the overall first - // element in the array to get its - // index: + boundary_dofs); + + // Now first for the generation of + // the constraints: as mentioned in + // the introduction, we constrain + // one of the nodes on the boundary + // by the values of all other DoFs + // on the boundary. So, let us + // first pick out the first + // boundary node from this list. 
We + // do that by searching for the + // first true value in the + // array (note that std::find + // returns an iterator to this + // element), and computing its + // distance to the overall first + // element in the array to get its + // index: const unsigned int first_boundary_dof = std::distance (boundary_dofs.begin(), - std::find (boundary_dofs.begin(), - boundary_dofs.end(), - true)); - - // Then generate a constraints - // object with just this one - // constraint. First clear all - // previous content (which might - // reside there from the previous - // computation on a once coarser - // grid), then add this one line - // constraining the - // first_boundary_dof to the - // sum of other boundary DoFs each - // with weight -1. Finally, close - // the constraints object, i.e. do - // some internal bookkeeping on it - // for faster processing of what is - // to come later: + std::find (boundary_dofs.begin(), + boundary_dofs.end(), + true)); + + // Then generate a constraints + // object with just this one + // constraint. First clear all + // previous content (which might + // reside there from the previous + // computation on a once coarser + // grid), then add this one line + // constraining the + // first_boundary_dof to the + // sum of other boundary DoFs each + // with weight -1. Finally, close + // the constraints object, i.e. do + // some internal bookkeeping on it + // for faster processing of what is + // to come later: mean_value_constraints.clear (); mean_value_constraints.add_line (first_boundary_dof); for (unsigned int i=first_boundary_dof+1; iDoFTools::make_sparsity_pattern - // and condense the result using - // the hanging node constraints. We - // have no hanging node constraints - // here (since we only refine - // globally in this example), but - // we have this global constraint - // on the boundary. This poses one - // severe problem in this context: - // the SparsityPattern class - // wants us to state beforehand the - // maximal number of entries per - // row, either for all rows or for - // each row separately. There are - // functions in the library which - // can tell you this number in case - // you just have hanging node - // constraints (namely - // DoFHandler::max_coupling_between_dofs), - // but how is this for the present - // case? The difficulty arises - // because the elimination of the - // constrained degree of freedom - // requires a number of additional - // entries in the matrix at places - // that are not so simple to - // determine. We would therefore - // have a problem had we to give a - // maximal number of entries per - // row here. - // - // Since this can be so difficult - // that no reasonable answer can be - // given that allows allocation of - // only a reasonable amount of - // memory, there is a class - // CompressedSparsityPattern, - // that can help us out here. It - // does not require that we know in - // advance how many entries rows - // could have, but allows just - // about any length. It is thus - // significantly more flexible in - // case you do not have good - // estimates of row lengths, - // however at the price that - // building up such a pattern is - // also significantly more - // expensive than building up a - // pattern for which you had - // information in - // advance. 
Nevertheless, as we - // have no other choice here, we'll - // just build such an object by - // initializing it with the - // dimensions of the matrix and - // calling another function - // DoFTools::make_sparsity_pattern - // to get the sparsity pattern due - // to the differential operator, - // then condense it with the - // constraints object which adds - // those positions in the sparsity - // pattern that are required for - // the elimination of the - // constraint. + // Next task is to generate a + // sparsity pattern. This is indeed + // a tricky task here. Usually, we + // just call + // DoFTools::make_sparsity_pattern + // and condense the result using + // the hanging node constraints. We + // have no hanging node constraints + // here (since we only refine + // globally in this example), but + // we have this global constraint + // on the boundary. This poses one + // severe problem in this context: + // the SparsityPattern class + // wants us to state beforehand the + // maximal number of entries per + // row, either for all rows or for + // each row separately. There are + // functions in the library which + // can tell you this number in case + // you just have hanging node + // constraints (namely + // DoFHandler::max_coupling_between_dofs), + // but how is this for the present + // case? The difficulty arises + // because the elimination of the + // constrained degree of freedom + // requires a number of additional + // entries in the matrix at places + // that are not so simple to + // determine. We would therefore + // have a problem had we to give a + // maximal number of entries per + // row here. + // + // Since this can be so difficult + // that no reasonable answer can be + // given that allows allocation of + // only a reasonable amount of + // memory, there is a class + // CompressedSparsityPattern, + // that can help us out here. It + // does not require that we know in + // advance how many entries rows + // could have, but allows just + // about any length. It is thus + // significantly more flexible in + // case you do not have good + // estimates of row lengths, + // however at the price that + // building up such a pattern is + // also significantly more + // expensive than building up a + // pattern for which you had + // information in + // advance. Nevertheless, as we + // have no other choice here, we'll + // just build such an object by + // initializing it with the + // dimensions of the matrix and + // calling another function + // DoFTools::make_sparsity_pattern + // to get the sparsity pattern due + // to the differential operator, + // then condense it with the + // constraints object which adds + // those positions in the sparsity + // pattern that are required for + // the elimination of the + // constraint. CompressedSparsityPattern csp (dof_handler.n_dofs(), - dof_handler.n_dofs()); + dof_handler.n_dofs()); DoFTools::make_sparsity_pattern (dof_handler, csp); mean_value_constraints.condense (csp); - // Finally, once we have the full - // pattern, we can initialize an - // object of type - // SparsityPattern from it and - // in turn initialize the matrix - // with it. 
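                                   // The same sparsity pattern setup,
                                   // pulled together as one short sketch
                                   // so the order of the steps is easy to
                                   // see. It uses only the member objects
                                   // declared earlier in this class and
                                   // assumes that mean_value_constraints
                                   // has already been closed:
    CompressedSparsityPattern csp (dof_handler.n_dofs(),
                                   dof_handler.n_dofs());
    DoFTools::make_sparsity_pattern (dof_handler, csp);
    mean_value_constraints.condense (csp);
    sparsity_pattern.copy_from (csp);         // copying also compresses
    system_matrix.reinit (sparsity_pattern);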
Note that this is - // actually necessary, since the - // CompressedSparsityPattern is - // so inefficient compared to the - // SparsityPattern class due to - // the more flexible data - // structures it has to use, that - // we can impossibly base the - // sparse matrix class on it, but - // rather need an object of type - // SparsityPattern, which we - // generate by copying from the - // intermediate object. - // - // As a further sidenote, you will - // notice that we do not explicitly - // have to compress the - // sparsity pattern here. This, of - // course, is due to the fact that - // the copy_from function - // generates a compressed object - // right from the start, to which - // you cannot add new entries - // anymore. The compress call - // is therefore implicit in the - // copy_from call. + // Finally, once we have the full + // pattern, we can initialize an + // object of type + // SparsityPattern from it and + // in turn initialize the matrix + // with it. Note that this is + // actually necessary, since the + // CompressedSparsityPattern is + // so inefficient compared to the + // SparsityPattern class due to + // the more flexible data + // structures it has to use, that + // we can impossibly base the + // sparse matrix class on it, but + // rather need an object of type + // SparsityPattern, which we + // generate by copying from the + // intermediate object. + // + // As a further sidenote, you will + // notice that we do not explicitly + // have to compress the + // sparsity pattern here. This, of + // course, is due to the fact that + // the copy_from function + // generates a compressed object + // right from the start, to which + // you cannot add new entries + // anymore. The compress call + // is therefore implicit in the + // copy_from call. sparsity_pattern.copy_from (csp); system_matrix.reinit (sparsity_pattern); } - // The next function then assembles - // the linear system of equations, - // solves it, and evaluates the - // solution. This then makes three - // actions, and we will put them into - // eight true statements (excluding - // declaration of variables, and - // handling of temporary - // vectors). Thus, this function is - // something for the very - // lazy. Nevertheless, the functions - // called are rather powerful, and - // through them this function uses a - // good deal of the whole - // library. But let's look at each of - // the steps. + // The next function then assembles + // the linear system of equations, + // solves it, and evaluates the + // solution. This then makes three + // actions, and we will put them into + // eight true statements (excluding + // declaration of variables, and + // handling of temporary + // vectors). Thus, this function is + // something for the very + // lazy. Nevertheless, the functions + // called are rather powerful, and + // through them this function uses a + // good deal of the whole + // library. But let's look at each of + // the steps. template void LaplaceProblem::assemble_and_solve () { - // First, we have to assemble the - // matrix and the right hand - // side. In all previous examples, - // we have investigated various - // ways how to do this - // manually. However, since the - // Laplace matrix and simple right - // hand sides appear so frequently - // in applications, the library - // provides functions for actually - // doing this for you, i.e. they - // perform the loop over all cells, - // setting up the local matrices - // and vectors, and putting them - // together for the end result. 
- // - // The following are the two most - // commonly used ones: creation of - // the Laplace matrix and creation - // of a right hand side vector from - // body or boundary forces. They - // take the mapping object, the - // DoFHandler object - // representing the degrees of - // freedom and the finite element - // in use, a quadrature formula to - // be used, and the output - // object. The function that - // creates a right hand side vector - // also has to take a function - // object describing the - // (continuous) right hand side - // function. - // - // Let us look at the way the - // matrix and body forces are - // integrated: + // First, we have to assemble the + // matrix and the right hand + // side. In all previous examples, + // we have investigated various + // ways how to do this + // manually. However, since the + // Laplace matrix and simple right + // hand sides appear so frequently + // in applications, the library + // provides functions for actually + // doing this for you, i.e. they + // perform the loop over all cells, + // setting up the local matrices + // and vectors, and putting them + // together for the end result. + // + // The following are the two most + // commonly used ones: creation of + // the Laplace matrix and creation + // of a right hand side vector from + // body or boundary forces. They + // take the mapping object, the + // DoFHandler object + // representing the degrees of + // freedom and the finite element + // in use, a quadrature formula to + // be used, and the output + // object. The function that + // creates a right hand side vector + // also has to take a function + // object describing the + // (continuous) right hand side + // function. + // + // Let us look at the way the + // matrix and body forces are + // integrated: const unsigned int gauss_degree = std::max (static_cast(std::ceil(1.*(mapping.get_degree()+1)/2)), - 2U); + 2U); MatrixTools::create_laplace_matrix (mapping, dof_handler, - QGauss(gauss_degree), - system_matrix); + QGauss(gauss_degree), + system_matrix); VectorTools::create_right_hand_side (mapping, dof_handler, - QGauss(gauss_degree), - ConstantFunction(-2), - system_rhs); - // That's quite simple, right? - // - // Two remarks are in order, - // though: First, these functions - // are used in a lot of - // contexts. Maybe you want to - // create a Laplace or mass matrix - // for a vector values finite - // element; or you want to use the - // default Q1 mapping; or you want - // to assembled the matrix with a - // coefficient in the Laplace - // operator. For this reason, there - // are quite a large number of - // variants of these functions in - // the MatrixCreator and - // MatrixTools - // classes. Whenever you need a - // slightly different version of - // these functions than the ones - // called above, it is certainly - // worthwhile to take a look at the - // documentation and to check - // whether something fits your - // needs. - // - // The second remark concerns the - // quadrature formula we use: we - // want to integrate over bilinear - // shape functions, so we know that - // we have to use at least a Gauss2 - // quadrature formula. On the other - // hand, we want to have the - // quadrature rule to have at least - // the order of the boundary - // approximation. Since the order - // of Gauss-r is 2r, and the order - // of the boundary approximation - // using polynomials of degree p is - // p+1, we know that 2r@>=p+1. 
Since - // r has to be an integer and (as - // mentioned above) has to be at - // least 2, this makes up for the - // formula above computing - // gauss_degree. - // - // Since the generation of the body - // force contributions to the right - // hand side vector was so simple, - // we do that all over again for - // the boundary forces as well: - // allocate a vector of the right - // size and call the right - // function. The boundary function - // has constant values, so we can - // generate an object from the - // library on the fly, and we use - // the same quadrature formula as - // above, but this time of lower - // dimension since we integrate - // over faces now instead of cells: + QGauss(gauss_degree), + ConstantFunction(-2), + system_rhs); + // That's quite simple, right? + // + // Two remarks are in order, + // though: First, these functions + // are used in a lot of + // contexts. Maybe you want to + // create a Laplace or mass matrix + // for a vector values finite + // element; or you want to use the + // default Q1 mapping; or you want + // to assembled the matrix with a + // coefficient in the Laplace + // operator. For this reason, there + // are quite a large number of + // variants of these functions in + // the MatrixCreator and + // MatrixTools + // classes. Whenever you need a + // slightly different version of + // these functions than the ones + // called above, it is certainly + // worthwhile to take a look at the + // documentation and to check + // whether something fits your + // needs. + // + // The second remark concerns the + // quadrature formula we use: we + // want to integrate over bilinear + // shape functions, so we know that + // we have to use at least a Gauss2 + // quadrature formula. On the other + // hand, we want to have the + // quadrature rule to have at least + // the order of the boundary + // approximation. Since the order + // of Gauss-r is 2r, and the order + // of the boundary approximation + // using polynomials of degree p is + // p+1, we know that 2r@>=p+1. Since + // r has to be an integer and (as + // mentioned above) has to be at + // least 2, this makes up for the + // formula above computing + // gauss_degree. + // + // Since the generation of the body + // force contributions to the right + // hand side vector was so simple, + // we do that all over again for + // the boundary forces as well: + // allocate a vector of the right + // size and call the right + // function. The boundary function + // has constant values, so we can + // generate an object from the + // library on the fly, and we use + // the same quadrature formula as + // above, but this time of lower + // dimension since we integrate + // over faces now instead of cells: Vector tmp (system_rhs.size()); VectorTools::create_boundary_right_hand_side (mapping, dof_handler, - QGauss(gauss_degree), - ConstantFunction(1), - tmp); - // Then add the contributions from - // the boundary to those from the - // interior of the domain: + QGauss(gauss_degree), + ConstantFunction(1), + tmp); + // Then add the contributions from + // the boundary to those from the + // interior of the domain: system_rhs += tmp; - // For assembling the right hand - // side, we had to use two - // different vector objects, and - // later add them together. 
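                                   // To make the quadrature degree rule
                                   // above concrete, here is a tiny
                                   // standalone check of the formula
                                   // gauss_degree = max(ceil((p+1)/2), 2),
                                   // where p stands for the mapping
                                   // degree. It uses nothing beyond the
                                   // formula already employed in the code
                                   // above:
#include <algorithm>
#include <cmath>
#include <iostream>

int main ()
{
  for (unsigned int p=1; p<=4; ++p)
    {
      const unsigned int gauss_degree
        = std::max (static_cast<unsigned int>(std::ceil (1.*(p+1)/2)),
                    2U);
      std::cout << "p=" << p
                << ": gauss_degree=" << gauss_degree
                << "  (2r=" << 2*gauss_degree
                << " >= p+1=" << p+1 << ")"
                << std::endl;
    }
  return 0;
}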
The - // reason we had to do so is that - // the - // VectorTools::create_right_hand_side - // and - // VectorTools::create_boundary_right_hand_side - // functions first clear the output - // vector, rather than adding up - // their results to previous - // contents. This can reasonably be - // called a design flaw in the - // library made in its infancy, but - // unfortunately things are as they - // are for some time now and it is - // difficult to change such things - // that silently break existing - // code, so we have to live with - // that. - - // Now, the linear system is set - // up, so we can eliminate the one - // degree of freedom which we - // constrained to the other DoFs on - // the boundary for the mean value - // constraint from matrix and right - // hand side vector, and solve the - // system. After that, distribute - // the constraints again, which in - // this case means setting the - // constrained degree of freedom to - // its proper value + // For assembling the right hand + // side, we had to use two + // different vector objects, and + // later add them together. The + // reason we had to do so is that + // the + // VectorTools::create_right_hand_side + // and + // VectorTools::create_boundary_right_hand_side + // functions first clear the output + // vector, rather than adding up + // their results to previous + // contents. This can reasonably be + // called a design flaw in the + // library made in its infancy, but + // unfortunately things are as they + // are for some time now and it is + // difficult to change such things + // that silently break existing + // code, so we have to live with + // that. + + // Now, the linear system is set + // up, so we can eliminate the one + // degree of freedom which we + // constrained to the other DoFs on + // the boundary for the mean value + // constraint from matrix and right + // hand side vector, and solve the + // system. After that, distribute + // the constraints again, which in + // this case means setting the + // constrained degree of freedom to + // its proper value mean_value_constraints.condense (system_matrix); mean_value_constraints.condense (system_rhs); solve (); mean_value_constraints.distribute (solution); - // Finally, evaluate what we got as - // solution. As stated in the - // introduction, we are interested - // in the H1 semi-norm of the - // solution. Here, as well, we have - // a function in the library that - // does this, although in a - // slightly non-obvious way: the - // VectorTools::integrate_difference - // function integrates the norm of - // the difference between a finite - // element function and a - // continuous function. If we - // therefore want the norm of a - // finite element field, we just - // put the continuous function to - // zero. Note that this function, - // just as so many other ones in - // the library as well, has at - // least two versions, one which - // takes a mapping as argument - // (which we make us of here), and - // the one which we have used in - // previous examples which - // implicitly uses MappingQ1. - // Also note that we take a - // quadrature formula of one degree - // higher, in order to avoid - // superconvergence effects where - // the solution happens to be - // especially close to the exact - // solution at certain points (we - // don't know whether this might be - // the case here, but there are - // cases known of this, and we just - // want to make sure): + // Finally, evaluate what we got as + // solution. 
As stated in the + // introduction, we are interested + // in the H1 semi-norm of the + // solution. Here, as well, we have + // a function in the library that + // does this, although in a + // slightly non-obvious way: the + // VectorTools::integrate_difference + // function integrates the norm of + // the difference between a finite + // element function and a + // continuous function. If we + // therefore want the norm of a + // finite element field, we just + // put the continuous function to + // zero. Note that this function, + // just as so many other ones in + // the library as well, has at + // least two versions, one which + // takes a mapping as argument + // (which we make us of here), and + // the one which we have used in + // previous examples which + // implicitly uses MappingQ1. + // Also note that we take a + // quadrature formula of one degree + // higher, in order to avoid + // superconvergence effects where + // the solution happens to be + // especially close to the exact + // solution at certain points (we + // don't know whether this might be + // the case here, but there are + // cases known of this, and we just + // want to make sure): Vector norm_per_cell (triangulation.n_active_cells()); VectorTools::integrate_difference (mapping, dof_handler, - solution, - ZeroFunction(), - norm_per_cell, - QGauss(gauss_degree+1), - VectorTools::H1_seminorm); - // Then, the function just called - // returns its results as a vector - // of values each of which denotes - // the norm on one cell. To get the - // global norm, a simple - // computation shows that we have - // to take the l2 norm of the - // vector: + solution, + ZeroFunction(), + norm_per_cell, + QGauss(gauss_degree+1), + VectorTools::H1_seminorm); + // Then, the function just called + // returns its results as a vector + // of values each of which denotes + // the norm on one cell. To get the + // global norm, a simple + // computation shows that we have + // to take the l2 norm of the + // vector: const double norm = norm_per_cell.l2_norm(); - // Last task -- generate output: + // Last task -- generate output: output_table.add_value ("cells", triangulation.n_active_cells()); output_table.add_value ("|u|_1", norm); output_table.add_value ("error", std::fabs(norm-std::sqrt(3.14159265358/2))); @@ -577,10 +577,10 @@ namespace Step11 - // The following function solving the - // linear system of equations is - // copied from step-5 and is - // explained there in some detail: + // The following function solving the + // linear system of equations is + // copied from step-5 and is + // explained there in some detail: template void LaplaceProblem::solve () { @@ -591,39 +591,39 @@ namespace Step11 preconditioner.initialize(system_matrix, 1.2); cg.solve (system_matrix, solution, system_rhs, - preconditioner); + preconditioner); } - // Finally the main function - // controlling the different steps to - // be performed. Its content is - // rather straightforward, generating - // a triangulation of a circle, - // associating a boundary to it, and - // then doing several cycles on - // subsequently finer grids. 
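                                   // A brief sketch of the "simple
                                   // computation" referred to above: the
                                   // square of the H1 seminorm over the
                                   // whole domain is the sum of the
                                   // squares of the per-cell seminorms, so
                                   // the global value is exactly the l2
                                   // norm of the norm_per_cell vector.
                                   // Written out by hand (equivalent to
                                   // the norm_per_cell.l2_norm() call, and
                                   // assuming the norm_per_cell vector as
                                   // filled above):
    double norm_sqr = 0;
    for (unsigned int cell=0; cell<norm_per_cell.size(); ++cell)
      norm_sqr += norm_per_cell(cell) * norm_per_cell(cell);
    const double norm_by_hand = std::sqrt (norm_sqr);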
Note - // again that we have put mesh - // refinement into the loop header; - // this may be something for a test - // program, but for real applications - // you should consider that this - // implies that the mesh is refined - // after the loop is executed the - // last time since the increment - // clause (the last part of the - // three-parted loop header) is - // executed before the comparison - // part (the second one), which may - // be rather costly if the mesh is - // already quite refined. In that - // case, you should arrange code such - // that the mesh is not further - // refined after the last loop run - // (or you should do it at the - // beginning of each run except for - // the first one). + // Finally the main function + // controlling the different steps to + // be performed. Its content is + // rather straightforward, generating + // a triangulation of a circle, + // associating a boundary to it, and + // then doing several cycles on + // subsequently finer grids. Note + // again that we have put mesh + // refinement into the loop header; + // this may be something for a test + // program, but for real applications + // you should consider that this + // implies that the mesh is refined + // after the loop is executed the + // last time since the increment + // clause (the last part of the + // three-parted loop header) is + // executed before the comparison + // part (the second one), which may + // be rather costly if the mesh is + // already quite refined. In that + // case, you should arrange code such + // that the mesh is not further + // refined after the last loop run + // (or you should do it at the + // beginning of each run except for + // the first one). template void LaplaceProblem::run () { @@ -633,13 +633,13 @@ namespace Step11 for (unsigned int cycle=0; cycle<6; ++cycle, triangulation.refine_global(1)) { - setup_system (); - assemble_and_solve (); + setup_system (); + assemble_and_solve (); }; - // After all the data is generated, - // write a table of results to the - // screen: + // After all the data is generated, + // write a table of results to the + // screen: output_table.set_precision("|u|_1", 6); output_table.set_precision("error", 6); output_table.write_text (std::cout); @@ -649,11 +649,11 @@ namespace Step11 - // Finally the main function. It's - // structure is the same as that used - // in several of the previous - // examples, so probably needs no - // more explanation. + // Finally the main function. It's + // structure is the same as that used + // in several of the previous + // examples, so probably needs no + // more explanation. int main () { try @@ -661,42 +661,42 @@ int main () dealii::deallog.depth_console (0); std::cout.precision(5); - // This is the main loop, doing - // the computations with - // mappings of linear through - // cubic mappings. Note that - // since we need the object of - // type LaplaceProblem@<2@> - // only once, we do not even - // name it, but create an - // unnamed such object and call - // the run function of it, - // subsequent to which it is - // immediately destroyed again. + // This is the main loop, doing + // the computations with + // mappings of linear through + // cubic mappings. Note that + // since we need the object of + // type LaplaceProblem@<2@> + // only once, we do not even + // name it, but create an + // unnamed such object and call + // the run function of it, + // subsequent to which it is + // immediately destroyed again. 
for (unsigned int mapping_degree=1; mapping_degree<=3; ++mapping_degree) - Step11::LaplaceProblem<2>(mapping_degree).run (); + Step11::LaplaceProblem<2>(mapping_degree).run (); } catch (std::exception &exc) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Exception on processing: " << std::endl - << exc.what() << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << exc.what() << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } catch (...) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Unknown exception!" << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; }; diff --git a/deal.II/examples/step-12/step-12.cc b/deal.II/examples/step-12/step-12.cc index 354340adf8..b00be0c0a6 100644 --- a/deal.II/examples/step-12/step-12.cc +++ b/deal.II/examples/step-12/step-12.cc @@ -9,10 +9,10 @@ /* to the file deal.II/doc/license.html for the text and */ /* further information on this license. */ - // The first few files have already - // been covered in previous examples - // and will thus not be further - // commented on: + // The first few files have already + // been covered in previous examples + // and will thus not be further + // commented on: #include #include #include @@ -30,66 +30,66 @@ #include #include #include - // Here the discontinuous finite elements are - // defined. They are used in the same way as - // all other finite elements, though -- as - // you have seen in previous tutorial - // programs -- there isn't much user - // interaction with finite element classes at - // all: the are passed to - // DoFHandler and - // FEValues objects, and that is - // about it. + // Here the discontinuous finite elements are + // defined. They are used in the same way as + // all other finite elements, though -- as + // you have seen in previous tutorial + // programs -- there isn't much user + // interaction with finite element classes at + // all: the are passed to + // DoFHandler and + // FEValues objects, and that is + // about it. #include - // We are going to use the simplest - // possible solver, called Richardson - // iteration, that represents a - // simple defect correction. This, in - // combination with a block SSOR - // preconditioner (defined in - // precondition_block.h), that uses - // the special block matrix structure - // of system matrices arising from DG - // discretizations. + // We are going to use the simplest + // possible solver, called Richardson + // iteration, that represents a + // simple defect correction. This, in + // combination with a block SSOR + // preconditioner (defined in + // precondition_block.h), that uses + // the special block matrix structure + // of system matrices arising from DG + // discretizations. #include #include - // We are going to use gradients as - // refinement indicator. + // We are going to use gradients as + // refinement indicator. 
#include - // Here come the new include files - // for using the MeshWorker - // framework. The first contains the - // class - // MeshWorker::DoFInfo, - // which provides local integrators - // with a mapping between local and - // global degrees of freedom. It - // stores the results of local - // integrals as well in its base - // class Meshworker::LocalResults. - // In the second of these files, we - // find an object of type - // MeshWorker::IntegrationInfo, which - // is mostly a wrapper around a group - // of FEValues objects. The file - // meshworker/simple.h - // contains classes assembling - // locally integrated data into a - // global system containing only a - // single matrix. Finally, we will - // need the file that runs the loop - // over all mesh cells and faces. + // Here come the new include files + // for using the MeshWorker + // framework. The first contains the + // class + // MeshWorker::DoFInfo, + // which provides local integrators + // with a mapping between local and + // global degrees of freedom. It + // stores the results of local + // integrals as well in its base + // class Meshworker::LocalResults. + // In the second of these files, we + // find an object of type + // MeshWorker::IntegrationInfo, which + // is mostly a wrapper around a group + // of FEValues objects. The file + // meshworker/simple.h + // contains classes assembling + // locally integrated data into a + // global system containing only a + // single matrix. Finally, we will + // need the file that runs the loop + // over all mesh cells and faces. #include #include #include #include - // Like in all programs, we finish - // this section by including the - // needed C++ headers and declaring - // we want to use objects in the - // dealii namespace without prefix. + // Like in all programs, we finish + // this section by including the + // needed C++ headers and declaring + // we want to use objects in the + // dealii namespace without prefix. #include #include @@ -98,66 +98,66 @@ namespace Step12 { using namespace dealii; - // @sect3{Equation data} - // - // First, we define a class - // describing the inhomogeneous - // boundary data. Since only its - // values are used, we implement - // value_list(), but leave all other - // functions of Function undefined. + // @sect3{Equation data} + // + // First, we define a class + // describing the inhomogeneous + // boundary data. Since only its + // values are used, we implement + // value_list(), but leave all other + // functions of Function undefined. template class BoundaryValues: public Function { public: BoundaryValues () {}; virtual void value_list (const std::vector > &points, - std::vector &values, - const unsigned int component=0) const; + std::vector &values, + const unsigned int component=0) const; }; - // Given the flow direction, the inflow - // boundary of the unit square $[0,1]^2$ are - // the right and the lower boundaries. We - // prescribe discontinuous boundary values 1 - // and 0 on the x-axis and value 0 on the - // right boundary. The values of this - // function on the outflow boundaries will - // not be used within the DG scheme. + // Given the flow direction, the inflow + // boundary of the unit square $[0,1]^2$ are + // the right and the lower boundaries. We + // prescribe discontinuous boundary values 1 + // and 0 on the x-axis and value 0 on the + // right boundary. The values of this + // function on the outflow boundaries will + // not be used within the DG scheme. 
template void BoundaryValues::value_list(const std::vector > &points, - std::vector &values, - const unsigned int) const + std::vector &values, + const unsigned int) const { Assert(values.size()==points.size(), - ExcDimensionMismatch(values.size(),points.size())); + ExcDimensionMismatch(values.size(),points.size())); for (unsigned int i=0; i class AdvectionProblem { @@ -175,247 +175,247 @@ namespace Step12 Triangulation triangulation; const MappingQ1 mapping; - // Furthermore we want to use DG - // elements of degree 1 (but this - // is only specified in the - // constructor). If you want to - // use a DG method of a different - // degree the whole program stays - // the same, only replace 1 in - // the constructor by the desired - // polynomial degree. + // Furthermore we want to use DG + // elements of degree 1 (but this + // is only specified in the + // constructor). If you want to + // use a DG method of a different + // degree the whole program stays + // the same, only replace 1 in + // the constructor by the desired + // polynomial degree. FE_DGQ fe; DoFHandler dof_handler; - // The next four members represent the - // linear system to be - // solved. system_matrix and - // right_hand_side are - // generated by - // assemble_system(), the - // solution is computed in - // solve(). The - // sparsity_pattern is used - // to determine the location of nonzero - // elements in - // system_matrix. + // The next four members represent the + // linear system to be + // solved. system_matrix and + // right_hand_side are + // generated by + // assemble_system(), the + // solution is computed in + // solve(). The + // sparsity_pattern is used + // to determine the location of nonzero + // elements in + // system_matrix. SparsityPattern sparsity_pattern; SparseMatrix system_matrix; Vector solution; Vector right_hand_side; - // Finally, we have to provide - // functions that assemble the - // cell, boundary, and inner face - // terms. Within the MeshWorker - // framework, the loop over all - // cells and much of the setup of - // operations will be done - // outside this class, so all we - // have to provide are these - // three operations. They will - // then work on intermediate - // objects for which first, we - // here define typedefs to the - // info objects handed to the - // local integration functions in - // order to make our life easier - // below. + // Finally, we have to provide + // functions that assemble the + // cell, boundary, and inner face + // terms. Within the MeshWorker + // framework, the loop over all + // cells and much of the setup of + // operations will be done + // outside this class, so all we + // have to provide are these + // three operations. They will + // then work on intermediate + // objects for which first, we + // here define typedefs to the + // info objects handed to the + // local integration functions in + // order to make our life easier + // below. typedef MeshWorker::DoFInfo DoFInfo; typedef MeshWorker::IntegrationInfo CellInfo; - // The following three functions - // are then the ones that get called - // inside the generic loop over all - // cells and faces. They are the - // ones doing the actual - // integration. - // - // In our code below, these - // functions do not access member - // variables of the current - // class, so we can mark them as - // static and simply - // pass pointers to these - // functions to the MeshWorker - // framework. 
If, however, these - // functions would want to access - // member variables (or needed - // additional arguments beyond - // the ones specified below), we - // could use the facilities of - // boost::bind (or std::bind, - // respectively) to provide the - // MeshWorker framework with - // objects that act as if they - // had the required number and - // types of arguments, but have - // in fact other arguments - // already bound. + // The following three functions + // are then the ones that get called + // inside the generic loop over all + // cells and faces. They are the + // ones doing the actual + // integration. + // + // In our code below, these + // functions do not access member + // variables of the current + // class, so we can mark them as + // static and simply + // pass pointers to these + // functions to the MeshWorker + // framework. If, however, these + // functions would want to access + // member variables (or needed + // additional arguments beyond + // the ones specified below), we + // could use the facilities of + // boost::bind (or std::bind, + // respectively) to provide the + // MeshWorker framework with + // objects that act as if they + // had the required number and + // types of arguments, but have + // in fact other arguments + // already bound. static void integrate_cell_term (DoFInfo& dinfo, - CellInfo& info); + CellInfo& info); static void integrate_boundary_term (DoFInfo& dinfo, - CellInfo& info); + CellInfo& info); static void integrate_face_term (DoFInfo& dinfo1, - DoFInfo& dinfo2, - CellInfo& info1, - CellInfo& info2); + DoFInfo& dinfo2, + CellInfo& info1, + CellInfo& info2); }; - // We start with the constructor. The 1 in - // the constructor call of fe is - // the polynomial degree. + // We start with the constructor. The 1 in + // the constructor call of fe is + // the polynomial degree. template AdvectionProblem::AdvectionProblem () - : - mapping (), - fe (1), - dof_handler (triangulation) + : + mapping (), + fe (1), + dof_handler (triangulation) {} template void AdvectionProblem::setup_system () { - // In the function that sets up the usual - // finite element data structures, we first - // need to distribute the DoFs. + // In the function that sets up the usual + // finite element data structures, we first + // need to distribute the DoFs. dof_handler.distribute_dofs (fe); - // We start by generating the sparsity - // pattern. To this end, we first fill an - // intermediate object of type - // CompressedSparsityPattern with the - // couplings appearing in the system. After - // building the pattern, this object is - // copied to sparsity_pattern - // and can be discarded. - - // To build the sparsity pattern for DG - // discretizations, we can call the - // function analogue to - // DoFTools::make_sparsity_pattern, which - // is called - // DoFTools::make_flux_sparsity_pattern: + // We start by generating the sparsity + // pattern. To this end, we first fill an + // intermediate object of type + // CompressedSparsityPattern with the + // couplings appearing in the system. After + // building the pattern, this object is + // copied to sparsity_pattern + // and can be discarded. 
+ + // To build the sparsity pattern for DG + // discretizations, we can call the + // function analogue to + // DoFTools::make_sparsity_pattern, which + // is called + // DoFTools::make_flux_sparsity_pattern: CompressedSparsityPattern c_sparsity(dof_handler.n_dofs()); DoFTools::make_flux_sparsity_pattern (dof_handler, c_sparsity); sparsity_pattern.copy_from(c_sparsity); - // Finally, we set up the structure - // of all components of the linear system. + // Finally, we set up the structure + // of all components of the linear system. system_matrix.reinit (sparsity_pattern); solution.reinit (dof_handler.n_dofs()); right_hand_side.reinit (dof_handler.n_dofs()); } - // @sect4{The assemble_system function} + // @sect4{The assemble_system function} - // Here we see the major difference to - // assembling by hand. Instead of writing - // loops over cells and faces, we leave all - // this to the MeshWorker framework. In order - // to do so, we just have to define local - // integration functions and use one of the - // classes in namespace MeshWorker::Assembler - // to build the global system. + // Here we see the major difference to + // assembling by hand. Instead of writing + // loops over cells and faces, we leave all + // this to the MeshWorker framework. In order + // to do so, we just have to define local + // integration functions and use one of the + // classes in namespace MeshWorker::Assembler + // to build the global system. template void AdvectionProblem::assemble_system () { - // This is the magic object, which - // knows everything about the data - // structures and local - // integration. This is the object - // doing the work in the function - // MeshWorker::loop(), which is - // implicitly called by - // MeshWorker::integration_loop() - // below. After the functions to - // which we provide pointers did - // the local integration, the - // MeshWorker::Assembler::SystemSimple - // object distributes these into - // the global sparse matrix and the - // right hand side vector. + // This is the magic object, which + // knows everything about the data + // structures and local + // integration. This is the object + // doing the work in the function + // MeshWorker::loop(), which is + // implicitly called by + // MeshWorker::integration_loop() + // below. After the functions to + // which we provide pointers did + // the local integration, the + // MeshWorker::Assembler::SystemSimple + // object distributes these into + // the global sparse matrix and the + // right hand side vector. MeshWorker::IntegrationInfoBox info_box; - // First, we initialize the - // quadrature formulae and the - // update flags in the worker base - // class. For quadrature, we play - // safe and use a QGauss formula - // with number of points one higher - // than the polynomial degree - // used. Since the quadratures for - // cells, boundary and interior - // faces can be selected - // independently, we have to hand - // over this value three times. + // First, we initialize the + // quadrature formulae and the + // update flags in the worker base + // class. For quadrature, we play + // safe and use a QGauss formula + // with number of points one higher + // than the polynomial degree + // used. Since the quadratures for + // cells, boundary and interior + // faces can be selected + // independently, we have to hand + // over this value three times. 
const unsigned int n_gauss_points = dof_handler.get_fe().degree+1; info_box.initialize_gauss_quadrature(n_gauss_points, - n_gauss_points, - n_gauss_points); - - // These are the types of values we - // need for integrating our - // system. They are added to the - // flags used on cells, boundary - // and interior faces, as well as - // interior neighbor faces, which is - // forced by the four @p true - // values. + n_gauss_points, + n_gauss_points); + + // These are the types of values we + // need for integrating our + // system. They are added to the + // flags used on cells, boundary + // and interior faces, as well as + // interior neighbor faces, which is + // forced by the four @p true + // values. info_box.initialize_update_flags(); UpdateFlags update_flags = update_quadrature_points | - update_values | - update_gradients; + update_values | + update_gradients; info_box.add_update_flags(update_flags, true, true, true, true); - // After preparing all data in - // info_box, we initialize - // the FEValus objects in there. + // After preparing all data in + // info_box, we initialize + // the FEValus objects in there. info_box.initialize(fe, mapping); - // The object created so far helps - // us do the local integration on - // each cell and face. Now, we need - // an object which receives the - // integrated (local) data and - // forwards them to the assembler. + // The object created so far helps + // us do the local integration on + // each cell and face. Now, we need + // an object which receives the + // integrated (local) data and + // forwards them to the assembler. MeshWorker::DoFInfo dof_info(dof_handler); - // Now, we have to create the - // assembler object and tell it, - // where to put the local - // data. These will be our system - // matrix and the right hand side. + // Now, we have to create the + // assembler object and tell it, + // where to put the local + // data. These will be our system + // matrix and the right hand side. MeshWorker::Assembler::SystemSimple, Vector > assembler; assembler.initialize(system_matrix, right_hand_side); - // Finally, the integration loop - // over all active cells - // (determined by the first - // argument, which is an active - // iterator). - // - // As noted in the discussion when - // declaring the local integration - // functions in the class - // declaration, the arguments - // expected by the assembling - // integrator class are not - // actually function - // pointers. Rather, they are - // objects that can be called like - // functions with a certain number - // of arguments. Consequently, we - // could also pass objects with - // appropriate operator() - // implementations here, or the - // result of std::bind if the local - // integrators were, for example, - // non-static member functions. + // Finally, the integration loop + // over all active cells + // (determined by the first + // argument, which is an active + // iterator). + // + // As noted in the discussion when + // declaring the local integration + // functions in the class + // declaration, the arguments + // expected by the assembling + // integrator class are not + // actually function + // pointers. Rather, they are + // objects that can be called like + // functions with a certain number + // of arguments. Consequently, we + // could also pass objects with + // appropriate operator() + // implementations here, or the + // result of std::bind if the local + // integrators were, for example, + // non-static member functions. 
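                                     // Before the actual loop call that
                                     // follows below, here is a small,
                                     // self-contained piece of plain C++
                                     // (no deal.II; all names in it are
                                     // made up for the illustration) that
                                     // shows the "callable object" idea
                                     // just described: a generic loop
                                     // accepts anything that can be called
                                     // with two reference arguments,
                                     // whether that is a free/static
                                     // function or a non-static member
                                     // function adapted with std::bind.
#include <functional>
#include <iostream>

struct DoFInfoLike  { int    cell_index; };
struct CellInfoLike { double volume;     };

// The generic loop only requires a callable with the right signature,
// much like the assembling integrator classes described above.
template <typename Worker>
void toy_loop (Worker cell_worker)
{
  DoFInfoLike  dinfo = { 42 };
  CellInfoLike cinfo = { 0.125 };
  cell_worker (dinfo, cinfo);
}

// Variant 1: a free (or static member) function, as this program uses.
void integrate_cell (DoFInfoLike &dinfo, CellInfoLike &cinfo)
{
  std::cout << "free function: cell " << dinfo.cell_index
            << ", volume " << cinfo.volume << std::endl;
}

// Variant 2: a non-static member function, made callable via std::bind.
struct ToyProblem
{
  double scaling;

  void integrate_cell (DoFInfoLike &dinfo, CellInfoLike &cinfo) const
  {
    std::cout << "member function: cell " << dinfo.cell_index
              << ", scaled volume " << scaling * cinfo.volume << std::endl;
  }
};

int main ()
{
  toy_loop (&integrate_cell);

  ToyProblem problem = { 2.0 };
  toy_loop (std::bind (&ToyProblem::integrate_cell, &problem,
                       std::placeholders::_1, std::placeholders::_2));
  return 0;
}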
MeshWorker::integration_loop (dof_handler.begin_active(), dof_handler.end(), dof_info, info_box, @@ -426,64 +426,64 @@ namespace Step12 } - // @sect4{The local integrators} + // @sect4{The local integrators} - // These are the functions given to - // the MeshWorker::integration_loop() - // called just above. They compute - // the local contributions to the - // system matrix and right hand side - // on cells and faces. + // These are the functions given to + // the MeshWorker::integration_loop() + // called just above. They compute + // the local contributions to the + // system matrix and right hand side + // on cells and faces. template void AdvectionProblem::integrate_cell_term (DoFInfo& dinfo, - CellInfo& info) + CellInfo& info) { - // First, let us retrieve some of - // the objects used here from - // @p info. Note that these objects - // can handle much more complex - // structures, thus the access here - // looks more complicated than - // might seem necessary. + // First, let us retrieve some of + // the objects used here from + // @p info. Note that these objects + // can handle much more complex + // structures, thus the access here + // looks more complicated than + // might seem necessary. const FEValuesBase& fe_v = info.fe_values(); FullMatrix& local_matrix = dinfo.matrix(0).matrix; const std::vector &JxW = fe_v.get_JxW_values (); - // With these objects, we continue - // local integration like - // always. First, we loop over the - // quadrature points and compute - // the advection vector in the - // current point. + // With these objects, we continue + // local integration like + // always. First, we loop over the + // quadrature points and compute + // the advection vector in the + // current point. for (unsigned int point=0; point beta; - beta(0) = -fe_v.quadrature_point(point)(1); - beta(1) = fe_v.quadrature_point(point)(0); - beta /= beta.norm(); - - // We solve a homogeneous - // equation, thus no right - // hand side shows up in - // the cell term. - // What's left is - // integrating the matrix entries. - for (unsigned int i=0; i beta; + beta(0) = -fe_v.quadrature_point(point)(1); + beta(1) = fe_v.quadrature_point(point)(0); + beta /= beta.norm(); + + // We solve a homogeneous + // equation, thus no right + // hand side shows up in + // the cell term. + // What's left is + // integrating the matrix entries. + for (unsigned int i=0; i void AdvectionProblem::integrate_boundary_term (DoFInfo& dinfo, - CellInfo& info) + CellInfo& info) { const FEValuesBase& fe_v = info.fe_values(); FullMatrix& local_matrix = dinfo.matrix(0).matrix; @@ -499,263 +499,263 @@ namespace Step12 for (unsigned int point=0; point beta; - beta(0) = -fe_v.quadrature_point(point)(1); - beta(1) = fe_v.quadrature_point(point)(0); - beta /= beta.norm(); - - const double beta_n=beta * normals[point]; - if (beta_n>0) - for (unsigned int i=0; i beta; + beta(0) = -fe_v.quadrature_point(point)(1); + beta(1) = fe_v.quadrature_point(point)(0); + beta /= beta.norm(); + + const double beta_n=beta * normals[point]; + if (beta_n>0) + for (unsigned int i=0; i void AdvectionProblem::integrate_face_term (DoFInfo& dinfo1, - DoFInfo& dinfo2, - CellInfo& info1, - CellInfo& info2) + DoFInfo& dinfo2, + CellInfo& info1, + CellInfo& info2) { - // For quadrature points, weights, - // etc., we use the - // FEValuesBase object of the - // first argument. + // For quadrature points, weights, + // etc., we use the + // FEValuesBase object of the + // first argument. 
const FEValuesBase& fe_v = info1.fe_values(); - // For additional shape functions, - // we have to ask the neighbors - // FEValuesBase. + // For additional shape functions, + // we have to ask the neighbors + // FEValuesBase. const FEValuesBase& fe_v_neighbor = info2.fe_values(); - // Then we get references to the - // four local matrices. The letters - // u and v refer to trial and test - // functions, respectively. The - // %numbers indicate the cells - // provided by info1 and info2. By - // convention, the two matrices in - // each info object refer to the - // test functions on the respective - // cell. The first matrix contains the - // interior couplings of that cell, - // while the second contains the - // couplings between cells. + // Then we get references to the + // four local matrices. The letters + // u and v refer to trial and test + // functions, respectively. The + // %numbers indicate the cells + // provided by info1 and info2. By + // convention, the two matrices in + // each info object refer to the + // test functions on the respective + // cell. The first matrix contains the + // interior couplings of that cell, + // while the second contains the + // couplings between cells. FullMatrix& u1_v1_matrix = dinfo1.matrix(0,false).matrix; FullMatrix& u2_v1_matrix = dinfo1.matrix(0,true).matrix; FullMatrix& u1_v2_matrix = dinfo2.matrix(0,true).matrix; FullMatrix& u2_v2_matrix = dinfo2.matrix(0,false).matrix; - // Here, following the previous - // functions, we would have the - // local right hand side - // vectors. Fortunately, the - // interface terms only involve the - // solution and the right hand side - // does not receive any contributions. + // Here, following the previous + // functions, we would have the + // local right hand side + // vectors. Fortunately, the + // interface terms only involve the + // solution and the right hand side + // does not receive any contributions. const std::vector &JxW = fe_v.get_JxW_values (); const std::vector > &normals = fe_v.get_normal_vectors (); for (unsigned int point=0; point beta; - beta(0) = -fe_v.quadrature_point(point)(1); - beta(1) = fe_v.quadrature_point(point)(0); - beta /= beta.norm(); - - const double beta_n=beta * normals[point]; - if (beta_n>0) - { - // This term we've already - // seen: - for (unsigned int i=0; i beta; + beta(0) = -fe_v.quadrature_point(point)(1); + beta(1) = fe_v.quadrature_point(point)(0); + beta /= beta.norm(); + + const double beta_n=beta * normals[point]; + if (beta_n>0) + { + // This term we've already + // seen: + for (unsigned int i=0; i void AdvectionProblem::solve (Vector &solution) { SolverControl solver_control (1000, 1e-12); SolverRichardson<> solver (solver_control); - // Here we create the - // preconditioner, + // Here we create the + // preconditioner, PreconditionBlockSSOR > preconditioner; - // then assign the matrix to it and - // set the right block size: + // then assign the matrix to it and + // set the right block size: preconditioner.initialize(system_matrix, fe.dofs_per_cell); - // After these preparations we are - // ready to start the linear solver. + // After these preparations we are + // ready to start the linear solver. solver.solve (system_matrix, solution, right_hand_side, - preconditioner); + preconditioner); } - // We refine the grid according to a - // very simple refinement criterion, - // namely an approximation to the - // gradient of the solution. As here - // we consider the DG(1) method - // (i.e. 
we use piecewise bilinear - // shape functions) we could simply - // compute the gradients on each - // cell. But we do not want to base - // our refinement indicator on the - // gradients on each cell only, but - // want to base them also on jumps of - // the discontinuous solution - // function over faces between - // neighboring cells. The simplest - // way of doing that is to compute - // approximative gradients by - // difference quotients including the - // cell under consideration and its - // neighbors. This is done by the - // DerivativeApproximation class - // that computes the approximate - // gradients in a way similar to the - // GradientEstimation described - // in step-9 of this tutorial. In - // fact, the - // DerivativeApproximation class - // was developed following the - // GradientEstimation class of - // step-9. Relating to the - // discussion in step-9, here we - // consider $h^{1+d/2}|\nabla_h - // u_h|$. Furthermore we note that we - // do not consider approximate second - // derivatives because solutions to - // the linear advection equation are - // in general not in $H^2$ but in $H^1$ - // (to be more precise, in $H^1_\beta$) - // only. + // We refine the grid according to a + // very simple refinement criterion, + // namely an approximation to the + // gradient of the solution. As here + // we consider the DG(1) method + // (i.e. we use piecewise bilinear + // shape functions) we could simply + // compute the gradients on each + // cell. But we do not want to base + // our refinement indicator on the + // gradients on each cell only, but + // want to base them also on jumps of + // the discontinuous solution + // function over faces between + // neighboring cells. The simplest + // way of doing that is to compute + // approximative gradients by + // difference quotients including the + // cell under consideration and its + // neighbors. This is done by the + // DerivativeApproximation class + // that computes the approximate + // gradients in a way similar to the + // GradientEstimation described + // in step-9 of this tutorial. In + // fact, the + // DerivativeApproximation class + // was developed following the + // GradientEstimation class of + // step-9. Relating to the + // discussion in step-9, here we + // consider $h^{1+d/2}|\nabla_h + // u_h|$. Furthermore we note that we + // do not consider approximate second + // derivatives because solutions to + // the linear advection equation are + // in general not in $H^2$ but in $H^1$ + // (to be more precise, in $H^1_\beta$) + // only. template void AdvectionProblem::refine_grid () { - // The DerivativeApproximation - // class computes the gradients to - // float precision. This is - // sufficient as they are - // approximate and serve as - // refinement indicators only. + // The DerivativeApproximation + // class computes the gradients to + // float precision. This is + // sufficient as they are + // approximate and serve as + // refinement indicators only. 
Vector gradient_indicator (triangulation.n_active_cells()); - // Now the approximate gradients - // are computed + // Now the approximate gradients + // are computed DerivativeApproximation::approximate_gradient (mapping, - dof_handler, - solution, - gradient_indicator); + dof_handler, + solution, + gradient_indicator); - // and they are cell-wise scaled by - // the factor $h^{1+d/2}$ + // and they are cell-wise scaled by + // the factor $h^{1+d/2}$ typename DoFHandler::active_cell_iterator cell = dof_handler.begin_active(), endc = dof_handler.end(); for (unsigned int cell_no=0; cell!=endc; ++cell, ++cell_no) gradient_indicator(cell_no)*=std::pow(cell->diameter(), 1+1.0*dim/2); - // Finally they serve as refinement - // indicator. + // Finally they serve as refinement + // indicator. GridRefinement::refine_and_coarsen_fixed_number (triangulation, - gradient_indicator, - 0.3, 0.1); + gradient_indicator, + 0.3, 0.1); triangulation.execute_coarsening_and_refinement (); } - // The output of this program - // consists of eps-files of the - // adaptively refined grids and the - // numerical solutions given in - // gnuplot format. This was covered - // in previous examples and will not - // be further commented on. + // The output of this program + // consists of eps-files of the + // adaptively refined grids and the + // numerical solutions given in + // gnuplot format. This was covered + // in previous examples and will not + // be further commented on. template void AdvectionProblem::output_results (const unsigned int cycle) const { - // Write the grid in eps format. + // Write the grid in eps format. std::string filename = "grid-"; filename += ('0' + cycle); Assert (cycle < 10, ExcInternalError()); @@ -767,8 +767,8 @@ namespace Step12 GridOut grid_out; grid_out.write_eps (triangulation, eps_output); - // Output of the solution in - // gnuplot format. + // Output of the solution in + // gnuplot format. filename = "sol-"; filename += ('0' + cycle); Assert (cycle < 10, ExcInternalError()); @@ -787,47 +787,47 @@ namespace Step12 } - // The following run function is - // similar to previous examples. + // The following run function is + // similar to previous examples. template void AdvectionProblem::run () { for (unsigned int cycle=0; cycle<6; ++cycle) { - deallog << "Cycle " << cycle << std::endl; + deallog << "Cycle " << cycle << std::endl; - if (cycle == 0) - { - GridGenerator::hyper_cube (triangulation); + if (cycle == 0) + { + GridGenerator::hyper_cube (triangulation); - triangulation.refine_global (3); - } - else - refine_grid (); + triangulation.refine_global (3); + } + else + refine_grid (); - deallog << "Number of active cells: " - << triangulation.n_active_cells() - << std::endl; + deallog << "Number of active cells: " + << triangulation.n_active_cells() + << std::endl; - setup_system (); + setup_system (); - deallog << "Number of degrees of freedom: " - << dof_handler.n_dofs() - << std::endl; + deallog << "Number of degrees of freedom: " + << dof_handler.n_dofs() + << std::endl; - assemble_system (); - solve (solution); + assemble_system (); + solve (solution); - output_results (cycle); + output_results (cycle); } } } - // The following main function is - // similar to previous examples as well, and - // need not be commented on. + // The following main function is + // similar to previous examples as well, and + // need not be commented on. 
int main () { try @@ -838,24 +838,24 @@ int main () catch (std::exception &exc) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Exception on processing: " << std::endl - << exc.what() << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << exc.what() << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } catch (...) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Unknown exception!" << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; }; diff --git a/deal.II/examples/step-13/step-13.cc b/deal.II/examples/step-13/step-13.cc index 1a00ddf32a..9c911d6d37 100644 --- a/deal.II/examples/step-13/step-13.cc +++ b/deal.II/examples/step-13/step-13.cc @@ -10,15 +10,15 @@ /* further information on this license. */ - // As in all programs, we start with - // a list of include files from the - // library, and as usual they are in - // the standard order which is - // base -- lac -- grid -- - // dofs -- fe -- numerics - // (as each of these categories - // roughly builds upon previous - // ones), then C++ standard headers: + // As in all programs, we start with + // a list of include files from the + // library, and as usual they are in + // the standard order which is + // base -- lac -- grid -- + // dofs -- fe -- numerics + // (as each of these categories + // roughly builds upon previous + // ones), then C++ standard headers: #include #include #include @@ -45,145 +45,145 @@ #include #include - // Now for the C++ standard headers: + // Now for the C++ standard headers: #include #include #include #include - // The last step is as in all - // previous programs: + // The last step is as in all + // previous programs: namespace Step13 { using namespace dealii; - // @sect3{Evaluation of the solution} - - // As for the program itself, we - // first define classes that evaluate - // the solutions of a Laplace - // equation. In fact, they can - // evaluate every kind of solution, - // as long as it is described by a - // DoFHandler object, and a - // solution vector. We define them - // here first, even before the - // classes that actually generate the - // solution to be evaluated, since we - // need to declare an abstract base - // class that the solver classes can - // refer to. - // - // From an abstract point of view, we - // declare a pure base class - // that provides an evaluation - // operator() which will - // do the evaluation of the solution - // (whatever derived classes might - // consider an evaluation). Since - // this is the only real function of - // this base class (except for some - // bookkeeping machinery), one - // usually terms such a class that - // only has an operator() a - // functor in C++ terminology, - // since it is used just like a - // function object. - // - // Objects of this functor type will - // then later be passed to the solver - // object, which applies it to the - // solution just computed. 
The - // evaluation objects may then - // extract any quantity they like - // from the solution. The advantage - // of putting these evaluation - // functions into a separate - // hierarchy of classes is that by - // design they cannot use the - // internals of the solver object and - // are therefore independent of - // changes to the way the solver - // works. Furthermore, it is trivial - // to write another evaluation class - // without modifying the solver - // class, which speeds up programming - // (not being able to use internals - // of another class also means that - // you do not have to worry about - // them -- programming evaluators is - // usually a rather quickly done - // task), as well as compilation (if - // solver and evaluation classes are - // put into different files: the - // solver only needs to see the - // declaration of the abstract base - // class, and therefore does not need - // to be recompiled upon addition of - // a new evaluation class, or - // modification of an old one). - // On a related note, you can reuse - // the evaluation classes for other - // projects, solving different - // equations. - // - // In order to improve separation of - // code into different modules, we - // put the evaluation classes into a - // namespace of their own. This makes - // it easier to actually solve - // different equations in the same - // program, by assembling it from - // existing building blocks. The - // reason for this is that classes - // for similar purposes tend to have - // the same name, although they were - // developed in different - // contexts. In order to be able to - // use them together in one program, - // it is necessary that they are - // placed in different - // namespaces. This we do here: + // @sect3{Evaluation of the solution} + + // As for the program itself, we + // first define classes that evaluate + // the solutions of a Laplace + // equation. In fact, they can + // evaluate every kind of solution, + // as long as it is described by a + // DoFHandler object, and a + // solution vector. We define them + // here first, even before the + // classes that actually generate the + // solution to be evaluated, since we + // need to declare an abstract base + // class that the solver classes can + // refer to. + // + // From an abstract point of view, we + // declare a pure base class + // that provides an evaluation + // operator() which will + // do the evaluation of the solution + // (whatever derived classes might + // consider an evaluation). Since + // this is the only real function of + // this base class (except for some + // bookkeeping machinery), one + // usually terms such a class that + // only has an operator() a + // functor in C++ terminology, + // since it is used just like a + // function object. + // + // Objects of this functor type will + // then later be passed to the solver + // object, which applies it to the + // solution just computed. The + // evaluation objects may then + // extract any quantity they like + // from the solution. The advantage + // of putting these evaluation + // functions into a separate + // hierarchy of classes is that by + // design they cannot use the + // internals of the solver object and + // are therefore independent of + // changes to the way the solver + // works. 
Furthermore, it is trivial + // to write another evaluation class + // without modifying the solver + // class, which speeds up programming + // (not being able to use internals + // of another class also means that + // you do not have to worry about + // them -- programming evaluators is + // usually a rather quickly done + // task), as well as compilation (if + // solver and evaluation classes are + // put into different files: the + // solver only needs to see the + // declaration of the abstract base + // class, and therefore does not need + // to be recompiled upon addition of + // a new evaluation class, or + // modification of an old one). + // On a related note, you can reuse + // the evaluation classes for other + // projects, solving different + // equations. + // + // In order to improve separation of + // code into different modules, we + // put the evaluation classes into a + // namespace of their own. This makes + // it easier to actually solve + // different equations in the same + // program, by assembling it from + // existing building blocks. The + // reason for this is that classes + // for similar purposes tend to have + // the same name, although they were + // developed in different + // contexts. In order to be able to + // use them together in one program, + // it is necessary that they are + // placed in different + // namespaces. This we do here: namespace Evaluation { - // Now for the abstract base class - // of evaluation classes: its main - // purpose is to declare a pure - // virtual function operator() - // taking a DoFHandler object, - // and the solution vector. In - // order to be able to use pointers - // to this base class only, it also - // has to declare a virtual - // destructor, which however does - // nothing. Besides this, it only - // provides for a little bit of - // bookkeeping: since we usually - // want to evaluate solutions on - // subsequent refinement levels, we - // store the number of the present - // refinement cycle, and provide a - // function to change this number. + // Now for the abstract base class + // of evaluation classes: its main + // purpose is to declare a pure + // virtual function operator() + // taking a DoFHandler object, + // and the solution vector. In + // order to be able to use pointers + // to this base class only, it also + // has to declare a virtual + // destructor, which however does + // nothing. Besides this, it only + // provides for a little bit of + // bookkeeping: since we usually + // want to evaluate solutions on + // subsequent refinement levels, we + // store the number of the present + // refinement cycle, and provide a + // function to change this number. 
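  // Stripped of all deal.II specifics, the functor pattern just
  // described boils down to very little code. The standalone sketch
  // below uses invented names, and a plain std::string stands in for
  // the DoFHandler/solution pair; it only shows how a solver can apply
  // a list of evaluator objects through a common abstract interface
  // without knowing anything about what each of them does:
#include <iostream>
#include <string>
#include <vector>

struct EvaluatorBase
{
  virtual ~EvaluatorBase () {}
  virtual void operator () (const std::string &solution) const = 0;
};

struct PrintLength : public EvaluatorBase
{
  virtual void operator () (const std::string &solution) const
  {
    std::cout << "solution has " << solution.size() << " characters" << std::endl;
  }
};

void run_evaluators (const std::string                        &solution,
                     const std::vector<const EvaluatorBase*>  &evaluators)
{
  // the 'solver' side: hand the freshly computed solution to each
  // evaluator in turn
  for (std::size_t i = 0; i < evaluators.size(); ++i)
    (*evaluators[i]) (solution);
}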
template class EvaluationBase { public: - virtual ~EvaluationBase (); + virtual ~EvaluationBase (); - void set_refinement_cycle (const unsigned int refinement_cycle); + void set_refinement_cycle (const unsigned int refinement_cycle); - virtual void operator () (const DoFHandler &dof_handler, - const Vector &solution) const = 0; + virtual void operator () (const DoFHandler &dof_handler, + const Vector &solution) const = 0; protected: - unsigned int refinement_cycle; + unsigned int refinement_cycle; }; - // After the declaration has been - // discussed above, the - // implementation is rather - // straightforward: + // After the declaration has been + // discussed above, the + // implementation is rather + // straightforward: template EvaluationBase::~EvaluationBase () {} @@ -198,303 +198,303 @@ namespace Step13 } - // @sect4{%Point evaluation} - - // The next thing is to implement - // actual evaluation classes. As - // noted in the introduction, we'd - // like to extract a point value - // from the solution, so the first - // class does this in its - // operator(). The actual point - // is given to this class through - // the constructor, as well as a - // table object into which it will - // put its findings. - // - // Finding out the value of a - // finite element field at an - // arbitrary point is rather - // difficult, if we cannot rely on - // knowing the actual finite - // element used, since then we - // cannot, for example, interpolate - // between nodes. For simplicity, - // we therefore assume here that - // the point at which we want to - // evaluate the field is actually a - // node. If, in the process of - // evaluating the solution, we find - // that we did not encounter this - // point upon looping over all - // vertices, we then have to throw - // an exception in order to signal - // to the calling functions that - // something has gone wrong, rather - // than silently ignore this error. - // - // In the step-9 example program, - // we have already seen how such an - // exception class can be declared, - // using the DeclExceptionN - // macros. We use this mechanism - // here again. - // - // From this, the actual - // declaration of this class should - // be evident. Note that of course - // even if we do not list a - // destructor explicitely, an - // implicit destructor is generated - // from the compiler, and it is - // virtual just as the one of the - // base class. + // @sect4{%Point evaluation} + + // The next thing is to implement + // actual evaluation classes. As + // noted in the introduction, we'd + // like to extract a point value + // from the solution, so the first + // class does this in its + // operator(). The actual point + // is given to this class through + // the constructor, as well as a + // table object into which it will + // put its findings. + // + // Finding out the value of a + // finite element field at an + // arbitrary point is rather + // difficult, if we cannot rely on + // knowing the actual finite + // element used, since then we + // cannot, for example, interpolate + // between nodes. For simplicity, + // we therefore assume here that + // the point at which we want to + // evaluate the field is actually a + // node. If, in the process of + // evaluating the solution, we find + // that we did not encounter this + // point upon looping over all + // vertices, we then have to throw + // an exception in order to signal + // to the calling functions that + // something has gone wrong, rather + // than silently ignore this error. 
+ // + // In the step-9 example program, + // we have already seen how such an + // exception class can be declared, + // using the DeclExceptionN + // macros. We use this mechanism + // here again. + // + // From this, the actual + // declaration of this class should + // be evident. Note that of course + // even if we do not list a + // destructor explicitely, an + // implicit destructor is generated + // from the compiler, and it is + // virtual just as the one of the + // base class. template class PointValueEvaluation : public EvaluationBase { public: - PointValueEvaluation (const Point &evaluation_point, - TableHandler &results_table); + PointValueEvaluation (const Point &evaluation_point, + TableHandler &results_table); - virtual void operator () (const DoFHandler &dof_handler, - const Vector &solution) const; + virtual void operator () (const DoFHandler &dof_handler, + const Vector &solution) const; - DeclException1 (ExcEvaluationPointNotFound, - Point, - << "The evaluation point " << arg1 - << " was not found among the vertices of the present grid."); + DeclException1 (ExcEvaluationPointNotFound, + Point, + << "The evaluation point " << arg1 + << " was not found among the vertices of the present grid."); private: - const Point evaluation_point; - TableHandler &results_table; + const Point evaluation_point; + TableHandler &results_table; }; - // As for the definition, the - // constructor is trivial, just - // taking data and storing it in - // object-local ones: + // As for the definition, the + // constructor is trivial, just + // taking data and storing it in + // object-local ones: template PointValueEvaluation:: PointValueEvaluation (const Point &evaluation_point, - TableHandler &results_table) - : - evaluation_point (evaluation_point), - results_table (results_table) + TableHandler &results_table) + : + evaluation_point (evaluation_point), + results_table (results_table) {} - // Now for the function that is - // mainly of interest in this - // class, the computation of the - // point value: + // Now for the function that is + // mainly of interest in this + // class, the computation of the + // point value: template void PointValueEvaluation:: operator () (const DoFHandler &dof_handler, - const Vector &solution) const + const Vector &solution) const { - // First allocate a variable that - // will hold the point - // value. Initialize it with a - // value that is clearly bogus, - // so that if we fail to set it - // to a reasonable value, we will - // note at once. This may not be - // necessary in a function as - // small as this one, since we - // can easily see all possible - // paths of execution here, but - // it proved to be helpful for - // more complex cases, and so we - // employ this strategy here as - // well. + // First allocate a variable that + // will hold the point + // value. Initialize it with a + // value that is clearly bogus, + // so that if we fail to set it + // to a reasonable value, we will + // note at once. This may not be + // necessary in a function as + // small as this one, since we + // can easily see all possible + // paths of execution here, but + // it proved to be helpful for + // more complex cases, and so we + // employ this strategy here as + // well. double point_value = 1e20; - // Then loop over all cells and - // all their vertices, and check - // whether a vertex matches the - // evaluation point. 
If this is - // the case, then extract the - // point value, set a flag that - // we have found the point of - // interest, and exit the loop. + // Then loop over all cells and + // all their vertices, and check + // whether a vertex matches the + // evaluation point. If this is + // the case, then extract the + // point value, set a flag that + // we have found the point of + // interest, and exit the loop. typename DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(), - endc = dof_handler.end(); + cell = dof_handler.begin_active(), + endc = dof_handler.end(); bool evaluation_point_found = false; for (; (cell!=endc) && !evaluation_point_found; ++cell) - for (unsigned int vertex=0; - vertex::vertices_per_cell; - ++vertex) - if (cell->vertex(vertex) == evaluation_point) - { - // In order to extract - // the point value from - // the global solution - // vector, pick that - // component that belongs - // to the vertex of - // interest, and, in case - // the solution is - // vector-valued, take - // the first component of - // it: - point_value = solution(cell->vertex_dof_index(vertex,0)); - // Note that by this we - // have made an - // assumption that is not - // valid always and - // should be documented - // in the class - // declaration if this - // were code for a real - // application rather - // than a tutorial - // program: we assume - // that the finite - // element used for the - // solution we try to - // evaluate actually has - // degrees of freedom - // associated with - // vertices. This, for - // example, does not hold - // for discontinuous - // elements, were the - // support points for the - // shape functions - // happen to be located - // at the vertices, but - // are not associated - // with the vertices but - // rather with the cell - // interior, since - // association with - // vertices would imply - // continuity there. It - // would also not hold - // for edge oriented - // elements, and the - // like. - // - // Ideally, we would - // check this at the - // beginning of the - // function, for example - // by a statement like - // Assert - // (dof_handler.get_fe().dofs_per_vertex - // @> 0, - // ExcNotImplemented()), - // which should make it - // quite clear what is - // going wrong when the - // exception is - // triggered. In this - // case, we omit it - // (which is indeed bad - // style), but knowing - // that that does not - // hurt here, since the - // statement - // cell-@>vertex_dof_index(vertex,0) - // would fail if we asked - // it to give us the DoF - // index of a vertex if - // there were none. - // - // We stress again that - // this restriction on - // the allowed finite - // elements should be - // stated in the class - // documentation. - - // Since we found the - // right point, we now - // set the respective - // flag and exit the - // innermost loop. The - // outer loop will the - // also be terminated due - // to the set flag. - evaluation_point_found = true; - break; - }; - - // Finally, we'd like to make - // sure that we have indeed found - // the evaluation point, since if - // that were not so we could not - // give a reasonable value of the - // solution there and the rest of - // the computations were useless - // anyway. So make sure through - // the AssertThrow macro - // already used in the step-9 - // program that we have indeed - // found this point. 
If this is - // not so, the macro throws an - // exception of the type that is - // given to it as second - // argument, but compared to a - // straightforward throw - // statement, it fills the - // exception object with a set of - // additional information, for - // example the source file and - // line number where the - // exception was generated, and - // the condition that failed. If - // you have a catch clause in - // your main function (as this - // program has), you will catch - // all exceptions that are not - // caught somewhere in between - // and thus already handled, and - // this additional information - // will help you find out what - // happened and where it went - // wrong. + for (unsigned int vertex=0; + vertex::vertices_per_cell; + ++vertex) + if (cell->vertex(vertex) == evaluation_point) + { + // In order to extract + // the point value from + // the global solution + // vector, pick that + // component that belongs + // to the vertex of + // interest, and, in case + // the solution is + // vector-valued, take + // the first component of + // it: + point_value = solution(cell->vertex_dof_index(vertex,0)); + // Note that by this we + // have made an + // assumption that is not + // valid always and + // should be documented + // in the class + // declaration if this + // were code for a real + // application rather + // than a tutorial + // program: we assume + // that the finite + // element used for the + // solution we try to + // evaluate actually has + // degrees of freedom + // associated with + // vertices. This, for + // example, does not hold + // for discontinuous + // elements, were the + // support points for the + // shape functions + // happen to be located + // at the vertices, but + // are not associated + // with the vertices but + // rather with the cell + // interior, since + // association with + // vertices would imply + // continuity there. It + // would also not hold + // for edge oriented + // elements, and the + // like. + // + // Ideally, we would + // check this at the + // beginning of the + // function, for example + // by a statement like + // Assert + // (dof_handler.get_fe().dofs_per_vertex + // @> 0, + // ExcNotImplemented()), + // which should make it + // quite clear what is + // going wrong when the + // exception is + // triggered. In this + // case, we omit it + // (which is indeed bad + // style), but knowing + // that that does not + // hurt here, since the + // statement + // cell-@>vertex_dof_index(vertex,0) + // would fail if we asked + // it to give us the DoF + // index of a vertex if + // there were none. + // + // We stress again that + // this restriction on + // the allowed finite + // elements should be + // stated in the class + // documentation. + + // Since we found the + // right point, we now + // set the respective + // flag and exit the + // innermost loop. The + // outer loop will the + // also be terminated due + // to the set flag. + evaluation_point_found = true; + break; + }; + + // Finally, we'd like to make + // sure that we have indeed found + // the evaluation point, since if + // that were not so we could not + // give a reasonable value of the + // solution there and the rest of + // the computations were useless + // anyway. So make sure through + // the AssertThrow macro + // already used in the step-9 + // program that we have indeed + // found this point. 
If this is + // not so, the macro throws an + // exception of the type that is + // given to it as second + // argument, but compared to a + // straightforward throw + // statement, it fills the + // exception object with a set of + // additional information, for + // example the source file and + // line number where the + // exception was generated, and + // the condition that failed. If + // you have a catch clause in + // your main function (as this + // program has), you will catch + // all exceptions that are not + // caught somewhere in between + // and thus already handled, and + // this additional information + // will help you find out what + // happened and where it went + // wrong. AssertThrow (evaluation_point_found, - ExcEvaluationPointNotFound(evaluation_point)); - // Note that we have used the - // Assert macro in other - // example programs as well. It - // differed from the - // AssertThrow macro used - // here in that it simply aborts - // the program, rather than - // throwing an exception, and - // that it did so only in debug - // mode. It was the right macro - // to use to check about the size - // of vectors passed as arguments - // to functions, and the like. - // - // However, here the situation is - // different: whether we find the - // evaluation point or not may - // change from refinement to - // refinement (for example, if - // the four cells around point - // are coarsened away, then the - // point may vanish after - // refinement and - // coarsening). This is something - // that cannot be predicted from - // a few number of runs of the - // program in debug mode, but - // should be checked always, also - // in production runs. Thus the - // use of the AssertThrow - // macro here. - - // Now, if we are sure that we - // have found the evaluation - // point, we can add the results - // into the table of results: + ExcEvaluationPointNotFound(evaluation_point)); + // Note that we have used the + // Assert macro in other + // example programs as well. It + // differed from the + // AssertThrow macro used + // here in that it simply aborts + // the program, rather than + // throwing an exception, and + // that it did so only in debug + // mode. It was the right macro + // to use to check about the size + // of vectors passed as arguments + // to functions, and the like. + // + // However, here the situation is + // different: whether we find the + // evaluation point or not may + // change from refinement to + // refinement (for example, if + // the four cells around point + // are coarsened away, then the + // point may vanish after + // refinement and + // coarsening). This is something + // that cannot be predicted from + // a few number of runs of the + // program in debug mode, but + // should be checked always, also + // in production runs. Thus the + // use of the AssertThrow + // macro here. + + // Now, if we are sure that we + // have found the evaluation + // point, we can add the results + // into the table of results: results_table.add_value ("DoFs", dof_handler.n_dofs()); results_table.add_value ("u(x_0)", point_value); } @@ -502,171 +502,171 @@ namespace Step13 - // @sect4{Generating output} - - // A different, maybe slightly odd - // kind of evaluation of a - // solution is to output it to a - // file in a graphical - // format. Since in the evaluation - // functions we are given a - // DoFHandler object and the - // solution vector, we have all we - // need to do this, so we can do it - // in an evaluation class. 
The - // reason for actually doing so - // instead of putting it into the - // class that computed the solution - // is that this way we have more - // flexibility: if we choose to - // only output certain aspects of - // it, or not output it at all. In - // any case, we do not need to - // modify the solver class, we just - // have to modify one of the - // modules out of which we build - // this program. This form of - // encapsulation, as above, helps - // us to keep each part of the - // program rather simple as the - // interfaces are kept simple, and - // no access to hidden data is - // possible. - // - // Since this class which generates - // the output is derived from the - // common EvaluationBase base - // class, its main interface is the - // operator() - // function. Furthermore, it has a - // constructor taking a string that - // will be used as the base part of - // the file name to which output - // will be sent (we will augment it - // by a number indicating the - // number of the refinement cycle - // -- the base class has this - // information at hand --, and a - // suffix), and the constructor - // also takes a value that - // indicates which format is - // requested, i.e. for which - // graphics program we shall - // generate output (from this we - // will then also generate the - // suffix of the filename to which - // we write). - // - // Regarding the output format, the - // DataOutInterface class - // (which is a base class of - // DataOut through which we - // will access its fields) provides - // an enumeration field - // OutputFormat, which lists - // names for all supported output - // formats. At the time of writing - // of this program, the supported - // graphics formats are represented - // by the enum values ucd, - // gnuplot, povray, - // eps, gmv, tecplot, - // tecplot_binary, dx, and - // vtk, but this list will - // certainly grow over time. Now, - // within various functions of that - // base class, you can use values - // of this type to get information - // about these graphics formats - // (for example the default suffix - // used for files of each format), - // and you can call a generic - // write function, which then - // branches to the - // write_gnuplot, - // write_ucd, etc functions - // which we have used in previous - // examples already, based on the - // value of a second argument given - // to it denoting the required - // output format. This mechanism - // makes it simple to write an - // extensible program that can - // decide which output format to - // use at runtime, and it also - // makes it rather simple to write - // the program in a way such that - // it takes advantage of newly - // implemented output formats, - // without the need to change the - // application program. - // - // Of these two fields, the base - // name and the output format - // descriptor, the constructor - // takes values and stores them for - // later use by the actual - // evaluation function. + // @sect4{Generating output} + + // A different, maybe slightly odd + // kind of evaluation of a + // solution is to output it to a + // file in a graphical + // format. Since in the evaluation + // functions we are given a + // DoFHandler object and the + // solution vector, we have all we + // need to do this, so we can do it + // in an evaluation class. 
The + // reason for actually doing so + // instead of putting it into the + // class that computed the solution + // is that this way we have more + // flexibility: if we choose to + // only output certain aspects of + // it, or not output it at all. In + // any case, we do not need to + // modify the solver class, we just + // have to modify one of the + // modules out of which we build + // this program. This form of + // encapsulation, as above, helps + // us to keep each part of the + // program rather simple as the + // interfaces are kept simple, and + // no access to hidden data is + // possible. + // + // Since this class which generates + // the output is derived from the + // common EvaluationBase base + // class, its main interface is the + // operator() + // function. Furthermore, it has a + // constructor taking a string that + // will be used as the base part of + // the file name to which output + // will be sent (we will augment it + // by a number indicating the + // number of the refinement cycle + // -- the base class has this + // information at hand --, and a + // suffix), and the constructor + // also takes a value that + // indicates which format is + // requested, i.e. for which + // graphics program we shall + // generate output (from this we + // will then also generate the + // suffix of the filename to which + // we write). + // + // Regarding the output format, the + // DataOutInterface class + // (which is a base class of + // DataOut through which we + // will access its fields) provides + // an enumeration field + // OutputFormat, which lists + // names for all supported output + // formats. At the time of writing + // of this program, the supported + // graphics formats are represented + // by the enum values ucd, + // gnuplot, povray, + // eps, gmv, tecplot, + // tecplot_binary, dx, and + // vtk, but this list will + // certainly grow over time. Now, + // within various functions of that + // base class, you can use values + // of this type to get information + // about these graphics formats + // (for example the default suffix + // used for files of each format), + // and you can call a generic + // write function, which then + // branches to the + // write_gnuplot, + // write_ucd, etc functions + // which we have used in previous + // examples already, based on the + // value of a second argument given + // to it denoting the required + // output format. This mechanism + // makes it simple to write an + // extensible program that can + // decide which output format to + // use at runtime, and it also + // makes it rather simple to write + // the program in a way such that + // it takes advantage of newly + // implemented output formats, + // without the need to change the + // application program. + // + // Of these two fields, the base + // name and the output format + // descriptor, the constructor + // takes values and stores them for + // later use by the actual + // evaluation function. 
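  // The run-time format selection mentioned above can be sketched in a
  // few lines. The helper below is only an illustration: its name and
  // the string-to-format mapping are invented, it relies on the
  // fstream, string and data_out headers that this program already
  // includes, it assumes that attach_dof_handler, add_data_vector and
  // build_patches have already been called on the DataOut object, and
  // it uses only the default_suffix and write members discussed above:
template <int dim>
void write_in_requested_format (DataOut<dim>      &data_out,
                                const std::string &base_name,
                                const std::string &format_name)
{
  // map a format name given, for example, on the command line to the
  // corresponding OutputFormat value; default to gnuplot
  typename DataOut<dim>::OutputFormat format = DataOut<dim>::gnuplot;
  if (format_name == "eps")
    format = DataOut<dim>::eps;
  else if (format_name == "vtk")
    format = DataOut<dim>::vtk;

  // the generic write function then branches to the format-specific
  // output routines, and default_suffix provides the matching ending
  const std::string filename = base_name + data_out.default_suffix (format);
  std::ofstream out (filename.c_str());
  data_out.write (out, format);
}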
template class SolutionOutput : public EvaluationBase { public: - SolutionOutput (const std::string &output_name_base, - const typename DataOut::OutputFormat output_format); + SolutionOutput (const std::string &output_name_base, + const typename DataOut::OutputFormat output_format); - virtual void operator () (const DoFHandler &dof_handler, - const Vector &solution) const; + virtual void operator () (const DoFHandler &dof_handler, + const Vector &solution) const; private: - const std::string output_name_base; - const typename DataOut::OutputFormat output_format; + const std::string output_name_base; + const typename DataOut::OutputFormat output_format; }; template SolutionOutput:: SolutionOutput (const std::string &output_name_base, - const typename DataOut::OutputFormat output_format) - : - output_name_base (output_name_base), - output_format (output_format) + const typename DataOut::OutputFormat output_format) + : + output_name_base (output_name_base), + output_format (output_format) {} - // After the description above, the - // function generating the actual - // output is now relatively - // straightforward. The only - // particularly interesting feature - // over previous example programs - // is the use of the - // DataOut::default_suffix - // function, returning the usual - // suffix for files of a given - // format (e.g. ".eps" for - // encapsulated postscript files, - // ".gnuplot" for Gnuplot files), - // and of the generic - // DataOut::write function with - // a second argument, which - // branches to the actual output - // functions for the different - // graphics formats, based on the - // value of the format descriptor - // passed as second argument. - // - // Also note that we have to prefix - // this-@> to access a member - // variable of the template - // dependent base class. The reason - // here, and further down in the - // program is the same as the one - // described in the step-7 example - // program (look for two-stage - // name lookup there). + // After the description above, the + // function generating the actual + // output is now relatively + // straightforward. The only + // particularly interesting feature + // over previous example programs + // is the use of the + // DataOut::default_suffix + // function, returning the usual + // suffix for files of a given + // format (e.g. ".eps" for + // encapsulated postscript files, + // ".gnuplot" for Gnuplot files), + // and of the generic + // DataOut::write function with + // a second argument, which + // branches to the actual output + // functions for the different + // graphics formats, based on the + // value of the format descriptor + // passed as second argument. + // + // Also note that we have to prefix + // this-@> to access a member + // variable of the template + // dependent base class. The reason + // here, and further down in the + // program is the same as the one + // described in the step-7 example + // program (look for two-stage + // name lookup there). 
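  // The need for the this-@> prefix mentioned above is a consequence
  // of two-stage name lookup for templates and is easy to reproduce
  // outside of deal.II. The following self-contained snippet (all
  // names invented for illustration) fails to compile with a
  // standards-conforming compiler if the marked line omits this->,
  // because members of a base class that depends on a template
  // parameter are not found during the first lookup phase:
template <int dim>
struct BaseWithData
{
  unsigned int refinement_cycle;
};

template <int dim>
struct Derived : public BaseWithData<dim>
{
  unsigned int current_cycle () const
  {
    // without 'this->', the name refinement_cycle would not be found
    return this->refinement_cycle;
  }
};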
template void SolutionOutput::operator () (const DoFHandler &dof_handler, - const Vector &solution) const + const Vector &solution) const { DataOut data_out; data_out.attach_dof_handler (dof_handler); @@ -675,9 +675,9 @@ namespace Step13 std::ostringstream filename; filename << output_name_base << "-" - << this->refinement_cycle - << data_out.default_suffix (output_format) - << std::ends; + << this->refinement_cycle + << data_out.default_suffix (output_format) + << std::ends; std::ofstream out (filename.str().c_str()); data_out.write (out, output_format); @@ -685,184 +685,184 @@ namespace Step13 - // @sect4{Other evaluations} + // @sect4{Other evaluations} - // In practical applications, one - // would add here a list of other - // possible evaluation classes, - // representing quantities that one - // may be interested in. For this - // example, that much shall be - // sufficient, so we close the - // namespace. + // In practical applications, one + // would add here a list of other + // possible evaluation classes, + // representing quantities that one + // may be interested in. For this + // example, that much shall be + // sufficient, so we close the + // namespace. } - // @sect3{The Laplace solver classes} - - // After defining what we want to - // know of the solution, we should - // now care how to get at it. We will - // pack everything we need into a - // namespace of its own, for much the - // same reasons as for the - // evaluations above. - // - // Since we have discussed Laplace - // solvers already in considerable - // detail in previous examples, there - // is not much new stuff - // following. Rather, we have to a - // great extent cannibalized previous - // examples and put them, in slightly - // different form, into this example - // program. We will therefore mostly - // be concerned with discussing the - // differences to previous examples. - // - // Basically, as already said in the - // introduction, the lack of new - // stuff in this example is - // deliberate, as it is more to - // demonstrate software design - // practices, rather than - // mathematics. The emphasis in - // explanations below will therefore - // be more on the actual - // implementation. + // @sect3{The Laplace solver classes} + + // After defining what we want to + // know of the solution, we should + // now care how to get at it. We will + // pack everything we need into a + // namespace of its own, for much the + // same reasons as for the + // evaluations above. + // + // Since we have discussed Laplace + // solvers already in considerable + // detail in previous examples, there + // is not much new stuff + // following. Rather, we have to a + // great extent cannibalized previous + // examples and put them, in slightly + // different form, into this example + // program. We will therefore mostly + // be concerned with discussing the + // differences to previous examples. + // + // Basically, as already said in the + // introduction, the lack of new + // stuff in this example is + // deliberate, as it is more to + // demonstrate software design + // practices, rather than + // mathematics. The emphasis in + // explanations below will therefore + // be more on the actual + // implementation. namespace LaplaceSolver { - // @sect4{An abstract base class} - - // In defining a Laplace solver, we - // start out by declaring an - // abstract base class, that has no - // functionality itself except for - // taking and storing a pointer to - // the triangulation to be used - // later. 
- // - // This base class is very general, - // and could as well be used for - // any other stationary problem. It - // provides declarations of - // functions that shall, in derived - // classes, solve a problem, - // postprocess the solution with a - // list of evaluation objects, and - // refine the grid, - // respectively. None of these - // functions actually does - // something itself in the base - // class. - // - // Due to the lack of actual - // functionality, the programming - // style of declaring very abstract - // base classes reminds of the - // style used in Smalltalk or Java - // programs, where all classes are - // derived from entirely abstract - // classes Object, even number - // representations. The author - // admits that he does not - // particularly like the use of - // such a style in C++, as it puts - // style over reason. Furthermore, - // it promotes the use of virtual - // functions for everything (for - // example, in Java, all functions - // are virtual per se), which, - // however, has proven to be rather - // inefficient in many applications - // where functions are often only - // accessing data, not doing - // computations, and therefore - // quickly return; the overhead of - // virtual functions can then be - // significant. The opinion of the - // author is to have abstract base - // classes wherever at least some - // part of the code of actual - // implementations can be shared - // and thus separated into the base - // class. - // - // Besides all these theoretical - // questions, we here have a good - // reason, which will become - // clearer to the reader - // below. Basically, we want to be - // able to have a family of - // different Laplace solvers that - // differ so much that no larger - // common subset of functionality - // could be found. We therefore - // just declare such an abstract - // base class, taking a pointer to - // a triangulation in the - // constructor and storing it - // henceforth. Since this - // triangulation will be used - // throughout all computations, we - // have to make sure that the - // triangulation exists until the - // destructor exits. We do this by - // keeping a SmartPointer to - // this triangulation, which uses a - // counter in the triangulation - // class to denote the fact that - // there is still an object out - // there using this triangulation, - // thus leading to an abort in case - // the triangulation is attempted - // to be destructed while this - // object still uses it. - // - // Note that while the pointer - // itself is declared constant - // (i.e. throughout the lifetime of - // this object, the pointer points - // to the same object), it is not - // declared as a pointer to a - // constant triangulation. In fact, - // by this we allow that derived - // classes refine or coarsen the - // triangulation within the - // refine_grid function. - // - // Finally, we have a function - // n_dofs is only a tool for - // the driver functions to decide - // whether we want to go on with - // mesh refinement or not. It - // returns the number of degrees of - // freedom the present simulation - // has. + // @sect4{An abstract base class} + + // In defining a Laplace solver, we + // start out by declaring an + // abstract base class, that has no + // functionality itself except for + // taking and storing a pointer to + // the triangulation to be used + // later. + // + // This base class is very general, + // and could as well be used for + // any other stationary problem. 
It + // provides declarations of + // functions that shall, in derived + // classes, solve a problem, + // postprocess the solution with a + // list of evaluation objects, and + // refine the grid, + // respectively. None of these + // functions actually does + // something itself in the base + // class. + // + // Due to the lack of actual + // functionality, the programming + // style of declaring very abstract + // base classes reminds of the + // style used in Smalltalk or Java + // programs, where all classes are + // derived from entirely abstract + // classes Object, even number + // representations. The author + // admits that he does not + // particularly like the use of + // such a style in C++, as it puts + // style over reason. Furthermore, + // it promotes the use of virtual + // functions for everything (for + // example, in Java, all functions + // are virtual per se), which, + // however, has proven to be rather + // inefficient in many applications + // where functions are often only + // accessing data, not doing + // computations, and therefore + // quickly return; the overhead of + // virtual functions can then be + // significant. The opinion of the + // author is to have abstract base + // classes wherever at least some + // part of the code of actual + // implementations can be shared + // and thus separated into the base + // class. + // + // Besides all these theoretical + // questions, we here have a good + // reason, which will become + // clearer to the reader + // below. Basically, we want to be + // able to have a family of + // different Laplace solvers that + // differ so much that no larger + // common subset of functionality + // could be found. We therefore + // just declare such an abstract + // base class, taking a pointer to + // a triangulation in the + // constructor and storing it + // henceforth. Since this + // triangulation will be used + // throughout all computations, we + // have to make sure that the + // triangulation exists until the + // destructor exits. We do this by + // keeping a SmartPointer to + // this triangulation, which uses a + // counter in the triangulation + // class to denote the fact that + // there is still an object out + // there using this triangulation, + // thus leading to an abort in case + // the triangulation is attempted + // to be destructed while this + // object still uses it. + // + // Note that while the pointer + // itself is declared constant + // (i.e. throughout the lifetime of + // this object, the pointer points + // to the same object), it is not + // declared as a pointer to a + // constant triangulation. In fact, + // by this we allow that derived + // classes refine or coarsen the + // triangulation within the + // refine_grid function. + // + // Finally, we have a function + // n_dofs is only a tool for + // the driver functions to decide + // whether we want to go on with + // mesh refinement or not. It + // returns the number of degrees of + // freedom the present simulation + // has. 
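  // The use-counting idea behind the SmartPointer just described can
  // be modelled with a stripped-down toy, entirely unrelated to the
  // real deal.II classes (whose Subscriptor machinery is thread-safe
  // and far more careful). The two toy classes below only show the
  // principle of a counter that turns a premature destruction into a
  // hard error instead of a dangling pointer:
#include <cstdlib>
#include <iostream>

class ToySubscriptor
{
  public:
    ToySubscriptor () : counter (0) {}
    ~ToySubscriptor ()
    {
      if (counter != 0)
        {
          std::cerr << "Object destroyed while still in use!" << std::endl;
          std::abort ();
        }
    }
    void subscribe ()   { ++counter; }
    void unsubscribe () { --counter; }
  private:
    unsigned int counter;
};

template <typename T>
class ToySmartPointer
{
  public:
    ToySmartPointer (T &t) : ptr (&t) { ptr->subscribe (); }
    ~ToySmartPointer ()               { ptr->unsubscribe (); }
    T * operator-> () const           { return ptr; }
  private:
    T *const ptr;   // the pointer itself is constant, the object is not
};

  // A Triangulation-like class would derive from ToySubscriptor, and a
  // solver would store a ToySmartPointer to it; destroying the guarded
  // object while the solver still exists then aborts loudly instead of
  // leaving a dangling pointer behind.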
template class Base { public: - Base (Triangulation &coarse_grid); - virtual ~Base (); + Base (Triangulation &coarse_grid); + virtual ~Base (); - virtual void solve_problem () = 0; - virtual void postprocess (const Evaluation::EvaluationBase &postprocessor) const = 0; - virtual void refine_grid () = 0; - virtual unsigned int n_dofs () const = 0; + virtual void solve_problem () = 0; + virtual void postprocess (const Evaluation::EvaluationBase &postprocessor) const = 0; + virtual void refine_grid () = 0; + virtual unsigned int n_dofs () const = 0; protected: - const SmartPointer > triangulation; + const SmartPointer > triangulation; }; - // The implementation of the only - // two non-abstract functions is - // then rather boring: + // The implementation of the only + // two non-abstract functions is + // then rather boring: template Base::Base (Triangulation &coarse_grid) - : - triangulation (&coarse_grid) + : + triangulation (&coarse_grid) {} @@ -871,210 +871,210 @@ namespace Step13 {} - // @sect4{A general solver class} - - // Following now the main class - // that implements assembling the - // matrix of the linear system, - // solving it, and calling the - // postprocessor objects on the - // solution. It implements the - // solve_problem and - // postprocess functions - // declared in the base class. It - // does not, however, implement the - // refine_grid method, as mesh - // refinement will be implemented - // in a number of derived classes. - // - // It also declares a new abstract - // virtual function, - // assemble_rhs, that needs to - // be overloaded in subclasses. The - // reason is that we will implement - // two different classes that will - // implement different methods to - // assemble the right hand side - // vector. This function might also - // be interesting in cases where - // the right hand side depends not - // simply on a continuous function, - // but on something else as well, - // for example the solution of - // another discretized problem, - // etc. The latter happens - // frequently in non-linear - // problems. - // - // As we mentioned previously, the - // actual content of this class is - // not new, but a mixture of - // various techniques already used - // in previous examples. We will - // therefore not discuss them in - // detail, but refer the reader to - // these programs. - // - // Basically, in a few words, the - // constructor of this class takes - // pointers to a triangulation, a - // finite element, and a function - // object representing the boundary - // values. These are either passed - // down to the base class's - // constructor, or are stored and - // used to generate a - // DoFHandler object - // later. Since finite elements and - // quadrature formula should match, - // it is also passed a quadrature - // object. - // - // The solve_problem sets up - // the data structures for the - // actual solution, calls the - // functions to assemble the linear - // system, and solves it. - // - // The postprocess function - // finally takes an evaluation - // object and applies it to the - // computed solution. - // - // The n_dofs function finally - // implements the pure virtual - // function of the base class. + // @sect4{A general solver class} + + // Following now the main class + // that implements assembling the + // matrix of the linear system, + // solving it, and calling the + // postprocessor objects on the + // solution. It implements the + // solve_problem and + // postprocess functions + // declared in the base class. 
It + // does not, however, implement the + // refine_grid method, as mesh + // refinement will be implemented + // in a number of derived classes. + // + // It also declares a new abstract + // virtual function, + // assemble_rhs, that needs to + // be overloaded in subclasses. The + // reason is that we will implement + // two different classes that will + // implement different methods to + // assemble the right hand side + // vector. This function might also + // be interesting in cases where + // the right hand side depends not + // simply on a continuous function, + // but on something else as well, + // for example the solution of + // another discretized problem, + // etc. The latter happens + // frequently in non-linear + // problems. + // + // As we mentioned previously, the + // actual content of this class is + // not new, but a mixture of + // various techniques already used + // in previous examples. We will + // therefore not discuss them in + // detail, but refer the reader to + // these programs. + // + // Basically, in a few words, the + // constructor of this class takes + // pointers to a triangulation, a + // finite element, and a function + // object representing the boundary + // values. These are either passed + // down to the base class's + // constructor, or are stored and + // used to generate a + // DoFHandler object + // later. Since finite elements and + // quadrature formula should match, + // it is also passed a quadrature + // object. + // + // The solve_problem sets up + // the data structures for the + // actual solution, calls the + // functions to assemble the linear + // system, and solves it. + // + // The postprocess function + // finally takes an evaluation + // object and applies it to the + // computed solution. + // + // The n_dofs function finally + // implements the pure virtual + // function of the base class. template class Solver : public virtual Base { public: - Solver (Triangulation &triangulation, - const FiniteElement &fe, - const Quadrature &quadrature, - const Function &boundary_values); - virtual - ~Solver (); - - virtual - void - solve_problem (); - - virtual - void - postprocess (const Evaluation::EvaluationBase &postprocessor) const; - - virtual - unsigned int - n_dofs () const; - - // In the protected section of - // this class, we first have a - // number of member variables, - // of which the use should be - // clear from the previous - // examples: + Solver (Triangulation &triangulation, + const FiniteElement &fe, + const Quadrature &quadrature, + const Function &boundary_values); + virtual + ~Solver (); + + virtual + void + solve_problem (); + + virtual + void + postprocess (const Evaluation::EvaluationBase &postprocessor) const; + + virtual + unsigned int + n_dofs () const; + + // In the protected section of + // this class, we first have a + // number of member variables, + // of which the use should be + // clear from the previous + // examples: protected: - const SmartPointer > fe; - const SmartPointer > quadrature; - DoFHandler dof_handler; - Vector solution; - const SmartPointer > boundary_values; - - // Then we declare an abstract - // function that will be used - // to assemble the right hand - // side. 
As explained above, - // there are various cases for - // which this action differs - // strongly in what is - // necessary, so we defer this - // to derived classes: - virtual void assemble_rhs (Vector &rhs) const = 0; - - // Next, in the private - // section, we have a small - // class which represents an - // entire linear system, i.e. a - // matrix, a right hand side, - // and a solution vector, as - // well as the constraints that - // are applied to it, such as - // those due to hanging - // nodes. Its constructor - // initializes the various - // subobjects, and there is a - // function that implements a - // conjugate gradient method as - // solver. + const SmartPointer > fe; + const SmartPointer > quadrature; + DoFHandler dof_handler; + Vector solution; + const SmartPointer > boundary_values; + + // Then we declare an abstract + // function that will be used + // to assemble the right hand + // side. As explained above, + // there are various cases for + // which this action differs + // strongly in what is + // necessary, so we defer this + // to derived classes: + virtual void assemble_rhs (Vector &rhs) const = 0; + + // Next, in the private + // section, we have a small + // class which represents an + // entire linear system, i.e. a + // matrix, a right hand side, + // and a solution vector, as + // well as the constraints that + // are applied to it, such as + // those due to hanging + // nodes. Its constructor + // initializes the various + // subobjects, and there is a + // function that implements a + // conjugate gradient method as + // solver. private: - struct LinearSystem - { - LinearSystem (const DoFHandler &dof_handler); - - void solve (Vector &solution) const; - - ConstraintMatrix hanging_node_constraints; - SparsityPattern sparsity_pattern; - SparseMatrix matrix; - Vector rhs; - }; - - // Finally, there is a pair of - // functions which will be used - // to assemble the actual - // system matrix. It calls the - // virtual function assembling - // the right hand side, and - // installs a number threads - // each running the second - // function which assembles - // part of the system - // matrix. The mechanism for - // doing so is the same as in - // the step-9 example program. - void - assemble_linear_system (LinearSystem &linear_system); - - void - assemble_matrix (LinearSystem &linear_system, - const typename DoFHandler::active_cell_iterator &begin_cell, - const typename DoFHandler::active_cell_iterator &end_cell, - Threads::ThreadMutex &mutex) const; + struct LinearSystem + { + LinearSystem (const DoFHandler &dof_handler); + + void solve (Vector &solution) const; + + ConstraintMatrix hanging_node_constraints; + SparsityPattern sparsity_pattern; + SparseMatrix matrix; + Vector rhs; + }; + + // Finally, there is a pair of + // functions which will be used + // to assemble the actual + // system matrix. It calls the + // virtual function assembling + // the right hand side, and + // installs a number threads + // each running the second + // function which assembles + // part of the system + // matrix. The mechanism for + // doing so is the same as in + // the step-9 example program. + void + assemble_linear_system (LinearSystem &linear_system); + + void + assemble_matrix (LinearSystem &linear_system, + const typename DoFHandler::active_cell_iterator &begin_cell, + const typename DoFHandler::active_cell_iterator &end_cell, + Threads::ThreadMutex &mutex) const; }; - // Now here comes the constructor - // of the class. 
It does not do - // much except store pointers to - // the objects given, and generate - // DoFHandler object - // initialized with the given - // pointer to a triangulation. This - // causes the DoF handler to store - // that pointer, but does not - // already generate a finite - // element numbering (we only ask - // for that in the - // solve_problem function). + // Now here comes the constructor + // of the class. It does not do + // much except store pointers to + // the objects given, and generate + // DoFHandler object + // initialized with the given + // pointer to a triangulation. This + // causes the DoF handler to store + // that pointer, but does not + // already generate a finite + // element numbering (we only ask + // for that in the + // solve_problem function). template Solver::Solver (Triangulation &triangulation, - const FiniteElement &fe, - const Quadrature &quadrature, - const Function &boundary_values) - : - Base (triangulation), - fe (&fe), - quadrature (&quadrature), - dof_handler (triangulation), - boundary_values (&boundary_values) + const FiniteElement &fe, + const Quadrature &quadrature, + const Function &boundary_values) + : + Base (triangulation), + fe (&fe), + quadrature (&quadrature), + dof_handler (triangulation), + boundary_values (&boundary_values) {} - // The destructor is simple, it - // only clears the information - // stored in the DoF handler object - // to release the memory. + // The destructor is simple, it + // only clears the information + // stored in the DoF handler object + // to release the memory. template Solver::~Solver () { @@ -1082,19 +1082,19 @@ namespace Step13 } - // The next function is the one - // which delegates the main work in - // solving the problem: it sets up - // the DoF handler object with the - // finite element given to the - // constructor of this object, the - // creates an object that denotes - // the linear system (i.e. the - // matrix, the right hand side - // vector, and the solution - // vector), calls the function to - // assemble it, and finally solves - // it: + // The next function is the one + // which delegates the main work in + // solving the problem: it sets up + // the DoF handler object with the + // finite element given to the + // constructor of this object, the + // creates an object that denotes + // the linear system (i.e. the + // matrix, the right hand side + // vector, and the solution + // vector), calls the function to + // assemble it, and finally solves + // it: template void Solver::solve_problem () @@ -1108,14 +1108,14 @@ namespace Step13 } - // As stated above, the - // postprocess function takes - // an evaluation object, and - // applies it to the computed - // solution. This function may be - // called multiply, once for each - // evaluation of the solution which - // the user required. + // As stated above, the + // postprocess function takes + // an evaluation object, and + // applies it to the computed + // solution. This function may be + // called multiply, once for each + // evaluation of the solution which + // the user required. template void Solver:: @@ -1125,8 +1125,8 @@ namespace Step13 } - // The n_dofs function should - // be self-explanatory: + // The n_dofs function should + // be self-explanatory: template unsigned int Solver::n_dofs () const @@ -1135,126 +1135,126 @@ namespace Step13 } - // The following function assembles matrix - // and right hand side of the linear system - // to be solved in each step. 
It goes along - // the same lines as used in previous - // examples, so we explain it only - // briefly. Note that we do a number of - // things in parallel, a process described - // in more detail in the @ref threads - // module. + // The following function assembles matrix + // and right hand side of the linear system + // to be solved in each step. It goes along + // the same lines as used in previous + // examples, so we explain it only + // briefly. Note that we do a number of + // things in parallel, a process described + // in more detail in the @ref threads + // module. template void Solver::assemble_linear_system (LinearSystem &linear_system) { - // First define a convenience - // abbreviation for these lengthy - // iterator names... + // First define a convenience + // abbreviation for these lengthy + // iterator names... typedef - typename DoFHandler::active_cell_iterator - active_cell_iterator; - - // ... and use it to split up the - // set of cells into a number of - // pieces of equal size. The - // number of blocks is set to the - // default number of threads to - // be used, which by default is - // set to the number of - // processors found in your - // computer at startup of the - // program: + typename DoFHandler::active_cell_iterator + active_cell_iterator; + + // ... and use it to split up the + // set of cells into a number of + // pieces of equal size. The + // number of blocks is set to the + // default number of threads to + // be used, which by default is + // set to the number of + // processors found in your + // computer at startup of the + // program: const unsigned int n_threads = multithread_info.n_default_threads; std::vector > - thread_ranges - = Threads::split_range (dof_handler.begin_active (), - dof_handler.end (), - n_threads); - - // These ranges are then assigned - // to a number of threads which - // we create next. Each will - // assemble the local cell - // matrices on the assigned - // cells, and fill the matrix - // object with it. Since there is - // need for synchronization when - // filling the same matrix from - // different threads, we need a - // mutex here: + thread_ranges + = Threads::split_range (dof_handler.begin_active (), + dof_handler.end (), + n_threads); + + // These ranges are then assigned + // to a number of threads which + // we create next. Each will + // assemble the local cell + // matrices on the assigned + // cells, and fill the matrix + // object with it. 
Since there is + // need for synchronization when + // filling the same matrix from + // different threads, we need a + // mutex here: Threads::ThreadMutex mutex; Threads::ThreadGroup<> threads; for (unsigned int thread=0; thread::assemble_matrix, - *this, - linear_system, - thread_ranges[thread].first, - thread_ranges[thread].second, - mutex); - - // While the new threads - // assemble the system matrix, we - // can already compute the right - // hand side vector in the main - // thread, and condense away the - // constraints due to hanging - // nodes: + threads += Threads::new_thread (&Solver::assemble_matrix, + *this, + linear_system, + thread_ranges[thread].first, + thread_ranges[thread].second, + mutex); + + // While the new threads + // assemble the system matrix, we + // can already compute the right + // hand side vector in the main + // thread, and condense away the + // constraints due to hanging + // nodes: assemble_rhs (linear_system.rhs); linear_system.hanging_node_constraints.condense (linear_system.rhs); - // And while we're already - // computing things in parallel, - // interpolating boundary values - // is one more thing that can be - // done independently, so we do - // it here: + // And while we're already + // computing things in parallel, + // interpolating boundary values + // is one more thing that can be + // done independently, so we do + // it here: std::map boundary_value_map; VectorTools::interpolate_boundary_values (dof_handler, - 0, - *boundary_values, - boundary_value_map); + 0, + *boundary_values, + boundary_value_map); - // If this is done, wait for the - // matrix assembling threads, and - // condense the constraints in - // the matrix as well: + // If this is done, wait for the + // matrix assembling threads, and + // condense the constraints in + // the matrix as well: threads.join_all (); linear_system.hanging_node_constraints.condense (linear_system.matrix); - // Now that we have the linear - // system, we can also treat - // boundary values, which need to - // be eliminated from both the - // matrix and the right hand - // side: + // Now that we have the linear + // system, we can also treat + // boundary values, which need to + // be eliminated from both the + // matrix and the right hand + // side: MatrixTools::apply_boundary_values (boundary_value_map, - linear_system.matrix, - solution, - linear_system.rhs); + linear_system.matrix, + solution, + linear_system.rhs); } - // The second of this pair of - // functions takes a range of cell - // iterators, and assembles the - // system matrix on this part of - // the domain. Since it's actions - // have all been explained in - // previous programs, we do not - // comment on it any more, except - // for one pointe below. + // The second of this pair of + // functions takes a range of cell + // iterators, and assembles the + // system matrix on this part of + // the domain. Since it's actions + // have all been explained in + // previous programs, we do not + // comment on it any more, except + // for one pointe below. 
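The parallel assembly strategy described in the comments above (split the active cells into ranges of equal size, let each range be assembled by its own thread, and protect the shared matrix with a mutex while the main thread takes care of the right hand side) can be illustrated outside of deal.II. What follows is a minimal, self-contained sketch of that pattern using only the standard library; the names and the per-range "work" are made up and merely stand in for cells and local contributions, and std::lock_guard plays the role of the ThreadMutex::ScopedLock object discussed a little further down.

#include <cstddef>
#include <mutex>
#include <numeric>
#include <thread>
#include <vector>

int main()
{
  std::vector<double> cells(1000, 1.0); // stand-in for the active cells
  double global_matrix = 0;             // stand-in for the shared matrix
  std::mutex mutex;

  const unsigned int n_threads = 4;
  const std::size_t  chunk     = cells.size() / n_threads;

  std::vector<std::thread> threads;
  for (unsigned int t = 0; t < n_threads; ++t)
    {
      const std::size_t begin = t * chunk;
      const std::size_t end   = (t + 1 == n_threads ? cells.size() : begin + chunk);

      threads.emplace_back([&, begin, end]() {
        // each thread does its local work without holding the lock ...
        const double local_contribution =
          std::accumulate(cells.begin() + begin, cells.begin() + end, 0.0);

        // ... and only the copy into the shared object is locked; the lock
        // is released automatically when 'lock' goes out of scope
        std::lock_guard<std::mutex> lock(mutex);
        global_matrix += local_contribution;
      });
    }

  // meanwhile the main thread could assemble the right hand side here

  for (std::thread &thread : threads)
    thread.join(); // the analogue of threads.join_all()

  return (global_matrix == 1000.0 ? 0 : 1);
}

The same division of labor, unlocked local work followed by a short locked copy into the shared object, is what the surrounding deal.II code implements with its own thread and mutex wrappers.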
template void Solver::assemble_matrix (LinearSystem &linear_system, - const typename DoFHandler::active_cell_iterator &begin_cell, - const typename DoFHandler::active_cell_iterator &end_cell, - Threads::ThreadMutex &mutex) const + const typename DoFHandler::active_cell_iterator &begin_cell, + const typename DoFHandler::active_cell_iterator &end_cell, + Threads::ThreadMutex &mutex) const { FEValues fe_values (*fe, *quadrature, - update_gradients | update_JxW_values); + update_gradients | update_JxW_values); const unsigned int dofs_per_cell = fe->dofs_per_cell; const unsigned int n_q_points = quadrature->size(); @@ -1264,181 +1264,181 @@ namespace Step13 std::vector local_dof_indices (dofs_per_cell); for (typename DoFHandler::active_cell_iterator cell=begin_cell; - cell!=end_cell; ++cell) - { - cell_matrix = 0; - - fe_values.reinit (cell); - - for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); - - // In the step-9 program, we - // have shown that you have - // to use the mutex to lock - // the matrix when copying - // the elements from the - // local to the global - // matrix. This was necessary - // to avoid that two threads - // access it at the same - // time, eventually - // overwriting their - // respective - // work. Previously, we have - // used the acquire and - // release functions of - // the mutex to lock and - // unlock the mutex, - // respectively. While this - // is valid, there is one - // possible catch: if between - // the locking operation and - // the unlocking operation an - // exception is thrown, the - // mutex remains in the - // locked state, and in some - // cases this might lead to - // deadlocks. A similar - // situation arises, when one - // changes the code to have a - // return statement somewhere - // in the middle of the - // locked block, and forgets - // that before we call - // return, we also have - // to unlock the mutex. This - // all is not be a problem - // here, but we want to show - // the general technique to - // cope with these problems - // nevertheless: have an - // object that upon - // initialization (i.e. in - // its constructor) locks the - // mutex, and on running the - // destructor unlocks it - // again. This is called the - // scoped lock pattern - // (apparently invented by - // Doug Schmidt originally), - // and it works because - // destructors of local - // objects are also run when - // we exit the function - // either through a - // return statement, or - // when an exception is - // raised. Thus, it is - // guaranteed that the mutex - // will always be unlocked - // when we exit this part of - // the program, whether the - // operation completed - // successfully or not, - // whether the exit path was - // something we implemented - // willfully or whether the - // function was exited by an - // exception that we did not - // forsee. - // - // deal.II implements the - // scoped locking pattern in - // the - // ThreadMutex::ScopedLock - // class: it takes the mutex - // in the constructor and - // locks it; in its - // destructor, it unlocks it - // again. So here is how it - // is used: - Threads::ThreadMutex::ScopedLock lock (mutex); - for (unsigned int i=0; ilock variable goes out - // of existence and its - // destructor the mutex is - // unlocked. 
- }; + cell!=end_cell; ++cell) + { + cell_matrix = 0; + + fe_values.reinit (cell); + + for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); + + // In the step-9 program, we + // have shown that you have + // to use the mutex to lock + // the matrix when copying + // the elements from the + // local to the global + // matrix. This was necessary + // to avoid that two threads + // access it at the same + // time, eventually + // overwriting their + // respective + // work. Previously, we have + // used the acquire and + // release functions of + // the mutex to lock and + // unlock the mutex, + // respectively. While this + // is valid, there is one + // possible catch: if between + // the locking operation and + // the unlocking operation an + // exception is thrown, the + // mutex remains in the + // locked state, and in some + // cases this might lead to + // deadlocks. A similar + // situation arises, when one + // changes the code to have a + // return statement somewhere + // in the middle of the + // locked block, and forgets + // that before we call + // return, we also have + // to unlock the mutex. This + // all is not be a problem + // here, but we want to show + // the general technique to + // cope with these problems + // nevertheless: have an + // object that upon + // initialization (i.e. in + // its constructor) locks the + // mutex, and on running the + // destructor unlocks it + // again. This is called the + // scoped lock pattern + // (apparently invented by + // Doug Schmidt originally), + // and it works because + // destructors of local + // objects are also run when + // we exit the function + // either through a + // return statement, or + // when an exception is + // raised. Thus, it is + // guaranteed that the mutex + // will always be unlocked + // when we exit this part of + // the program, whether the + // operation completed + // successfully or not, + // whether the exit path was + // something we implemented + // willfully or whether the + // function was exited by an + // exception that we did not + // forsee. + // + // deal.II implements the + // scoped locking pattern in + // the + // ThreadMutex::ScopedLock + // class: it takes the mutex + // in the constructor and + // locks it; in its + // destructor, it unlocks it + // again. So here is how it + // is used: + Threads::ThreadMutex::ScopedLock lock (mutex); + for (unsigned int i=0; ilock variable goes out + // of existence and its + // destructor the mutex is + // unlocked. + }; } - // Now for the functions that - // implement actions in the linear - // system class. First, the - // constructor initializes all data - // elements to their correct sizes, - // and sets up a number of - // additional data structures, such - // as constraints due to hanging - // nodes. Since setting up the - // hanging nodes and finding out - // about the nonzero elements of - // the matrix is independent, we do - // that in parallel (if the library - // was configured to use - // concurrency, at least; - // otherwise, the actions are - // performed sequentially). Note - // that we start only one thread, - // and do the second action in the - // main thread. Since only one - // thread is generated, we don't - // use the Threads::ThreadGroup - // class here, but rather use the - // one created thread object - // directly to wait for this - // particular thread's exit. 
- // - // Note that taking up the address - // of the - // DoFTools::make_hanging_node_constraints - // function is a little tricky, - // since there are actually three - // of them, one for each supported - // space dimension. Taking - // addresses of overloaded - // functions is somewhat - // complicated in C++, since the - // address-of operator & in - // that case returns more like a - // set of values (the addresses of - // all functions with that name), - // and selecting the right one is - // then the next step. If the - // context dictates which one to - // take (for example by assigning - // to a function pointer of known - // type), then the compiler can do - // that by itself, but if this set - // of pointers shall be given as - // the argument to a function that - // takes a template, the compiler - // could choose all without having - // a preference for one. We - // therefore have to make it clear - // to the compiler which one we - // would like to have; for this, we - // could use a cast, but for more - // clarity, we assign it to a - // temporary mhnc_p (short for - // pointer to - // make_hanging_node_constraints) - // with the right type, and using - // this pointer instead. + // Now for the functions that + // implement actions in the linear + // system class. First, the + // constructor initializes all data + // elements to their correct sizes, + // and sets up a number of + // additional data structures, such + // as constraints due to hanging + // nodes. Since setting up the + // hanging nodes and finding out + // about the nonzero elements of + // the matrix is independent, we do + // that in parallel (if the library + // was configured to use + // concurrency, at least; + // otherwise, the actions are + // performed sequentially). Note + // that we start only one thread, + // and do the second action in the + // main thread. Since only one + // thread is generated, we don't + // use the Threads::ThreadGroup + // class here, but rather use the + // one created thread object + // directly to wait for this + // particular thread's exit. + // + // Note that taking up the address + // of the + // DoFTools::make_hanging_node_constraints + // function is a little tricky, + // since there are actually three + // of them, one for each supported + // space dimension. Taking + // addresses of overloaded + // functions is somewhat + // complicated in C++, since the + // address-of operator & in + // that case returns more like a + // set of values (the addresses of + // all functions with that name), + // and selecting the right one is + // then the next step. If the + // context dictates which one to + // take (for example by assigning + // to a function pointer of known + // type), then the compiler can do + // that by itself, but if this set + // of pointers shall be given as + // the argument to a function that + // takes a template, the compiler + // could choose all without having + // a preference for one. We + // therefore have to make it clear + // to the compiler which one we + // would like to have; for this, we + // could use a cast, but for more + // clarity, we assign it to a + // temporary mhnc_p (short for + // pointer to + // make_hanging_node_constraints) + // with the right type, and using + // this pointer instead. 
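The remark above about taking the address of an overloaded function is easy to reproduce in isolation. In the sketch below (all names are invented and do not belong to deal.II), passing the bare name of an overloaded function to a function template fails because the compiler cannot decide which overload is meant, whereas assigning the name to a function pointer of known type, just like the mhnc_p variable in the constructor that follows, selects one overload unambiguously.

#include <iostream>

// two overloads, standing in for the dimension-dependent versions of
// DoFTools::make_hanging_node_constraints
void make_constraints(int x)    { std::cout << "int overload: "    << x << '\n'; }
void make_constraints(double x) { std::cout << "double overload: " << x << '\n'; }

// a function template standing in for Threads::new_thread: it cannot pick
// an overload of 'make_constraints' on its own
template <typename F, typename Arg>
void run(F f, Arg arg)
{
  f(arg);
}

int main()
{
  // run(&make_constraints, 1);          // error: F cannot be deduced from an overload set

  void (*mc_p)(int) = &make_constraints; // the assignment picks the int overload
  run(mc_p, 1);                          // now F is a concrete pointer type
  return 0;
}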
template Solver::LinearSystem:: LinearSystem (const DoFHandler &dof_handler) @@ -1446,32 +1446,32 @@ namespace Step13 hanging_node_constraints.clear (); void (*mhnc_p) (const DoFHandler &, - ConstraintMatrix &) - = &DoFTools::make_hanging_node_constraints; + ConstraintMatrix &) + = &DoFTools::make_hanging_node_constraints; Threads::Thread<> - mhnc_thread = Threads::new_thread (mhnc_p, - dof_handler, - hanging_node_constraints); + mhnc_thread = Threads::new_thread (mhnc_p, + dof_handler, + hanging_node_constraints); sparsity_pattern.reinit (dof_handler.n_dofs(), - dof_handler.n_dofs(), - dof_handler.max_couplings_between_dofs()); + dof_handler.n_dofs(), + dof_handler.max_couplings_between_dofs()); DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); - // Wait until the - // hanging_node_constraints - // object is fully set up, then - // close it and use it to - // condense the sparsity pattern: + // Wait until the + // hanging_node_constraints + // object is fully set up, then + // close it and use it to + // condense the sparsity pattern: mhnc_thread.join (); hanging_node_constraints.close (); hanging_node_constraints.condense (sparsity_pattern); - // Finally, close the sparsity - // pattern, initialize the - // matrix, and set the right hand - // side vector to the right size. + // Finally, close the sparsity + // pattern, initialize the + // matrix, and set the right hand + // side vector to the right size. sparsity_pattern.compress(); matrix.reinit (sparsity_pattern); rhs.reinit (dof_handler.n_dofs()); @@ -1479,13 +1479,13 @@ namespace Step13 - // The second function of this - // class simply solves the linear - // system by a preconditioned - // conjugate gradient method. This - // has been extensively discussed - // before, so we don't dwell into - // it any more. + // The second function of this + // class simply solves the linear + // system by a preconditioned + // conjugate gradient method. This + // has been extensively discussed + // before, so we don't dwell into + // it any more. template void Solver::LinearSystem::solve (Vector &solution) const @@ -1504,88 +1504,88 @@ namespace Step13 - // @sect4{A primal solver} - - // In the previous section, a base - // class for Laplace solvers was - // implemented, that lacked the - // functionality to assemble the - // right hand side vector, however, - // for reasons that were explained - // there. Now we implement a - // corresponding class that can do - // this for the case that the right - // hand side of a problem is given - // as a function object. - // - // The actions of the class are - // rather what you have seen - // already in previous examples - // already, so a brief explanation - // should suffice: the constructor - // takes the same data as does that - // of the underlying class (to - // which it passes all information) - // except for one function object - // that denotes the right hand side - // of the problem. A pointer to - // this object is stored (again as - // a SmartPointer, in order to - // make sure that the function - // object is not deleted as long as - // it is still used by this class). - // - // The only functional part of this - // class is the assemble_rhs - // method that does what its name - // suggests. + // @sect4{A primal solver} + + // In the previous section, a base + // class for Laplace solvers was + // implemented, that lacked the + // functionality to assemble the + // right hand side vector, however, + // for reasons that were explained + // there. 
Now we implement a + // corresponding class that can do + // this for the case that the right + // hand side of a problem is given + // as a function object. + // + // The actions of the class are + // rather what you have seen + // already in previous examples + // already, so a brief explanation + // should suffice: the constructor + // takes the same data as does that + // of the underlying class (to + // which it passes all information) + // except for one function object + // that denotes the right hand side + // of the problem. A pointer to + // this object is stored (again as + // a SmartPointer, in order to + // make sure that the function + // object is not deleted as long as + // it is still used by this class). + // + // The only functional part of this + // class is the assemble_rhs + // method that does what its name + // suggests. template class PrimalSolver : public Solver { public: - PrimalSolver (Triangulation &triangulation, - const FiniteElement &fe, - const Quadrature &quadrature, - const Function &rhs_function, - const Function &boundary_values); + PrimalSolver (Triangulation &triangulation, + const FiniteElement &fe, + const Quadrature &quadrature, + const Function &rhs_function, + const Function &boundary_values); protected: - const SmartPointer > rhs_function; - virtual void assemble_rhs (Vector &rhs) const; + const SmartPointer > rhs_function; + virtual void assemble_rhs (Vector &rhs) const; }; - // The constructor of this class - // basically does what it is - // announced to do above... + // The constructor of this class + // basically does what it is + // announced to do above... template PrimalSolver:: PrimalSolver (Triangulation &triangulation, - const FiniteElement &fe, - const Quadrature &quadrature, - const Function &rhs_function, - const Function &boundary_values) - : - Base (triangulation), - Solver (triangulation, fe, - quadrature, boundary_values), - rhs_function (&rhs_function) + const FiniteElement &fe, + const Quadrature &quadrature, + const Function &rhs_function, + const Function &boundary_values) + : + Base (triangulation), + Solver (triangulation, fe, + quadrature, boundary_values), + rhs_function (&rhs_function) {} - // ... as does the assemble_rhs - // function. Since this is - // explained in several of the - // previous example programs, we - // leave it at that. + // ... as does the assemble_rhs + // function. Since this is + // explained in several of the + // previous example programs, we + // leave it at that. 
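Although the comment above keeps assemble_rhs brief, the formula the function implements, namely local_rhs(i) equal to the sum over quadrature points q of phi_i(x_q) * f(x_q) * JxW(q), is worth seeing once in a completely transparent setting. The sketch below (plain C++, no deal.II objects, an arbitrarily chosen right hand side) performs the same computation by hand for the two linear shape functions on the one-dimensional reference cell [0,1] with a two-point Gauss rule.

#include <cmath>
#include <cstdio>

int main()
{
  // two-point Gauss rule on [0,1]: points and weights (the Jacobian is 1 here)
  const double q[2]   = {0.5 - 0.5 / std::sqrt(3.0), 0.5 + 0.5 / std::sqrt(3.0)};
  const double JxW[2] = {0.5, 0.5};

  // linear shape functions on the reference cell and some right hand side f
  auto phi = [](unsigned int i, double x) { return (i == 0 ? 1.0 - x : x); };
  auto f   = [](double x) { return std::exp(x); };

  double cell_rhs[2] = {0.0, 0.0};
  for (unsigned int q_point = 0; q_point < 2; ++q_point)
    for (unsigned int i = 0; i < 2; ++i)
      cell_rhs[i] += phi(i, q[q_point]) * f(q[q_point]) * JxW[q_point];

  std::printf("cell_rhs = (%g, %g)\n", cell_rhs[0], cell_rhs[1]);
  return 0;
}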
template void PrimalSolver:: assemble_rhs (Vector &rhs) const { FEValues fe_values (*this->fe, *this->quadrature, - update_values | update_quadrature_points | - update_JxW_values); + update_values | update_quadrature_points | + update_JxW_values); const unsigned int dofs_per_cell = this->fe->dofs_per_cell; const unsigned int n_q_points = this->quadrature->size(); @@ -1595,69 +1595,69 @@ namespace Step13 std::vector local_dof_indices (dofs_per_cell); typename DoFHandler::active_cell_iterator - cell = this->dof_handler.begin_active(), - endc = this->dof_handler.end(); + cell = this->dof_handler.begin_active(), + endc = this->dof_handler.end(); for (; cell!=endc; ++cell) - { - cell_rhs = 0; - fe_values.reinit (cell); - rhs_function->value_list (fe_values.get_quadrature_points(), - rhs_values); - - for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); - for (unsigned int i=0; ivalue_list (fe_values.get_quadrature_points(), + rhs_values); + + for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); + for (unsigned int i=0; irefine_grid function - // have been implemented. We will - // now have two classes that - // implement this function for the - // PrimalSolver class, one - // doing global refinement, one a - // form of local refinement. - // - // The first, doing global - // refinement, is rather simple: - // its main function just calls - // triangulation-@>refine_global - // (1);, which does all the work. - // - // Note that since the Base - // base class of the Solver - // class is virtual, we have to - // declare a constructor that - // initializes the immediate base - // class as well as the abstract - // virtual one. - // - // Apart from this technical - // complication, the class is - // probably simple enough to be - // left without further comments. + // @sect4{Global refinement} + + // By now, all functions of the + // abstract base class except for + // the refine_grid function + // have been implemented. We will + // now have two classes that + // implement this function for the + // PrimalSolver class, one + // doing global refinement, one a + // form of local refinement. + // + // The first, doing global + // refinement, is rather simple: + // its main function just calls + // triangulation-@>refine_global + // (1);, which does all the work. + // + // Note that since the Base + // base class of the Solver + // class is virtual, we have to + // declare a constructor that + // initializes the immediate base + // class as well as the abstract + // virtual one. + // + // Apart from this technical + // complication, the class is + // probably simple enough to be + // left without further comments. 
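The comment above on why the constructor has to initialize the virtual Base class explicitly deserves a concrete illustration. The sketch below uses simplified, hypothetical classes that only mirror the names used here; it shows the C++ rule at work: when a base class is inherited virtually, the most derived class initializes it, and the corresponding initializer written in an intermediate class such as Solver is ignored.

#include <iostream>

struct Base
{
  Base(int coarse_grid) { std::cout << "Base(" << coarse_grid << ")\n"; }
};

struct Solver : public virtual Base
{
  // this Base(...) call only takes effect when a Solver is the most derived object
  Solver(int coarse_grid) : Base(coarse_grid) {}
};

struct RefinementGlobal : public Solver
{
  RefinementGlobal(int coarse_grid)
    : Base(coarse_grid),   // required: the most derived class initializes the virtual base
      Solver(coarse_grid)
  {}
};

int main()
{
  RefinementGlobal refinement(2); // prints "Base(2)" exactly once
  return 0;
}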
template class RefinementGlobal : public PrimalSolver { public: - RefinementGlobal (Triangulation &coarse_grid, - const FiniteElement &fe, - const Quadrature &quadrature, - const Function &rhs_function, - const Function &boundary_values); + RefinementGlobal (Triangulation &coarse_grid, + const FiniteElement &fe, + const Quadrature &quadrature, + const Function &rhs_function, + const Function &boundary_values); - virtual void refine_grid (); + virtual void refine_grid (); }; @@ -1665,14 +1665,14 @@ namespace Step13 template RefinementGlobal:: RefinementGlobal (Triangulation &coarse_grid, - const FiniteElement &fe, - const Quadrature &quadrature, - const Function &rhs_function, - const Function &boundary_values) - : - Base (coarse_grid), - PrimalSolver (coarse_grid, fe, quadrature, - rhs_function, boundary_values) + const FiniteElement &fe, + const Quadrature &quadrature, + const Function &rhs_function, + const Function &boundary_values) + : + Base (coarse_grid), + PrimalSolver (coarse_grid, fe, quadrature, + rhs_function, boundary_values) {} @@ -1685,38 +1685,38 @@ namespace Step13 } - // @sect4{Local refinement by the Kelly error indicator} - - // The second class implementing - // refinement strategies uses the - // Kelly refinemet indicator used - // in various example programs - // before. Since this indicator is - // already implemented in a class - // of its own inside the deal.II - // library, there is not much t do - // here except cal the function - // computing the indicator, then - // using it to select a number of - // cells for refinement and - // coarsening, and refinement the - // mesh accordingly. - // - // Again, this should now be - // sufficiently standard to allow - // the omission of further - // comments. + // @sect4{Local refinement by the Kelly error indicator} + + // The second class implementing + // refinement strategies uses the + // Kelly refinemet indicator used + // in various example programs + // before. Since this indicator is + // already implemented in a class + // of its own inside the deal.II + // library, there is not much t do + // here except cal the function + // computing the indicator, then + // using it to select a number of + // cells for refinement and + // coarsening, and refinement the + // mesh accordingly. + // + // Again, this should now be + // sufficiently standard to allow + // the omission of further + // comments. 
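The call to GridRefinement::refine_and_coarsen_fixed_number in the refine_grid function below rests on a simple idea that can be written down directly: order the cells by their error indicator, then flag a fixed fraction of the cells with the largest indicators for refinement and a fixed fraction with the smallest indicators for coarsening. The following stand-alone sketch shows that strategy; it is not the deal.II implementation and omits the corner cases the library handles.

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

int main()
{
  const std::vector<double> indicator = {0.9, 0.1, 0.5, 0.05, 0.7, 0.2};
  const double refine_fraction  = 0.3;  // the 0.3 passed to the library below
  const double coarsen_fraction = 0.03; // the 0.03 passed to the library below

  // sort cell indices by descending indicator
  const std::size_t n = indicator.size();
  std::vector<std::size_t> order(n);
  for (std::size_t i = 0; i < n; ++i)
    order[i] = i;
  std::sort(order.begin(), order.end(),
            [&](std::size_t a, std::size_t b) { return indicator[a] > indicator[b]; });

  const std::size_t n_refine  = static_cast<std::size_t>(refine_fraction * n);
  const std::size_t n_coarsen = static_cast<std::size_t>(coarsen_fraction * n);

  std::vector<bool> refine(n, false), coarsen(n, false);
  for (std::size_t i = 0; i < n_refine; ++i)
    refine[order[i]] = true;              // cells with the largest indicators
  for (std::size_t i = 0; i < n_coarsen; ++i)
    coarsen[order[n - 1 - i]] = true;     // cells with the smallest indicators

  for (std::size_t i = 0; i < n; ++i)
    std::printf("cell %zu: refine=%d coarsen=%d\n", i,
                refine[i] ? 1 : 0, coarsen[i] ? 1 : 0);
  return 0;
}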
template class RefinementKelly : public PrimalSolver { public: - RefinementKelly (Triangulation &coarse_grid, - const FiniteElement &fe, - const Quadrature &quadrature, - const Function &rhs_function, - const Function &boundary_values); + RefinementKelly (Triangulation &coarse_grid, + const FiniteElement &fe, + const Quadrature &quadrature, + const Function &rhs_function, + const Function &boundary_values); - virtual void refine_grid (); + virtual void refine_grid (); }; @@ -1724,14 +1724,14 @@ namespace Step13 template RefinementKelly:: RefinementKelly (Triangulation &coarse_grid, - const FiniteElement &fe, - const Quadrature &quadrature, - const Function &rhs_function, - const Function &boundary_values) - : - Base (coarse_grid), - PrimalSolver (coarse_grid, fe, quadrature, - rhs_function, boundary_values) + const FiniteElement &fe, + const Quadrature &quadrature, + const Function &rhs_function, + const Function &boundary_values) + : + Base (coarse_grid), + PrimalSolver (coarse_grid, fe, quadrature, + rhs_function, boundary_values) {} @@ -1742,13 +1742,13 @@ namespace Step13 { Vector estimated_error_per_cell (this->triangulation->n_active_cells()); KellyErrorEstimator::estimate (this->dof_handler, - QGauss(3), - typename FunctionMap::type(), - this->solution, - estimated_error_per_cell); + QGauss(3), + typename FunctionMap::type(), + this->solution, + estimated_error_per_cell); GridRefinement::refine_and_coarsen_fixed_number (*this->triangulation, - estimated_error_per_cell, - 0.3, 0.03); + estimated_error_per_cell, + 0.3, 0.03); this->triangulation->execute_coarsening_and_refinement (); } @@ -1757,42 +1757,42 @@ namespace Step13 - // @sect3{Equation data} - - // As this is one more academic - // example, we'd like to compare - // exact and computed solution - // against each other. For this, we - // need to declare function classes - // representing the exact solution - // (for comparison and for the - // Dirichlet boundary values), as - // well as a class that denotes the - // right hand side of the equation - // (this is simply the Laplace - // operator applied to the exact - // solution we'd like to recover). - // - // For this example, let us choose as - // exact solution the function - // $u(x,y)=exp(x+sin(10y+5x^2))$. In more - // than two dimensions, simply repeat - // the sine-factor with y - // replaced by z and so on. Given - // this, the following two classes - // are probably straightforward from - // the previous examples. - // - // As in previous examples, the C++ - // language forces us to declare and - // define a constructor to the - // following classes even though they - // are empty. This is due to the fact - // that the base class has no default - // constructor (i.e. one without - // arguments), even though it has a - // constructor which has default - // values for all arguments. + // @sect3{Equation data} + + // As this is one more academic + // example, we'd like to compare + // exact and computed solution + // against each other. For this, we + // need to declare function classes + // representing the exact solution + // (for comparison and for the + // Dirichlet boundary values), as + // well as a class that denotes the + // right hand side of the equation + // (this is simply the Laplace + // operator applied to the exact + // solution we'd like to recover). + // + // For this example, let us choose as + // exact solution the function + // $u(x,y)=exp(x+sin(10y+5x^2))$. 
In more + // than two dimensions, simply repeat + // the sine-factor with y + // replaced by z and so on. Given + // this, the following two classes + // are probably straightforward from + // the previous examples. + // + // As in previous examples, the C++ + // language forces us to declare and + // define a constructor to the + // following classes even though they + // are empty. This is due to the fact + // that the base class has no default + // constructor (i.e. one without + // arguments), even though it has a + // constructor which has default + // values for all arguments. template class Solution : public Function { @@ -1800,14 +1800,14 @@ namespace Step13 Solution () : Function () {} virtual double value (const Point &p, - const unsigned int component) const; + const unsigned int component) const; }; template double Solution::value (const Point &p, - const unsigned int /*component*/) const + const unsigned int /*component*/) const { double q = p(0); for (unsigned int i=1; i () {} virtual double value (const Point &p, - const unsigned int component) const; + const unsigned int component) const; }; template double RightHandSide::value (const Point &p, - const unsigned int /*component*/) const + const unsigned int /*component*/) const { double q = p(0); for (unsigned int i=1; i void run_simulation (LaplaceSolver::Base &solver, - const std::list *> &postprocessor_list) + const std::list *> &postprocessor_list) { - // We will give an indicator of the - // step we are presently computing, - // in order to keep the user - // informed that something is still - // happening, and that the program - // is not in an endless loop. This - // is the head of this status line: + // We will give an indicator of the + // step we are presently computing, + // in order to keep the user + // informed that something is still + // happening, and that the program + // is not in an endless loop. This + // is the head of this status line: std::cout << "Refinement cycle: "; - // Then start a loop which only - // terminates once the number of - // degrees of freedom is larger - // than 20,000 (you may of course - // change this limit, if you need - // more -- or less -- accuracy from - // your program). + // Then start a loop which only + // terminates once the number of + // degrees of freedom is larger + // than 20,000 (you may of course + // change this limit, if you need + // more -- or less -- accuracy from + // your program). for (unsigned int step=0; true; ++step) { - // Then give the alive - // indication for this - // iteration. Note that the - // std::flush is needed to - // have the text actually - // appear on the screen, rather - // than only in some buffer - // that is only flushed the - // next time we issue an - // end-line. - std::cout << step << " " << std::flush; - - // Now solve the problem on the - // present grid, and run the - // evaluators on it. The long - // type name of iterators into - // the list is a little - // annoying, but could be - // shortened by a typedef, if - // so desired. - solver.solve_problem (); - - for (typename std::list *>::const_iterator - i = postprocessor_list.begin(); - i != postprocessor_list.end(); ++i) - { - (*i)->set_refinement_cycle (step); - solver.postprocess (**i); - }; - - - // Now check whether more - // iterations are required, or - // whether the loop shall be - // ended: - if (solver.n_dofs() < 20000) - solver.refine_grid (); - else - break; + // Then give the alive + // indication for this + // iteration. 
Note that the + // std::flush is needed to + // have the text actually + // appear on the screen, rather + // than only in some buffer + // that is only flushed the + // next time we issue an + // end-line. + std::cout << step << " " << std::flush; + + // Now solve the problem on the + // present grid, and run the + // evaluators on it. The long + // type name of iterators into + // the list is a little + // annoying, but could be + // shortened by a typedef, if + // so desired. + solver.solve_problem (); + + for (typename std::list *>::const_iterator + i = postprocessor_list.begin(); + i != postprocessor_list.end(); ++i) + { + (*i)->set_refinement_cycle (step); + solver.postprocess (**i); + }; + + + // Now check whether more + // iterations are required, or + // whether the loop shall be + // ended: + if (solver.n_dofs() < 20000) + solver.refine_grid (); + else + break; }; - // Finally end the line in which we - // displayed status reports: + // Finally end the line in which we + // displayed status reports: std::cout << std::endl; } - // The final function is one which - // takes the name of a solver - // (presently "kelly" and "global" - // are allowed), creates a solver - // object out of it using a coarse - // grid (in this case the ubiquitous - // unit square) and a finite element - // object (here the likewise - // ubiquitous bilinear one), and uses - // that solver to ask for the - // solution of the problem on a - // sequence of successively refined - // grids. - // - // The function also sets up two of - // evaluation functions, one - // evaluating the solution at the - // point (0.5,0.5), the other writing - // out the solution to a file. + // The final function is one which + // takes the name of a solver + // (presently "kelly" and "global" + // are allowed), creates a solver + // object out of it using a coarse + // grid (in this case the ubiquitous + // unit square) and a finite element + // object (here the likewise + // ubiquitous bilinear one), and uses + // that solver to ask for the + // solution of the problem on a + // sequence of successively refined + // grids. + // + // The function also sets up two of + // evaluation functions, one + // evaluating the solution at the + // point (0.5,0.5), the other writing + // out the solution to a file. template void solve_problem (const std::string &solver_name) { - // First minor task: tell the user - // what is going to happen. Thus - // write a header line, and a line - // with all '-' characters of the - // same length as the first one - // right below. + // First minor task: tell the user + // what is going to happen. Thus + // write a header line, and a line + // with all '-' characters of the + // same length as the first one + // right below. const std::string header = "Running tests with \"" + solver_name + - "\" refinement criterion:"; + "\" refinement criterion:"; std::cout << header << std::endl - << std::string (header.size(), '-') << std::endl; + << std::string (header.size(), '-') << std::endl; - // Then set up triangulation, - // finite element, etc. + // Then set up triangulation, + // finite element, etc. Triangulation triangulation; GridGenerator::hyper_cube (triangulation, -1, 1); triangulation.refine_global (2); @@ -1985,77 +1985,77 @@ namespace Step13 const RightHandSide rhs_function; const Solution boundary_values; - // Create a solver object of the - // kind indicated by the argument - // to this function. If the name is - // not recognized, throw an - // exception! 
+ // Create a solver object of the + // kind indicated by the argument + // to this function. If the name is + // not recognized, throw an + // exception! LaplaceSolver::Base * solver = 0; if (solver_name == "global") solver = new LaplaceSolver::RefinementGlobal (triangulation, fe, - quadrature, - rhs_function, - boundary_values); + quadrature, + rhs_function, + boundary_values); else if (solver_name == "kelly") solver = new LaplaceSolver::RefinementKelly (triangulation, fe, - quadrature, - rhs_function, - boundary_values); + quadrature, + rhs_function, + boundary_values); else AssertThrow (false, ExcNotImplemented()); - // Next create a table object in - // which the values of the - // numerical solution at the point - // (0.5,0.5) will be stored, and - // create a respective evaluation - // object: + // Next create a table object in + // which the values of the + // numerical solution at the point + // (0.5,0.5) will be stored, and + // create a respective evaluation + // object: TableHandler results_table; Evaluation::PointValueEvaluation postprocessor1 (Point(0.5,0.5), results_table); - // Also generate an evaluator which - // writes out the solution: + // Also generate an evaluator which + // writes out the solution: Evaluation::SolutionOutput postprocessor2 (std::string("solution-")+solver_name, - DataOut::gnuplot); + DataOut::gnuplot); - // Take these two evaluation - // objects and put them in a - // list... + // Take these two evaluation + // objects and put them in a + // list... std::list *> postprocessor_list; postprocessor_list.push_back (&postprocessor1); postprocessor_list.push_back (&postprocessor2); - // ... which we can then pass on to - // the function that actually runs - // the simulation on successively - // refined grids: + // ... which we can then pass on to + // the function that actually runs + // the simulation on successively + // refined grids: run_simulation (*solver, postprocessor_list); - // When this all is done, write out - // the results of the point - // evaluations, and finally delete - // the solver object: + // When this all is done, write out + // the results of the point + // evaluations, and finally delete + // the solver object: results_table.write_text (std::cout); delete solver; - // And one blank line after all - // results: + // And one blank line after all + // results: std::cout << std::endl; } } - // There is not much to say about the - // main function. It follows the same - // pattern as in all previous - // examples, with attempts to catch - // thrown exceptions, and displaying - // as much information as possible if - // we should get some. The rest is - // self-explanatory. + // There is not much to say about the + // main function. It follows the same + // pattern as in all previous + // examples, with attempts to catch + // thrown exceptions, and displaying + // as much information as possible if + // we should get some. The rest is + // self-explanatory. int main () { try @@ -2068,24 +2068,24 @@ int main () catch (std::exception &exc) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Exception on processing: " << std::endl - << exc.what() << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << exc.what() << std::endl + << "Aborting!" 
<< std::endl + << "----------------------------------------------------" + << std::endl; return 1; } catch (...) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Unknown exception!" << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; }; diff --git a/deal.II/examples/step-14/step-14.cc b/deal.II/examples/step-14/step-14.cc index ced3a24da9..c43c970f4e 100644 --- a/deal.II/examples/step-14/step-14.cc +++ b/deal.II/examples/step-14/step-14.cc @@ -10,7 +10,7 @@ /* further information on this license. */ - // Start out with well known things... + // Start out with well known things... #include #include #include @@ -45,42 +45,42 @@ #include #include - // The last step is as in all - // previous programs: + // The last step is as in all + // previous programs: namespace Step14 { using namespace dealii; - // @sect3{Evaluating the solution} - - // As mentioned in the introduction, - // significant parts of the program - // have simply been taken over from - // the step-13 example program. We - // therefore only comment on those - // things that are new. - // - // First, the framework for - // evaluation of solutions is - // unchanged, i.e. the base class is - // the same, and the class to - // evaluate the solution at a grid - // point is unchanged: + // @sect3{Evaluating the solution} + + // As mentioned in the introduction, + // significant parts of the program + // have simply been taken over from + // the step-13 example program. We + // therefore only comment on those + // things that are new. + // + // First, the framework for + // evaluation of solutions is + // unchanged, i.e. 
the base class is + // the same, and the class to + // evaluate the solution at a grid + // point is unchanged: namespace Evaluation { - // @sect4{The EvaluationBase class} + // @sect4{The EvaluationBase class} template class EvaluationBase { public: - virtual ~EvaluationBase (); + virtual ~EvaluationBase (); - void set_refinement_cycle (const unsigned int refinement_cycle); + void set_refinement_cycle (const unsigned int refinement_cycle); - virtual void operator () (const DoFHandler &dof_handler, - const Vector &solution) const = 0; + virtual void operator () (const DoFHandler &dof_handler, + const Vector &solution) const = 0; protected: - unsigned int refinement_cycle; + unsigned int refinement_cycle; }; @@ -98,30 +98,30 @@ namespace Step14 } - // @sect4{The PointValueEvaluation class} + // @sect4{The PointValueEvaluation class} template class PointValueEvaluation : public EvaluationBase { public: - PointValueEvaluation (const Point &evaluation_point); + PointValueEvaluation (const Point &evaluation_point); - virtual void operator () (const DoFHandler &dof_handler, - const Vector &solution) const; + virtual void operator () (const DoFHandler &dof_handler, + const Vector &solution) const; - DeclException1 (ExcEvaluationPointNotFound, - Point, - << "The evaluation point " << arg1 - << " was not found among the vertices of the present grid."); + DeclException1 (ExcEvaluationPointNotFound, + Point, + << "The evaluation point " << arg1 + << " was not found among the vertices of the present grid."); private: - const Point evaluation_point; + const Point evaluation_point; }; template PointValueEvaluation:: PointValueEvaluation (const Point &evaluation_point) - : - evaluation_point (evaluation_point) + : + evaluation_point (evaluation_point) {} @@ -130,303 +130,303 @@ namespace Step14 void PointValueEvaluation:: operator () (const DoFHandler &dof_handler, - const Vector &solution) const + const Vector &solution) const { double point_value = 1e20; typename DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(), - endc = dof_handler.end(); + cell = dof_handler.begin_active(), + endc = dof_handler.end(); bool evaluation_point_found = false; for (; (cell!=endc) && !evaluation_point_found; ++cell) - for (unsigned int vertex=0; - vertex::vertices_per_cell; - ++vertex) - if (cell->vertex(vertex).distance (evaluation_point) - < - cell->diameter() * 1e-8) - { - point_value = solution(cell->vertex_dof_index(vertex,0)); - - evaluation_point_found = true; - break; - } + for (unsigned int vertex=0; + vertex::vertices_per_cell; + ++vertex) + if (cell->vertex(vertex).distance (evaluation_point) + < + cell->diameter() * 1e-8) + { + point_value = solution(cell->vertex_dof_index(vertex,0)); + + evaluation_point_found = true; + break; + } AssertThrow (evaluation_point_found, - ExcEvaluationPointNotFound(evaluation_point)); + ExcEvaluationPointNotFound(evaluation_point)); std::cout << " Point value=" << point_value - << std::endl; + << std::endl; } - // @sect4{The PointXDerivativeEvaluation class} - - // Besides the class implementing - // the evaluation of the solution - // at one point, we here provide - // one which evaluates the gradient - // at a grid point. Since in - // general the gradient of a finite - // element function is not - // continuous at a vertex, we have - // to be a little bit more careful - // here. 
What we do is to loop over - // all cells, even if we have found - // the point already on one cell, - // and use the mean value of the - // gradient at the vertex taken - // from all adjacent cells. - // - // Given the interface of the - // PointValueEvaluation class, - // the declaration of this class - // provides little surprise, and - // neither does the constructor: + // @sect4{The PointXDerivativeEvaluation class} + + // Besides the class implementing + // the evaluation of the solution + // at one point, we here provide + // one which evaluates the gradient + // at a grid point. Since in + // general the gradient of a finite + // element function is not + // continuous at a vertex, we have + // to be a little bit more careful + // here. What we do is to loop over + // all cells, even if we have found + // the point already on one cell, + // and use the mean value of the + // gradient at the vertex taken + // from all adjacent cells. + // + // Given the interface of the + // PointValueEvaluation class, + // the declaration of this class + // provides little surprise, and + // neither does the constructor: template class PointXDerivativeEvaluation : public EvaluationBase { public: - PointXDerivativeEvaluation (const Point &evaluation_point); + PointXDerivativeEvaluation (const Point &evaluation_point); - virtual void operator () (const DoFHandler &dof_handler, - const Vector &solution) const; + virtual void operator () (const DoFHandler &dof_handler, + const Vector &solution) const; - DeclException1 (ExcEvaluationPointNotFound, - Point, - << "The evaluation point " << arg1 - << " was not found among the vertices of the present grid."); + DeclException1 (ExcEvaluationPointNotFound, + Point, + << "The evaluation point " << arg1 + << " was not found among the vertices of the present grid."); private: - const Point evaluation_point; + const Point evaluation_point; }; template PointXDerivativeEvaluation:: PointXDerivativeEvaluation (const Point &evaluation_point) - : - evaluation_point (evaluation_point) + : + evaluation_point (evaluation_point) {} - // The more interesting things - // happen inside the function doing - // the actual evaluation: + // The more interesting things + // happen inside the function doing + // the actual evaluation: template void PointXDerivativeEvaluation:: operator () (const DoFHandler &dof_handler, - const Vector &solution) const - { - // This time initialize the - // return value with something - // useful, since we will have to - // add up a number of - // contributions and take the - // mean value afterwards... + const Vector &solution) const + { + // This time initialize the + // return value with something + // useful, since we will have to + // add up a number of + // contributions and take the + // mean value afterwards... double point_derivative = 0; - // ...then have some objects of - // which the meaning wil become - // clear below... + // ...then have some objects of + // which the meaning wil become + // clear below... 
QTrapez vertex_quadrature; FEValues fe_values (dof_handler.get_fe(), - vertex_quadrature, - update_gradients | update_quadrature_points); + vertex_quadrature, + update_gradients | update_quadrature_points); std::vector > - solution_gradients (vertex_quadrature.size()); + solution_gradients (vertex_quadrature.size()); - // ...and next loop over all cells - // and their vertices, and count - // how often the vertex has been - // found: + // ...and next loop over all cells + // and their vertices, and count + // how often the vertex has been + // found: typename DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(), - endc = dof_handler.end(); + cell = dof_handler.begin_active(), + endc = dof_handler.end(); unsigned int evaluation_point_hits = 0; for (; cell!=endc; ++cell) - for (unsigned int vertex=0; - vertex::vertices_per_cell; - ++vertex) - if (cell->vertex(vertex) == evaluation_point) - { - // Things are now no more - // as simple, since we - // can't get the gradient - // of the finite element - // field as before, where - // we simply had to pick - // one degree of freedom - // at a vertex. - // - // Rather, we have to - // evaluate the finite - // element field on this - // cell, and at a certain - // point. As you know, - // evaluating finite - // element fields at - // certain points is done - // through the - // FEValues class, so - // we use that. The - // question is: the - // FEValues object - // needs to be a given a - // quadrature formula and - // can then compute the - // values of finite - // element quantities at - // the quadrature - // points. Here, we don't - // want to do quadrature, - // we simply want to - // specify some points! - // - // Nevertheless, the same - // way is chosen: use a - // special quadrature - // rule with points at - // the vertices, since - // these are what we are - // interested in. The - // appropriate rule is - // the trapezoidal rule, - // so that is the reason - // why we used that one - // above. - // - // Thus: initialize the - // FEValues object on - // this cell, - fe_values.reinit (cell); - // and extract the - // gradients of the - // solution vector at the - // vertices: - fe_values.get_function_grads (solution, - solution_gradients); - - // Now we have the - // gradients at all - // vertices, so pick out - // that one which belongs - // to the evaluation - // point (note that the - // order of vertices is - // not necessarily the - // same as that of the - // quadrature points): - unsigned int q_point = 0; - for (; q_point::vertices_per_cell; + ++vertex) + if (cell->vertex(vertex) == evaluation_point) + { + // Things are now no more + // as simple, since we + // can't get the gradient + // of the finite element + // field as before, where + // we simply had to pick + // one degree of freedom + // at a vertex. + // + // Rather, we have to + // evaluate the finite + // element field on this + // cell, and at a certain + // point. As you know, + // evaluating finite + // element fields at + // certain points is done + // through the + // FEValues class, so + // we use that. The + // question is: the + // FEValues object + // needs to be a given a + // quadrature formula and + // can then compute the + // values of finite + // element quantities at + // the quadrature + // points. Here, we don't + // want to do quadrature, + // we simply want to + // specify some points! 
+ // + // Nevertheless, the same + // way is chosen: use a + // special quadrature + // rule with points at + // the vertices, since + // these are what we are + // interested in. The + // appropriate rule is + // the trapezoidal rule, + // so that is the reason + // why we used that one + // above. + // + // Thus: initialize the + // FEValues object on + // this cell, + fe_values.reinit (cell); + // and extract the + // gradients of the + // solution vector at the + // vertices: + fe_values.get_function_grads (solution, + solution_gradients); + + // Now we have the + // gradients at all + // vertices, so pick out + // that one which belongs + // to the evaluation + // point (note that the + // order of vertices is + // not necessarily the + // same as that of the + // quadrature points): + unsigned int q_point = 0; + for (; q_point 0, - ExcEvaluationPointNotFound(evaluation_point)); + ExcEvaluationPointNotFound(evaluation_point)); - // We have simply summed up the - // contributions of all adjacent - // cells, so we still have to - // compute the mean value. Once - // this is done, report the status: + // We have simply summed up the + // contributions of all adjacent + // cells, so we still have to + // compute the mean value. Once + // this is done, report the status: point_derivative /= evaluation_point_hits; std::cout << " Point x-derivative=" << point_derivative - << std::endl; + << std::endl; } - // @sect4{The GridOutput class} - - // Since this program has a more - // difficult structure (it computed - // a dual solution in addition to a - // primal one), writing out the - // solution is no more done by an - // evaluation object since we want - // to write both solutions at once - // into one file, and that requires - // some more information than - // available to the evaluation - // classes. - // - // However, we also want to look at - // the grids generated. This again - // can be done with one such - // class. Its structure is analog - // to the SolutionOutput class - // of the previous example program, - // so we do not discuss it here in - // more detail. Furthermore, - // everything that is used here has - // already been used in previous - // example programs. + // @sect4{The GridOutput class} + + // Since this program has a more + // difficult structure (it computed + // a dual solution in addition to a + // primal one), writing out the + // solution is no more done by an + // evaluation object since we want + // to write both solutions at once + // into one file, and that requires + // some more information than + // available to the evaluation + // classes. + // + // However, we also want to look at + // the grids generated. This again + // can be done with one such + // class. Its structure is analog + // to the SolutionOutput class + // of the previous example program, + // so we do not discuss it here in + // more detail. Furthermore, + // everything that is used here has + // already been used in previous + // example programs. 
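The averaging over all cells adjacent to the evaluation point, as done a few lines above by dividing by evaluation_point_hits, has a simple one-dimensional analogue that may make the intent clearer: for a piecewise linear function the derivative jumps at a vertex, so one reports the mean of the one-sided slopes contributed by the neighboring cells. The data in the sketch below are made up and the code is not deal.II code.

#include <cstdio>

int main()
{
  // three grid points and nodal values of a piecewise linear function
  const double x[3] = {0.0, 0.5, 1.0};
  const double u[3] = {0.0, 1.0, 1.5};

  // one-sided slopes on the two cells adjacent to the vertex x = 0.5
  const double slope_left  = (u[1] - u[0]) / (x[1] - x[0]); // 2.0
  const double slope_right = (u[2] - u[1]) / (x[2] - x[1]); // 1.0

  // the vertex is hit once per adjacent cell, so divide by the hit count
  const unsigned int evaluation_point_hits = 2;
  const double point_derivative = (slope_left + slope_right) / evaluation_point_hits;

  std::printf("Point x-derivative=%g\n", point_derivative); // prints 1.5
  return 0;
}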
template class GridOutput : public EvaluationBase { public: - GridOutput (const std::string &output_name_base); + GridOutput (const std::string &output_name_base); - virtual void operator () (const DoFHandler &dof_handler, - const Vector &solution) const; + virtual void operator () (const DoFHandler &dof_handler, + const Vector &solution) const; private: - const std::string output_name_base; + const std::string output_name_base; }; template GridOutput:: GridOutput (const std::string &output_name_base) - : - output_name_base (output_name_base) + : + output_name_base (output_name_base) {} template void GridOutput::operator () (const DoFHandler &dof_handler, - const Vector &/*solution*/) const + const Vector &/*solution*/) const { std::ostringstream filename; filename << output_name_base << "-" - << this->refinement_cycle - << ".eps" - << std::ends; + << this->refinement_cycle + << ".eps" + << std::ends; std::ofstream out (filename.str().c_str()); GridOut().write_eps (dof_handler.get_tria(), out); @@ -434,68 +434,68 @@ namespace Step14 } - // @sect3{The Laplace solver classes} + // @sect3{The Laplace solver classes} - // Next are the actual solver - // classes. Again, we discuss only - // the differences to the previous - // program. + // Next are the actual solver + // classes. Again, we discuss only + // the differences to the previous + // program. namespace LaplaceSolver { - // Before everything else, - // forward-declare one class that - // we will have later, since we - // will want to make it a friend of - // some of the classes that follow, - // which requires the class to be - // known: + // Before everything else, + // forward-declare one class that + // we will have later, since we + // will want to make it a friend of + // some of the classes that follow, + // which requires the class to be + // known: template class WeightedResidual; - // @sect4{The Laplace solver base class} - - // This class is almost unchanged, - // with the exception that it - // declares two more functions: - // output_solution will be used - // to generate output files from - // the actual solutions computed by - // derived classes, and the - // set_refinement_cycle - // function by which the testing - // framework sets the number of the - // refinement cycle to a local - // variable in this class; this - // number is later used to generate - // filenames for the solution - // output. + // @sect4{The Laplace solver base class} + + // This class is almost unchanged, + // with the exception that it + // declares two more functions: + // output_solution will be used + // to generate output files from + // the actual solutions computed by + // derived classes, and the + // set_refinement_cycle + // function by which the testing + // framework sets the number of the + // refinement cycle to a local + // variable in this class; this + // number is later used to generate + // filenames for the solution + // output. 
template class Base { public: - Base (Triangulation &coarse_grid); - virtual ~Base (); + Base (Triangulation &coarse_grid); + virtual ~Base (); - virtual void solve_problem () = 0; - virtual void postprocess (const Evaluation::EvaluationBase &postprocessor) const = 0; - virtual void refine_grid () = 0; - virtual unsigned int n_dofs () const = 0; + virtual void solve_problem () = 0; + virtual void postprocess (const Evaluation::EvaluationBase &postprocessor) const = 0; + virtual void refine_grid () = 0; + virtual unsigned int n_dofs () const = 0; - virtual void set_refinement_cycle (const unsigned int cycle); + virtual void set_refinement_cycle (const unsigned int cycle); - virtual void output_solution () const = 0; + virtual void output_solution () const = 0; protected: - const SmartPointer > triangulation; + const SmartPointer > triangulation; - unsigned int refinement_cycle; + unsigned int refinement_cycle; }; template Base::Base (Triangulation &coarse_grid) - : - triangulation (&coarse_grid) + : + triangulation (&coarse_grid) {} @@ -513,83 +513,83 @@ namespace Step14 } - // @sect4{The Laplace Solver class} + // @sect4{The Laplace Solver class} - // Likewise, the Solver class - // is entirely unchanged and will - // thus not be discussed. + // Likewise, the Solver class + // is entirely unchanged and will + // thus not be discussed. template class Solver : public virtual Base { public: - Solver (Triangulation &triangulation, - const FiniteElement &fe, - const Quadrature &quadrature, - const Quadrature &face_quadrature, - const Function &boundary_values); - virtual - ~Solver (); - - virtual - void - solve_problem (); - - virtual - void - postprocess (const Evaluation::EvaluationBase &postprocessor) const; - - virtual - unsigned int - n_dofs () const; + Solver (Triangulation &triangulation, + const FiniteElement &fe, + const Quadrature &quadrature, + const Quadrature &face_quadrature, + const Function &boundary_values); + virtual + ~Solver (); + + virtual + void + solve_problem (); + + virtual + void + postprocess (const Evaluation::EvaluationBase &postprocessor) const; + + virtual + unsigned int + n_dofs () const; protected: - const SmartPointer > fe; - const SmartPointer > quadrature; - const SmartPointer > face_quadrature; - DoFHandler dof_handler; - Vector solution; - const SmartPointer > boundary_values; + const SmartPointer > fe; + const SmartPointer > quadrature; + const SmartPointer > face_quadrature; + DoFHandler dof_handler; + Vector solution; + const SmartPointer > boundary_values; - virtual void assemble_rhs (Vector &rhs) const = 0; + virtual void assemble_rhs (Vector &rhs) const = 0; private: - struct LinearSystem - { - LinearSystem (const DoFHandler &dof_handler); - - void solve (Vector &solution) const; - - ConstraintMatrix hanging_node_constraints; - SparsityPattern sparsity_pattern; - SparseMatrix matrix; - Vector rhs; - }; - - void - assemble_linear_system (LinearSystem &linear_system); - - void - assemble_matrix (LinearSystem &linear_system, - const typename DoFHandler::active_cell_iterator &begin_cell, - const typename DoFHandler::active_cell_iterator &end_cell, - Threads::ThreadMutex &mutex) const; + struct LinearSystem + { + LinearSystem (const DoFHandler &dof_handler); + + void solve (Vector &solution) const; + + ConstraintMatrix hanging_node_constraints; + SparsityPattern sparsity_pattern; + SparseMatrix matrix; + Vector rhs; + }; + + void + assemble_linear_system (LinearSystem &linear_system); + + void + assemble_matrix (LinearSystem &linear_system, + const 
typename DoFHandler::active_cell_iterator &begin_cell, + const typename DoFHandler::active_cell_iterator &end_cell, + Threads::ThreadMutex &mutex) const; }; template Solver::Solver (Triangulation &triangulation, - const FiniteElement &fe, - const Quadrature &quadrature, - const Quadrature &face_quadrature, - const Function &boundary_values) - : - Base (triangulation), - fe (&fe), - quadrature (&quadrature), - face_quadrature (&face_quadrature), - dof_handler (triangulation), - boundary_values (&boundary_values) + const FiniteElement &fe, + const Quadrature &quadrature, + const Quadrature &face_quadrature, + const Function &boundary_values) + : + Base (triangulation), + fe (&fe), + quadrature (&quadrature), + face_quadrature (&face_quadrature), + dof_handler (triangulation), + boundary_values (&boundary_values) {} @@ -635,54 +635,54 @@ namespace Step14 Solver::assemble_linear_system (LinearSystem &linear_system) { typedef - typename DoFHandler::active_cell_iterator - active_cell_iterator; + typename DoFHandler::active_cell_iterator + active_cell_iterator; const unsigned int n_threads = multithread_info.n_default_threads; std::vector > - thread_ranges - = Threads::split_range (dof_handler.begin_active (), - dof_handler.end (), - n_threads); + thread_ranges + = Threads::split_range (dof_handler.begin_active (), + dof_handler.end (), + n_threads); Threads::ThreadMutex mutex; Threads::ThreadGroup<> threads; for (unsigned int thread=0; thread::assemble_matrix, - *this, - linear_system, - thread_ranges[thread].first, - thread_ranges[thread].second, - mutex); + threads += Threads::new_thread (&Solver::assemble_matrix, + *this, + linear_system, + thread_ranges[thread].first, + thread_ranges[thread].second, + mutex); assemble_rhs (linear_system.rhs); linear_system.hanging_node_constraints.condense (linear_system.rhs); std::map boundary_value_map; VectorTools::interpolate_boundary_values (dof_handler, - 0, - *boundary_values, - boundary_value_map); + 0, + *boundary_values, + boundary_value_map); threads.join_all (); linear_system.hanging_node_constraints.condense (linear_system.matrix); MatrixTools::apply_boundary_values (boundary_value_map, - linear_system.matrix, - solution, - linear_system.rhs); + linear_system.matrix, + solution, + linear_system.rhs); } template void Solver::assemble_matrix (LinearSystem &linear_system, - const typename DoFHandler::active_cell_iterator &begin_cell, - const typename DoFHandler::active_cell_iterator &end_cell, - Threads::ThreadMutex &mutex) const + const typename DoFHandler::active_cell_iterator &begin_cell, + const typename DoFHandler::active_cell_iterator &end_cell, + Threads::ThreadMutex &mutex) const { FEValues fe_values (*fe, *quadrature, - update_gradients | update_JxW_values); + update_gradients | update_JxW_values); const unsigned int dofs_per_cell = fe->dofs_per_cell; const unsigned int n_q_points = quadrature->size(); @@ -692,28 +692,28 @@ namespace Step14 std::vector local_dof_indices (dofs_per_cell); for (typename DoFHandler::active_cell_iterator cell=begin_cell; - cell!=end_cell; ++cell) - { - cell_matrix = 0; - - fe_values.reinit (cell); - - for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); - Threads::ThreadMutex::ScopedLock lock (mutex); - for (unsigned int i=0; iget_dof_indices (local_dof_indices); + Threads::ThreadMutex::ScopedLock lock (mutex); + for (unsigned int i=0; i &, - ConstraintMatrix &) - = &DoFTools::make_hanging_node_constraints; + ConstraintMatrix &) + = &DoFTools::make_hanging_node_constraints; Threads::Thread<> 
- mhnc_thread = Threads::new_thread (mhnc_p, - dof_handler, - hanging_node_constraints); + mhnc_thread = Threads::new_thread (mhnc_p, + dof_handler, + hanging_node_constraints); sparsity_pattern.reinit (dof_handler.n_dofs(), - dof_handler.n_dofs(), - dof_handler.max_couplings_between_dofs()); + dof_handler.n_dofs(), + dof_handler.max_couplings_between_dofs()); DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); mhnc_thread.join (); @@ -766,120 +766,120 @@ namespace Step14 - // @sect4{The PrimalSolver class} - - // The PrimalSolver class is - // also mostly unchanged except for - // overloading the functions - // solve_problem, n_dofs, - // and postprocess of the base - // class, and implementing the - // output_solution - // function. These overloaded - // functions do nothing particular - // besides calling the functions of - // the base class -- that seems - // superfluous, but works around a - // bug in a popular compiler which - // requires us to write such - // functions for the following - // scenario: Besides the - // PrimalSolver class, we will - // have a DualSolver, both - // derived from Solver. We will - // then have a final classes which - // derived from these two, which - // will then have two instances of - // the Solver class as its base - // classes. If we want, for - // example, the number of degrees - // of freedom of the primal solver, - // we would have to indicate this - // like so: - // PrimalSolver::n_dofs(). - // However, the compiler does not - // accept this since the n_dofs - // function is actually from a base - // class of the PrimalSolver - // class, so we have to inject the - // name from the base to the - // derived class using these - // additional functions. - // - // Regarding the implementation of - // the output_solution - // function, we keep the - // GlobalRefinement and - // RefinementKelly classes in - // this program, and they can then - // rely on the default - // implementation of this function - // which simply outputs the primal - // solution. The class implementing - // dual weighted error estimators - // will overload this function - // itself, to also output the dual - // solution. - // - // Except for this, the class is - // unchanged with respect to the - // previous example. + // @sect4{The PrimalSolver class} + + // The PrimalSolver class is + // also mostly unchanged except for + // overloading the functions + // solve_problem, n_dofs, + // and postprocess of the base + // class, and implementing the + // output_solution + // function. These overloaded + // functions do nothing particular + // besides calling the functions of + // the base class -- that seems + // superfluous, but works around a + // bug in a popular compiler which + // requires us to write such + // functions for the following + // scenario: Besides the + // PrimalSolver class, we will + // have a DualSolver, both + // derived from Solver. We will + // then have a final classes which + // derived from these two, which + // will then have two instances of + // the Solver class as its base + // classes. If we want, for + // example, the number of degrees + // of freedom of the primal solver, + // we would have to indicate this + // like so: + // PrimalSolver::n_dofs(). + // However, the compiler does not + // accept this since the n_dofs + // function is actually from a base + // class of the PrimalSolver + // class, so we have to inject the + // name from the base to the + // derived class using these + // additional functions. 
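                                   // The situation just described
                                   // can be boiled down to a few
                                   // lines of plain C++ that are
                                   // independent of deal.II. The
                                   // following illustration is not
                                   // part of the program and all
                                   // names in it are made up:
  struct IllustrationBase
  {
    unsigned int n_dofs () const { return 42; }
  };

  struct IllustrationPrimal : public virtual IllustrationBase
  {
                                   // re-declare the function so
                                   // that the name 'n_dofs' is also
                                   // known in this class and can be
                                   // qualified from further derived
                                   // classes:
    unsigned int n_dofs () const { return IllustrationBase::n_dofs (); }
  };

  struct IllustrationDual : public virtual IllustrationBase
  {
    unsigned int n_dofs () const { return IllustrationBase::n_dofs (); }
  };

  struct IllustrationWeighted : public IllustrationPrimal,
                                public IllustrationDual
  {
    unsigned int primal_n_dofs () const
    {
                                   // a plain 'n_dofs()' would be
                                   // ambiguous here since it is
                                   // inherited through both
                                   // branches; qualifying it with
                                   // the intended base class
                                   // resolves this, just like
                                   // 'PrimalSolver<dim>::n_dofs()'
                                   // does in the classes that
                                   // follow:
      return IllustrationPrimal::n_dofs ();
    }
  };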
+ // + // Regarding the implementation of + // the output_solution + // function, we keep the + // GlobalRefinement and + // RefinementKelly classes in + // this program, and they can then + // rely on the default + // implementation of this function + // which simply outputs the primal + // solution. The class implementing + // dual weighted error estimators + // will overload this function + // itself, to also output the dual + // solution. + // + // Except for this, the class is + // unchanged with respect to the + // previous example. template class PrimalSolver : public Solver { public: - PrimalSolver (Triangulation &triangulation, - const FiniteElement &fe, - const Quadrature &quadrature, - const Quadrature &face_quadrature, - const Function &rhs_function, - const Function &boundary_values); + PrimalSolver (Triangulation &triangulation, + const FiniteElement &fe, + const Quadrature &quadrature, + const Quadrature &face_quadrature, + const Function &rhs_function, + const Function &boundary_values); - virtual - void solve_problem (); + virtual + void solve_problem (); - virtual - unsigned int n_dofs () const; + virtual + unsigned int n_dofs () const; - virtual - void postprocess (const Evaluation::EvaluationBase &postprocessor) const; + virtual + void postprocess (const Evaluation::EvaluationBase &postprocessor) const; - virtual - void output_solution () const; + virtual + void output_solution () const; protected: - const SmartPointer > rhs_function; - virtual void assemble_rhs (Vector &rhs) const; - - // Now, in order to work around - // some problems in one of the - // compilers this library can - // be compiled with, we will - // have to declare a - // class that is actually - // derived from the present - // one, as a friend (strange as - // that seems). The full - // rationale will be explained - // below. - friend class WeightedResidual; + const SmartPointer > rhs_function; + virtual void assemble_rhs (Vector &rhs) const; + + // Now, in order to work around + // some problems in one of the + // compilers this library can + // be compiled with, we will + // have to declare a + // class that is actually + // derived from the present + // one, as a friend (strange as + // that seems). The full + // rationale will be explained + // below. 
+ friend class WeightedResidual; }; template PrimalSolver:: PrimalSolver (Triangulation &triangulation, - const FiniteElement &fe, - const Quadrature &quadrature, - const Quadrature &face_quadrature, - const Function &rhs_function, - const Function &boundary_values) - : - Base (triangulation), - Solver (triangulation, fe, - quadrature, face_quadrature, - boundary_values), - rhs_function (&rhs_function) + const FiniteElement &fe, + const Quadrature &quadrature, + const Quadrature &face_quadrature, + const Function &rhs_function, + const Function &boundary_values) + : + Base (triangulation), + Solver (triangulation, fe, + quadrature, face_quadrature, + boundary_values), + rhs_function (&rhs_function) {} @@ -920,9 +920,9 @@ namespace Step14 std::ostringstream filename; filename << "solution-" - << this->refinement_cycle - << ".gnuplot" - << std::ends; + << this->refinement_cycle + << ".gnuplot" + << std::ends; std::ofstream out (filename.str().c_str()); data_out.write (out, DataOut::gnuplot); @@ -936,8 +936,8 @@ namespace Step14 assemble_rhs (Vector &rhs) const { FEValues fe_values (*this->fe, *this->quadrature, - update_values | update_quadrature_points | - update_JxW_values); + update_values | update_quadrature_points | + update_JxW_values); const unsigned int dofs_per_cell = this->fe->dofs_per_cell; const unsigned int n_q_points = this->quadrature->size(); @@ -947,48 +947,48 @@ namespace Step14 std::vector local_dof_indices (dofs_per_cell); typename DoFHandler::active_cell_iterator - cell = this->dof_handler.begin_active(), - endc = this->dof_handler.end(); + cell = this->dof_handler.begin_active(), + endc = this->dof_handler.end(); for (; cell!=endc; ++cell) - { - cell_rhs = 0; + { + cell_rhs = 0; - fe_values.reinit (cell); + fe_values.reinit (cell); - rhs_function->value_list (fe_values.get_quadrature_points(), - rhs_values); + rhs_function->value_list (fe_values.get_quadrature_points(), + rhs_values); - for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); - for (unsigned int i=0; iget_dof_indices (local_dof_indices); + for (unsigned int i=0; i class RefinementGlobal : public PrimalSolver { public: - RefinementGlobal (Triangulation &coarse_grid, - const FiniteElement &fe, - const Quadrature &quadrature, - const Quadrature &face_quadrature, - const Function &rhs_function, - const Function &boundary_values); - - virtual void refine_grid (); + RefinementGlobal (Triangulation &coarse_grid, + const FiniteElement &fe, + const Quadrature &quadrature, + const Quadrature &face_quadrature, + const Function &rhs_function, + const Function &boundary_values); + + virtual void refine_grid (); }; @@ -996,16 +996,16 @@ namespace Step14 template RefinementGlobal:: RefinementGlobal (Triangulation &coarse_grid, - const FiniteElement &fe, - const Quadrature &quadrature, - const Quadrature &face_quadrature, - const Function &rhs_function, - const Function &boundary_values) - : - Base (coarse_grid), - PrimalSolver (coarse_grid, fe, quadrature, - face_quadrature, rhs_function, - boundary_values) + const FiniteElement &fe, + const Quadrature &quadrature, + const Quadrature &face_quadrature, + const Function &rhs_function, + const Function &boundary_values) + : + Base (coarse_grid), + PrimalSolver (coarse_grid, fe, quadrature, + face_quadrature, rhs_function, + boundary_values) {} @@ -1023,14 +1023,14 @@ namespace Step14 class RefinementKelly : public PrimalSolver { public: - RefinementKelly (Triangulation &coarse_grid, - const FiniteElement &fe, - const Quadrature &quadrature, - const 
Quadrature &face_quadrature, - const Function &rhs_function, - const Function &boundary_values); - - virtual void refine_grid (); + RefinementKelly (Triangulation &coarse_grid, + const FiniteElement &fe, + const Quadrature &quadrature, + const Quadrature &face_quadrature, + const Function &rhs_function, + const Function &boundary_values); + + virtual void refine_grid (); }; @@ -1038,16 +1038,16 @@ namespace Step14 template RefinementKelly:: RefinementKelly (Triangulation &coarse_grid, - const FiniteElement &fe, - const Quadrature &quadrature, - const Quadrature &face_quadrature, - const Function &rhs_function, - const Function &boundary_values) - : - Base (coarse_grid), - PrimalSolver (coarse_grid, fe, quadrature, - face_quadrature, - rhs_function, boundary_values) + const FiniteElement &fe, + const Quadrature &quadrature, + const Quadrature &face_quadrature, + const Function &rhs_function, + const Function &boundary_values) + : + Base (coarse_grid), + PrimalSolver (coarse_grid, fe, quadrature, + face_quadrature, + rhs_function, boundary_values) {} @@ -1058,58 +1058,58 @@ namespace Step14 { Vector estimated_error_per_cell (this->triangulation->n_active_cells()); KellyErrorEstimator::estimate (this->dof_handler, - QGauss(3), - typename FunctionMap::type(), - this->solution, - estimated_error_per_cell); + QGauss(3), + typename FunctionMap::type(), + this->solution, + estimated_error_per_cell); GridRefinement::refine_and_coarsen_fixed_number (*this->triangulation, - estimated_error_per_cell, - 0.3, 0.03); + estimated_error_per_cell, + 0.3, 0.03); this->triangulation->execute_coarsening_and_refinement (); } - // @sect4{The RefinementWeightedKelly class} - - // This class is a variant of the - // previous one, in that it allows - // to weight the refinement - // indicators we get from the - // library's Kelly indicator by - // some function. We include this - // class since the goal of this - // example program is to - // demonstrate automatic refinement - // criteria even for complex output - // quantities such as point values - // or stresses. If we did not solve - // a dual problem and compute the - // weights thereof, we would - // probably be tempted to give a - // hand-crafted weighting to the - // indicators to account for the - // fact that we are going to - // evaluate these quantities. This - // class accepts such a weighting - // function as argument to its - // constructor: + // @sect4{The RefinementWeightedKelly class} + + // This class is a variant of the + // previous one, in that it allows + // to weight the refinement + // indicators we get from the + // library's Kelly indicator by + // some function. We include this + // class since the goal of this + // example program is to + // demonstrate automatic refinement + // criteria even for complex output + // quantities such as point values + // or stresses. If we did not solve + // a dual problem and compute the + // weights thereof, we would + // probably be tempted to give a + // hand-crafted weighting to the + // indicators to account for the + // fact that we are going to + // evaluate these quantities. 
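                                   // For concreteness, a
                                   // hand-crafted weighting of the
                                   // kind alluded to here could
                                   // look like the following
                                   // function object, which gives
                                   // cells close to a point of
                                   // interest a larger weight. This
                                   // is purely an illustration: the
                                   // class and the particular
                                   // formula are made up and not
                                   // used anywhere in this program.
  template <int dim>
  class PointNeighborhoodWeight : public Function<dim>
  {
  public:
    PointNeighborhoodWeight (const Point<dim> &center)
      :
      Function<dim> (),
      center (center)
    {}

    virtual double value (const Point<dim>  &p,
                          const unsigned int /*component*/) const
    {
                                   // the closer to the point of
                                   // interest, the larger the
                                   // weight; the particular form
                                   // 1/(1+r^2) is arbitrary:
      const double r = p.distance (center);
      return 1. / (1. + r*r);
    }

  private:
    const Point<dim> center;
  };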
This + // class accepts such a weighting + // function as argument to its + // constructor: template class RefinementWeightedKelly : public PrimalSolver { public: - RefinementWeightedKelly (Triangulation &coarse_grid, - const FiniteElement &fe, - const Quadrature &quadrature, - const Quadrature &face_quadrature, - const Function &rhs_function, - const Function &boundary_values, - const Function &weighting_function); + RefinementWeightedKelly (Triangulation &coarse_grid, + const FiniteElement &fe, + const Quadrature &quadrature, + const Quadrature &face_quadrature, + const Function &rhs_function, + const Function &boundary_values, + const Function &weighting_function); - virtual void refine_grid (); + virtual void refine_grid (); private: - const SmartPointer > weighting_function; + const SmartPointer > weighting_function; }; @@ -1117,262 +1117,262 @@ namespace Step14 template RefinementWeightedKelly:: RefinementWeightedKelly (Triangulation &coarse_grid, - const FiniteElement &fe, - const Quadrature &quadrature, - const Quadrature &face_quadrature, - const Function &rhs_function, - const Function &boundary_values, - const Function &weighting_function) - : - Base (coarse_grid), - PrimalSolver (coarse_grid, fe, quadrature, - face_quadrature, - rhs_function, boundary_values), - weighting_function (&weighting_function) + const FiniteElement &fe, + const Quadrature &quadrature, + const Quadrature &face_quadrature, + const Function &rhs_function, + const Function &boundary_values, + const Function &weighting_function) + : + Base (coarse_grid), + PrimalSolver (coarse_grid, fe, quadrature, + face_quadrature, + rhs_function, boundary_values), + weighting_function (&weighting_function) {} - // Now, here comes the main - // function, including the - // weighting: + // Now, here comes the main + // function, including the + // weighting: template void RefinementWeightedKelly::refine_grid () { - // First compute some residual - // based error indicators for all - // cells by a method already - // implemented in the - // library. What exactly is - // computed can be read in the - // documentation of that class. + // First compute some residual + // based error indicators for all + // cells by a method already + // implemented in the + // library. What exactly is + // computed can be read in the + // documentation of that class. 
Vector estimated_error (this->triangulation->n_active_cells()); KellyErrorEstimator::estimate (this->dof_handler, - *this->face_quadrature, - typename FunctionMap::type(), - this->solution, - estimated_error); - - // Now we are going to weight - // these indicators by the value - // of the function given to the - // constructor: + *this->face_quadrature, + typename FunctionMap::type(), + this->solution, + estimated_error); + + // Now we are going to weight + // these indicators by the value + // of the function given to the + // constructor: typename DoFHandler::active_cell_iterator - cell = this->dof_handler.begin_active(), - endc = this->dof_handler.end(); + cell = this->dof_handler.begin_active(), + endc = this->dof_handler.end(); for (unsigned int cell_index=0; cell!=endc; ++cell, ++cell_index) - estimated_error(cell_index) - *= weighting_function->value (cell->center()); + estimated_error(cell_index) + *= weighting_function->value (cell->center()); GridRefinement::refine_and_coarsen_fixed_number (*this->triangulation, - estimated_error, - 0.3, 0.03); + estimated_error, + 0.3, 0.03); this->triangulation->execute_coarsening_and_refinement (); } } - // @sect3{Equation data} - // - // In this example program, we work - // with the same data sets as in the - // previous one, but as it may so - // happen that someone wants to run - // the program with different - // boundary values and right hand side - // functions, or on a different grid, - // we show a simple technique to do - // exactly that. For more clarity, we - // furthermore pack everything that - // has to do with equation data into - // a namespace of its own. - // - // The underlying assumption is that - // this is a research program, and - // that there we often have a number - // of test cases that consist of a - // domain, a right hand side, - // boundary values, possibly a - // specified coefficient, and a - // number of other parameters. They - // often vary all at the same time - // when shifting from one example to - // another. To make handling such - // sets of problem description - // parameters simple is the goal of - // the following. - // - // Basically, the idea is this: let - // us have a structure for each set - // of data, in which we pack - // everything that describes a test - // case: here, these are two - // subclasses, one called - // BoundaryValues for the - // boundary values of the exact - // solution, and one called - // RightHandSide, and then a way - // to generate the coarse grid. Since - // the solution of the previous - // example program looked like curved - // ridges, we use this name here for - // the enclosing class. Note that the - // names of the two inner classes - // have to be the same for all - // enclosing test case classes, and - // also that we have attached the - // dimension template argument to the - // enclosing class rather than to the - // inner ones, to make further - // processing simpler. (From a - // language viewpoint, a namespace - // would be better to encapsulate - // these inner classes, rather than a - // structure. However, namespaces - // cannot be given as template - // arguments, so we use a structure - // to allow a second object to select - // from within its given - // argument. The enclosing structure, - // of course, has no member variables - // apart from the classes it - // declares, and a static function to - // generate the coarse mesh; it will - // in general never be instantiated.) 
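                                   // (To make the parenthetical
                                   // remark about namespaces and
                                   // template arguments concrete,
                                   // here is a minimal illustration
                                   // that is independent of deal.II
                                   // and not part of the program;
                                   // all names in it are made up:
                                   //
                                   //   struct DescriptorStruct
                                   //   {
                                   //     struct BoundaryValues {};
                                   //     struct RightHandSide  {};
                                   //   };
                                   //
                                   //   namespace DescriptorNamespace
                                   //   {
                                   //     struct BoundaryValues {};
                                   //     struct RightHandSide  {};
                                   //   }
                                   //
                                   //   template <class Descriptor>
                                   //   struct FrameworkUser
                                   //   {
                                   //     typename Descriptor::BoundaryValues boundary_values;
                                   //     typename Descriptor::RightHandSide  right_hand_side;
                                   //   };
                                   //
                                   //   typedef FrameworkUser<DescriptorStruct> ThisCompiles;
                                   //
                                   // whereas FrameworkUser<DescriptorNamespace>
                                   // is an error, since a namespace
                                   // is not a type and can therefore
                                   // not be used as a template
                                   // argument.)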
- // - // The idea is then the following - // (this is the right time to also - // take a brief look at the code - // below): we can generate objects - // for boundary values and - // right hand side by simply giving - // the name of the outer class as a - // template argument to a class which - // we call here Data::SetUp, and - // it then creates objects for the - // inner classes. In this case, to - // get all that characterizes the - // curved ridge solution, we would - // simply generate an instance of - // Data::SetUp@, - // and everything we need to know - // about the solution would be static - // member variables and functions of - // that object. - // - // This approach might seem like - // overkill in this case, but will - // become very handy once a certain - // set up is not only characterized - // by Dirichlet boundary values and a - // right hand side function, but in - // addition by material properties, - // Neumann values, different boundary - // descriptors, etc. In that case, - // the SetUp class might consist - // of a dozen or more objects, and - // each descriptor class (like the - // CurvedRidges class below) - // would have to provide them. Then, - // you will be happy to be able to - // change from one set of data to - // another by only changing the - // template argument to the SetUp - // class at one place, rather than at - // many. - // - // With this framework for different - // test cases, we are almost - // finished, but one thing remains: - // by now we can select statically, - // by changing one template argument, - // which data set to choose. In order - // to be able to do that dynamically, - // i.e. at run time, we need a base - // class. This we provide in the - // obvious way, see below, with - // virtual abstract functions. It - // forces us to introduce a second - // template parameter dim which - // we need for the base class (which - // could be avoided using some - // template magic, but we omit that), - // but that's all. - // - // Adding new testcases is now - // simple, you don't have to touch - // the framework classes, only a - // structure like the - // CurvedRidges one is needed. + // @sect3{Equation data} + // + // In this example program, we work + // with the same data sets as in the + // previous one, but as it may so + // happen that someone wants to run + // the program with different + // boundary values and right hand side + // functions, or on a different grid, + // we show a simple technique to do + // exactly that. For more clarity, we + // furthermore pack everything that + // has to do with equation data into + // a namespace of its own. + // + // The underlying assumption is that + // this is a research program, and + // that there we often have a number + // of test cases that consist of a + // domain, a right hand side, + // boundary values, possibly a + // specified coefficient, and a + // number of other parameters. They + // often vary all at the same time + // when shifting from one example to + // another. To make handling such + // sets of problem description + // parameters simple is the goal of + // the following. + // + // Basically, the idea is this: let + // us have a structure for each set + // of data, in which we pack + // everything that describes a test + // case: here, these are two + // subclasses, one called + // BoundaryValues for the + // boundary values of the exact + // solution, and one called + // RightHandSide, and then a way + // to generate the coarse grid. 
Since + // the solution of the previous + // example program looked like curved + // ridges, we use this name here for + // the enclosing class. Note that the + // names of the two inner classes + // have to be the same for all + // enclosing test case classes, and + // also that we have attached the + // dimension template argument to the + // enclosing class rather than to the + // inner ones, to make further + // processing simpler. (From a + // language viewpoint, a namespace + // would be better to encapsulate + // these inner classes, rather than a + // structure. However, namespaces + // cannot be given as template + // arguments, so we use a structure + // to allow a second object to select + // from within its given + // argument. The enclosing structure, + // of course, has no member variables + // apart from the classes it + // declares, and a static function to + // generate the coarse mesh; it will + // in general never be instantiated.) + // + // The idea is then the following + // (this is the right time to also + // take a brief look at the code + // below): we can generate objects + // for boundary values and + // right hand side by simply giving + // the name of the outer class as a + // template argument to a class which + // we call here Data::SetUp, and + // it then creates objects for the + // inner classes. In this case, to + // get all that characterizes the + // curved ridge solution, we would + // simply generate an instance of + // Data::SetUp@, + // and everything we need to know + // about the solution would be static + // member variables and functions of + // that object. + // + // This approach might seem like + // overkill in this case, but will + // become very handy once a certain + // set up is not only characterized + // by Dirichlet boundary values and a + // right hand side function, but in + // addition by material properties, + // Neumann values, different boundary + // descriptors, etc. In that case, + // the SetUp class might consist + // of a dozen or more objects, and + // each descriptor class (like the + // CurvedRidges class below) + // would have to provide them. Then, + // you will be happy to be able to + // change from one set of data to + // another by only changing the + // template argument to the SetUp + // class at one place, rather than at + // many. + // + // With this framework for different + // test cases, we are almost + // finished, but one thing remains: + // by now we can select statically, + // by changing one template argument, + // which data set to choose. In order + // to be able to do that dynamically, + // i.e. at run time, we need a base + // class. This we provide in the + // obvious way, see below, with + // virtual abstract functions. It + // forces us to introduce a second + // template parameter dim which + // we need for the base class (which + // could be avoided using some + // template magic, but we omit that), + // but that's all. + // + // Adding new testcases is now + // simple, you don't have to touch + // the framework classes, only a + // structure like the + // CurvedRidges one is needed. namespace Data { - // @sect4{The SetUpBase and SetUp classes} + // @sect4{The SetUpBase and SetUp classes} - // Based on the above description, - // the SetUpBase class then - // looks as follows. To allow using - // the SmartPointer class with - // this class, we derived from the - // Subscriptor class. + // Based on the above description, + // the SetUpBase class then + // looks as follows. 
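                                   // (To anticipate how the SetUp
                                   // class template declared next,
                                   // together with a descriptor
                                   // class like the CurvedRidges
                                   // one further below, is meant to
                                   // be used, here is a
                                   // hypothetical snippet; it is
                                   // not part of the program, and
                                   // the function name is made up:)
    template <int dim>
    void
    setup_usage_sketch (Triangulation<dim> &triangulation)
    {
                                   // select a test case simply by
                                   // naming its descriptor class as
                                   // template argument:
      const SetUp<CurvedRidges<dim>, dim> descriptor;

      descriptor.create_coarse_grid (triangulation);

                                   // these references would then be
                                   // handed to the solver classes
                                   // defined above:
      const Function<dim> &boundary_values = descriptor.get_boundary_values ();
      const Function<dim> &right_hand_side = descriptor.get_right_hand_side ();
      (void)boundary_values;
      (void)right_hand_side;
    }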
To allow using + // the SmartPointer class with + // this class, we derived from the + // Subscriptor class. template struct SetUpBase : public Subscriptor { - virtual - const Function & get_boundary_values () const = 0; + virtual + const Function & get_boundary_values () const = 0; - virtual - const Function & get_right_hand_side () const = 0; + virtual + const Function & get_right_hand_side () const = 0; - virtual - void create_coarse_grid (Triangulation &coarse_grid) const = 0; + virtual + void create_coarse_grid (Triangulation &coarse_grid) const = 0; }; - // And now for the derived class - // that takes the template argument - // as explained above. For some - // reason, C++ requires us to - // define a constructor (which - // maybe empty), as otherwise a - // warning is generated that some - // data is not initialized. - // - // Here we pack the data elements - // into private variables, and - // allow access to them through the - // methods of the base class. + // And now for the derived class + // that takes the template argument + // as explained above. For some + // reason, C++ requires us to + // define a constructor (which + // maybe empty), as otherwise a + // warning is generated that some + // data is not initialized. + // + // Here we pack the data elements + // into private variables, and + // allow access to them through the + // methods of the base class. template struct SetUp : public SetUpBase { - SetUp () {} + SetUp () {} - virtual - const Function & get_boundary_values () const; + virtual + const Function & get_boundary_values () const; - virtual - const Function & get_right_hand_side () const; + virtual + const Function & get_right_hand_side () const; - virtual - void create_coarse_grid (Triangulation &coarse_grid) const; + virtual + void create_coarse_grid (Triangulation &coarse_grid) const; private: - static const typename Traits::BoundaryValues boundary_values; - static const typename Traits::RightHandSide right_hand_side; + static const typename Traits::BoundaryValues boundary_values; + static const typename Traits::RightHandSide right_hand_side; }; - // We have to provide definitions - // for the static member variables - // of the above class: + // We have to provide definitions + // for the static member variables + // of the above class: template const typename Traits::BoundaryValues SetUp::boundary_values; template const typename Traits::RightHandSide SetUp::right_hand_side; - // And definitions of the member - // functions: + // And definitions of the member + // functions: template const Function & SetUp::get_boundary_values () const @@ -1398,39 +1398,39 @@ namespace Step14 } - // @sect4{The CurvedRidges class} + // @sect4{The CurvedRidges class} - // The class that is used to - // describe the boundary values and - // right hand side of the curved - // ridge problem already used in - // the step-13 example program is - // then like so: + // The class that is used to + // describe the boundary values and + // right hand side of the curved + // ridge problem already used in + // the step-13 example program is + // then like so: template struct CurvedRidges { - class BoundaryValues : public Function - { - public: - BoundaryValues () : Function () {} + class BoundaryValues : public Function + { + public: + BoundaryValues () : Function () {} - virtual double value (const Point &p, - const unsigned int component) const; - }; + virtual double value (const Point &p, + const unsigned int component) const; + }; - class RightHandSide : public Function - { - public: 
- RightHandSide () : Function () {} + class RightHandSide : public Function + { + public: + RightHandSide () : Function () {} - virtual double value (const Point &p, - const unsigned int component) const; - }; + virtual double value (const Point &p, + const unsigned int component) const; + }; - static - void - create_coarse_grid (Triangulation &coarse_grid); + static + void + create_coarse_grid (Triangulation &coarse_grid); }; @@ -1438,11 +1438,11 @@ namespace Step14 double CurvedRidges::BoundaryValues:: value (const Point &p, - const unsigned int /*component*/) const + const unsigned int /*component*/) const { double q = p(0); for (unsigned int i=1; i double CurvedRidges::RightHandSide::value (const Point &p, - const unsigned int /*component*/) const + const unsigned int /*component*/) const { double q = p(0); for (unsigned int i=1; i struct Exercise_2_3 { - // We need a class to denote - // the boundary values of the - // problem. In this case, this - // is simple: it's the zero - // function, so don't even - // declare a class, just a - // typedef: - typedef ZeroFunction BoundaryValues; - - // Second, a class that denotes - // the right hand side. Since - // they are constant, just - // subclass the corresponding - // class of the library and be - // done: - class RightHandSide : public ConstantFunction - { - public: - RightHandSide () : ConstantFunction (1.) {} - }; - - // Finally a function to - // generate the coarse - // grid. This is somewhat more - // complicated here, see - // immediately below. - static - void - create_coarse_grid (Triangulation &coarse_grid); + // We need a class to denote + // the boundary values of the + // problem. In this case, this + // is simple: it's the zero + // function, so don't even + // declare a class, just a + // typedef: + typedef ZeroFunction BoundaryValues; + + // Second, a class that denotes + // the right hand side. Since + // they are constant, just + // subclass the corresponding + // class of the library and be + // done: + class RightHandSide : public ConstantFunction + { + public: + RightHandSide () : ConstantFunction (1.) {} + }; + + // Finally a function to + // generate the coarse + // grid. This is somewhat more + // complicated here, see + // immediately below. + static + void + create_coarse_grid (Triangulation &coarse_grid); }; - // As stated above, the grid for - // this example is the square - // [-1,1]^2 with the square - // [-1/2,1/2]^2 as hole in it. We - // create the coarse grid as 4 - // times 4 cells with the middle - // four ones missing. - // - // Of course, the example has an - // extension to 3d, but since this - // function cannot be written in a - // dimension independent way we - // choose not to implement this - // here, but rather only specialize - // the template for dim=2. If you - // compile the program for 3d, - // you'll get a message from the - // linker that this function is not - // implemented for 3d, and needs to - // be provided. - // - // For the creation of this - // geometry, the library has no - // predefined method. In this case, - // the geometry is still simple - // enough to do the creation by - // hand, rather than using a mesh - // generator. + // As stated above, the grid for + // this example is the square + // [-1,1]^2 with the square + // [-1/2,1/2]^2 as hole in it. We + // create the coarse grid as 4 + // times 4 cells with the middle + // four ones missing. 
+ // + // Of course, the example has an + // extension to 3d, but since this + // function cannot be written in a + // dimension independent way we + // choose not to implement this + // here, but rather only specialize + // the template for dim=2. If you + // compile the program for 3d, + // you'll get a message from the + // linker that this function is not + // implemented for 3d, and needs to + // be provided. + // + // For the creation of this + // geometry, the library has no + // predefined method. In this case, + // the geometry is still simple + // enough to do the creation by + // hand, rather than using a mesh + // generator. template <> void Exercise_2_3<2>:: create_coarse_grid (Triangulation<2> &coarse_grid) { - // First define the space - // dimension, to allow those - // parts of the function that are - // actually dimension independent - // to use this variable. That - // makes it simpler if you later - // takes this as a starting point - // to implement the 3d version. + // First define the space + // dimension, to allow those + // parts of the function that are + // actually dimension independent + // to use this variable. That + // makes it simpler if you later + // takes this as a starting point + // to implement the 3d version. const unsigned int dim = 2; - // Then have a list of - // vertices. Here, they are 24 (5 - // times 5, with the middle one - // omitted). It is probably best - // to draw a sketch here. Note - // that we leave the number of - // vertices open at first, but - // then let the compiler compute - // this number afterwards. This - // reduces the possibility of - // having the dimension to large - // and leaving the last ones - // uninitialized. + // Then have a list of + // vertices. Here, they are 24 (5 + // times 5, with the middle one + // omitted). It is probably best + // to draw a sketch here. Note + // that we leave the number of + // vertices open at first, but + // then let the compiler compute + // this number afterwards. This + // reduces the possibility of + // having the dimension to large + // and leaving the last ones + // uninitialized. static const Point<2> vertices_1[] - = { Point<2> (-1., -1.), - Point<2> (-1./2, -1.), - Point<2> (0., -1.), - Point<2> (+1./2, -1.), - Point<2> (+1, -1.), - - Point<2> (-1., -1./2.), - Point<2> (-1./2, -1./2.), - Point<2> (0., -1./2.), - Point<2> (+1./2, -1./2.), - Point<2> (+1, -1./2.), - - Point<2> (-1., 0.), - Point<2> (-1./2, 0.), - Point<2> (+1./2, 0.), - Point<2> (+1, 0.), - - Point<2> (-1., 1./2.), - Point<2> (-1./2, 1./2.), - Point<2> (0., 1./2.), - Point<2> (+1./2, 1./2.), - Point<2> (+1, 1./2.), - - Point<2> (-1., 1.), - Point<2> (-1./2, 1.), - Point<2> (0., 1.), - Point<2> (+1./2, 1.), - Point<2> (+1, 1.) }; + = { Point<2> (-1., -1.), + Point<2> (-1./2, -1.), + Point<2> (0., -1.), + Point<2> (+1./2, -1.), + Point<2> (+1, -1.), + + Point<2> (-1., -1./2.), + Point<2> (-1./2, -1./2.), + Point<2> (0., -1./2.), + Point<2> (+1./2, -1./2.), + Point<2> (+1, -1./2.), + + Point<2> (-1., 0.), + Point<2> (-1./2, 0.), + Point<2> (+1./2, 0.), + Point<2> (+1, 0.), + + Point<2> (-1., 1./2.), + Point<2> (-1./2, 1./2.), + Point<2> (0., 1./2.), + Point<2> (+1./2, 1./2.), + Point<2> (+1, 1./2.), + + Point<2> (-1., 1.), + Point<2> (-1./2, 1.), + Point<2> (0., 1.), + Point<2> (+1./2, 1.), + Point<2> (+1, 1.) 
}; const unsigned int - n_vertices = sizeof(vertices_1) / sizeof(vertices_1[0]); + n_vertices = sizeof(vertices_1) / sizeof(vertices_1[0]); - // From this static list of - // vertices, we generate an STL - // vector of the vertices, as - // this is the data type the - // library wants to see. + // From this static list of + // vertices, we generate an STL + // vector of the vertices, as + // this is the data type the + // library wants to see. const std::vector > vertices (&vertices_1[0], - &vertices_1[n_vertices]); - - // Next, we have to define the - // cells and the vertices they - // contain. Here, we have 8 - // vertices, but leave the number - // open and let it be computed - // afterwards: + &vertices_1[n_vertices]); + + // Next, we have to define the + // cells and the vertices they + // contain. Here, we have 8 + // vertices, but leave the number + // open and let it be computed + // afterwards: static const int cell_vertices[][GeometryInfo::vertices_per_cell] - = {{0, 1, 5, 6}, - {1, 2, 6, 7}, - {2, 3, 7, 8}, - {3, 4, 8, 9}, - {5, 6, 10, 11}, - {8, 9, 12, 13}, - {10, 11, 14, 15}, - {12, 13, 17, 18}, - {14, 15, 19, 20}, - {15, 16, 20, 21}, - {16, 17, 21, 22}, - {17, 18, 22, 23}}; + = {{0, 1, 5, 6}, + {1, 2, 6, 7}, + {2, 3, 7, 8}, + {3, 4, 8, 9}, + {5, 6, 10, 11}, + {8, 9, 12, 13}, + {10, 11, 14, 15}, + {12, 13, 17, 18}, + {14, 15, 19, 20}, + {15, 16, 20, 21}, + {16, 17, 21, 22}, + {17, 18, 22, 23}}; const unsigned int - n_cells = sizeof(cell_vertices) / sizeof(cell_vertices[0]); - - // Again, we generate a C++ - // vector type from this, but - // this time by looping over the - // cells (yes, this is - // boring). Additionally, we set - // the material indicator to zero - // for all the cells: + n_cells = sizeof(cell_vertices) / sizeof(cell_vertices[0]); + + // Again, we generate a C++ + // vector type from this, but + // this time by looping over the + // cells (yes, this is + // boring). Additionally, we set + // the material indicator to zero + // for all the cells: std::vector > cells (n_cells, CellData()); for (unsigned int i=0; i::vertices_per_cell; - ++j) - cells[i].vertices[j] = cell_vertices[i][j]; - cells[i].material_id = 0; - } - - // Finally pass all this - // information to the library to - // generate a triangulation. The - // last parameter may be used to - // pass information about - // non-zero boundary indicators - // at certain faces of the - // triangulation to the library, - // but we don't want that here, - // so we give an empty object: + { + for (unsigned int j=0; + j::vertices_per_cell; + ++j) + cells[i].vertices[j] = cell_vertices[i][j]; + cells[i].material_id = 0; + } + + // Finally pass all this + // information to the library to + // generate a triangulation. 
The + // last parameter may be used to + // pass information about + // non-zero boundary indicators + // at certain faces of the + // triangulation to the library, + // but we don't want that here, + // so we give an empty object: coarse_grid.create_triangulation (vertices, - cells, - SubCellData()); + cells, + SubCellData()); - // And since we want that the - // evaluation point (3/4,3/4) in - // this example is a grid point, - // we refine once globally: + // And since we want that the + // evaluation point (3/4,3/4) in + // this example is a grid point, + // we refine once globally: coarse_grid.refine_global (1); } } - // @sect4{Discussion} - // - // As you have now read through this - // framework, you may be wondering - // why we have not chosen to - // implement the classes implementing - // a certain setup (like the - // CurvedRidges class) directly - // as classes derived from - // Data::SetUpBase. Indeed, we - // could have done very well so. The - // only reason is that then we would - // have to have member variables for - // the solution and right hand side - // classes in the CurvedRidges - // class, as well as member functions - // overloading the abstract functions - // of the base class giving access to - // these member variables. The - // SetUp class has the sole - // reason to relieve us from the need - // to reiterate these member - // variables and functions that would - // be necessary in all such - // classes. In some way, the template - // mechanism here only provides a way - // to have default implementations - // for a number of functions that - // depend on external quantities and - // can thus not be provided using - // normal virtual functions, at least - // not without the help of templates. - // - // However, there might be good - // reasons to actually implement - // classes derived from - // Data::SetUpBase, for example - // if the solution or right hand side - // classes require constructors that - // take arguments, which the - // Data::SetUpBase class cannot - // provide. In that case, subclassing - // is a worthwhile strategy. Other - // possibilities for special cases - // are to derive from - // Data::SetUp@ where - // SomeSetUp denotes a class, or - // even to explicitly specialize - // Data::SetUp@. The - // latter allows to transparently use - // the way the SetUp class is - // used for other set-ups, but with - // special actions taken for special - // arguments. - // - // A final observation favoring the - // approach taken here is the - // following: we have found numerous - // times that when starting a - // project, the number of parameters - // (usually boundary values, right - // hand side, coarse grid, just as - // here) was small, and the number of - // test cases was small as well. One - // then starts out by handcoding them - // into a number of switch - // statements. Over time, projects - // grow, and so does the number of - // test cases. The number of - // switch statements grows with - // that, and their length as well, - // and one starts to find ways to - // consider impossible examples where - // domains, boundary values, and - // right hand sides do not fit - // together any more, and starts - // loosing the overview over the - // whole structure. Encapsulating - // everything belonging to a certain - // test case into a structure of its - // own has proven worthwhile for - // this, as it keeps everything that - // belongs to one test case in one - // place. 
Furthermore, it allows to - // put these things all in one or - // more files that are only devoted - // to test cases and their data, - // without having to bring their - // actual implementation into contact - // with the rest of the program. - - - // @sect3{Dual functionals} - - // As with the other components of - // the program, we put everything we - // need to describe dual functionals - // into a namespace of its own, and - // define an abstract base class that - // provides the interface the class - // solving the dual problem needs for - // its work. - // - // We will then implement two such - // classes, for the evaluation of a - // point value and of the derivative - // of the solution at that point. For - // these functionals we already have - // the corresponding evaluation - // objects, so they are comlementary. + // @sect4{Discussion} + // + // As you have now read through this + // framework, you may be wondering + // why we have not chosen to + // implement the classes implementing + // a certain setup (like the + // CurvedRidges class) directly + // as classes derived from + // Data::SetUpBase. Indeed, we + // could have done very well so. The + // only reason is that then we would + // have to have member variables for + // the solution and right hand side + // classes in the CurvedRidges + // class, as well as member functions + // overloading the abstract functions + // of the base class giving access to + // these member variables. The + // SetUp class has the sole + // reason to relieve us from the need + // to reiterate these member + // variables and functions that would + // be necessary in all such + // classes. In some way, the template + // mechanism here only provides a way + // to have default implementations + // for a number of functions that + // depend on external quantities and + // can thus not be provided using + // normal virtual functions, at least + // not without the help of templates. + // + // However, there might be good + // reasons to actually implement + // classes derived from + // Data::SetUpBase, for example + // if the solution or right hand side + // classes require constructors that + // take arguments, which the + // Data::SetUpBase class cannot + // provide. In that case, subclassing + // is a worthwhile strategy. Other + // possibilities for special cases + // are to derive from + // Data::SetUp@ where + // SomeSetUp denotes a class, or + // even to explicitly specialize + // Data::SetUp@. The + // latter allows to transparently use + // the way the SetUp class is + // used for other set-ups, but with + // special actions taken for special + // arguments. + // + // A final observation favoring the + // approach taken here is the + // following: we have found numerous + // times that when starting a + // project, the number of parameters + // (usually boundary values, right + // hand side, coarse grid, just as + // here) was small, and the number of + // test cases was small as well. One + // then starts out by handcoding them + // into a number of switch + // statements. Over time, projects + // grow, and so does the number of + // test cases. The number of + // switch statements grows with + // that, and their length as well, + // and one starts to find ways to + // consider impossible examples where + // domains, boundary values, and + // right hand sides do not fit + // together any more, and starts + // loosing the overview over the + // whole structure. 
Encapsulating + // everything belonging to a certain + // test case into a structure of its + // own has proven worthwhile for + // this, as it keeps everything that + // belongs to one test case in one + // place. Furthermore, it allows to + // put these things all in one or + // more files that are only devoted + // to test cases and their data, + // without having to bring their + // actual implementation into contact + // with the rest of the program. + + + // @sect3{Dual functionals} + + // As with the other components of + // the program, we put everything we + // need to describe dual functionals + // into a namespace of its own, and + // define an abstract base class that + // provides the interface the class + // solving the dual problem needs for + // its work. + // + // We will then implement two such + // classes, for the evaluation of a + // point value and of the derivative + // of the solution at that point. For + // these functionals we already have + // the corresponding evaluation + // objects, so they are comlementary. namespace DualFunctional { - // @sect4{The DualFunctionalBase class} - - // First start with the base class - // for dual functionals. Since for - // linear problems the - // characteristics of the dual - // problem play a role only in the - // right hand side, we only need to - // provide for a function that - // assembles the right hand side - // for a given discretization: + // @sect4{The DualFunctionalBase class} + + // First start with the base class + // for dual functionals. Since for + // linear problems the + // characteristics of the dual + // problem play a role only in the + // right hand side, we only need to + // provide for a function that + // assembles the right hand side + // for a given discretization: template class DualFunctionalBase : public Subscriptor { public: - virtual - void - assemble_rhs (const DoFHandler &dof_handler, - Vector &rhs) const = 0; + virtual + void + assemble_rhs (const DoFHandler &dof_handler, + Vector &rhs) const = 0; }; - // @sect4{The PointValueEvaluation class} + // @sect4{The PointValueEvaluation class} - // As a first application, we - // consider the functional - // corresponding to the evaluation - // of the solution's value at a - // given point which again we - // assume to be a vertex. Apart - // from the constructor that takes - // and stores the evaluation point, - // this class consists only of the - // function that implements - // assembling the right hand side. + // As a first application, we + // consider the functional + // corresponding to the evaluation + // of the solution's value at a + // given point which again we + // assume to be a vertex. Apart + // from the constructor that takes + // and stores the evaluation point, + // this class consists only of the + // function that implements + // assembling the right hand side. 
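                                   // One remark before the class
                                   // itself: since the functionals
                                   // considered here are linear,
                                   // once a right hand side vector
                                   // with entries J(phi_i) has been
                                   // assembled, the value of the
                                   // functional applied to a finite
                                   // element function
                                   // u_h = sum_i U_i phi_i is
                                   // simply the scalar product of
                                   // that vector with the vector of
                                   // unknowns. As a hypothetical
                                   // snippet (not used anywhere in
                                   // this program):
    template <int dim>
    double
    evaluate_functional_sketch (const DualFunctionalBase<dim> &dual_functional,
                                const DoFHandler<dim>         &dof_handler,
                                const Vector<double>          &solution)
    {
      Vector<double> dual_rhs (dof_handler.n_dofs());
      dual_functional.assemble_rhs (dof_handler, dual_rhs);

                                   // J(u_h) = sum_i U_i J(phi_i):
      return dual_rhs * solution;
    }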
template class PointValueEvaluation : public DualFunctionalBase { public: - PointValueEvaluation (const Point &evaluation_point); + PointValueEvaluation (const Point &evaluation_point); - virtual - void - assemble_rhs (const DoFHandler &dof_handler, - Vector &rhs) const; + virtual + void + assemble_rhs (const DoFHandler &dof_handler, + Vector &rhs) const; - DeclException1 (ExcEvaluationPointNotFound, - Point, - << "The evaluation point " << arg1 - << " was not found among the vertices of the present grid."); + DeclException1 (ExcEvaluationPointNotFound, + Point, + << "The evaluation point " << arg1 + << " was not found among the vertices of the present grid."); protected: - const Point evaluation_point; + const Point evaluation_point; }; template PointValueEvaluation:: PointValueEvaluation (const Point &evaluation_point) - : - evaluation_point (evaluation_point) + : + evaluation_point (evaluation_point) {} - // As for doing the main purpose of - // the class, assembling the right - // hand side, let us first consider - // what is necessary: The right - // hand side of the dual problem is - // a vector of values J(phi_i), - // where J is the error functional, - // and phi_i is the i-th shape - // function. Here, J is the - // evaluation at the point x0, - // i.e. J(phi_i)=phi_i(x0). - // - // Now, we have assumed that the - // evaluation point is a - // vertex. Thus, for the usual - // finite elements we might be - // using in this program, we can - // take for granted that at such a - // point exactly one shape function - // is nonzero, and in particular - // has the value one. Thus, we set - // the right hand side vector to - // all-zeros, then seek for the - // shape function associated with - // that point and set the - // corresponding value of the right - // hand side vector to one: + // As for doing the main purpose of + // the class, assembling the right + // hand side, let us first consider + // what is necessary: The right + // hand side of the dual problem is + // a vector of values J(phi_i), + // where J is the error functional, + // and phi_i is the i-th shape + // function. Here, J is the + // evaluation at the point x0, + // i.e. J(phi_i)=phi_i(x0). + // + // Now, we have assumed that the + // evaluation point is a + // vertex. Thus, for the usual + // finite elements we might be + // using in this program, we can + // take for granted that at such a + // point exactly one shape function + // is nonzero, and in particular + // has the value one. Thus, we set + // the right hand side vector to + // all-zeros, then seek for the + // shape function associated with + // that point and set the + // corresponding value of the right + // hand side vector to one: template void PointValueEvaluation:: assemble_rhs (const DoFHandler &dof_handler, - Vector &rhs) const + Vector &rhs) const { - // So, first set everything to - // zeros... + // So, first set everything to + // zeros... 
rhs.reinit (dof_handler.n_dofs()); - // ...then loop over cells and - // find the evaluation point - // among the vertices (or very - // close to a vertex, which may - // happen due to floating point - // round-off): + // ...then loop over cells and + // find the evaluation point + // among the vertices (or very + // close to a vertex, which may + // happen due to floating point + // round-off): typename DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(), - endc = dof_handler.end(); + cell = dof_handler.begin_active(), + endc = dof_handler.end(); for (; cell!=endc; ++cell) - for (unsigned int vertex=0; - vertex::vertices_per_cell; - ++vertex) - if (cell->vertex(vertex).distance(evaluation_point) - < cell->diameter()*1e-8) - { - // Ok, found, so set - // corresponding entry, - // and leave function - // since we are finished: - rhs(cell->vertex_dof_index(vertex,0)) = 1; - return; - } - - // Finally, a sanity check: if we - // somehow got here, then we must - // have missed the evaluation - // point, so raise an exception - // unconditionally: + for (unsigned int vertex=0; + vertex::vertices_per_cell; + ++vertex) + if (cell->vertex(vertex).distance(evaluation_point) + < cell->diameter()*1e-8) + { + // Ok, found, so set + // corresponding entry, + // and leave function + // since we are finished: + rhs(cell->vertex_dof_index(vertex,0)) = 1; + return; + } + + // Finally, a sanity check: if we + // somehow got here, then we must + // have missed the evaluation + // point, so raise an exception + // unconditionally: AssertThrow (false, ExcEvaluationPointNotFound(evaluation_point)); } - // @sect4{The PointXDerivativeEvaluation class} + // @sect4{The PointXDerivativeEvaluation class} - // As second application, we again - // consider the evaluation of the - // x-derivative of the solution at - // one point. Again, the - // declaration of the class, and - // the implementation of its - // constructor is not too - // interesting: + // As second application, we again + // consider the evaluation of the + // x-derivative of the solution at + // one point. Again, the + // declaration of the class, and + // the implementation of its + // constructor is not too + // interesting: template class PointXDerivativeEvaluation : public DualFunctionalBase { public: - PointXDerivativeEvaluation (const Point &evaluation_point); + PointXDerivativeEvaluation (const Point &evaluation_point); - virtual - void - assemble_rhs (const DoFHandler &dof_handler, - Vector &rhs) const; + virtual + void + assemble_rhs (const DoFHandler &dof_handler, + Vector &rhs) const; - DeclException1 (ExcEvaluationPointNotFound, - Point, - << "The evaluation point " << arg1 - << " was not found among the vertices of the present grid."); + DeclException1 (ExcEvaluationPointNotFound, + Point, + << "The evaluation point " << arg1 + << " was not found among the vertices of the present grid."); protected: - const Point evaluation_point; + const Point evaluation_point; }; template PointXDerivativeEvaluation:: PointXDerivativeEvaluation (const Point &evaluation_point) - : - evaluation_point (evaluation_point) + : + evaluation_point (evaluation_point) {} - // What is interesting is the - // implementation of this - // functional: here, - // J(phi_i)=d/dx phi_i(x0). - // - // We could, as in the - // implementation of the respective - // evaluation object take the - // average of the gradients of each - // shape function phi_i at this - // evaluation point. 
However, we - // take a slightly different - // approach: we simply take the - // average over all cells that - // surround this point. The - // question which cells - // surrounds the evaluation - // point is made dependent on the - // mesh width by including those - // cells for which the distance of - // the cell's midpoint to the - // evaluation point is less than - // the cell's diameter. - // - // Taking the average of the - // gradient over the area/volume of - // these cells leads to a dual - // solution which is very close to - // the one which would result from - // the point evaluation of the - // gradient. It is simple to - // justify theoretically that this - // does not change the method - // significantly. + // What is interesting is the + // implementation of this + // functional: here, + // J(phi_i)=d/dx phi_i(x0). + // + // We could, as in the + // implementation of the respective + // evaluation object take the + // average of the gradients of each + // shape function phi_i at this + // evaluation point. However, we + // take a slightly different + // approach: we simply take the + // average over all cells that + // surround this point. The + // question which cells + // surrounds the evaluation + // point is made dependent on the + // mesh width by including those + // cells for which the distance of + // the cell's midpoint to the + // evaluation point is less than + // the cell's diameter. + // + // Taking the average of the + // gradient over the area/volume of + // these cells leads to a dual + // solution which is very close to + // the one which would result from + // the point evaluation of the + // gradient. It is simple to + // justify theoretically that this + // does not change the method + // significantly. template void PointXDerivativeEvaluation:: assemble_rhs (const DoFHandler &dof_handler, - Vector &rhs) const + Vector &rhs) const { - // Again, first set all entries - // to zero: + // Again, first set all entries + // to zero: rhs.reinit (dof_handler.n_dofs()); - // Initialize a FEValues - // object with a quadrature - // formula, have abbreviations - // for the number of quadrature - // points and shape functions... + // Initialize a FEValues + // object with a quadrature + // formula, have abbreviations + // for the number of quadrature + // points and shape functions... 
QGauss quadrature(4); FEValues fe_values (dof_handler.get_fe(), quadrature, - update_gradients | - update_quadrature_points | - update_JxW_values); + update_gradients | + update_quadrature_points | + update_JxW_values); const unsigned int n_q_points = fe_values.n_quadrature_points; const unsigned int dofs_per_cell = dof_handler.get_fe().dofs_per_cell; - // ...and have two objects that - // are used to store the global - // indices of the degrees of - // freedom on a cell, and the - // values of the gradients of the - // shape functions at the - // quadrature points: + // ...and have two objects that + // are used to store the global + // indices of the degrees of + // freedom on a cell, and the + // values of the gradients of the + // shape functions at the + // quadrature points: Vector cell_rhs (dofs_per_cell); std::vector local_dof_indices (dofs_per_cell); - // Finally have a variable in - // which we will sum up the - // area/volume of the cells over - // which we integrate, by - // integrating the unit functions - // on these cells: + // Finally have a variable in + // which we will sum up the + // area/volume of the cells over + // which we integrate, by + // integrating the unit functions + // on these cells: double total_volume = 0; - // Then start the loop over all - // cells, and select those cells - // which are close enough to the - // evaluation point: + // Then start the loop over all + // cells, and select those cells + // which are close enough to the + // evaluation point: typename DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(), - endc = dof_handler.end(); + cell = dof_handler.begin_active(), + endc = dof_handler.end(); for (; cell!=endc; ++cell) - if (cell->center().distance(evaluation_point) <= - cell->diameter()) - { - // If we have found such a - // cell, then initialize - // the FEValues object - // and integrate the - // x-component of the - // gradient of each shape - // function, as well as the - // unit function for the - // total area/volume. - fe_values.reinit (cell); - cell_rhs = 0; - - for (unsigned int q=0; qget_dof_indices (local_dof_indices); - for (unsigned int i=0; icenter().distance(evaluation_point) <= + cell->diameter()) + { + // If we have found such a + // cell, then initialize + // the FEValues object + // and integrate the + // x-component of the + // gradient of each shape + // function, as well as the + // unit function for the + // total area/volume. + fe_values.reinit (cell); + cell_rhs = 0; + + for (unsigned int q=0; qget_dof_indices (local_dof_indices); + for (unsigned int i=0; i 0, - ExcEvaluationPointNotFound(evaluation_point)); - - // Finally, we have by now only - // integrated the gradients of - // the shape functions, not - // taking their mean value. We - // fix this by dividing by the - // measure of the volume over - // which we have integrated: + ExcEvaluationPointNotFound(evaluation_point)); + + // Finally, we have by now only + // integrated the gradients of + // the shape functions, not + // taking their mean value. We + // fix this by dividing by the + // measure of the volume over + // which we have integrated: rhs.scale (1./total_volume); } @@ -2123,79 +2123,79 @@ namespace Step14 } - // @sect3{Extending the LaplaceSolver namespace} + // @sect3{Extending the LaplaceSolver namespace} namespace LaplaceSolver { - // @sect4{The DualSolver class} - - // In the same way as the - // PrimalSolver class above, we - // now implement a - // DualSolver. 
It has all the - // same features, the only - // difference is that it does not - // take a function object denoting - // a right hand side object, but - // now takes a - // DualFunctionalBase object - // that will assemble the right - // hand side vector of the dual - // problem. The rest of the class - // is rather trivial. - // - // Since both primal and dual - // solver will use the same - // triangulation, but different - // discretizations, it now becomes - // clear why we have made the - // Base class a virtual one: - // since the final class will be - // derived from both - // PrimalSolver as well as - // DualSolver, it would have - // two Base instances, would we - // not have marked the inheritance - // as virtual. Since in many - // applications the base class - // would store much more - // information than just the - // triangulation which needs to be - // shared between primal and dual - // solvers, we do not usually want - // to use two such base classes. + // @sect4{The DualSolver class} + + // In the same way as the + // PrimalSolver class above, we + // now implement a + // DualSolver. It has all the + // same features, the only + // difference is that it does not + // take a function object denoting + // a right hand side object, but + // now takes a + // DualFunctionalBase object + // that will assemble the right + // hand side vector of the dual + // problem. The rest of the class + // is rather trivial. + // + // Since both primal and dual + // solver will use the same + // triangulation, but different + // discretizations, it now becomes + // clear why we have made the + // Base class a virtual one: + // since the final class will be + // derived from both + // PrimalSolver as well as + // DualSolver, it would have + // two Base instances, would we + // not have marked the inheritance + // as virtual. Since in many + // applications the base class + // would store much more + // information than just the + // triangulation which needs to be + // shared between primal and dual + // solvers, we do not usually want + // to use two such base classes. 
template class DualSolver : public Solver { public: - DualSolver (Triangulation &triangulation, - const FiniteElement &fe, - const Quadrature &quadrature, - const Quadrature &face_quadrature, - const DualFunctional::DualFunctionalBase &dual_functional); + DualSolver (Triangulation &triangulation, + const FiniteElement &fe, + const Quadrature &quadrature, + const Quadrature &face_quadrature, + const DualFunctional::DualFunctionalBase &dual_functional); - virtual - void - solve_problem (); + virtual + void + solve_problem (); - virtual - unsigned int - n_dofs () const; + virtual + unsigned int + n_dofs () const; - virtual - void - postprocess (const Evaluation::EvaluationBase &postprocessor) const; + virtual + void + postprocess (const Evaluation::EvaluationBase &postprocessor) const; protected: - const SmartPointer > dual_functional; - virtual void assemble_rhs (Vector &rhs) const; + const SmartPointer > dual_functional; + virtual void assemble_rhs (Vector &rhs) const; - static const ZeroFunction boundary_values; + static const ZeroFunction boundary_values; - // Same as above -- make a - // derived class a friend of - // this one: - friend class WeightedResidual; + // Same as above -- make a + // derived class a friend of + // this one: + friend class WeightedResidual; }; template @@ -2204,16 +2204,16 @@ namespace Step14 template DualSolver:: DualSolver (Triangulation &triangulation, - const FiniteElement &fe, - const Quadrature &quadrature, - const Quadrature &face_quadrature, - const DualFunctional::DualFunctionalBase &dual_functional) - : - Base (triangulation), - Solver (triangulation, fe, - quadrature, face_quadrature, - boundary_values), - dual_functional (&dual_functional) + const FiniteElement &fe, + const Quadrature &quadrature, + const Quadrature &face_quadrature, + const DualFunctional::DualFunctionalBase &dual_functional) + : + Base (triangulation), + Solver (triangulation, fe, + quadrature, face_quadrature, + boundary_values), + dual_functional (&dual_functional) {} @@ -2253,307 +2253,307 @@ namespace Step14 } - // @sect4{The WeightedResidual class} - - // Here finally comes the main - // class of this program, the one - // that implements the dual - // weighted residual error - // estimator. It joins the primal - // and dual solver classes to use - // them for the computation of - // primal and dual solutions, and - // implements the error - // representation formula for use - // as error estimate and mesh - // refinement. - // - // The first few of the functions - // of this class are mostly - // overriders of the respective - // functions of the base class: + // @sect4{The WeightedResidual class} + + // Here finally comes the main + // class of this program, the one + // that implements the dual + // weighted residual error + // estimator. It joins the primal + // and dual solver classes to use + // them for the computation of + // primal and dual solutions, and + // implements the error + // representation formula for use + // as error estimate and mesh + // refinement. 
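    // For reference, the error representation formula that this class
    // evaluates reads, schematically and up to the sign convention used
    // for the jump terms further down,
    // @f[
    //   J(u) - J(u_h)
    //   \approx \sum_K \left\{ (f + \Delta u_h,\; z - I_h z)_K
    //   - \tfrac 12 \bigl([\partial_n u_h],\; z - I_h z\bigr)_{\partial K}
    //   \right\},
    // @f]
    // with the cell residual $f + \Delta u_h$, the jump
    // $[\partial_n u_h]$ of the normal derivative across interior faces,
    // and the dual weights $z - I_h z$ computed from the dual solution
    // $z$ and its interpolation $I_h z$ into the primal finite element
    // space.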
+ // + // The first few of the functions + // of this class are mostly + // overriders of the respective + // functions of the base class: template class WeightedResidual : public PrimalSolver, - public DualSolver + public DualSolver { public: - WeightedResidual (Triangulation &coarse_grid, - const FiniteElement &primal_fe, - const FiniteElement &dual_fe, - const Quadrature &quadrature, - const Quadrature &face_quadrature, - const Function &rhs_function, - const Function &boundary_values, - const DualFunctional::DualFunctionalBase &dual_functional); + WeightedResidual (Triangulation &coarse_grid, + const FiniteElement &primal_fe, + const FiniteElement &dual_fe, + const Quadrature &quadrature, + const Quadrature &face_quadrature, + const Function &rhs_function, + const Function &boundary_values, + const DualFunctional::DualFunctionalBase &dual_functional); - virtual - void - solve_problem (); + virtual + void + solve_problem (); - virtual - void - postprocess (const Evaluation::EvaluationBase &postprocessor) const; + virtual + void + postprocess (const Evaluation::EvaluationBase &postprocessor) const; - virtual - unsigned int - n_dofs () const; + virtual + unsigned int + n_dofs () const; - virtual void refine_grid (); + virtual void refine_grid (); - virtual - void - output_solution () const; + virtual + void + output_solution () const; private: - // In the private section, we - // have two functions that are - // used to call the - // solve_problem functions - // of the primal and dual base - // classes. These two functions - // will be called in parallel - // by the solve_problem - // function of this class. - void solve_primal_problem (); - void solve_dual_problem (); - // Then declare abbreviations - // for active cell iterators, - // to avoid that we have to - // write this lengthy name - // over and over again: - - typedef - typename DoFHandler::active_cell_iterator - active_cell_iterator; - - // Next, declare a data type - // that we will us to store the - // contribution of faces to the - // error estimator. The idea is - // that we can compute the face - // terms from each of the two - // cells to this face, as they - // are the same when viewed - // from both sides. What we - // will do is to compute them - // only once, based on some - // rules explained below which - // of the two adjacent cells - // will be in charge to do - // so. We then store the - // contribution of each face in - // a map mapping faces to their - // values, and only collect the - // contributions for each cell - // by looping over the cells a - // second time and grabbing the - // values from the map. - // - // The data type of this map is - // declared here: - typedef - typename std::map::face_iterator,double> - FaceIntegrals; - - // In the computation of the - // error estimates on cells and - // faces, we need a number of - // helper objects, such as - // FEValues and - // FEFaceValues functions, - // but also temporary objects - // storing the values and - // gradients of primal and dual - // solutions, for - // example. These fields are - // needed in the three - // functions that do the - // integration on cells, and - // regular and irregular faces, - // respectively. - // - // There are three reasonable - // ways to provide these - // fields: first, as local - // variables in the function - // that needs them; second, as - // member variables of this - // class; third, as arguments - // passed to that function. 
- // - // These three alternatives all - // have drawbacks: the third - // that their number is not - // neglectable and would make - // calling these functions a - // lengthy enterprise. The - // second has the drawback that - // it disallows - // parallelization, since the - // threads that will compute - // the error estimate have to - // have their own copies of - // these variables each, so - // member variables of the - // enclosing class will not - // work. The first approach, - // although straightforward, - // has a subtle but important - // drawback: we will call these - // functions over and over - // again, many thousands of times - // maybe; it has now turned out - // that allocating vectors and - // other objects that need - // memory from the heap is an - // expensive business in terms - // of run-time, since memory - // allocation is expensive when - // several threads are - // involved. In our experience, - // more than 20 per cent of the - // total run time of error - // estimation functions are due - // to memory allocation, if - // done on a per-call level. It - // is thus significantly better - // to allocate the memory only - // once, and recycle the - // objects as often as - // possible. - // - // What to do? Our answer is to - // use a variant of the third - // strategy, namely generating - // these variables once in the - // main function of each - // thread, and passing them - // down to the functions that - // do the actual work. To avoid - // that we have to give these - // functions a dozen or so - // arguments, we pack all these - // variables into two - // structures, one which is - // used for the computations on - // cells, the other doing them - // on the faces. Instead of - // many individual objects, we - // will then only pass one such - // object to these functions, - // making their calling - // sequence simpler. - struct CellData - { - FEValues fe_values; - const SmartPointer > right_hand_side; - - std::vector cell_residual; - std::vector rhs_values; - std::vector dual_weights; - std::vector cell_laplacians; - CellData (const FiniteElement &fe, - const Quadrature &quadrature, - const Function &right_hand_side); - }; - - struct FaceData - { - FEFaceValues fe_face_values_cell; - FEFaceValues fe_face_values_neighbor; - FESubfaceValues fe_subface_values_cell; - - std::vector jump_residual; - std::vector dual_weights; - typename std::vector > cell_grads; - typename std::vector > neighbor_grads; - FaceData (const FiniteElement &fe, - const Quadrature &face_quadrature); - }; - - - - // Regarding the evaluation of - // the error estimator, we have - // two driver functions that do - // this: the first is called to - // generate the cell-wise - // estimates, and splits up the - // task in a number of threads - // each of which work on a - // subset of the cells. The - // first function will run the - // second for each of these - // threads: - void estimate_error (Vector &error_indicators) const; - - void estimate_some (const Vector &primal_solution, - const Vector &dual_weights, - const unsigned int n_threads, - const unsigned int this_thread, - Vector &error_indicators, - FaceIntegrals &face_integrals) const; - - // Then we have functions that - // do the actual integration of - // the error representation - // formula. 
They will treat the - // terms on the cell interiors, - // on those faces that have no - // hanging nodes, and on those - // faces with hanging nodes, - // respectively: - void - integrate_over_cell (const active_cell_iterator &cell, - const unsigned int cell_index, - const Vector &primal_solution, - const Vector &dual_weights, - CellData &cell_data, - Vector &error_indicators) const; - - void - integrate_over_regular_face (const active_cell_iterator &cell, - const unsigned int face_no, - const Vector &primal_solution, - const Vector &dual_weights, - FaceData &face_data, - FaceIntegrals &face_integrals) const; - void - integrate_over_irregular_face (const active_cell_iterator &cell, - const unsigned int face_no, - const Vector &primal_solution, - const Vector &dual_weights, - FaceData &face_data, - FaceIntegrals &face_integrals) const; + // In the private section, we + // have two functions that are + // used to call the + // solve_problem functions + // of the primal and dual base + // classes. These two functions + // will be called in parallel + // by the solve_problem + // function of this class. + void solve_primal_problem (); + void solve_dual_problem (); + // Then declare abbreviations + // for active cell iterators, + // to avoid that we have to + // write this lengthy name + // over and over again: + + typedef + typename DoFHandler::active_cell_iterator + active_cell_iterator; + + // Next, declare a data type + // that we will us to store the + // contribution of faces to the + // error estimator. The idea is + // that we can compute the face + // terms from each of the two + // cells to this face, as they + // are the same when viewed + // from both sides. What we + // will do is to compute them + // only once, based on some + // rules explained below which + // of the two adjacent cells + // will be in charge to do + // so. We then store the + // contribution of each face in + // a map mapping faces to their + // values, and only collect the + // contributions for each cell + // by looping over the cells a + // second time and grabbing the + // values from the map. + // + // The data type of this map is + // declared here: + typedef + typename std::map::face_iterator,double> + FaceIntegrals; + + // In the computation of the + // error estimates on cells and + // faces, we need a number of + // helper objects, such as + // FEValues and + // FEFaceValues functions, + // but also temporary objects + // storing the values and + // gradients of primal and dual + // solutions, for + // example. These fields are + // needed in the three + // functions that do the + // integration on cells, and + // regular and irregular faces, + // respectively. + // + // There are three reasonable + // ways to provide these + // fields: first, as local + // variables in the function + // that needs them; second, as + // member variables of this + // class; third, as arguments + // passed to that function. + // + // These three alternatives all + // have drawbacks: the third + // that their number is not + // neglectable and would make + // calling these functions a + // lengthy enterprise. The + // second has the drawback that + // it disallows + // parallelization, since the + // threads that will compute + // the error estimate have to + // have their own copies of + // these variables each, so + // member variables of the + // enclosing class will not + // work. 
The first approach, + // although straightforward, + // has a subtle but important + // drawback: we will call these + // functions over and over + // again, many thousands of times + // maybe; it has now turned out + // that allocating vectors and + // other objects that need + // memory from the heap is an + // expensive business in terms + // of run-time, since memory + // allocation is expensive when + // several threads are + // involved. In our experience, + // more than 20 per cent of the + // total run time of error + // estimation functions are due + // to memory allocation, if + // done on a per-call level. It + // is thus significantly better + // to allocate the memory only + // once, and recycle the + // objects as often as + // possible. + // + // What to do? Our answer is to + // use a variant of the third + // strategy, namely generating + // these variables once in the + // main function of each + // thread, and passing them + // down to the functions that + // do the actual work. To avoid + // that we have to give these + // functions a dozen or so + // arguments, we pack all these + // variables into two + // structures, one which is + // used for the computations on + // cells, the other doing them + // on the faces. Instead of + // many individual objects, we + // will then only pass one such + // object to these functions, + // making their calling + // sequence simpler. + struct CellData + { + FEValues fe_values; + const SmartPointer > right_hand_side; + + std::vector cell_residual; + std::vector rhs_values; + std::vector dual_weights; + std::vector cell_laplacians; + CellData (const FiniteElement &fe, + const Quadrature &quadrature, + const Function &right_hand_side); + }; + + struct FaceData + { + FEFaceValues fe_face_values_cell; + FEFaceValues fe_face_values_neighbor; + FESubfaceValues fe_subface_values_cell; + + std::vector jump_residual; + std::vector dual_weights; + typename std::vector > cell_grads; + typename std::vector > neighbor_grads; + FaceData (const FiniteElement &fe, + const Quadrature &face_quadrature); + }; + + + + // Regarding the evaluation of + // the error estimator, we have + // two driver functions that do + // this: the first is called to + // generate the cell-wise + // estimates, and splits up the + // task in a number of threads + // each of which work on a + // subset of the cells. The + // first function will run the + // second for each of these + // threads: + void estimate_error (Vector &error_indicators) const; + + void estimate_some (const Vector &primal_solution, + const Vector &dual_weights, + const unsigned int n_threads, + const unsigned int this_thread, + Vector &error_indicators, + FaceIntegrals &face_integrals) const; + + // Then we have functions that + // do the actual integration of + // the error representation + // formula. 
They will treat the + // terms on the cell interiors, + // on those faces that have no + // hanging nodes, and on those + // faces with hanging nodes, + // respectively: + void + integrate_over_cell (const active_cell_iterator &cell, + const unsigned int cell_index, + const Vector &primal_solution, + const Vector &dual_weights, + CellData &cell_data, + Vector &error_indicators) const; + + void + integrate_over_regular_face (const active_cell_iterator &cell, + const unsigned int face_no, + const Vector &primal_solution, + const Vector &dual_weights, + FaceData &face_data, + FaceIntegrals &face_integrals) const; + void + integrate_over_irregular_face (const active_cell_iterator &cell, + const unsigned int face_no, + const Vector &primal_solution, + const Vector &dual_weights, + FaceData &face_data, + FaceIntegrals &face_integrals) const; }; - // In the implementation of this - // class, we first have the - // constructors of the CellData - // and FaceData member classes, - // and the WeightedResidual - // constructor. They only - // initialize fields to their - // correct lengths, so we do not - // have to discuss them to length. + // In the implementation of this + // class, we first have the + // constructors of the CellData + // and FaceData member classes, + // and the WeightedResidual + // constructor. They only + // initialize fields to their + // correct lengths, so we do not + // have to discuss them to length. template WeightedResidual::CellData:: CellData (const FiniteElement &fe, - const Quadrature &quadrature, - const Function &right_hand_side) - : - fe_values (fe, quadrature, - update_values | - update_hessians | - update_quadrature_points | - update_JxW_values), - right_hand_side (&right_hand_side), - cell_residual (quadrature.size()), - rhs_values (quadrature.size()), - dual_weights (quadrature.size()), - cell_laplacians (quadrature.size()) + const Quadrature &quadrature, + const Function &right_hand_side) + : + fe_values (fe, quadrature, + update_values | + update_hessians | + update_quadrature_points | + update_JxW_values), + right_hand_side (&right_hand_side), + cell_residual (quadrature.size()), + rhs_values (quadrature.size()), + dual_weights (quadrature.size()), + cell_laplacians (quadrature.size()) {} @@ -2561,23 +2561,23 @@ namespace Step14 template WeightedResidual::FaceData:: FaceData (const FiniteElement &fe, - const Quadrature &face_quadrature) - : - fe_face_values_cell (fe, face_quadrature, - update_values | - update_gradients | - update_JxW_values | - update_normal_vectors), - fe_face_values_neighbor (fe, face_quadrature, - update_values | - update_gradients | - update_JxW_values | - update_normal_vectors), - fe_subface_values_cell (fe, face_quadrature, - update_gradients) + const Quadrature &face_quadrature) + : + fe_face_values_cell (fe, face_quadrature, + update_values | + update_gradients | + update_JxW_values | + update_normal_vectors), + fe_face_values_neighbor (fe, face_quadrature, + update_values | + update_gradients | + update_JxW_values | + update_normal_vectors), + fe_subface_values_cell (fe, face_quadrature, + update_gradients) { const unsigned int n_face_q_points - = face_quadrature.size(); + = face_quadrature.size(); jump_residual.resize(n_face_q_points); dual_weights.resize(n_face_q_points); @@ -2591,42 +2591,42 @@ namespace Step14 template WeightedResidual:: WeightedResidual (Triangulation &coarse_grid, - const FiniteElement &primal_fe, - const FiniteElement &dual_fe, - const Quadrature &quadrature, - const Quadrature &face_quadrature, - const 
Function &rhs_function, - const Function &bv, - const DualFunctional::DualFunctionalBase &dual_functional) - : - Base (coarse_grid), - PrimalSolver (coarse_grid, primal_fe, - quadrature, face_quadrature, - rhs_function, bv), - DualSolver (coarse_grid, dual_fe, - quadrature, face_quadrature, - dual_functional) + const FiniteElement &primal_fe, + const FiniteElement &dual_fe, + const Quadrature &quadrature, + const Quadrature &face_quadrature, + const Function &rhs_function, + const Function &bv, + const DualFunctional::DualFunctionalBase &dual_functional) + : + Base (coarse_grid), + PrimalSolver (coarse_grid, primal_fe, + quadrature, face_quadrature, + rhs_function, bv), + DualSolver (coarse_grid, dual_fe, + quadrature, face_quadrature, + dual_functional) {} - // The next five functions are - // boring, as they simply relay - // their work to the base - // classes. The first calls the - // primal and dual solvers in - // parallel, while postprocessing - // the solution and retrieving the - // number of degrees of freedom is - // done by the primal class. + // The next five functions are + // boring, as they simply relay + // their work to the base + // classes. The first calls the + // primal and dual solvers in + // parallel, while postprocessing + // the solution and retrieving the + // number of degrees of freedom is + // done by the primal class. template void WeightedResidual::solve_problem () { Threads::ThreadGroup<> threads; threads += Threads::new_thread (&WeightedResidual::solve_primal_problem, - *this); + *this); threads += Threads::new_thread (&WeightedResidual::solve_dual_problem, - *this); + *this); threads.join_all (); } @@ -2664,135 +2664,135 @@ namespace Step14 - // Now, it is becoming more - // interesting: the refine_grid - // function asks the error - // estimator to compute the - // cell-wise error indicators, then - // uses their absolute values for - // mesh refinement. + // Now, it is becoming more + // interesting: the refine_grid + // function asks the error + // estimator to compute the + // cell-wise error indicators, then + // uses their absolute values for + // mesh refinement. template void WeightedResidual::refine_grid () { - // First call the function that - // computes the cell-wise and - // global error: + // First call the function that + // computes the cell-wise and + // global error: Vector error_indicators (this->triangulation->n_active_cells()); estimate_error (error_indicators); - // Then note that marking cells - // for refinement or coarsening - // only works if all indicators - // are positive, to allow their - // comparison. Thus, drop the - // signs on all these indicators: + // Then note that marking cells + // for refinement or coarsening + // only works if all indicators + // are positive, to allow their + // comparison. Thus, drop the + // signs on all these indicators: for (Vector::iterator i=error_indicators.begin(); - i != error_indicators.end(); ++i) - *i = std::fabs (*i); - - // Finally, we can select between - // different strategies for - // refinement. The default here - // is to refine those cells with - // the largest error indicators - // that make up for a total of 80 - // per cent of the error, while - // we coarsen those with the - // smallest indicators that make - // up for the bottom 2 per cent - // of the error. + i != error_indicators.end(); ++i) + *i = std::fabs (*i); + + // Finally, we can select between + // different strategies for + // refinement. 
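      // The `fixed fraction' strategy that the following lines describe
      // and then invoke through
      // GridRefinement::refine_and_coarsen_fixed_fraction can be pictured
      // with a small standalone sketch. It uses plain C++ with made-up
      // indicator values; the library's implementation is more careful
      // about ties and about the coarsening side, so this only shows the
      // idea.

      #include <algorithm>
      #include <functional>
      #include <iostream>
      #include <numeric>
      #include <vector>

      int main ()
      {
        // Hypothetical per-cell error indicators, already non-negative.
        const double values[] = { 0.9, 0.05, 0.3, 0.02, 0.6, 0.01 };
        std::vector<double> indicators (values, values + 6);

        // Sort a copy in descending order and find the smallest indicator
        // that still has to be refined so that the flagged cells account
        // for 80 per cent of the total error.
        std::vector<double> sorted (indicators);
        std::sort (sorted.begin(), sorted.end(), std::greater<double>());

        const double total = std::accumulate (sorted.begin(), sorted.end(), 0.);
        double accumulated = 0;
        double threshold   = sorted.back();
        for (unsigned int i=0; i<sorted.size(); ++i)
          {
            accumulated += sorted[i];
            if (accumulated >= 0.8*total)
              {
                threshold = sorted[i];
                break;
              }
          }

        // Cells at or above the threshold would be flagged for
        // refinement; the smallest ones making up the bottom 2 per cent
        // of the error would be coarsened.
        for (unsigned int cell=0; cell<indicators.size(); ++cell)
          if (indicators[cell] >= threshold)
            std::cout << "refine cell " << cell << std::endl;

        return 0;
      }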
The default here + // is to refine those cells with + // the largest error indicators + // that make up for a total of 80 + // per cent of the error, while + // we coarsen those with the + // smallest indicators that make + // up for the bottom 2 per cent + // of the error. GridRefinement::refine_and_coarsen_fixed_fraction (*this->triangulation, - error_indicators, - 0.8, 0.02); + error_indicators, + 0.8, 0.02); this->triangulation->execute_coarsening_and_refinement (); } - // Since we want to output both the - // primal and the dual solution, we - // overload the output_solution - // function. The only interesting - // feature of this function is that - // the primal and dual solutions - // are defined on different finite - // element spaces, which is not the - // format the DataOut class - // expects. Thus, we have to - // transfer them to a common finite - // element space. Since we want the - // solutions only to see them - // qualitatively, we contend - // ourselves with interpolating the - // dual solution to the (smaller) - // primal space. For the - // interpolation, there is a - // library function, that takes a - // ConstraintMatrix object - // including the hanging node - // constraints. The rest is - // standard. - // - // There is, however, one - // work-around worth mentioning: in - // this function, as in a couple of - // following ones, we have to - // access the DoFHandler - // objects and solutions of both - // the primal as well as of the - // dual solver. Since these are - // members of the Solver base - // class which exists twice in the - // class hierarchy leading to the - // present class (once as base - // class of the PrimalSolver - // class, once as base class of the - // DualSolver class), we have - // to disambiguate accesses to them - // by telling the compiler a member - // of which of these two instances - // we want to access. The way to do - // this would be identify the - // member by pointing a path - // through the class hierarchy - // which disambiguates the base - // class, for example writing - // PrimalSolver::dof_handler to - // denote the member variable - // dof_handler from the - // Solver base class of the - // PrimalSolver - // class. Unfortunately, this - // confuses gcc's version 2.96 (a - // version that was intended as a - // development snapshot, but - // delivered as system compiler by - // Red Hat in their 7.x releases) - // so much that it bails out and - // refuses to compile the code. - // - // Thus, we have to work around - // this problem. We do this by - // introducing references to the - // PrimalSolver and - // DualSolver components of the - // WeightedResidual object at - // the beginning of the - // function. Since each of these - // has an unambiguous base class - // Solver, we can access the - // member variables we want through - // these references. However, we - // are now accessing protected - // member variables of these - // classes through a pointer other - // than the this pointer (in - // fact, this is of course the - // this pointer, but not - // explicitly). This finally is the - // reason why we had to declare the - // present class a friend of the - // classes we so access. + // Since we want to output both the + // primal and the dual solution, we + // overload the output_solution + // function. The only interesting + // feature of this function is that + // the primal and dual solutions + // are defined on different finite + // element spaces, which is not the + // format the DataOut class + // expects. 
Thus, we have to + // transfer them to a common finite + // element space. Since we want the + // solutions only to see them + // qualitatively, we contend + // ourselves with interpolating the + // dual solution to the (smaller) + // primal space. For the + // interpolation, there is a + // library function, that takes a + // ConstraintMatrix object + // including the hanging node + // constraints. The rest is + // standard. + // + // There is, however, one + // work-around worth mentioning: in + // this function, as in a couple of + // following ones, we have to + // access the DoFHandler + // objects and solutions of both + // the primal as well as of the + // dual solver. Since these are + // members of the Solver base + // class which exists twice in the + // class hierarchy leading to the + // present class (once as base + // class of the PrimalSolver + // class, once as base class of the + // DualSolver class), we have + // to disambiguate accesses to them + // by telling the compiler a member + // of which of these two instances + // we want to access. The way to do + // this would be identify the + // member by pointing a path + // through the class hierarchy + // which disambiguates the base + // class, for example writing + // PrimalSolver::dof_handler to + // denote the member variable + // dof_handler from the + // Solver base class of the + // PrimalSolver + // class. Unfortunately, this + // confuses gcc's version 2.96 (a + // version that was intended as a + // development snapshot, but + // delivered as system compiler by + // Red Hat in their 7.x releases) + // so much that it bails out and + // refuses to compile the code. + // + // Thus, we have to work around + // this problem. We do this by + // introducing references to the + // PrimalSolver and + // DualSolver components of the + // WeightedResidual object at + // the beginning of the + // function. Since each of these + // has an unambiguous base class + // Solver, we can access the + // member variables we want through + // these references. However, we + // are now accessing protected + // member variables of these + // classes through a pointer other + // than the this pointer (in + // fact, this is of course the + // this pointer, but not + // explicitly). This finally is the + // reason why we had to declare the + // present class a friend of the + // classes we so access. template void WeightedResidual::output_solution () const @@ -2802,52 +2802,52 @@ namespace Step14 ConstraintMatrix primal_hanging_node_constraints; DoFTools::make_hanging_node_constraints (primal_solver.dof_handler, - primal_hanging_node_constraints); + primal_hanging_node_constraints); primal_hanging_node_constraints.close(); Vector dual_solution (primal_solver.dof_handler.n_dofs()); FETools::interpolate (dual_solver.dof_handler, - dual_solver.solution, - primal_solver.dof_handler, - primal_hanging_node_constraints, - dual_solution); + dual_solver.solution, + primal_solver.dof_handler, + primal_hanging_node_constraints, + dual_solution); DataOut data_out; data_out.attach_dof_handler (primal_solver.dof_handler); - // Add the data vectors for which - // we want output. Add them both, - // the DataOut functions can - // handle as many data vectors as - // you wish to write to output: + // Add the data vectors for which + // we want output. 
Add them both, + // the DataOut functions can + // handle as many data vectors as + // you wish to write to output: data_out.add_data_vector (primal_solver.solution, - "primal_solution"); + "primal_solution"); data_out.add_data_vector (dual_solution, - "dual_solution"); + "dual_solution"); data_out.build_patches (); std::ostringstream filename; filename << "solution-" - << this->refinement_cycle - << ".gnuplot" - << std::ends; + << this->refinement_cycle + << ".gnuplot" + << std::ends; std::ofstream out (filename.str().c_str()); data_out.write (out, DataOut::gnuplot); } - // @sect3{Estimating errors} + // @sect3{Estimating errors} - // @sect4{Error estimation driver functions} - // - // As for the actual computation of - // error estimates, let's start - // with the function that drives - // all this, i.e. calls those - // functions that actually do the - // work, and finally collects the - // results. + // @sect4{Error estimation driver functions} + // + // As for the actual computation of + // error estimates, let's start + // with the function that drives + // all this, i.e. calls those + // functions that actually do the + // work, and finally collects the + // results. template void @@ -2857,1067 +2857,1067 @@ namespace Step14 const PrimalSolver &primal_solver = *this; const DualSolver &dual_solver = *this; - // The first task in computing - // the error is to set up vectors - // that denote the primal - // solution, and the weights - // (z-z_h)=(z-I_hz), both in the - // finite element space for which - // we have computed the dual - // solution. For this, we have to - // interpolate the primal - // solution to the dual finite - // element space, and to subtract - // the interpolation of the - // computed dual solution to the - // primal finite element - // space. Fortunately, the - // library provides functions for - // the interpolation into larger - // or smaller finite element - // spaces, so this is mostly - // obvious. - // - // First, let's do that for the - // primal solution: it is - // cell-wise interpolated into - // the finite element space in - // which we have solved the dual - // problem: But, again as in the - // WeightedResidual::output_solution - // function we first need to - // create a ConstraintMatrix - // including the hanging node - // constraints, but this time of - // the dual finite element space. + // The first task in computing + // the error is to set up vectors + // that denote the primal + // solution, and the weights + // (z-z_h)=(z-I_hz), both in the + // finite element space for which + // we have computed the dual + // solution. For this, we have to + // interpolate the primal + // solution to the dual finite + // element space, and to subtract + // the interpolation of the + // computed dual solution to the + // primal finite element + // space. Fortunately, the + // library provides functions for + // the interpolation into larger + // or smaller finite element + // spaces, so this is mostly + // obvious. + // + // First, let's do that for the + // primal solution: it is + // cell-wise interpolated into + // the finite element space in + // which we have solved the dual + // problem: But, again as in the + // WeightedResidual::output_solution + // function we first need to + // create a ConstraintMatrix + // including the hanging node + // constraints, but this time of + // the dual finite element space. 
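      // Written out, the two auxiliary vectors constructed in the
      // following lines are the primal solution interpolated into the
      // dual space, and the dual weights
      // @f[
      //   \omega = z_h - I_h z_h ,
      // @f]
      // where $z_h$ is the computed dual solution and $I_h z_h$ its
      // interpolation into the primal finite element space. It is these
      // weights, rather than $z_h$ itself, that enter the error
      // representation formula: by Galerkin orthogonality, subtracting
      // any function from the primal space from the dual solution does
      // not change the value of the residual.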
ConstraintMatrix dual_hanging_node_constraints; DoFTools::make_hanging_node_constraints (dual_solver.dof_handler, - dual_hanging_node_constraints); + dual_hanging_node_constraints); dual_hanging_node_constraints.close(); Vector primal_solution (dual_solver.dof_handler.n_dofs()); FETools::interpolate (primal_solver.dof_handler, - primal_solver.solution, - dual_solver.dof_handler, - dual_hanging_node_constraints, - primal_solution); - - // Then for computing the - // interpolation of the - // numerically approximated dual - // solution z into the finite - // element space of the primal - // solution and subtracting it - // from z: use the - // interpolate_difference - // function, that gives (z-I_hz) - // in the element space of the - // dual solution. + primal_solver.solution, + dual_solver.dof_handler, + dual_hanging_node_constraints, + primal_solution); + + // Then for computing the + // interpolation of the + // numerically approximated dual + // solution z into the finite + // element space of the primal + // solution and subtracting it + // from z: use the + // interpolate_difference + // function, that gives (z-I_hz) + // in the element space of the + // dual solution. ConstraintMatrix primal_hanging_node_constraints; DoFTools::make_hanging_node_constraints (primal_solver.dof_handler, - primal_hanging_node_constraints); + primal_hanging_node_constraints); primal_hanging_node_constraints.close(); Vector dual_weights (dual_solver.dof_handler.n_dofs()); FETools::interpolation_difference (dual_solver.dof_handler, - dual_hanging_node_constraints, - dual_solver.solution, - primal_solver.dof_handler, - primal_hanging_node_constraints, - dual_weights); - - // Note that this could probably - // have been more efficient since - // those constraints have been - // used previously when - // assembling matrix and right - // hand side for the primal - // problem and writing out the - // dual solution. We leave the - // optimization of the program in - // this respect as an exercise. - - // Having computed the dual - // weights we now proceed with - // computing the cell and face - // residuals of the primal - // solution. First we set up a - // map between face iterators and - // their jump term contributions - // of faces to the error - // estimator. The reason is that - // we compute the jump terms only - // once, from one side of the - // face, and want to collect them - // only afterwards when looping - // over all cells a second time. - // - // We initialize this map already - // with a value of -1e20 for all - // faces, since this value will - // strike in the results if - // something should go wrong and - // we fail to compute the value - // for a face for some - // reason. Secondly, we - // initialize the map once before - // we branch to different threads - // since this way the map's - // structure is no more modified - // by the individual threads, - // only existing entries are set - // to new values. This relieves - // us from the necessity to - // synchronise the threads - // through a mutex each time they - // write to (and modify the - // structure of) this map. + dual_hanging_node_constraints, + dual_solver.solution, + primal_solver.dof_handler, + primal_hanging_node_constraints, + dual_weights); + + // Note that this could probably + // have been more efficient since + // those constraints have been + // used previously when + // assembling matrix and right + // hand side for the primal + // problem and writing out the + // dual solution. 
We leave the + // optimization of the program in + // this respect as an exercise. + + // Having computed the dual + // weights we now proceed with + // computing the cell and face + // residuals of the primal + // solution. First we set up a + // map between face iterators and + // their jump term contributions + // of faces to the error + // estimator. The reason is that + // we compute the jump terms only + // once, from one side of the + // face, and want to collect them + // only afterwards when looping + // over all cells a second time. + // + // We initialize this map already + // with a value of -1e20 for all + // faces, since this value will + // strike in the results if + // something should go wrong and + // we fail to compute the value + // for a face for some + // reason. Secondly, we + // initialize the map once before + // we branch to different threads + // since this way the map's + // structure is no more modified + // by the individual threads, + // only existing entries are set + // to new values. This relieves + // us from the necessity to + // synchronise the threads + // through a mutex each time they + // write to (and modify the + // structure of) this map. FaceIntegrals face_integrals; for (active_cell_iterator cell=dual_solver.dof_handler.begin_active(); - cell!=dual_solver.dof_handler.end(); - ++cell) - for (unsigned int face_no=0; - face_no::faces_per_cell; - ++face_no) - face_integrals[cell->face(face_no)] = -1e20; - - // Then set up a vector with - // error indicators. Reserve one - // slot for each cell and set it - // to zero. + cell!=dual_solver.dof_handler.end(); + ++cell) + for (unsigned int face_no=0; + face_no::faces_per_cell; + ++face_no) + face_integrals[cell->face(face_no)] = -1e20; + + // Then set up a vector with + // error indicators. Reserve one + // slot for each cell and set it + // to zero. error_indicators.reinit (dual_solver.dof_handler - .get_tria().n_active_cells()); - - // Now start a number of threads - // which compute the error - // formula on parts of all the - // cells, and once they are all - // started wait until they have - // all finished: + .get_tria().n_active_cells()); + + // Now start a number of threads + // which compute the error + // formula on parts of all the + // cells, and once they are all + // started wait until they have + // all finished: const unsigned int n_threads = multithread_info.n_default_threads; Threads::ThreadGroup<> threads; for (unsigned int i=0; i::estimate_some, - *this, - primal_solution, - dual_weights, - n_threads, i, - error_indicators, - face_integrals); + threads += Threads::new_thread (&WeightedResidual::estimate_some, + *this, + primal_solution, + dual_weights, + n_threads, i, + error_indicators, + face_integrals); threads.join_all(); - // Once the error contributions - // are computed, sum them up. For - // this, note that the cell terms - // are already set, and that only - // the edge terms need to be - // collected. Thus, loop over all - // cells and their faces, make - // sure that the contributions of - // each of the faces are there, - // and add them up. Only take - // minus one half of the jump - // term, since the other half - // will be taken by the - // neighboring cell. + // Once the error contributions + // are computed, sum them up. For + // this, note that the cell terms + // are already set, and that only + // the edge terms need to be + // collected. 
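      // The bookkeeping described here can be reduced to a short
      // standalone example. In the sketch below (plain C++, not deal.II
      // code) integer ids stand in for face iterators, two cells share
      // face 1, and all numbers are made up; the real loop over cells and
      // faces follows further down.

      #include <cassert>
      #include <iostream>
      #include <map>

      int main ()
      {
        // Hypothetical mesh: two cells sharing face 1; faces 0 and 2 are
        // boundary faces of cells 0 and 1, respectively.
        const int faces_of_cell[2][2] = { {0, 1}, {1, 2} };

        // Initialize every face with a sentinel, as the real code does
        // with -1e20, so that a forgotten face shows up in the results.
        std::map<int, double> face_integrals;
        for (int cell = 0; cell < 2; ++cell)
          for (int f = 0; f < 2; ++f)
            face_integrals[faces_of_cell[cell][f]] = -1e20;

        // Each face term is computed exactly once, from one side only.
        face_integrals[0] = 0.;    // boundary faces contribute nothing
        face_integrals[2] = 0.;
        face_integrals[1] = 0.08;  // some jump term on the interior face

        // Collection: every cell receives minus one half of each of its
        // face terms, so the interior face is split between neighbors.
        double error_indicators[2] = { 0.2, 0.3 };  // cell terms, already set
        for (int cell = 0; cell < 2; ++cell)
          for (int f = 0; f < 2; ++f)
            {
              const double value = face_integrals[faces_of_cell[cell][f]];
              assert (value != -1e20);              // sanity check
              error_indicators[cell] -= 0.5 * value;
            }

        std::cout << error_indicators[0] << ' '
                  << error_indicators[1] << std::endl;
        return 0;
      }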
Thus, loop over all + // cells and their faces, make + // sure that the contributions of + // each of the faces are there, + // and add them up. Only take + // minus one half of the jump + // term, since the other half + // will be taken by the + // neighboring cell. unsigned int present_cell=0; for (active_cell_iterator cell=dual_solver.dof_handler.begin_active(); - cell!=dual_solver.dof_handler.end(); - ++cell, ++present_cell) - for (unsigned int face_no=0; face_no::faces_per_cell; - ++face_no) - { - Assert(face_integrals.find(cell->face(face_no)) != - face_integrals.end(), - ExcInternalError()); - error_indicators(present_cell) - -= 0.5*face_integrals[cell->face(face_no)]; - } + cell!=dual_solver.dof_handler.end(); + ++cell, ++present_cell) + for (unsigned int face_no=0; face_no::faces_per_cell; + ++face_no) + { + Assert(face_integrals.find(cell->face(face_no)) != + face_integrals.end(), + ExcInternalError()); + error_indicators(present_cell) + -= 0.5*face_integrals[cell->face(face_no)]; + } std::cout << " Estimated error=" - << std::accumulate (error_indicators.begin(), - error_indicators.end(), 0.) - << std::endl; + << std::accumulate (error_indicators.begin(), + error_indicators.end(), 0.) + << std::endl; } - // @sect4{Estimating on a subset of cells} + // @sect4{Estimating on a subset of cells} - // Next we have the function that - // is called to estimate the error - // on a subset of cells. The - // function may be called multiply - // if the library was configured to - // use multi-threading. Here it - // goes: + // Next we have the function that + // is called to estimate the error + // on a subset of cells. The + // function may be called multiply + // if the library was configured to + // use multi-threading. Here it + // goes: template void WeightedResidual:: estimate_some (const Vector &primal_solution, - const Vector &dual_weights, - const unsigned int n_threads, - const unsigned int this_thread, - Vector &error_indicators, - FaceIntegrals &face_integrals) const + const Vector &dual_weights, + const unsigned int n_threads, + const unsigned int this_thread, + Vector &error_indicators, + FaceIntegrals &face_integrals) const { const PrimalSolver &primal_solver = *this; const DualSolver &dual_solver = *this; - // At the beginning, we - // initialize two variables for - // each thread which may be - // running this function. The - // reason for these functions was - // discussed above, when the - // respective classes were - // discussed, so we here only - // point out that since they are - // local to the function that is - // spawned when running more than - // one thread, the data of these - // objects exists actually once - // per thread, so we don't have - // to take care about - // synchronising access to them. + // At the beginning, we + // initialize two variables for + // each thread which may be + // running this function. The + // reason for these functions was + // discussed above, when the + // respective classes were + // discussed, so we here only + // point out that since they are + // local to the function that is + // spawned when running more than + // one thread, the data of these + // objects exists actually once + // per thread, so we don't have + // to take care about + // synchronising access to them. 
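      // The pattern of these scratch objects can be isolated in a few
      // lines of plain C++ (hypothetical names, not deal.II code): each
      // thread owns one object whose vectors are sized once in the
      // constructor and then merely overwritten for every cell the thread
      // visits, so no memory is allocated inside the per-cell loop.

      #include <vector>

      struct ScratchSketch              // plays the role of CellData/FaceData
      {
        std::vector<double> residuals, weights;

        explicit ScratchSketch (const unsigned int n_q_points)
          : residuals (n_q_points), weights (n_q_points)  // allocate once
        {}
      };

      void worker_thread (const unsigned int n_q_points,
                          const unsigned int n_cells)
      {
        ScratchSketch scratch (n_q_points);       // one object per thread
        for (unsigned int cell = 0; cell < n_cells; ++cell)
          {
            // ...fill scratch.residuals and scratch.weights for this cell
            // and accumulate the error contribution; the vectors are only
            // overwritten here, never reallocated.
          }
      }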
CellData cell_data (*dual_solver.fe, - *dual_solver.quadrature, - *primal_solver.rhs_function); + *dual_solver.quadrature, + *primal_solver.rhs_function); FaceData face_data (*dual_solver.fe, - *dual_solver.face_quadrature); - - // Then calculate the start cell - // for this thread. We let the - // different threads run on - // interleaved cells, i.e. for - // example if we have 4 threads, - // then the first thread treates - // cells 0, 4, 8, etc, while the - // second threads works on cells 1, - // 5, 9, and so on. The reason is - // that it takes vastly more time - // to work on cells with hanging - // nodes than on regular cells, but - // such cells are not evenly - // distributed across the range of - // cell iterators, so in order to - // have the different threads do - // approximately the same amount of - // work, we have to let them work - // interleaved to the effect of a - // pseudorandom distribution of the - // `hard' cells to the different - // threads. + *dual_solver.face_quadrature); + + // Then calculate the start cell + // for this thread. We let the + // different threads run on + // interleaved cells, i.e. for + // example if we have 4 threads, + // then the first thread treates + // cells 0, 4, 8, etc, while the + // second threads works on cells 1, + // 5, 9, and so on. The reason is + // that it takes vastly more time + // to work on cells with hanging + // nodes than on regular cells, but + // such cells are not evenly + // distributed across the range of + // cell iterators, so in order to + // have the different threads do + // approximately the same amount of + // work, we have to let them work + // interleaved to the effect of a + // pseudorandom distribution of the + // `hard' cells to the different + // threads. active_cell_iterator cell=dual_solver.dof_handler.begin_active(); for (unsigned int t=0; - (terror_indicators - // variable: - integrate_over_cell (cell, cell_index, - primal_solution, - dual_weights, - cell_data, - error_indicators); - - // After computing the cell - // terms, turn to the face - // terms. For this, loop over - // all faces of the present - // cell, and see whether - // something needs to be - // computed on it: - for (unsigned int face_no=0; - face_no::faces_per_cell; - ++face_no) - { - // First, if this face is - // part of the boundary, - // then there is nothing - // to do. However, to - // make things easier - // when summing up the - // contributions of the - // faces of cells, we - // enter this face into - // the list of faces with - // a zero contribution to - // the error. - if (cell->face(face_no)->at_boundary()) - { - face_integrals[cell->face(face_no)] = 0; - continue; - } - - // Next, note that since - // we want to compute the - // jump terms on each - // face only once - // although we access it - // twice (if it is not at - // the boundary), we have - // to define some rules - // who is responsible for - // computing on a face: - // - // First, if the - // neighboring cell is on - // the same level as this - // one, i.e. neither - // further refined not - // coarser, then the one - // with the lower index - // within this level does - // the work. 
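              // Both this rule and the rule for neighbors on a different
              // refinement level, which is stated just below, can be
              // condensed into a small predicate. A standalone sketch,
              // with a plain struct standing in for the little
              // information the rules need about a cell iterator:

              // Hypothetical stand-in for a cell seen across one face.
              struct CellInfoSketch
              {
                int  level;          // refinement level
                int  index;          // index within that level
                bool has_children;   // true if further refined
              };

              // Returns true if the current cell is responsible for
              // computing the jump term on the face it shares with
              // `neighbor'.
              bool owns_face (const CellInfoSketch &cell,
                              const CellInfoSketch &neighbor)
              {
                // Same level, neither refined: the lower index works.
                if (!neighbor.has_children && neighbor.level == cell.level)
                  return cell.index < neighbor.index;

                // Neighbor is coarser: it handles the face when visited.
                if (neighbor.level < cell.level)
                  return false;

                // Neighbor is further refined: the coarser cell (this
                // one) integrates over the subfaces.
                return true;
              }

              // A cell then computes the jump term on a face exactly if
              // owns_face returns true for it, which guarantees that
              // every interior face is handled exactly once.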
In other - // words: if the other - // one has a lower index, - // then skip work on this - // face: - if ((cell->neighbor(face_no)->has_children() == false) && - (cell->neighbor(face_no)->level() == cell->level()) && - (cell->neighbor(face_no)->index() < cell->index())) - continue; - - // Likewise, we always - // work from the coarser - // cell if this and its - // neighbor differ in - // refinement. Thus, if - // the neighboring cell - // is less refined than - // the present one, then - // do nothing since we - // integrate over the - // subfaces when we visit - // the coarse cell. - if (cell->at_boundary(face_no) == false) - if (cell->neighbor(face_no)->level() < cell->level()) - continue; - - - // Now we know that we - // are in charge here, so - // actually compute the - // face jump terms. If - // the face is a regular - // one, i.e. the other - // side's cell is neither - // coarser not finer than - // this cell, then call - // one function, and if - // the cell on the other - // side is further - // refined, then use - // another function. Note - // that the case that the - // cell on the other side - // is coarser cannot - // happen since we have - // decided above that we - // handle this case when - // we pass over that - // other cell. - if (cell->face(face_no)->has_children() == false) - integrate_over_regular_face (cell, face_no, - primal_solution, - dual_weights, - face_data, - face_integrals); - else - integrate_over_irregular_face (cell, face_no, - primal_solution, - dual_weights, - face_data, - face_integrals); - } - - // After computing the cell - // contributions and looping - // over the faces, go to the - // next cell for this - // thread. Note again that - // the cells for each of the - // threads are interleaved. - // If we are at the end of - // our workload, jump out - // of the loop. - for (unsigned int t=0; - ((terror_indicators + // variable: + integrate_over_cell (cell, cell_index, + primal_solution, + dual_weights, + cell_data, + error_indicators); + + // After computing the cell + // terms, turn to the face + // terms. For this, loop over + // all faces of the present + // cell, and see whether + // something needs to be + // computed on it: + for (unsigned int face_no=0; + face_no::faces_per_cell; + ++face_no) + { + // First, if this face is + // part of the boundary, + // then there is nothing + // to do. However, to + // make things easier + // when summing up the + // contributions of the + // faces of cells, we + // enter this face into + // the list of faces with + // a zero contribution to + // the error. + if (cell->face(face_no)->at_boundary()) + { + face_integrals[cell->face(face_no)] = 0; + continue; + } + + // Next, note that since + // we want to compute the + // jump terms on each + // face only once + // although we access it + // twice (if it is not at + // the boundary), we have + // to define some rules + // who is responsible for + // computing on a face: + // + // First, if the + // neighboring cell is on + // the same level as this + // one, i.e. neither + // further refined not + // coarser, then the one + // with the lower index + // within this level does + // the work. 
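The comments above spell out an ownership rule that ensures each interior face's jump term is computed exactly once: boundary faces trivially belong to their only adjacent cell, two cells on the same level defer to the one with the lower index, and where the refinement levels differ the coarser cell does the work by integrating over the subfaces. The following sketch condenses that decision into a free function over a hypothetical plain-data cell description; in the program the same tests are made directly on deal.II cell iterators.

#include <iostream>

// Minimal stand-in for the information the ownership decision needs.
struct CellInfo
{
  int  level;                 // refinement level of this cell
  int  index;                 // index of this cell within its level
  bool neighbor_is_refined;   // the neighbor behind the face has children
  int  neighbor_level;
  int  neighbor_index;
  bool at_boundary;           // the face lies on the domain boundary
};

// Returns true if the present cell is responsible for the jump term on
// the face described by 'cell'.
bool owns_face (const CellInfo &cell)
{
  // boundary faces: nothing to share, the (zero) entry is made here
  if (cell.at_boundary)
    return true;

  // same level, neither finer nor coarser: the lower index does the work
  if ((cell.neighbor_is_refined == false) &&
      (cell.neighbor_level == cell.level) &&
      (cell.neighbor_index < cell.index))
    return false;

  // neighbor is coarser: that cell integrates over the subfaces
  if (cell.neighbor_level < cell.level)
    return false;

  return true;
}

int main ()
{
  const CellInfo cell = {2, 7, false, 2, 5, false};
  std::cout << std::boolalpha
            << owns_face (cell) << std::endl;   // false: neighbor index 5 < 7
}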
In other + // words: if the other + // one has a lower index, + // then skip work on this + // face: + if ((cell->neighbor(face_no)->has_children() == false) && + (cell->neighbor(face_no)->level() == cell->level()) && + (cell->neighbor(face_no)->index() < cell->index())) + continue; + + // Likewise, we always + // work from the coarser + // cell if this and its + // neighbor differ in + // refinement. Thus, if + // the neighboring cell + // is less refined than + // the present one, then + // do nothing since we + // integrate over the + // subfaces when we visit + // the coarse cell. + if (cell->at_boundary(face_no) == false) + if (cell->neighbor(face_no)->level() < cell->level()) + continue; + + + // Now we know that we + // are in charge here, so + // actually compute the + // face jump terms. If + // the face is a regular + // one, i.e. the other + // side's cell is neither + // coarser not finer than + // this cell, then call + // one function, and if + // the cell on the other + // side is further + // refined, then use + // another function. Note + // that the case that the + // cell on the other side + // is coarser cannot + // happen since we have + // decided above that we + // handle this case when + // we pass over that + // other cell. + if (cell->face(face_no)->has_children() == false) + integrate_over_regular_face (cell, face_no, + primal_solution, + dual_weights, + face_data, + face_integrals); + else + integrate_over_irregular_face (cell, face_no, + primal_solution, + dual_weights, + face_data, + face_integrals); + } + + // After computing the cell + // contributions and looping + // over the faces, go to the + // next cell for this + // thread. Note again that + // the cells for each of the + // threads are interleaved. + // If we are at the end of + // our workload, jump out + // of the loop. + for (unsigned int t=0; + ((t void WeightedResidual:: integrate_over_cell (const active_cell_iterator &cell, - const unsigned int cell_index, - const Vector &primal_solution, - const Vector &dual_weights, - CellData &cell_data, - Vector &error_indicators) const - { - // The tasks to be done are what - // appears natural from looking - // at the error estimation - // formula: first get the - // right hand side and - // Laplacian of the numerical - // solution at the quadrature - // points for the cell residual, + const unsigned int cell_index, + const Vector &primal_solution, + const Vector &dual_weights, + CellData &cell_data, + Vector &error_indicators) const + { + // The tasks to be done are what + // appears natural from looking + // at the error estimation + // formula: first get the + // right hand side and + // Laplacian of the numerical + // solution at the quadrature + // points for the cell residual, cell_data.fe_values.reinit (cell); cell_data.right_hand_side - ->value_list (cell_data.fe_values.get_quadrature_points(), - cell_data.rhs_values); + ->value_list (cell_data.fe_values.get_quadrature_points(), + cell_data.rhs_values); cell_data.fe_values.get_function_laplacians (primal_solution, - cell_data.cell_laplacians); + cell_data.cell_laplacians); - // ...then get the dual weights... + // ...then get the dual weights... 
cell_data.fe_values.get_function_values (dual_weights, - cell_data.dual_weights); + cell_data.dual_weights); - // ...and finally build the sum - // over all quadrature points and - // store it with the present - // cell: + // ...and finally build the sum + // over all quadrature points and + // store it with the present + // cell: double sum = 0; for (unsigned int p=0; p void WeightedResidual:: integrate_over_regular_face (const active_cell_iterator &cell, - const unsigned int face_no, - const Vector &primal_solution, - const Vector &dual_weights, - FaceData &face_data, - FaceIntegrals &face_integrals) const + const unsigned int face_no, + const Vector &primal_solution, + const Vector &dual_weights, + FaceData &face_data, + FaceIntegrals &face_integrals) const { const unsigned int - n_q_points = face_data.fe_face_values_cell.n_quadrature_points; - - // The first step is to get the - // values of the gradients at the - // quadrature points of the - // finite element field on the - // present cell. For this, - // initialize the - // FEFaceValues object - // corresponding to this side of - // the face, and extract the - // gradients using that - // object. + n_q_points = face_data.fe_face_values_cell.n_quadrature_points; + + // The first step is to get the + // values of the gradients at the + // quadrature points of the + // finite element field on the + // present cell. For this, + // initialize the + // FEFaceValues object + // corresponding to this side of + // the face, and extract the + // gradients using that + // object. face_data.fe_face_values_cell.reinit (cell, face_no); face_data.fe_face_values_cell.get_function_grads (primal_solution, - face_data.cell_grads); - - // The second step is then to - // extract the gradients of the - // finite element solution at the - // quadrature points on the other - // side of the face, i.e. from - // the neighboring cell. - // - // For this, do a sanity check - // before: make sure that the - // neigbor actually exists (yes, - // we should not have come here - // if the neighbor did not exist, - // but in complicated software - // there are bugs, so better - // check this), and if this is - // not the case throw an error. + face_data.cell_grads); + + // The second step is then to + // extract the gradients of the + // finite element solution at the + // quadrature points on the other + // side of the face, i.e. from + // the neighboring cell. + // + // For this, do a sanity check + // before: make sure that the + // neigbor actually exists (yes, + // we should not have come here + // if the neighbor did not exist, + // but in complicated software + // there are bugs, so better + // check this), and if this is + // not the case throw an error. Assert (cell->neighbor(face_no).state() == IteratorState::valid, - ExcInternalError()); - // If we have that, then we need - // to find out with which face of - // the neighboring cell we have - // to work, i.e. the - // home-manythe neighbor the - // present cell is of the cell - // behind the present face. For - // this, there is a function, and - // we put the result into a - // variable with the name - // neighbor_neighbor: + ExcInternalError()); + // If we have that, then we need + // to find out with which face of + // the neighboring cell we have + // to work, i.e. the + // home-manythe neighbor the + // present cell is of the cell + // behind the present face. 
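For the cell term, the comments above translate into a very short quadrature loop: at each point the cell residual, that is the right hand side plus the Laplacian of the discrete solution, is multiplied by the dual weight and by JxW, and the products are summed. The sketch below spells this out with plain arrays in place of the data the FEValues object provides; all numbers are invented purely for illustration.

#include <iostream>
#include <vector>

int main ()
{
  // made-up per-quadrature-point data, as FEValues would provide it
  const std::vector<double> rhs_values      = { 1.0,  1.1,  0.9};  // f(x_q)
  const std::vector<double> cell_laplacians = {-0.8, -1.0, -0.7};  // laplace(u_h)(x_q)
  const std::vector<double> dual_weights    = {0.02, 0.03, 0.01};  // dual weights at x_q
  const std::vector<double> JxW             = {0.25, 0.50, 0.25};

  double sum = 0;
  for (unsigned int p = 0; p < JxW.size(); ++p)
    sum += ((rhs_values[p] + cell_laplacians[p]) *
            dual_weights[p] *
            JxW[p]);

  std::cout << "cell term = " << sum << std::endl;
}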
For + // this, there is a function, and + // we put the result into a + // variable with the name + // neighbor_neighbor: const unsigned int - neighbor_neighbor = cell->neighbor_of_neighbor (face_no); - // Then define an abbreviation - // for the neigbor cell, - // initialize the - // FEFaceValues object on - // that cell, and extract the - // gradients on that cell: + neighbor_neighbor = cell->neighbor_of_neighbor (face_no); + // Then define an abbreviation + // for the neigbor cell, + // initialize the + // FEFaceValues object on + // that cell, and extract the + // gradients on that cell: const active_cell_iterator neighbor = cell->neighbor(face_no); face_data.fe_face_values_neighbor.reinit (neighbor, neighbor_neighbor); face_data.fe_face_values_neighbor.get_function_grads (primal_solution, - face_data.neighbor_grads); - - // Now that we have the gradients - // on this and the neighboring - // cell, compute the jump - // residual by multiplying the - // jump in the gradient with the - // normal vector: + face_data.neighbor_grads); + + // Now that we have the gradients + // on this and the neighboring + // cell, compute the jump + // residual by multiplying the + // jump in the gradient with the + // normal vector: for (unsigned int p=0; pface(face_no)) != face_integrals.end(), - ExcInternalError()); + ExcInternalError()); Assert (face_integrals[cell->face(face_no)] == -1e20, - ExcInternalError()); - - // ...then store computed value - // at assigned location. Note - // that the stored value does not - // contain the factor 1/2 that - // appears in the error - // representation. The reason is - // that the term actually does - // not have this factor if we - // loop over all faces in the - // triangulation, but only - // appears if we write it as a - // sum over all cells and all - // faces of each cell; we thus - // visit the same face twice. We - // take account of this by using - // this factor -1/2 later, when we - // sum up the contributions for - // each cell individually. + ExcInternalError()); + + // ...then store computed value + // at assigned location. Note + // that the stored value does not + // contain the factor 1/2 that + // appears in the error + // representation. The reason is + // that the term actually does + // not have this factor if we + // loop over all faces in the + // triangulation, but only + // appears if we write it as a + // sum over all cells and all + // faces of each cell; we thus + // visit the same face twice. We + // take account of this by using + // this factor -1/2 later, when we + // sum up the contributions for + // each cell individually. face_integrals[cell->face(face_no)] = face_integral; } - // @sect4{Computing edge term error contributions -- 2} + // @sect4{Computing edge term error contributions -- 2} - // We are still missing the case of - // faces with hanging nodes. This - // is what is covered in this - // function: + // We are still missing the case of + // faces with hanging nodes. 
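The regular face term works analogously: at every face quadrature point the difference between the gradient taken from this cell and the gradient taken from the neighbor is contracted with the normal vector, weighted with the dual weight and JxW, and summed into one number that is stored for the face. Below is a plain-array sketch, with two-component arrays standing in for the Tensor<1,dim> gradients of the program and all values invented for illustration.

#include <array>
#include <iostream>
#include <vector>

typedef std::array<double,2> Vec2;

double dot (const Vec2 &a, const Vec2 &b)
{
  return a[0]*b[0] + a[1]*b[1];
}

int main ()
{
  // made-up gradients on the two sides of the face, normal vectors,
  // dual weights and JxW values at two face quadrature points
  const std::vector<Vec2>   cell_grads     = {{1.0, 0.2}, {1.1, 0.1}};
  const std::vector<Vec2>   neighbor_grads = {{0.8, 0.2}, {0.9, 0.3}};
  const std::vector<Vec2>   normals        = {{1.0, 0.0}, {1.0, 0.0}};
  const std::vector<double> dual_weights   = {0.05, 0.04};
  const std::vector<double> JxW            = {0.5, 0.5};

  double face_integral = 0;
  for (unsigned int p = 0; p < JxW.size(); ++p)
    {
      const Vec2 jump = {cell_grads[p][0] - neighbor_grads[p][0],
                         cell_grads[p][1] - neighbor_grads[p][1]};
      face_integral += dot (jump, normals[p]) * dual_weights[p] * JxW[p];
    }

  // as explained above, the factor -1/2 is applied only later, when the
  // face contributions are summed into the per-cell indicators
  std::cout << "face integral = " << face_integral << std::endl;
}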
This + // is what is covered in this + // function: template void WeightedResidual:: integrate_over_irregular_face (const active_cell_iterator &cell, - const unsigned int face_no, - const Vector &primal_solution, - const Vector &dual_weights, - FaceData &face_data, - FaceIntegrals &face_integrals) const - { - // First again two abbreviations, - // and some consistency checks - // whether the function is called - // only on faces for which it is - // supposed to be called: + const unsigned int face_no, + const Vector &primal_solution, + const Vector &dual_weights, + FaceData &face_data, + FaceIntegrals &face_integrals) const + { + // First again two abbreviations, + // and some consistency checks + // whether the function is called + // only on faces for which it is + // supposed to be called: const unsigned int - n_q_points = face_data.fe_face_values_cell.n_quadrature_points; + n_q_points = face_data.fe_face_values_cell.n_quadrature_points; const typename DoFHandler::face_iterator - face = cell->face(face_no); + face = cell->face(face_no); const typename DoFHandler::cell_iterator - neighbor = cell->neighbor(face_no); + neighbor = cell->neighbor(face_no); Assert (neighbor.state() == IteratorState::valid, - ExcInternalError()); + ExcInternalError()); Assert (neighbor->has_children(), - ExcInternalError()); - - // Then find out which neighbor - // the present cell is of the - // adjacent cell. Note that we - // will operator on the children - // of this adjacent cell, but - // that their orientation is the - // same as that of their mother, - // i.e. the neigbor direction is - // the same. + ExcInternalError()); + + // Then find out which neighbor + // the present cell is of the + // adjacent cell. Note that we + // will operator on the children + // of this adjacent cell, but + // that their orientation is the + // same as that of their mother, + // i.e. the neigbor direction is + // the same. const unsigned int - neighbor_neighbor = cell->neighbor_of_neighbor (face_no); + neighbor_neighbor = cell->neighbor_of_neighbor (face_no); - // Then simply do everything we - // did in the previous function - // for one face for all the - // sub-faces now: + // Then simply do everything we + // did in the previous function + // for one face for all the + // sub-faces now: for (unsigned int subface_no=0; - subface_non_children(); ++subface_no) - { - // Start with some checks - // again: get an iterator - // pointing to the cell - // behind the present subface - // and check whether its face - // is a subface of the one we - // are considering. If that - // were not the case, then - // there would be either a - // bug in the - // neighbor_neighbor - // function called above, or - // -- worse -- some function - // in the library did not - // keep to some underlying - // assumptions about cells, - // their children, and their - // faces. In any case, even - // though this assertion - // should not be triggered, - // it does not harm to be - // cautious, and in optimized - // mode computations the - // assertion will be removed - // anyway. 
- const active_cell_iterator neighbor_child - = cell->neighbor_child_on_subface (face_no, subface_no); - Assert (neighbor_child->face(neighbor_neighbor) == - cell->face(face_no)->child(subface_no), - ExcInternalError()); - - // Now start the work by - // again getting the gradient - // of the solution first at - // this side of the - // interface, - face_data.fe_subface_values_cell.reinit (cell, face_no, subface_no); - face_data.fe_subface_values_cell.get_function_grads (primal_solution, - face_data.cell_grads); - // then at the other side, - face_data.fe_face_values_neighbor.reinit (neighbor_child, - neighbor_neighbor); - face_data.fe_face_values_neighbor.get_function_grads (primal_solution, - face_data.neighbor_grads); - - // and finally building the - // jump residuals. Since we - // take the normal vector - // from the other cell this - // time, revert the sign of - // the first term compared to - // the other function: - for (unsigned int p=0; pface(neighbor_neighbor)] - = face_integral; - } - - // Once the contributions of all - // sub-faces are computed, loop - // over all sub-faces to collect - // and store them with the mother - // face for simple use when later - // collecting the error terms of - // cells. Again make safety - // checks that the entries for - // the sub-faces have been - // computed and do not carry an - // invalid value. + subface_non_children(); ++subface_no) + { + // Start with some checks + // again: get an iterator + // pointing to the cell + // behind the present subface + // and check whether its face + // is a subface of the one we + // are considering. If that + // were not the case, then + // there would be either a + // bug in the + // neighbor_neighbor + // function called above, or + // -- worse -- some function + // in the library did not + // keep to some underlying + // assumptions about cells, + // their children, and their + // faces. In any case, even + // though this assertion + // should not be triggered, + // it does not harm to be + // cautious, and in optimized + // mode computations the + // assertion will be removed + // anyway. + const active_cell_iterator neighbor_child + = cell->neighbor_child_on_subface (face_no, subface_no); + Assert (neighbor_child->face(neighbor_neighbor) == + cell->face(face_no)->child(subface_no), + ExcInternalError()); + + // Now start the work by + // again getting the gradient + // of the solution first at + // this side of the + // interface, + face_data.fe_subface_values_cell.reinit (cell, face_no, subface_no); + face_data.fe_subface_values_cell.get_function_grads (primal_solution, + face_data.cell_grads); + // then at the other side, + face_data.fe_face_values_neighbor.reinit (neighbor_child, + neighbor_neighbor); + face_data.fe_face_values_neighbor.get_function_grads (primal_solution, + face_data.neighbor_grads); + + // and finally building the + // jump residuals. Since we + // take the normal vector + // from the other cell this + // time, revert the sign of + // the first term compared to + // the other function: + for (unsigned int p=0; pface(neighbor_neighbor)] + = face_integral; + } + + // Once the contributions of all + // sub-faces are computed, loop + // over all sub-faces to collect + // and store them with the mother + // face for simple use when later + // collecting the error terms of + // cells. Again make safety + // checks that the entries for + // the sub-faces have been + // computed and do not carry an + // invalid value. 
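For a face with hanging nodes the bookkeeping just described therefore has two stages: each sub-face gets its own entry in the face_integrals map, computed from the coarse side with the sign of the first term reverted because the normal vector comes from the refined neighbor, and once all sub-faces are done their values are added up and stored under the mother face. The following sketch shows only that bookkeeping step, with integer face ids invented as stand-ins for the deal.II face iterators used as map keys in the program.

#include <iostream>
#include <map>
#include <vector>

int main ()
{
  std::map<int, double> face_integrals;

  // a mother face with two sub-faces (the 2d case); the ids are made up
  const int              mother_face = 10;
  const std::vector<int> child_faces = {11, 12};

  // per-sub-face integrals, computed exactly as in the regular-face case
  face_integrals[child_faces[0]] = 0.015;
  face_integrals[child_faces[1]] = 0.010;

  // collect the children's contributions and store them with the parent
  double sum = 0;
  for (unsigned int subface_no = 0; subface_no < child_faces.size(); ++subface_no)
    sum += face_integrals[child_faces[subface_no]];
  face_integrals[mother_face] = sum;

  std::cout << "mother face integral = "
            << face_integrals[mother_face] << std::endl;
}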
double sum = 0; for (unsigned int subface_no=0; - subface_non_children(); ++subface_no) - { - Assert (face_integrals.find(face->child(subface_no)) != - face_integrals.end(), - ExcInternalError()); - Assert (face_integrals[face->child(subface_no)] != -1e20, - ExcInternalError()); - - sum += face_integrals[face->child(subface_no)]; - } - // Finally store the value with - // the parent face. + subface_non_children(); ++subface_no) + { + Assert (face_integrals.find(face->child(subface_no)) != + face_integrals.end(), + ExcInternalError()); + Assert (face_integrals[face->child(subface_no)] != -1e20, + ExcInternalError()); + + sum += face_integrals[face->child(subface_no)]; + } + // Finally store the value with + // the parent face. face_integrals[face] = sum; } } - // @sect3{A simulation framework} - - // In the previous example program, - // we have had two functions that - // were used to drive the process of - // solving on subsequently finer - // grids. We extend this here to - // allow for a number of parameters - // to be passed to these functions, - // and put all of that into framework - // class. - // - // You will have noted that this - // program is built up of a number of - // small parts (evaluation functions, - // solver classes implementing - // various refinement methods, - // different dual functionals, - // different problem and data - // descriptions), which makes the - // program relatively simple to - // extend, but also allows to solve a - // large number of different problems - // by replacing one part by - // another. We reflect this - // flexibility by declaring a - // structure in the following - // framework class that holds a - // number of parameters that may be - // set to test various combinations - // of the parts of this program, and - // which can be used to test it at - // various problems and - // discretizations in a simple way. + // @sect3{A simulation framework} + + // In the previous example program, + // we have had two functions that + // were used to drive the process of + // solving on subsequently finer + // grids. We extend this here to + // allow for a number of parameters + // to be passed to these functions, + // and put all of that into framework + // class. + // + // You will have noted that this + // program is built up of a number of + // small parts (evaluation functions, + // solver classes implementing + // various refinement methods, + // different dual functionals, + // different problem and data + // descriptions), which makes the + // program relatively simple to + // extend, but also allows to solve a + // large number of different problems + // by replacing one part by + // another. We reflect this + // flexibility by declaring a + // structure in the following + // framework class that holds a + // number of parameters that may be + // set to test various combinations + // of the parts of this program, and + // which can be used to test it at + // various problems and + // discretizations in a simple way. template struct Framework { public: - // First, we declare two - // abbreviations for simple use - // of the respective data types: + // First, we declare two + // abbreviations for simple use + // of the respective data types: typedef Evaluation::EvaluationBase Evaluator; typedef std::list EvaluatorList; - // Then we have the structure - // which declares all the - // parameters that may be set. In - // the default constructor of the - // structure, these values are - // all set to default values, for - // simple use. 
+ // Then we have the structure + // which declares all the + // parameters that may be set. In + // the default constructor of the + // structure, these values are + // all set to default values, for + // simple use. struct ProblemDescription { - // First allow for the - // degrees of the piecewise - // polynomials by which the - // primal and dual problems - // will be discretized. They - // default to (bi-, - // tri-)linear ansatz - // functions for the primal, - // and (bi-, tri-)quadratic - // ones for the dual - // problem. If a refinement - // criterion is chosen that - // does not need the solution - // of a dual problem, the - // value of the dual finite - // element degree is of - // course ignored. - unsigned int primal_fe_degree; - unsigned int dual_fe_degree; - - // Then have an object that - // describes the problem - // type, i.e. right hand - // side, domain, boundary - // values, etc. The pointer - // needed here defaults to - // the Null pointer, i.e. you - // will have to set it in - // actual instances of this - // object to make it useful. - SmartPointer > data; - - // Since we allow to use - // different refinement - // criteria (global - // refinement, refinement by - // the Kelly error indicator, - // possibly with a weight, - // and using the dual - // estimator), define a - // number of enumeration - // values, and subsequently a - // variable of that type. It - // will default to - // dual_weighted_error_estimator. - enum RefinementCriterion { - dual_weighted_error_estimator, - global_refinement, - kelly_indicator, - weighted_kelly_indicator - }; - - RefinementCriterion refinement_criterion; - - // Next, an object that - // describes the dual - // functional. It is only - // needed if the dual - // weighted residual - // refinement is chosen, and - // also defaults to a Null - // pointer. - SmartPointer > dual_functional; - - // Then a list of evaluation - // objects. Its default value - // is empty, i.e. no - // evaluation objects. - EvaluatorList evaluator_list; - - // Next to last, a function - // that is used as a weight - // to the - // RefinementWeightedKelly - // class. The default value - // of this pointer is zero, - // but you have to set it to - // some other value if you - // want to use the - // weighted_kelly_indicator - // refinement criterion. - SmartPointer > kelly_weight; - - // Finally, we have a - // variable that denotes the - // maximum number of degrees - // of freedom we allow for - // the (primal) - // discretization. If it is - // exceeded, we stop the - // process of solving and - // intermittend mesh - // refinement. Its default - // value is 20,000. - unsigned int max_degrees_of_freedom; - - // Finally the default - // constructor of this class: - ProblemDescription (); + // First allow for the + // degrees of the piecewise + // polynomials by which the + // primal and dual problems + // will be discretized. They + // default to (bi-, + // tri-)linear ansatz + // functions for the primal, + // and (bi-, tri-)quadratic + // ones for the dual + // problem. If a refinement + // criterion is chosen that + // does not need the solution + // of a dual problem, the + // value of the dual finite + // element degree is of + // course ignored. + unsigned int primal_fe_degree; + unsigned int dual_fe_degree; + + // Then have an object that + // describes the problem + // type, i.e. right hand + // side, domain, boundary + // values, etc. The pointer + // needed here defaults to + // the Null pointer, i.e. 
you + // will have to set it in + // actual instances of this + // object to make it useful. + SmartPointer > data; + + // Since we allow to use + // different refinement + // criteria (global + // refinement, refinement by + // the Kelly error indicator, + // possibly with a weight, + // and using the dual + // estimator), define a + // number of enumeration + // values, and subsequently a + // variable of that type. It + // will default to + // dual_weighted_error_estimator. + enum RefinementCriterion { + dual_weighted_error_estimator, + global_refinement, + kelly_indicator, + weighted_kelly_indicator + }; + + RefinementCriterion refinement_criterion; + + // Next, an object that + // describes the dual + // functional. It is only + // needed if the dual + // weighted residual + // refinement is chosen, and + // also defaults to a Null + // pointer. + SmartPointer > dual_functional; + + // Then a list of evaluation + // objects. Its default value + // is empty, i.e. no + // evaluation objects. + EvaluatorList evaluator_list; + + // Next to last, a function + // that is used as a weight + // to the + // RefinementWeightedKelly + // class. The default value + // of this pointer is zero, + // but you have to set it to + // some other value if you + // want to use the + // weighted_kelly_indicator + // refinement criterion. + SmartPointer > kelly_weight; + + // Finally, we have a + // variable that denotes the + // maximum number of degrees + // of freedom we allow for + // the (primal) + // discretization. If it is + // exceeded, we stop the + // process of solving and + // intermittend mesh + // refinement. Its default + // value is 20,000. + unsigned int max_degrees_of_freedom; + + // Finally the default + // constructor of this class: + ProblemDescription (); }; - // The driver framework class - // only has one method which - // calls solver and mesh - // refinement intermittently, and - // does some other small tasks in - // between. Since it does not - // need data besides the - // parameters given to it, we - // make it static: + // The driver framework class + // only has one method which + // calls solver and mesh + // refinement intermittently, and + // does some other small tasks in + // between. 
Since it does not + // need data besides the + // parameters given to it, we + // make it static: static void run (const ProblemDescription &descriptor); }; - // As for the implementation, first - // the constructor of the parameter - // object, setting all values to - // their defaults: + // As for the implementation, first + // the constructor of the parameter + // object, setting all values to + // their defaults: template Framework::ProblemDescription::ProblemDescription () - : - primal_fe_degree (1), - dual_fe_degree (2), - refinement_criterion (dual_weighted_error_estimator), - max_degrees_of_freedom (20000) + : + primal_fe_degree (1), + dual_fe_degree (2), + refinement_criterion (dual_weighted_error_estimator), + max_degrees_of_freedom (20000) {} - // Then the function which drives the - // whole process: + // Then the function which drives the + // whole process: template void Framework::run (const ProblemDescription &descriptor) { - // First create a triangulation - // from the given data object, + // First create a triangulation + // from the given data object, Triangulation triangulation (Triangulation::smoothing_on_refinement); descriptor.data->create_coarse_grid (triangulation); - // then a set of finite elements - // and appropriate quadrature - // formula: + // then a set of finite elements + // and appropriate quadrature + // formula: const FE_Q primal_fe(descriptor.primal_fe_degree); const FE_Q dual_fe(descriptor.dual_fe_degree); const QGauss quadrature(descriptor.dual_fe_degree+1); const QGauss face_quadrature(descriptor.dual_fe_degree+1); - // Next, select one of the classes - // implementing different - // refinement criteria. + // Next, select one of the classes + // implementing different + // refinement criteria. LaplaceSolver::Base * solver = 0; switch (descriptor.refinement_criterion) { - case ProblemDescription::dual_weighted_error_estimator: - { - solver - = new LaplaceSolver::WeightedResidual (triangulation, - primal_fe, - dual_fe, - quadrature, - face_quadrature, - descriptor.data->get_right_hand_side(), - descriptor.data->get_boundary_values(), - *descriptor.dual_functional); - break; - } - - case ProblemDescription::global_refinement: - { - solver - = new LaplaceSolver::RefinementGlobal (triangulation, - primal_fe, - quadrature, - face_quadrature, - descriptor.data->get_right_hand_side(), - descriptor.data->get_boundary_values()); - break; - } - - case ProblemDescription::kelly_indicator: - { - solver - = new LaplaceSolver::RefinementKelly (triangulation, - primal_fe, - quadrature, - face_quadrature, - descriptor.data->get_right_hand_side(), - descriptor.data->get_boundary_values()); - break; - } - - case ProblemDescription::weighted_kelly_indicator: - { - solver - = new LaplaceSolver::RefinementWeightedKelly (triangulation, - primal_fe, - quadrature, - face_quadrature, - descriptor.data->get_right_hand_side(), - descriptor.data->get_boundary_values(), - *descriptor.kelly_weight); - break; - } - - default: - AssertThrow (false, ExcInternalError()); + case ProblemDescription::dual_weighted_error_estimator: + { + solver + = new LaplaceSolver::WeightedResidual (triangulation, + primal_fe, + dual_fe, + quadrature, + face_quadrature, + descriptor.data->get_right_hand_side(), + descriptor.data->get_boundary_values(), + *descriptor.dual_functional); + break; + } + + case ProblemDescription::global_refinement: + { + solver + = new LaplaceSolver::RefinementGlobal (triangulation, + primal_fe, + quadrature, + face_quadrature, + descriptor.data->get_right_hand_side(), + 
descriptor.data->get_boundary_values()); + break; + } + + case ProblemDescription::kelly_indicator: + { + solver + = new LaplaceSolver::RefinementKelly (triangulation, + primal_fe, + quadrature, + face_quadrature, + descriptor.data->get_right_hand_side(), + descriptor.data->get_boundary_values()); + break; + } + + case ProblemDescription::weighted_kelly_indicator: + { + solver + = new LaplaceSolver::RefinementWeightedKelly (triangulation, + primal_fe, + quadrature, + face_quadrature, + descriptor.data->get_right_hand_side(), + descriptor.data->get_boundary_values(), + *descriptor.kelly_weight); + break; + } + + default: + AssertThrow (false, ExcInternalError()); } - // Now that all objects are in - // place, run the main loop. The - // stopping criterion is - // implemented at the bottom of the - // loop. - // - // In the loop, first set the new - // cycle number, then solve the - // problem, output its solution(s), - // apply the evaluation objects to - // it, then decide whether we want - // to refine the mesh further and - // solve again on this mesh, or - // jump out of the loop. + // Now that all objects are in + // place, run the main loop. The + // stopping criterion is + // implemented at the bottom of the + // loop. + // + // In the loop, first set the new + // cycle number, then solve the + // problem, output its solution(s), + // apply the evaluation objects to + // it, then decide whether we want + // to refine the mesh further and + // solve again on this mesh, or + // jump out of the loop. for (unsigned int step=0; true; ++step) { - std::cout << "Refinement cycle: " << step - << std::endl; + std::cout << "Refinement cycle: " << step + << std::endl; - solver->set_refinement_cycle (step); - solver->solve_problem (); - solver->output_solution (); + solver->set_refinement_cycle (step); + solver->solve_problem (); + solver->output_solution (); - std::cout << " Number of degrees of freedom=" - << solver->n_dofs() << std::endl; + std::cout << " Number of degrees of freedom=" + << solver->n_dofs() << std::endl; - for (typename EvaluatorList::const_iterator - e = descriptor.evaluator_list.begin(); - e != descriptor.evaluator_list.end(); ++e) - { - (*e)->set_refinement_cycle (step); - solver->postprocess (**e); - } + for (typename EvaluatorList::const_iterator + e = descriptor.evaluator_list.begin(); + e != descriptor.evaluator_list.end(); ++e) + { + (*e)->set_refinement_cycle (step); + solver->postprocess (**e); + } - if (solver->n_dofs() < descriptor.max_degrees_of_freedom) - solver->refine_grid (); - else - break; + if (solver->n_dofs() < descriptor.max_degrees_of_freedom) + solver->refine_grid (); + else + break; } - // After the loop has run, clean up - // the screen, and delete objects - // no more needed: + // After the loop has run, clean up + // the screen, and delete objects + // no more needed: std::cout << std::endl; delete solver; solver = 0; @@ -3927,17 +3927,17 @@ namespace Step14 - // @sect3{The main function} + // @sect3{The main function} - // Here finally comes the main - // function. It drives the whole - // process by specifying a set of - // parameters to be used for the - // simulation (polynomial degrees, - // evaluation and dual functionals, - // etc), and passes them packed into - // a structure to the frame work - // class above. + // Here finally comes the main + // function. 
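Stripped of the deal.II specifics, the run() function above is the classical solve/evaluate/refine driver loop: solve on the current mesh, write output, apply the evaluation objects, and refine as long as the number of degrees of freedom stays below the prescribed maximum. The sketch below reproduces that control flow with a hypothetical ToySolver whose refine_grid() merely inflates a dof counter, so that the loop structure can be compiled and run in isolation.

#include <iostream>

struct ToySolver
{
  unsigned int dofs = 8;
  void set_refinement_cycle (const unsigned int) {}
  void solve_problem () {}
  void output_solution () {}
  unsigned int n_dofs () const { return dofs; }
  void refine_grid () { dofs *= 4; }   // pretend refinement quadruples the dofs
};

int main ()
{
  ToySolver          solver;
  const unsigned int max_degrees_of_freedom = 20000;

  for (unsigned int step = 0; true; ++step)
    {
      std::cout << "Refinement cycle: " << step << std::endl;

      solver.set_refinement_cycle (step);
      solver.solve_problem ();
      solver.output_solution ();

      std::cout << "   Number of degrees of freedom="
                << solver.n_dofs () << std::endl;

      // (the evaluation objects would be applied to the solution here)

      if (solver.n_dofs () < max_degrees_of_freedom)
        solver.refine_grid ();
      else
        break;
    }
}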
It drives the whole + // process by specifying a set of + // parameters to be used for the + // simulation (polynomial degrees, + // evaluation and dual functionals, + // etc), and passes them packed into + // a structure to the frame work + // class above. int main () { try @@ -3946,128 +3946,128 @@ int main () using namespace Step14; deallog.depth_console (0); - // Describe the problem we want - // to solve here by passing a - // descriptor object to the - // function doing the rest of - // the work: + // Describe the problem we want + // to solve here by passing a + // descriptor object to the + // function doing the rest of + // the work: const unsigned int dim = 2; Framework::ProblemDescription descriptor; - // First set the refinement - // criterion we wish to use: + // First set the refinement + // criterion we wish to use: descriptor.refinement_criterion - = Framework::ProblemDescription::dual_weighted_error_estimator; - // Here, we could as well have - // used global_refinement - // or - // weighted_kelly_indicator. Note - // that the information given - // about dual finite elements, - // dual functional, etc is only - // important for the given - // choice of refinement - // criterion, and is ignored - // otherwise. - - // Then set the polynomial - // degrees of primal and dual - // problem. We choose here - // bi-linear and bi-quadratic - // ones: + = Framework::ProblemDescription::dual_weighted_error_estimator; + // Here, we could as well have + // used global_refinement + // or + // weighted_kelly_indicator. Note + // that the information given + // about dual finite elements, + // dual functional, etc is only + // important for the given + // choice of refinement + // criterion, and is ignored + // otherwise. + + // Then set the polynomial + // degrees of primal and dual + // problem. We choose here + // bi-linear and bi-quadratic + // ones: descriptor.primal_fe_degree = 1; descriptor.dual_fe_degree = 2; - // Then set the description of - // the test case, i.e. domain, - // boundary values, and right - // hand side. These are - // prepackaged in classes. We - // take here the description of - // Exercise_2_3, but you - // can also use - // CurvedRidges@: + // Then set the description of + // the test case, i.e. domain, + // boundary values, and right + // hand side. These are + // prepackaged in classes. We + // take here the description of + // Exercise_2_3, but you + // can also use + // CurvedRidges@: descriptor.data = new Data::SetUp,dim> (); - // Next set first a dual - // functional, then a list of - // evaluation objects. We - // choose as default the - // evaluation of the - // value at an - // evaluation point, - // represented by the classes - // PointValueEvaluation - // in the namespaces of - // evaluation and dual - // functional classes. You can - // also set the - // PointXDerivativeEvaluation - // classes for the x-derivative - // instead of the value - // at the evaluation point. - // - // Note that dual functional - // and evaluation objects - // should match. However, you - // can give as many evaluation - // functionals as you want, so - // you can have both point - // value and derivative - // evaluated after each step. - // One such additional - // evaluation is to output the - // grid in each step. + // Next set first a dual + // functional, then a list of + // evaluation objects. 
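As the comments note, only the refinement_criterion field has to change if one wants to try one of the other strategies. A hypothetical variation of the settings above, given as a fragment for the body of main() rather than a complete program, that switches to the plain Kelly indicator; the dual finite element degree and the dual functional are then simply ignored.

// fragment: assumes it replaces the corresponding lines inside main()
descriptor.refinement_criterion
  = Framework<dim>::ProblemDescription::kelly_indicator;

descriptor.primal_fe_degree = 1;
descriptor.dual_fe_degree   = 2;   // ignored for this refinement criterion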
We + // choose as default the + // evaluation of the + // value at an + // evaluation point, + // represented by the classes + // PointValueEvaluation + // in the namespaces of + // evaluation and dual + // functional classes. You can + // also set the + // PointXDerivativeEvaluation + // classes for the x-derivative + // instead of the value + // at the evaluation point. + // + // Note that dual functional + // and evaluation objects + // should match. However, you + // can give as many evaluation + // functionals as you want, so + // you can have both point + // value and derivative + // evaluated after each step. + // One such additional + // evaluation is to output the + // grid in each step. const Point evaluation_point (0.75, 0.75); descriptor.dual_functional - = new DualFunctional::PointValueEvaluation (evaluation_point); + = new DualFunctional::PointValueEvaluation (evaluation_point); Evaluation::PointValueEvaluation - postprocessor1 (evaluation_point); + postprocessor1 (evaluation_point); Evaluation::GridOutput - postprocessor2 ("grid"); + postprocessor2 ("grid"); descriptor.evaluator_list.push_back (&postprocessor1); descriptor.evaluator_list.push_back (&postprocessor2); - // Set the maximal number of - // degrees of freedom after - // which we want the program to - // stop refining the mesh - // further: + // Set the maximal number of + // degrees of freedom after + // which we want the program to + // stop refining the mesh + // further: descriptor.max_degrees_of_freedom = 20000; - // Finally pass the descriptor - // object to a function that - // runs the entire solution - // with it: + // Finally pass the descriptor + // object to a function that + // runs the entire solution + // with it: Framework::run (descriptor); } - // Catch exceptions to give - // information about things that - // failed: + // Catch exceptions to give + // information about things that + // failed: catch (std::exception &exc) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Exception on processing: " << std::endl - << exc.what() << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << exc.what() << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } catch (...) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Unknown exception!" << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } diff --git a/deal.II/examples/step-15/step-15.cc b/deal.II/examples/step-15/step-15.cc index bf86ce174f..6e4c008397 100644 --- a/deal.II/examples/step-15/step-15.cc +++ b/deal.II/examples/step-15/step-15.cc @@ -48,51 +48,51 @@ #include #include - // The last step is as in all - // previous programs: + // The last step is as in all + // previous programs: namespace Step15 { using namespace dealii; - // The first thing we have here is a helper - // function that computes an even power $|v|^n$ - // of a vector $v$, by evaluating - // $(v\cdot v)^{n/2}$. 
We need this in the - // computations below where we do not want to - // dwell on the fact that the gradient of the - // solution is actually a scalar in the 1d - // situation we consider in this program (in - // 1d, the gradient is a vector with a single - // element, which is easily extracted). Small - // tricks like this make it significantly - // simpler to later extend a program so that - // it also runs in higher space dimensions. - // - // While the implementation of the function - // is obvious, note the assertion at the - // beginning of the function body, which - // makes sure that the exponent is indeed an - // even number (here, we use that n/2 is - // computed in integer arithmetic, i.e. any - // remainder of the division is - // lost). ExcMessage is a pre-defined - // exception class that takes a string - // argument explaining what goes wrong. It is - // a simpler way to declare exceptions than - // the ones shown in step-9 and step-13/14 - // where we explicitly declared exception - // classes. However, by using a generic - // exception class, we lose the ability to - // attach additional information at run-time - // to the exception message, such as the - // value of the variable n. By following - // the way explained in above example - // programs, adding this feature is simple, - // though. + // The first thing we have here is a helper + // function that computes an even power $|v|^n$ + // of a vector $v$, by evaluating + // $(v\cdot v)^{n/2}$. We need this in the + // computations below where we do not want to + // dwell on the fact that the gradient of the + // solution is actually a scalar in the 1d + // situation we consider in this program (in + // 1d, the gradient is a vector with a single + // element, which is easily extracted). Small + // tricks like this make it significantly + // simpler to later extend a program so that + // it also runs in higher space dimensions. + // + // While the implementation of the function + // is obvious, note the assertion at the + // beginning of the function body, which + // makes sure that the exponent is indeed an + // even number (here, we use that n/2 is + // computed in integer arithmetic, i.e. any + // remainder of the division is + // lost). ExcMessage is a pre-defined + // exception class that takes a string + // argument explaining what goes wrong. It is + // a simpler way to declare exceptions than + // the ones shown in step-9 and step-13/14 + // where we explicitly declared exception + // classes. However, by using a generic + // exception class, we lose the ability to + // attach additional information at run-time + // to the exception message, such as the + // value of the variable n. By following + // the way explained in above example + // programs, adding this feature is simple, + // though. template inline double gradient_power (const Tensor<1,dim> &v, - const unsigned int n) + const unsigned int n) { Assert ((n/2)*2 == n, ExcMessage ("Value of 'n' must be even")); double p = 1; @@ -103,55 +103,55 @@ namespace Step15 - // Secondly, we declare a class that defines - // our initial values for the nonlinear - // iteration. It is a function object, - // i.e. it has a member operator that returns - // for a given point the value of the - // function. The value we return is a random - // perturbation of the $x^{1/3}$ function - // which we know is the optimal solution in a - // larger function space. 
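Mirroring the gradient_power helper discussed above, here is a self-contained version that uses a plain std::array in place of Tensor<1,dim>, so it compiles without deal.II, and a standard assert() in place of the Assert/ExcMessage pair; the logic, raising the squared norm to the power n/2, is the same.

#include <array>
#include <cassert>
#include <iostream>

template <int dim>
double gradient_power (const std::array<double,dim> &v,
                       const unsigned int            n)
{
  assert ((n/2)*2 == n && "Value of 'n' must be even");

  // |v|^n = (v.v)^(n/2): multiply the squared norm onto p, n/2 times
  double v_dot_v = 0;
  for (int d = 0; d < dim; ++d)
    v_dot_v += v[d]*v[d];

  double p = 1;
  for (unsigned int k = 0; k < n/2; ++k)
    p *= v_dot_v;
  return p;
}

int main ()
{
  const std::array<double,1> grad = {3.0};                 // a 1d "gradient"
  std::cout << gradient_power<1> (grad, 4) << std::endl;   // prints 81
}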
To make things a - // little simpler on the optimizer, we return - // zero if the proposed random value is - // negative. - // - // Note that this class works strictly only - // for 1d. If the program is to be extended - // to higher space dimensions, so has to be - // this class. + // Secondly, we declare a class that defines + // our initial values for the nonlinear + // iteration. It is a function object, + // i.e. it has a member operator that returns + // for a given point the value of the + // function. The value we return is a random + // perturbation of the $x^{1/3}$ function + // which we know is the optimal solution in a + // larger function space. To make things a + // little simpler on the optimizer, we return + // zero if the proposed random value is + // negative. + // + // Note that this class works strictly only + // for 1d. If the program is to be extended + // to higher space dimensions, so has to be + // this class. class InitializationValues : public Function<1> { public: InitializationValues () : Function<1>() {} virtual double value (const Point<1> &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; }; - // So here comes the function that implements - // the function object. The base value is - // $x^{1/3}$, while random is a random - // number between -1 and 1 (note that - // rand() returns a random integer value - // between zero and RAND_MAX; to convert - // it to a floating point value between 0 and - // 2, we have to divide by RAND_MAX and - // multiply by two -- note that the first - // multiplication has to happen in floating - // point arithmetic, so that the division is - // done in non-truncating floating point mode - // as well; the final step is then to shift - // the interval [0,2] to [-1,1]). - // - // In a second step, we add the base value - // and a random value in [-0.1,0.1] together - // and return it, unless it is less than - // zero, in which case we take zero. + // So here comes the function that implements + // the function object. The base value is + // $x^{1/3}$, while random is a random + // number between -1 and 1 (note that + // rand() returns a random integer value + // between zero and RAND_MAX; to convert + // it to a floating point value between 0 and + // 2, we have to divide by RAND_MAX and + // multiply by two -- note that the first + // multiplication has to happen in floating + // point arithmetic, so that the division is + // done in non-truncating floating point mode + // as well; the final step is then to shift + // the interval [0,2] to [-1,1]). + // + // In a second step, we add the base value + // and a random value in [-0.1,0.1] together + // and return it, unless it is less than + // zero, in which case we take zero. double InitializationValues::value (const Point<1> &p, - const unsigned int) const + const unsigned int) const { const double base = std::pow(p(0), 1./3.); const double random = 2.*rand()/RAND_MAX-1; @@ -160,37 +160,37 @@ namespace Step15 - // Next is the declaration of the main - // class. As in most of the previous example - // programs, the public interface of the - // class consists only of a constructor and a - // run function that does the actual - // work. The constructor takes an additional - // argument that indicates the number of the - // run we are presently performing. This - // value is only used at the very end when we - // generate graphical output with a filename - // that matches this number. 
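For reference, the value() recipe described above can be condensed into a free function of a single coordinate, rather than a dealii::Function<1>, which makes the snippet compilable on its own; the perturbation amplitude of 0.1 is the one stated in the comments.

#include <algorithm>
#include <cmath>
#include <cstdlib>
#include <iostream>

// x^(1/3) plus a random perturbation in [-0.1, 0.1], clipped at zero
double initial_value (const double x)
{
  const double base   = std::pow (x, 1./3.);
  const double random = 2. * std::rand () / RAND_MAX - 1.;   // in [-1, 1]
  return std::max (0., base + 0.1 * random);
}

int main ()
{
  for (double x = 0; x <= 1.0; x += 0.25)
    std::cout << "u0(" << x << ") = " << initial_value (x) << std::endl;
}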
- // - // The private section of the class has the - // usual assortment of functions setting up - // the computations, doing one nonlinear - // step, refineming the mesh, doing a line - // search for step length computations, - // etc. The energy function computes the - // value of the optimization functional on an - // arbitrary finite element function with - // nodal values given on the DoFHandler - // given as an argument. Since it does not - // depend on the state of this object, we - // declare this function as static. - // - // The member variables of this class are - // what we have seen before, and the - // variables that characterize the linear - // system to be solved in the next nonlinear - // step, as well as the present approximation - // of the solution. + // Next is the declaration of the main + // class. As in most of the previous example + // programs, the public interface of the + // class consists only of a constructor and a + // run function that does the actual + // work. The constructor takes an additional + // argument that indicates the number of the + // run we are presently performing. This + // value is only used at the very end when we + // generate graphical output with a filename + // that matches this number. + // + // The private section of the class has the + // usual assortment of functions setting up + // the computations, doing one nonlinear + // step, refineming the mesh, doing a line + // search for step length computations, + // etc. The energy function computes the + // value of the optimization functional on an + // arbitrary finite element function with + // nodal values given on the DoFHandler + // given as an argument. Since it does not + // depend on the state of this object, we + // declare this function as static. + // + // The member variables of this class are + // what we have seen before, and the + // variables that characterize the linear + // system to be solved in the next nonlinear + // step, as well as the present approximation + // of the solution. template class MinimizationProblem { @@ -208,7 +208,7 @@ namespace Step15 void refine_grid (); static double energy (const DoFHandler &dof_handler, - const Vector &function); + const Vector &function); const unsigned int run_number; @@ -229,92 +229,92 @@ namespace Step15 - // The constructor of this class is actually - // somewhat boring: + // The constructor of this class is actually + // somewhat boring: template MinimizationProblem::MinimizationProblem (const unsigned int run_number) - : - run_number (run_number), - fe (1), - dof_handler (triangulation) + : + run_number (run_number), + fe (1), + dof_handler (triangulation) {} - // Then, here is the function that - // initializes the solution before the first - // non-linear iteration, by setting the - // initial values to the random function - // described above and making sure that the - // boundary values are set correctly. We will - // then only seek updates to this function - // with zero boundary values, so that the - // boundary values are always correct. - // - // Note how we have specialized this function - // to 1d only. We do this since the second - // part of the function, where we deal with - // boundary values, is only correct if we are - // in 1d. Not generating a general template - // for this function prevents the compiler - // from erroneously compiling this function - // for other space dimensions, then. 
+ // Then, here is the function that + // initializes the solution before the first + // non-linear iteration, by setting the + // initial values to the random function + // described above and making sure that the + // boundary values are set correctly. We will + // then only seek updates to this function + // with zero boundary values, so that the + // boundary values are always correct. + // + // Note how we have specialized this function + // to 1d only. We do this since the second + // part of the function, where we deal with + // boundary values, is only correct if we are + // in 1d. Not generating a general template + // for this function prevents the compiler + // from erroneously compiling this function + // for other space dimensions, then. template <> void MinimizationProblem<1>::initialize_solution () { - // The first part is to assign the correct - // size to the vector, and use library - // function that takes a function object, - // and interpolates the given vector living - // on a DoFHandler to this function - // object: + // The first part is to assign the correct + // size to the vector, and use library + // function that takes a function object, + // and interpolates the given vector living + // on a DoFHandler to this function + // object: present_solution.reinit (dof_handler.n_dofs()); VectorTools::interpolate (dof_handler, - InitializationValues(), - present_solution); - - // Then we still have to make sure that we - // get the boundary values right. This - // could have been done inside the - // InitializationValues class, but it - // is instructive to see how it can also be - // done, in particular since it is so - // simple in 1d. First, start out with an - // arbitrary cell on level 0, i.e. the - // coarse mesh: + InitializationValues(), + present_solution); + + // Then we still have to make sure that we + // get the boundary values right. This + // could have been done inside the + // InitializationValues class, but it + // is instructive to see how it can also be + // done, in particular since it is so + // simple in 1d. First, start out with an + // arbitrary cell on level 0, i.e. the + // coarse mesh: DoFHandler<1>::cell_iterator cell; cell = dof_handler.begin(0); - // Then move as far to the left as - // possible. Note that while in two or more - // space dimensions, there is is no - // guarantee as to the coordinate - // directions of a given face number of a - // cell, in 1d the zeroth face (and - // neighbor) is always the one to the left, - // and the first one the one to the - // right. Similarly, the zeroth child is - // the left one, the first child is the - // right one. + // Then move as far to the left as + // possible. Note that while in two or more + // space dimensions, there is is no + // guarantee as to the coordinate + // directions of a given face number of a + // cell, in 1d the zeroth face (and + // neighbor) is always the one to the left, + // and the first one the one to the + // right. Similarly, the zeroth child is + // the left one, the first child is the + // right one. 
while (cell->at_boundary(0) == false) cell = cell->neighbor(0); - // Now that we are at the leftmost coarse - // grid cell, go recursively through its - // left children until we find a terminal - // one: + // Now that we are at the leftmost coarse + // grid cell, go recursively through its + // left children until we find a terminal + // one: while (cell->has_children() == true) cell = cell->child(0); - // Then set the value of the solution - // corresponding to the zeroth degree of - // freedom and the zeroth vertex of the - // cell to zero. Note that the zeroth - // vertex is the left one, and that zero is - // the only valid second argument to the - // call to vertex_dof_index, since we - // have a scalar finite element; thus, - // there is only a single component. + // Then set the value of the solution + // corresponding to the zeroth degree of + // freedom and the zeroth vertex of the + // cell to zero. Note that the zeroth + // vertex is the left one, and that zero is + // the only valid second argument to the + // call to vertex_dof_index, since we + // have a scalar finite element; thus, + // there is only a single component. present_solution(cell->vertex_dof_index(0,0)) = 0; - // Now do all the same with the right - // boundary value, and set it to one: + // Now do all the same with the right + // boundary value, and set it to one: cell = dof_handler.begin(0); while (cell->at_boundary(1) == false) cell = cell->neighbor(1); @@ -324,37 +324,37 @@ namespace Step15 } - // The function that prepares the member - // variables of this class for assembling the - // linear system in each nonlinear step is - // also not very interesting. This has all - // been shown before in previous example - // programs. Note, however, that all this - // works in 1d just as in any other space - // dimension, and would not require any - // changes if we were to use the program in - // another space dimension. - // - // Note that this function is only called - // when the mesh has been changed (or before - // the first nonlinear step). It only - // initializes the variables to their right - // sizes, but since these sizes don't change - // as long as we don't change the mesh, we - // can use them for more than just one - // nonlinear iteration without reinitializing - // them. + // The function that prepares the member + // variables of this class for assembling the + // linear system in each nonlinear step is + // also not very interesting. This has all + // been shown before in previous example + // programs. Note, however, that all this + // works in 1d just as in any other space + // dimension, and would not require any + // changes if we were to use the program in + // another space dimension. + // + // Note that this function is only called + // when the mesh has been changed (or before + // the first nonlinear step). It only + // initializes the variables to their right + // sizes, but since these sizes don't change + // as long as we don't change the mesh, we + // can use them for more than just one + // nonlinear iteration without reinitializing + // them. 
template void MinimizationProblem::setup_system_on_mesh () { hanging_node_constraints.clear (); DoFTools::make_hanging_node_constraints (dof_handler, - hanging_node_constraints); + hanging_node_constraints); hanging_node_constraints.close (); sparsity_pattern.reinit (dof_handler.n_dofs(), - dof_handler.n_dofs(), - dof_handler.max_couplings_between_dofs()); + dof_handler.n_dofs(), + dof_handler.max_couplings_between_dofs()); DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); hanging_node_constraints.condense (sparsity_pattern); @@ -364,61 +364,61 @@ namespace Step15 - // Next is the function that assembles the - // linear system. The first part, - // initializing various local variables is - // what we have been doing previously - // already. + // Next is the function that assembles the + // linear system. The first part, + // initializing various local variables is + // what we have been doing previously + // already. template void MinimizationProblem::assemble_step () { - // The first two lines of the function - // clear the matrix and right hand side - // values of their prior content, which - // could possibly still be there from the - // previous nonlinear step. + // The first two lines of the function + // clear the matrix and right hand side + // values of their prior content, which + // could possibly still be there from the + // previous nonlinear step. matrix.reinit (sparsity_pattern); residual.reinit (dof_handler.n_dofs()); - // Then we initialize a FEValues object - // with a 4-point Gauss quadrature - // formula. This object will be used to - // compute the values and gradients of the - // shape functions at the quadrature - // points, which we need to assemble the - // matrix and right hand side of the - // nonlinear step as outlined in the - // introduction to this example program. In - // order to compute values and gradients, - // we need to pass the update_values - // and update_gradients flags to the - // constructor, and the - // update_JxW_values flag for the - // Jacobian times the weight at a - // quadrature point. In addition, we need - // to have the coordinate values of each - // quadrature point in real space for the - // $x-u^3$ terms; to get these from the - // FEValues object, we need to pass it - // the update_quadrature_points flag. - // - // It is a simple calculation to figure out - // that for linear elements, the integrals - // in the right hand side semilinear form - // is a polynomial of sixth order. Thus, - // the appropriate quadrature formula is - // the one we have chosen here. + // Then we initialize a FEValues object + // with a 4-point Gauss quadrature + // formula. This object will be used to + // compute the values and gradients of the + // shape functions at the quadrature + // points, which we need to assemble the + // matrix and right hand side of the + // nonlinear step as outlined in the + // introduction to this example program. In + // order to compute values and gradients, + // we need to pass the update_values + // and update_gradients flags to the + // constructor, and the + // update_JxW_values flag for the + // Jacobian times the weight at a + // quadrature point. In addition, we need + // to have the coordinate values of each + // quadrature point in real space for the + // $x-u^3$ terms; to get these from the + // FEValues object, we need to pass it + // the update_quadrature_points flag. 
+ // + // It is a simple calculation to figure out + // that for linear elements, the integrals + // in the right hand side semilinear form + // is a polynomial of sixth order. Thus, + // the appropriate quadrature formula is + // the one we have chosen here. QGauss quadrature_formula(4); FEValues fe_values (fe, quadrature_formula, - update_values | update_gradients | - update_quadrature_points | update_JxW_values); - - // Next, here are the usual two convenience - // variables, followed by declarations for - // the local contributions to matrix and - // right hand side, as well as an array to - // hold the indices of the local degrees of - // freedom on each cell: + update_values | update_gradients | + update_quadrature_points | update_JxW_values); + + // Next, here are the usual two convenience + // variables, followed by declarations for + // the local contributions to matrix and + // right hand side, as well as an array to + // hold the indices of the local degrees of + // freedom on each cell: const unsigned int dofs_per_cell = fe.dofs_per_cell; const unsigned int n_q_points = quadrature_formula.size(); @@ -427,369 +427,369 @@ namespace Step15 std::vector local_dof_indices (dofs_per_cell); - // The next two variables are needed since - // the problem we consider is nonlinear, - // and thus the right hand side depends on - // the previous solution (in a Newton - // method, for example, the left hand side - // matrix would also depend on the previous - // solution, but as explained in the - // introduction, we only use a simple - // gradient-type method in which the matrix - // is a scaled Laplace-type matrix). In - // order to compute the values of the - // integrand for the right hand side, we - // therefore need to have the values and - // gradients of the previous solution at - // the quadrature points. We will get them - // from the FEValues object above, and - // will put them into the following two - // variables: + // The next two variables are needed since + // the problem we consider is nonlinear, + // and thus the right hand side depends on + // the previous solution (in a Newton + // method, for example, the left hand side + // matrix would also depend on the previous + // solution, but as explained in the + // introduction, we only use a simple + // gradient-type method in which the matrix + // is a scaled Laplace-type matrix). In + // order to compute the values of the + // integrand for the right hand side, we + // therefore need to have the values and + // gradients of the previous solution at + // the quadrature points. We will get them + // from the FEValues object above, and + // will put them into the following two + // variables: std::vector local_solution_values (n_q_points); std::vector > local_solution_grads (n_q_points); - // Now, here comes the main loop over all - // the cells of the mesh: + // Now, here comes the main loop over all + // the cells of the mesh: typename DoFHandler::active_cell_iterator cell = dof_handler.begin_active(), endc = dof_handler.end(); for (; cell!=endc; ++cell) { - // First, clear the objects that hold - // the local matrix and right hand side - // contributions for this cell: - cell_matrix = 0; - cell_rhs = 0; - - // Then initialize the values and - // gradients of the shape functions at - // the quadrature points of this cell: - fe_values.reinit (cell); - - // And get the values and gradients of - // the previous solution at the - // quadrature points. 
To get them, we - // don't actually have to do much, - // except for giving the FEValues - // object the global node vector from - // which to compute this data, and a - // reference to the objects into which - // to put them. After the calls, the - // local_solution_values and - // local_solution_values variables - // will contain values and gradients - // for each of the quadrature points on - // this cell. - fe_values.get_function_values (present_solution, - local_solution_values); - fe_values.get_function_grads (present_solution, - local_solution_grads); - - // Then loop over all quadrature - // points: - for (unsigned int q_point=0; q_point u_prime = local_solution_grads[q_point]; - - // Then do the double loop over all - // shape functions to compute the - // local contribution to the - // matrix. The terms are simple - // equivalents of the formula - // stated in the introduction. Note - // how we extract the size of an - // element from the iterator to the - // present cell: - for (unsigned int i=0; idiameter() * - cell->diameter() - + - fe_values.shape_value(i,q_point) * - fe_values.shape_value(j,q_point)) * - fe_values.JxW(q_point); - - // And here comes the loop over all - // local degrees of freedom to form - // the right hand side. The formula - // looks a little convoluted, but - // is again a simple image of what - // was given in the introduction: - for (unsigned int i=0; iget_dof_indices (local_dof_indices); - for (unsigned int i=0; iFEValues + // object the global node vector from + // which to compute this data, and a + // reference to the objects into which + // to put them. After the calls, the + // local_solution_values and + // local_solution_values variables + // will contain values and gradients + // for each of the quadrature points on + // this cell. + fe_values.get_function_values (present_solution, + local_solution_values); + fe_values.get_function_grads (present_solution, + local_solution_grads); + + // Then loop over all quadrature + // points: + for (unsigned int q_point=0; q_point u_prime = local_solution_grads[q_point]; + + // Then do the double loop over all + // shape functions to compute the + // local contribution to the + // matrix. The terms are simple + // equivalents of the formula + // stated in the introduction. Note + // how we extract the size of an + // element from the iterator to the + // present cell: + for (unsigned int i=0; idiameter() * + cell->diameter() + + + fe_values.shape_value(i,q_point) * + fe_values.shape_value(j,q_point)) * + fe_values.JxW(q_point); + + // And here comes the loop over all + // local degrees of freedom to form + // the right hand side. The formula + // looks a little convoluted, but + // is again a simple image of what + // was given in the introduction: + for (unsigned int i=0; iget_dof_indices (local_dof_indices); + for (unsigned int i=0; iif statement - // in front of the second function call. - // - // Note that we need zero boundary - // conditions on both ends, since the space - // in which search for the solution has - // fixed boundary conditions zero and one, - // and we have set the initial values to - // already satisfy them. Thus, the updates - // computed in each nonlinear step must - // have zero boundary values. + // %Boundary values are, too, but with a + // twist this time: in all previous example + // programs, we have used that by default + // (i.e. unless something else is set), all + // boundaries have indicator zero. 
To + // figure out what boundary indicator a + // face of a cell had, the library + // functions would query an iterator + // designating this face, which would in + // turn pluck out this value from some of + // the data structures in the + // library. Unfortunately, in 1d cells have + // no faces: these would only be points, + // and we don't associated anything in the + // library with points except for their + // coordinates. Thus there are no face + // iterators, and no way to figure out + // which boundary indicator it may have. On + // the other hand, in 1d, there can only be + // two boundaries anyway for a connected + // domain: the left end point and the right + // end point. And in contrast to the case + // in higher dimensions, where the + // (changeable) default is zero for all + // boundary parts, in 1d the convention is + // that the left boundary point has + // indicator zero, while the right boundary + // point has indicator one. Since there are + // no face iterators, it is also not + // possible to change this, but you will + // hardly ever have to. So in order to + // assign zero boundary values on both + // sides, in 1d we not only need to + // evaluate boundary values for indicator + // zero, but also for indicator one. If + // this program is ever going to be run in + // higher dimensions, then we should only + // evaluate for indicator zero, which is + // why we have placed the if statement + // in front of the second function call. + // + // Note that we need zero boundary + // conditions on both ends, since the space + // in which search for the solution has + // fixed boundary conditions zero and one, + // and we have set the initial values to + // already satisfy them. Thus, the updates + // computed in each nonlinear step must + // have zero boundary values. std::map boundary_values; VectorTools::interpolate_boundary_values (dof_handler, - 0, - ZeroFunction(), - boundary_values); + 0, + ZeroFunction(), + boundary_values); if (dim == 1) VectorTools::interpolate_boundary_values (dof_handler, - 1, - ZeroFunction(), - boundary_values); + 1, + ZeroFunction(), + boundary_values); Vector dummy (residual.size()); MatrixTools::apply_boundary_values (boundary_values, - matrix, - dummy, - residual); + matrix, + dummy, + residual); } - // Once we have a search (update) direction, - // we need to figure out how far to go in - // this direction. This is what line search - // is good for, and this function does - // exactly this: compute and return the - // length of the update step. - // - // Since we already know the direction, we - // only have to solve the one-dimensional - // problem of minimizing the energy along - // this direction. Note, however, that in - // general we do not have the gradient of the - // energy functional in this direction, so we - // have to approximate it (and the second - // derivatives) using finite differences. - // - // In most applications, it is sufficient to - // find an approximate minimizer of this - // one-dimensional problem, or even just a - // point that may not be a minimizer but - // instead just satisfies a few conditions - // like those of Armijo and Goldstein. The - // rational for this is generally that - // evaluating the objective function too - // often is too expensive. However, here, we - // are a little more lenient, since the - // overall run-time is dominated by inverting - // the system matrix in each nonlinear - // step. 
Thus, we will do this minimization - // by using a fixed number of five Newton - // steps in this one-dimensional problem, and - // using a bisection algorithm as a substep - // in it. - // - // As is quite common in step length - // procedures, this function contains a fair - // number of heuristics and strategies that - // might not be obvious at first. Step length - // determination is notorious for its - // complications, and this implementation is - // not an exception. Note that if one tries - // to omit the special-casing, then one - // oftentimes encounters situations where the - // found step length is really not very good. + // Once we have a search (update) direction, + // we need to figure out how far to go in + // this direction. This is what line search + // is good for, and this function does + // exactly this: compute and return the + // length of the update step. + // + // Since we already know the direction, we + // only have to solve the one-dimensional + // problem of minimizing the energy along + // this direction. Note, however, that in + // general we do not have the gradient of the + // energy functional in this direction, so we + // have to approximate it (and the second + // derivatives) using finite differences. + // + // In most applications, it is sufficient to + // find an approximate minimizer of this + // one-dimensional problem, or even just a + // point that may not be a minimizer but + // instead just satisfies a few conditions + // like those of Armijo and Goldstein. The + // rational for this is generally that + // evaluating the objective function too + // often is too expensive. However, here, we + // are a little more lenient, since the + // overall run-time is dominated by inverting + // the system matrix in each nonlinear + // step. Thus, we will do this minimization + // by using a fixed number of five Newton + // steps in this one-dimensional problem, and + // using a bisection algorithm as a substep + // in it. + // + // As is quite common in step length + // procedures, this function contains a fair + // number of heuristics and strategies that + // might not be obvious at first. Step length + // determination is notorious for its + // complications, and this implementation is + // not an exception. Note that if one tries + // to omit the special-casing, then one + // oftentimes encounters situations where the + // found step length is really not very good. template double MinimizationProblem::line_search (const Vector &update) const { - // Start out with a zero step length: + // Start out with a zero step length: double alpha = 0.; Vector tmp (present_solution.size()); - // Then do at most five Newton steps: + // Then do at most five Newton steps: for (unsigned int step=0; step<5; ++step) { - // At the present location, which is - // present_solution+alpha*update, - // evaluate the energy - tmp = present_solution; - tmp.add (alpha, update); - const double f_a = energy (dof_handler, tmp); - - // Then determine a finite difference - // step length dalpha, and also - // evaluate the energy functional at - // positions alpha+dalpha and - // alpha-dalpha along the search - // direction: - const double dalpha = (alpha != 0 ? 
alpha/100 : 0.01); - - tmp = present_solution; - tmp.add (alpha+dalpha, update); - const double f_a_plus = energy (dof_handler, tmp); - - tmp = present_solution; - tmp.add (alpha-dalpha, update); - const double f_a_minus = energy (dof_handler, tmp); - - // From these three data points, we can - // compute a finite difference - // approximation of the first and - // second derivatives: - const double f_a_prime = (f_a_plus-f_a_minus) / (2*dalpha); - const double f_a_doubleprime = ((f_a_plus-2*f_a+f_a_minus) / - (dalpha*dalpha)); - - // If the gradient is (relative to the - // energy value) too small, then this - // means that we have found a minimum - // of the energy functional along the - // search direction. In this case, - // abort here and return the found step - // length value: - if (std::fabs(f_a_prime) < 1e-7*std::fabs(f_a)) - break; - - // Alternatively, also abort if the - // curvature is too small, because we - // can't compute a Newton step - // then. This is somewhat - // unsatisfactory, since we are not at - // a minimum, and can certainly be - // improved. There are a number of - // other strategies for this case, - // which we leave for interested - // readers: - if (std::fabs(f_a_doubleprime) < 1e-7*std::fabs(f_a_prime)) - break; - - // Then compute the Newton step as the - // negative of the inverse Hessian - // applied to the gradient. - double step_length = -f_a_prime / f_a_doubleprime; - - // And do a number of correcting steps: - // if the energy at the predicted new - // position would be larger than at the - // present position, then halve the - // step length and try again. If this - // does not help after three such - // cycles, then simply give up and use - // the value we have. - for (unsigned int i=0; i<3; ++i) - { - tmp = present_solution; - tmp.add (alpha+step_length, update); - const double e = energy (dof_handler, tmp); - - if (e >= f_a) - step_length /= 2; - else - break; - } - - // After all this, update alpha and go - // on to the next Newton step. - alpha += step_length; + // At the present location, which is + // present_solution+alpha*update, + // evaluate the energy + tmp = present_solution; + tmp.add (alpha, update); + const double f_a = energy (dof_handler, tmp); + + // Then determine a finite difference + // step length dalpha, and also + // evaluate the energy functional at + // positions alpha+dalpha and + // alpha-dalpha along the search + // direction: + const double dalpha = (alpha != 0 ? alpha/100 : 0.01); + + tmp = present_solution; + tmp.add (alpha+dalpha, update); + const double f_a_plus = energy (dof_handler, tmp); + + tmp = present_solution; + tmp.add (alpha-dalpha, update); + const double f_a_minus = energy (dof_handler, tmp); + + // From these three data points, we can + // compute a finite difference + // approximation of the first and + // second derivatives: + const double f_a_prime = (f_a_plus-f_a_minus) / (2*dalpha); + const double f_a_doubleprime = ((f_a_plus-2*f_a+f_a_minus) / + (dalpha*dalpha)); + + // If the gradient is (relative to the + // energy value) too small, then this + // means that we have found a minimum + // of the energy functional along the + // search direction. In this case, + // abort here and return the found step + // length value: + if (std::fabs(f_a_prime) < 1e-7*std::fabs(f_a)) + break; + + // Alternatively, also abort if the + // curvature is too small, because we + // can't compute a Newton step + // then. 
This is somewhat + // unsatisfactory, since we are not at + // a minimum, and can certainly be + // improved. There are a number of + // other strategies for this case, + // which we leave for interested + // readers: + if (std::fabs(f_a_doubleprime) < 1e-7*std::fabs(f_a_prime)) + break; + + // Then compute the Newton step as the + // negative of the inverse Hessian + // applied to the gradient. + double step_length = -f_a_prime / f_a_doubleprime; + + // And do a number of correcting steps: + // if the energy at the predicted new + // position would be larger than at the + // present position, then halve the + // step length and try again. If this + // does not help after three such + // cycles, then simply give up and use + // the value we have. + for (unsigned int i=0; i<3; ++i) + { + tmp = present_solution; + tmp.add (alpha+step_length, update); + const double e = energy (dof_handler, tmp); + + if (e >= f_a) + step_length /= 2; + else + break; + } + + // After all this, update alpha and go + // on to the next Newton step. + alpha += step_length; } - // Finally, return with the computed step length. + // Finally, return with the computed step length. return alpha; } - // The next function is again a rather boring - // one: it does one nonlinear step, by - // calling the function that assembles the - // linear system, then solving it, computing - // a step length, and finally updating the - // solution vector. This should all be mostly - // self-explanatory, given that we have shown - // the solution of a linear system before. + // The next function is again a rather boring + // one: it does one nonlinear step, by + // calling the function that assembles the + // linear system, then solving it, computing + // a step length, and finally updating the + // solution vector. This should all be mostly + // self-explanatory, given that we have shown + // the solution of a linear system before. template void MinimizationProblem::do_step () { @@ -798,14 +798,14 @@ namespace Step15 Vector update (present_solution.size()); { SolverControl solver_control (residual.size(), - 1e-2*residual.l2_norm()); + 1e-2*residual.l2_norm()); SolverCG<> solver (solver_control); PreconditionSSOR<> preconditioner; preconditioner.initialize(matrix); solver.solve (matrix, update, residual, - preconditioner); + preconditioner); hanging_node_constraints.distribute (update); } @@ -815,11 +815,11 @@ namespace Step15 - // The same holds for the function that - // outputs the solution in gnuplot format - // into a file with a name that includes the - // number of the run we are presently - // performing. + // The same holds for the function that + // outputs the solution in gnuplot format + // into a file with a name that includes the + // number of the run we are presently + // performing. template void MinimizationProblem::output_results () const @@ -831,9 +831,9 @@ namespace Step15 std::ostringstream filename; filename << "solution-" - << run_number - << ".gnuplot" - << std::ends; + << run_number + << ".gnuplot" + << std::ends; std::ofstream out (filename.str().c_str()); data_out.write_gnuplot (out); @@ -841,21 +841,21 @@ namespace Step15 - // The function to compute error indicator - // and refine the mesh accordingly is a - // little more interesting. In particular, it - // shows some more of the techniques usually - // used in 1d applications. First, note that - // this again is a specialization that only - // works in 1d. 
However, to make later - // extension to higher space dimensions - // simpler, we define a constant integer - // dim at the beginning of the function; - // by using this constant as template - // argument in all places, we are actually - // able to write most of the code as if it - // were dimension independent, thus - // minimizing the amount of later changes. + // The function to compute error indicator + // and refine the mesh accordingly is a + // little more interesting. In particular, it + // shows some more of the techniques usually + // used in 1d applications. First, note that + // this again is a specialization that only + // works in 1d. However, to make later + // extension to higher space dimensions + // simpler, we define a constant integer + // dim at the beginning of the function; + // by using this constant as template + // argument in all places, we are actually + // able to write most of the code as if it + // were dimension independent, thus + // minimizing the amount of later changes. template <> void MinimizationProblem<1>::refine_grid () { @@ -863,411 +863,411 @@ namespace Step15 Vector error_indicators (triangulation.n_active_cells()); - // Then define the quadrature formula, and - // what values we will want to extract from - // the solution. Here, we use the two-point - // trapezoidal rule, i.e. we evaluate the - // residual only at the end points of the - // cells. Incidentally, this also makes - // evaluating the jump terms between cells - // simpler. Note that for the error - // indicators, we not only need values and - // gradients of the solution, but also its - // second derivatives, as well as the - // physical location of quadrature points. + // Then define the quadrature formula, and + // what values we will want to extract from + // the solution. Here, we use the two-point + // trapezoidal rule, i.e. we evaluate the + // residual only at the end points of the + // cells. Incidentally, this also makes + // evaluating the jump terms between cells + // simpler. Note that for the error + // indicators, we not only need values and + // gradients of the solution, but also its + // second derivatives, as well as the + // physical location of quadrature points. QTrapez quadrature; FEValues fe_values (fe, quadrature, - update_values | update_gradients | - update_hessians | - update_quadrature_points | update_JxW_values); - - // The error indicator formula presented in - // the introduction requires us to compute - // jumps of the solution and gradient - // across cell boundaries. Since the - // solution itself is continuous, we only - // need to evaluate the gradient on the - // neighbor cells. To avoid some of the - // work needed to reinitialize a - // FEValues object on a cell, we define - // another such object here that we will - // only use for the neighbor cells. The - // data we need from the side of the - // present cell is provided by above - // object. + update_values | update_gradients | + update_hessians | + update_quadrature_points | update_JxW_values); + + // The error indicator formula presented in + // the introduction requires us to compute + // jumps of the solution and gradient + // across cell boundaries. Since the + // solution itself is continuous, we only + // need to evaluate the gradient on the + // neighbor cells. To avoid some of the + // work needed to reinitialize a + // FEValues object on a cell, we define + // another such object here that we will + // only use for the neighbor cells. 
The + // data we need from the side of the + // present cell is provided by above + // object. FEValues neighbor_fe_values (fe, quadrature, - update_gradients); + update_gradients); - // Then, as before, we need objects holding - // values and derivatives of the solution - // at quadrature points. Here, we also need - // second derivatives, which is simple, - // however: + // Then, as before, we need objects holding + // values and derivatives of the solution + // at quadrature points. Here, we also need + // second derivatives, which is simple, + // however: std::vector local_values (quadrature.size()); std::vector > local_gradients (quadrature.size()); std::vector > local_2nd_derivs (quadrature.size()); - // With all this, we can start the loop - // over all cells. Since we need to write - // the result for each cell into - // consecutive elements of a vector, we - // also keep a running index cell_index - // that we increase with each cell treated. + // With all this, we can start the loop + // over all cells. Since we need to write + // the result for each cell into + // consecutive elements of a vector, we + // also keep a running index cell_index + // that we increase with each cell treated. DoFHandler::active_cell_iterator cell = dof_handler.begin_active (), endc = dof_handler.end (); for (unsigned int cell_index = 0; cell!=endc; ++cell, ++cell_index) { - // After initializing the FEValues - // object on each cell, use it to - // evaluate solution and first and - // second derivatives of it at the - // quadrature points: - fe_values.reinit (cell); - fe_values.get_function_values (present_solution, local_values); - fe_values.get_function_grads (present_solution, local_gradients); - fe_values.get_function_2nd_derivatives (present_solution, local_2nd_derivs); - - // Given the formula in the - // introduction, the computation of the - // cell residuals should actually be - // relatively obvious. The result, - // multiplied by the appropriate power - // of the cell's size is then written - // into the vector of error indicators. - // - // Note that in the following - // computations, we have already made - // use of the fact that we are in 1d, - // since we extract the gradient as a - // scalar value. - double cell_residual_norm = 0; - for (unsigned int q=0; qdiameter() * cell->diameter(); - - // The next step is to evaluate the - // jump terms. To make computations - // somewhat simpler (and to free up the - // local_* variables for use on - // neighboring elements), we define - // some convenience variables for the - // positions of the left and right cell - // boundary point, as well as the - // values and gradients at these - // points. - // - // To be cautious, we don't blindly - // trust that the trapezoidal rule has - // its evaluation points as the left - // and right end point of the cell (it - // could in principle have them in the - // reverse order, i.e. the zeroth point - // is at x=1, and the first one at - // x=0), and use an assertion to - // actually check for this. If this - // would not be the case, an exception - // of the (predefined) class - // ExcInternalError would be - // thrown. Of course, this does not - // happen in this program, but it shows - // a way of defensive coding: if you - // are not sure of an assumption, guard - // it by a test. 
This also guards us - // against possible future changes in - // the library: the quadrature classes - // do not promise any particular order - // of their quadrature points, so the - // QTrapez class could in principle - // change the order of its two - // evaluation points. In that case, - // your code would tell you that - // something changed, rather than - // computing a wrong result when you - // upgrade to a new version of the - // library. (The point made here is - // theoretical: we are not going to - // change the order of evaluation - // points; the intent is simply how to - // add some defensive touches to a - // program that make sure that it - // really does what it is hoped to do.) - // - // Given that we are now sure that - // x_left and x_right, - // extracted from the zeroth and first - // quadrature point, are indeed the - // left and right vertex of the cell, - // we can also be sure that the values - // we extract for u_left et al. are - // the ones we expect them to be, since - // the order of these values must of - // course match the order of the - // quadrature points. - const double x_left = fe_values.quadrature_point(0)[0]; - const double x_right = fe_values.quadrature_point(1)[0]; - - Assert (x_left == cell->vertex(0)[0], ExcInternalError()); - Assert (x_right == cell->vertex(1)[0], ExcInternalError()); - - const double u_left = local_values[0]; - const double u_right = local_values[1]; - - const double u_prime_left = local_gradients[0][0]; - const double u_prime_right = local_gradients[1][0]; - - // Next, we have to check whether this - // cell has a left neighbor: - if (cell->at_boundary(0) == false) - { - // If so, find its left - // neighbor. We do so by asking for - // the cell that is immediately - // adjacent to the left (the zeroth - // neighbor in 1d). However, this - // may be a cell that in itself has - // children, so to get to the - // active left neighbor, we have to - // recursively check whether that - // cell has children, and if so - // take its right child, since that - // is adjacent to the left of the - // present cell. Note that unless - // you are in 1d, there is no safe - // way to assume that the first - // child of the zeroth neighbor is - // indeed adjacent to the present - // cell. Rather, more than one of - // the children of a neighbor may - // be adjacent to the present - // cell. Also note that in two or - // higher space dimensions, a - // neighbor of an active cell may - // only be at most once refined, - // since we have the rule that - // there can only be one hanging - // node per face. This rule does - // not exist in 1d: neighboring - // cells may have totally - // independent refinement - // levels. Thus, we really need the - // while loop, not only an - // if clause. - DoFHandler::cell_iterator left_neighbor = cell->neighbor(0); - while (left_neighbor->has_children()) - left_neighbor = left_neighbor->child(1); - - // With the so-found neighbor, - // initialize the second - // FEValues object to it, - // extract the gradients of the - // solution there, and from this - // get the gradient at the - // interface (this is the first - // element of local_gradients, - // since the right end point of the - // neighbor cell has index 1) as a - // scalar value (this is the zeroth - // component of - // local_gradients[1]. 
- neighbor_fe_values.reinit (left_neighbor); - neighbor_fe_values.get_function_grads (present_solution, local_gradients); - - const double neighbor_u_prime_left = local_gradients[1][0]; - - // Then compute the jump, and add a - // suitable multiple to the error - // indicator for this cell: - const double left_jump = std::pow(x_left-std::pow(u_left,3), 2) * - (std::pow(neighbor_u_prime_left,5) - - std::pow(u_prime_left,5)); - error_indicators(cell_index) += left_jump * left_jump * - cell->diameter(); - } - - // Once we have done the left neighbor, - // we can play exactly the same game - // with the right neighbor: - if (cell->at_boundary(1) == false) - { - DoFHandler::cell_iterator right_neighbor = cell->neighbor(1); - while (right_neighbor->has_children()) - right_neighbor = right_neighbor->child(0); - - neighbor_fe_values.reinit (right_neighbor); - neighbor_fe_values.get_function_grads (present_solution, local_gradients); - - const double neighbor_u_prime_right = local_gradients[0][0]; - - const double right_jump = std::pow(x_right-std::pow(u_right,3), 2) * - (std::pow(neighbor_u_prime_right,5) - - std::pow(u_prime_right,5)); - error_indicators(cell_index) += right_jump * right_jump * - cell->diameter(); - } + // After initializing the FEValues + // object on each cell, use it to + // evaluate solution and first and + // second derivatives of it at the + // quadrature points: + fe_values.reinit (cell); + fe_values.get_function_values (present_solution, local_values); + fe_values.get_function_grads (present_solution, local_gradients); + fe_values.get_function_2nd_derivatives (present_solution, local_2nd_derivs); + + // Given the formula in the + // introduction, the computation of the + // cell residuals should actually be + // relatively obvious. The result, + // multiplied by the appropriate power + // of the cell's size is then written + // into the vector of error indicators. + // + // Note that in the following + // computations, we have already made + // use of the fact that we are in 1d, + // since we extract the gradient as a + // scalar value. + double cell_residual_norm = 0; + for (unsigned int q=0; qdiameter() * cell->diameter(); + + // The next step is to evaluate the + // jump terms. To make computations + // somewhat simpler (and to free up the + // local_* variables for use on + // neighboring elements), we define + // some convenience variables for the + // positions of the left and right cell + // boundary point, as well as the + // values and gradients at these + // points. + // + // To be cautious, we don't blindly + // trust that the trapezoidal rule has + // its evaluation points as the left + // and right end point of the cell (it + // could in principle have them in the + // reverse order, i.e. the zeroth point + // is at x=1, and the first one at + // x=0), and use an assertion to + // actually check for this. If this + // would not be the case, an exception + // of the (predefined) class + // ExcInternalError would be + // thrown. Of course, this does not + // happen in this program, but it shows + // a way of defensive coding: if you + // are not sure of an assumption, guard + // it by a test. This also guards us + // against possible future changes in + // the library: the quadrature classes + // do not promise any particular order + // of their quadrature points, so the + // QTrapez class could in principle + // change the order of its two + // evaluation points. 
In that case, + // your code would tell you that + // something changed, rather than + // computing a wrong result when you + // upgrade to a new version of the + // library. (The point made here is + // theoretical: we are not going to + // change the order of evaluation + // points; the intent is simply how to + // add some defensive touches to a + // program that make sure that it + // really does what it is hoped to do.) + // + // Given that we are now sure that + // x_left and x_right, + // extracted from the zeroth and first + // quadrature point, are indeed the + // left and right vertex of the cell, + // we can also be sure that the values + // we extract for u_left et al. are + // the ones we expect them to be, since + // the order of these values must of + // course match the order of the + // quadrature points. + const double x_left = fe_values.quadrature_point(0)[0]; + const double x_right = fe_values.quadrature_point(1)[0]; + + Assert (x_left == cell->vertex(0)[0], ExcInternalError()); + Assert (x_right == cell->vertex(1)[0], ExcInternalError()); + + const double u_left = local_values[0]; + const double u_right = local_values[1]; + + const double u_prime_left = local_gradients[0][0]; + const double u_prime_right = local_gradients[1][0]; + + // Next, we have to check whether this + // cell has a left neighbor: + if (cell->at_boundary(0) == false) + { + // If so, find its left + // neighbor. We do so by asking for + // the cell that is immediately + // adjacent to the left (the zeroth + // neighbor in 1d). However, this + // may be a cell that in itself has + // children, so to get to the + // active left neighbor, we have to + // recursively check whether that + // cell has children, and if so + // take its right child, since that + // is adjacent to the left of the + // present cell. Note that unless + // you are in 1d, there is no safe + // way to assume that the first + // child of the zeroth neighbor is + // indeed adjacent to the present + // cell. Rather, more than one of + // the children of a neighbor may + // be adjacent to the present + // cell. Also note that in two or + // higher space dimensions, a + // neighbor of an active cell may + // only be at most once refined, + // since we have the rule that + // there can only be one hanging + // node per face. This rule does + // not exist in 1d: neighboring + // cells may have totally + // independent refinement + // levels. Thus, we really need the + // while loop, not only an + // if clause. + DoFHandler::cell_iterator left_neighbor = cell->neighbor(0); + while (left_neighbor->has_children()) + left_neighbor = left_neighbor->child(1); + + // With the so-found neighbor, + // initialize the second + // FEValues object to it, + // extract the gradients of the + // solution there, and from this + // get the gradient at the + // interface (this is the first + // element of local_gradients, + // since the right end point of the + // neighbor cell has index 1) as a + // scalar value (this is the zeroth + // component of + // local_gradients[1]. 
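For reference, the quantity that the following lines add to the error indicator of the present cell $K$ can be written out explicitly. Transcribing the code (this is not a formula quoted from the step-15 introduction), with $x_l$ the left vertex, $u_l=u(x_l)$, $u'_l$ the gradient on the present cell at $x_l$, $u'_{nb}$ the gradient of the left neighbor at the same point, and $h_K$ the cell diameter, the left-interface contribution is

  $\eta_K \mathrel{+}= h_K \left[ (x_l - u_l^3)^2 \, \bigl( (u'_{nb})^5 - (u'_l)^5 \bigr) \right]^2,$

and the right interface is treated in exactly the same way with the roles of the two vertices exchanged.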
+ neighbor_fe_values.reinit (left_neighbor); + neighbor_fe_values.get_function_grads (present_solution, local_gradients); + + const double neighbor_u_prime_left = local_gradients[1][0]; + + // Then compute the jump, and add a + // suitable multiple to the error + // indicator for this cell: + const double left_jump = std::pow(x_left-std::pow(u_left,3), 2) * + (std::pow(neighbor_u_prime_left,5) - + std::pow(u_prime_left,5)); + error_indicators(cell_index) += left_jump * left_jump * + cell->diameter(); + } + + // Once we have done the left neighbor, + // we can play exactly the same game + // with the right neighbor: + if (cell->at_boundary(1) == false) + { + DoFHandler::cell_iterator right_neighbor = cell->neighbor(1); + while (right_neighbor->has_children()) + right_neighbor = right_neighbor->child(0); + + neighbor_fe_values.reinit (right_neighbor); + neighbor_fe_values.get_function_grads (present_solution, local_gradients); + + const double neighbor_u_prime_right = local_gradients[0][0]; + + const double right_jump = std::pow(x_right-std::pow(u_right,3), 2) * + (std::pow(neighbor_u_prime_right,5) - + std::pow(u_prime_right,5)); + error_indicators(cell_index) += right_jump * right_jump * + cell->diameter(); + } } - // Now we have all the refinement - // indicators computed, and want to refine - // the grid. In contrast to previous - // examples, however, we would like to - // transfer the solution vector from the - // old to the new grid. This is what the - // SolutionTransfer class is good for, - // but it requires some preliminary - // work. First, we need to tag the cells - // that we want to refine or coarsen, as - // usual: + // Now we have all the refinement + // indicators computed, and want to refine + // the grid. In contrast to previous + // examples, however, we would like to + // transfer the solution vector from the + // old to the new grid. This is what the + // SolutionTransfer class is good for, + // but it requires some preliminary + // work. First, we need to tag the cells + // that we want to refine or coarsen, as + // usual: GridRefinement::refine_and_coarsen_fixed_number (triangulation, - error_indicators, - 0.3, 0.03); - // Then, however, we need an additional - // step: if, for example, you flag a cell - // that is once more refined than its - // neighbor, and that neighbor is not - // flagged for refinement, we would end up - // with a jump of two refinement levels - // across a cell interface. In 1d, this - // would in general be allowed, but not in - // higher space dimensions, and some mesh - // smoothing algorithms in 1d may also - // disallow this. To avoid these - // situations, the library will silently - // also have to refine the neighbor cell - // once. It does so by calling the - // Triangulation::prepare_coarsening_and_refinement - // function before actually doing the - // refinement and coarsening. This function - // flags a set of additional cells for - // refinement or coarsening, to enforce - // rules like the one-hanging-node - // rule. The cells that are flagged for - // refinement and coarsening after calling - // this function are exactly the ones that - // will actually be refined or - // coarsened. Since the - // SolutionTransfer class needs this - // information in order to store the data - // from the old mesh and transfer to the - // new one. 
+ error_indicators, + 0.3, 0.03); + // Then, however, we need an additional + // step: if, for example, you flag a cell + // that is once more refined than its + // neighbor, and that neighbor is not + // flagged for refinement, we would end up + // with a jump of two refinement levels + // across a cell interface. In 1d, this + // would in general be allowed, but not in + // higher space dimensions, and some mesh + // smoothing algorithms in 1d may also + // disallow this. To avoid these + // situations, the library will silently + // also have to refine the neighbor cell + // once. It does so by calling the + // Triangulation::prepare_coarsening_and_refinement + // function before actually doing the + // refinement and coarsening. This function + // flags a set of additional cells for + // refinement or coarsening, to enforce + // rules like the one-hanging-node + // rule. The cells that are flagged for + // refinement and coarsening after calling + // this function are exactly the ones that + // will actually be refined or + // coarsened. Since the + // SolutionTransfer class needs this + // information in order to store the data + // from the old mesh and transfer to the + // new one. triangulation.prepare_coarsening_and_refinement(); - // With this out of the way, we initialize - // a SolutionTransfer object with the - // present DoFHandler and attach the - // solution vector to it: + // With this out of the way, we initialize + // a SolutionTransfer object with the + // present DoFHandler and attach the + // solution vector to it: SolutionTransfer solution_transfer(dof_handler); solution_transfer.prepare_for_coarsening_and_refinement (present_solution); - // Then we do the actual refinement, and - // distribute degrees of freedom on the new - // mesh: + // Then we do the actual refinement, and + // distribute degrees of freedom on the new + // mesh: triangulation.execute_coarsening_and_refinement (); dof_handler.distribute_dofs (fe); - // Finally, we retrieve the old solution - // interpolated to the new mesh. Since the - // SolutionTransfer function does not - // actually store the values of the old - // solution, but rather indices, we need to - // preserve the old solution vector until - // we have gotten the new interpolated - // values. Thus, we have the new values - // written into a temporary vector, and - // only afterwards write them into the - // solution vector object: + // Finally, we retrieve the old solution + // interpolated to the new mesh. Since the + // SolutionTransfer function does not + // actually store the values of the old + // solution, but rather indices, we need to + // preserve the old solution vector until + // we have gotten the new interpolated + // values. Thus, we have the new values + // written into a temporary vector, and + // only afterwards write them into the + // solution vector object: Vector tmp (dof_handler.n_dofs()); solution_transfer.interpolate (present_solution, tmp); present_solution = tmp; - // Here is some final thing, that is - // actually unnecessary in 1d, but - // necessary for higher space dimensions, - // so we show it anyway: the result of what - // the SolutionTransfer class provides - // is a vector that is interpolated from - // the old to the new mesh. Unfortunately, - // it does not necessarily have the right - // values at constrained (hanging) nodes, - // so we have to fix this up to make the - // solution conforming again. 
The simplest - // way to do this is this: + // Here is some final thing, that is + // actually unnecessary in 1d, but + // necessary for higher space dimensions, + // so we show it anyway: the result of what + // the SolutionTransfer class provides + // is a vector that is interpolated from + // the old to the new mesh. Unfortunately, + // it does not necessarily have the right + // values at constrained (hanging) nodes, + // so we have to fix this up to make the + // solution conforming again. The simplest + // way to do this is this: hanging_node_constraints.clear (); DoFTools::make_hanging_node_constraints (dof_handler, - hanging_node_constraints); + hanging_node_constraints); hanging_node_constraints.close (); hanging_node_constraints.distribute (present_solution); - // This is wasteful, since we create a - // ConstraintMatrix object that will be - // recreated again in the next call to - // setup_system_on_mesh immediately - // afterwards. A more efficient - // implementation would make sure that it - // is created only once. We don't care so - // much here, since in 1d there are no - // constraints, so all of these operations - // are really cheap, but we do not - // recommend this as general programming - // strategy. + // This is wasteful, since we create a + // ConstraintMatrix object that will be + // recreated again in the next call to + // setup_system_on_mesh immediately + // afterwards. A more efficient + // implementation would make sure that it + // is created only once. We don't care so + // much here, since in 1d there are no + // constraints, so all of these operations + // are really cheap, but we do not + // recommend this as general programming + // strategy. } - // Before going over to the framework - // functions, we still need to look at the - // implementation of the function that - // computes the energy of a nodal vector in - // the functional considered in this example - // program. Its idea is simple: take a nodal - // vector and the DoFHandler object it is - // living on, then loop over all cells and - // add up the local contributions to the - // energy: + // Before going over to the framework + // functions, we still need to look at the + // implementation of the function that + // computes the energy of a nodal vector in + // the functional considered in this example + // program. Its idea is simple: take a nodal + // vector and the DoFHandler object it is + // living on, then loop over all cells and + // add up the local contributions to the + // energy: template double MinimizationProblem::energy (const DoFHandler &dof_handler, - const Vector &function) + const Vector &function) { - // First define the quadrature formula and - // a FEValues object with which to - // compute the values of the input function - // at the quadrature points. Note again - // that the integrand is a polynomial of - // degree six, so a 4-point Gauss formula - // is appropriate: + // First define the quadrature formula and + // a FEValues object with which to + // compute the values of the input function + // at the quadrature points. 
Note again + // that the integrand is a polynomial of + // degree six, so a 4-point Gauss formula + // is appropriate: QGauss quadrature_formula(4); FEValues fe_values (dof_handler.get_fe(), quadrature_formula, - update_values | update_gradients | - update_quadrature_points | update_JxW_values); + update_values | update_gradients | + update_quadrature_points | update_JxW_values); const unsigned int n_q_points = quadrature_formula.size(); - // Then, just as when we integrated the - // linear system, we need two variables - // that will hold the values and gradients - // of the given function at the quadrature - // points: + // Then, just as when we integrated the + // linear system, we need two variables + // that will hold the values and gradients + // of the given function at the quadrature + // points: std::vector local_solution_values (n_q_points); std::vector > local_solution_grads (n_q_points); - // With this, define an energy variable, - // and loop over all the cells: + // With this, define an energy variable, + // and loop over all the cells: double energy = 0.; typename DoFHandler::active_cell_iterator @@ -1275,50 +1275,50 @@ namespace Step15 endc = dof_handler.end(); for (; cell!=endc; ++cell) { - // On each cell, initialize the - // FEValues object, and extract - // values and gradients of the given - // function: - fe_values.reinit (cell); - fe_values.get_function_values (function, - local_solution_values); - fe_values.get_function_grads (function, - local_solution_grads); - - // Then loop over all quadrature points - // on this cell, and add up the - // contribution of each to the global - // energy: - for (unsigned int q_point=0; q_pointFEValues object, and extract + // values and gradients of the given + // function: + fe_values.reinit (cell); + fe_values.get_function_values (function, + local_solution_values); + fe_values.get_function_grads (function, + local_solution_grads); + + // Then loop over all quadrature points + // on this cell, and add up the + // contribution of each to the global + // energy: + for (unsigned int q_point=0; q_pointrun(). It generate a coarse mesh, - // refines it a couple of times, and - // initializes the starting values. It then - // goes into a loop in which we first set up - // the member variables for the new mesh, and - // then do a fixed number of five gradient - // steps. If after this the energy has not - // significantly decreased compares to the - // last time we checked, we assume that we - // have converged and exit, otherwise we - // refine the mesh and start over. Once we - // have determined that the computations have - // converged somewhere, we output the - // results. + // So here is the driver function, + // run(). It generate a coarse mesh, + // refines it a couple of times, and + // initializes the starting values. It then + // goes into a loop in which we first set up + // the member variables for the new mesh, and + // then do a fixed number of five gradient + // steps. If after this the energy has not + // significantly decreased compares to the + // last time we checked, we assume that we + // have converged and exit, otherwise we + // refine the mesh and start over. Once we + // have determined that the computations have + // converged somewhere, we output the + // results. 
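Before turning to run() below, it may help to see what one of the five gradient steps taken per mesh amounts to when written in one place. The following is a condensed restatement of do_step() as shown and described above; the solver block is taken from that function, while the last two lines only paraphrase the comment about computing a step length and updating the solution vector, so the variable name step_length is an assumption.

  assemble_step ();                         // matrix and residual for the current iterate
  Vector<double> update (present_solution.size());
  {
    SolverControl solver_control (residual.size(),
                                  1e-2*residual.l2_norm());
    SolverCG<>         solver (solver_control);
    PreconditionSSOR<> preconditioner;
    preconditioner.initialize (matrix);
    solver.solve (matrix, update, residual, preconditioner);
    hanging_node_constraints.distribute (update);
  }
  const double step_length = line_search (update);   // 1d minimization along 'update'
  present_solution.add (step_length, update);        // take the step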
template void MinimizationProblem::run () { @@ -1331,20 +1331,20 @@ namespace Step15 while (true) { - setup_system_on_mesh (); + setup_system_on_mesh (); - for (unsigned int iteration=0; iteration<5; ++iteration) - do_step (); + for (unsigned int iteration=0; iteration<5; ++iteration) + do_step (); - const double this_energy = energy (dof_handler, present_solution); - std::cout << " Energy: " << this_energy << std::endl; + const double this_energy = energy (dof_handler, present_solution); + std::cout << " Energy: " << this_energy << std::endl; - if ((last_energy-this_energy) < 1e-5*last_energy) - break; + if ((last_energy-this_energy) < 1e-5*last_energy) + break; - last_energy = this_energy; + last_energy = this_energy; - refine_grid (); + refine_grid (); } output_results (); @@ -1390,24 +1390,24 @@ int main () catch (std::exception &exc) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Exception on processing: " << std::endl - << exc.what() << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << exc.what() << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } catch (...) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Unknown exception!" << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } return 0; diff --git a/deal.II/examples/step-16/step-16.cc b/deal.II/examples/step-16/step-16.cc index 90c5b8ded9..1e84f87815 100644 --- a/deal.II/examples/step-16/step-16.cc +++ b/deal.II/examples/step-16/step-16.cc @@ -11,25 +11,25 @@ /* to the file deal.II/doc/license.html for the text and */ /* further information on this license. */ - // As discussed in the introduction, most of - // this program is copied almost verbatim - // from step-6, which itself is only a slight - // modification of step-5. Consequently, a - // significant part of this program is not - // new if you've read all the material up to - // step-6, and we won't comment on that part - // of the functionality that is - // unchanged. Rather, we will focus on those - // aspects of the program that have to do - // with the multigrid functionality which - // forms the new aspect of this tutorial - // program. + // As discussed in the introduction, most of + // this program is copied almost verbatim + // from step-6, which itself is only a slight + // modification of step-5. Consequently, a + // significant part of this program is not + // new if you've read all the material up to + // step-6, and we won't comment on that part + // of the functionality that is + // unchanged. Rather, we will focus on those + // aspects of the program that have to do + // with the multigrid functionality which + // forms the new aspect of this tutorial + // program. 
// @sect3{Include files} - // Again, the first few include files - // are already known, so we won't - // comment on them: + // Again, the first few include files + // are already known, so we won't + // comment on them: #include #include #include @@ -59,19 +59,19 @@ #include #include - // These, now, are the include necessary for - // the multi-level methods. The first two - // declare classes that allow us to enumerate - // degrees of freedom not only on the finest - // mesh level, but also on intermediate - // levels (that's what the MGDoFHandler class - // does) as well as allow to access this - // information (iterators and accessors over - // these cells). - // - // The rest of the include files deals with - // the mechanics of multigrid as a linear - // operator (solver or preconditioner). + // These, now, are the include necessary for + // the multi-level methods. The first two + // declare classes that allow us to enumerate + // degrees of freedom not only on the finest + // mesh level, but also on intermediate + // levels (that's what the MGDoFHandler class + // does) as well as allow to access this + // information (iterators and accessors over + // these cells). + // + // The rest of the include files deals with + // the mechanics of multigrid as a linear + // operator (solver or preconditioner). #include #include #include @@ -82,26 +82,26 @@ #include #include - // This is C++: + // This is C++: #include #include - // The last step is as in all - // previous programs: + // The last step is as in all + // previous programs: namespace Step16 { using namespace dealii; - // @sect3{The LaplaceProblem class template} + // @sect3{The LaplaceProblem class template} - // This main class is basically the same - // class as in step-6. As far as member - // functions is concerned, the only addition - // is the assemble_multigrid - // function that assembles the matrices that - // correspond to the discrete operators on - // intermediate levels: + // This main class is basically the same + // class as in step-6. As far as member + // functions is concerned, the only addition + // is the assemble_multigrid + // function that assembles the matrices that + // correspond to the discrete operators on + // intermediate levels: template class LaplaceProblem { @@ -124,14 +124,14 @@ namespace Step16 SparsityPattern sparsity_pattern; SparseMatrix system_matrix; - // We need an additional object for the - // hanging nodes constraints. They are - // handed to the transfer object in the - // multigrid. Since we call a compress - // inside the multigrid these constraints - // are not allowed to be inhomogeneous so - // we store them in different ConstraintMatrix - // objects. + // We need an additional object for the + // hanging nodes constraints. They are + // handed to the transfer object in the + // multigrid. Since we call a compress + // inside the multigrid these constraints + // are not allowed to be inhomogeneous so + // we store them in different ConstraintMatrix + // objects. ConstraintMatrix hanging_node_constraints; ConstraintMatrix constraints; @@ -140,43 +140,43 @@ namespace Step16 const unsigned int degree; - // The following four objects are the - // only additional member variables, - // compared to step-6. 
They first three - // represent the - // operators that act on individual - // levels of the multilevel hierarchy, - // rather than on the finest mesh as do - // the objects above while the last object - // stores information about the boundary - // indices on each level and information - // about indices lying on a refinement - // edge between two different refinement - // levels. - // - // To facilitate having objects on each - // level of a multilevel hierarchy, - // deal.II has the MGLevelObject class - // template that provides storage for - // objects on each level. What we need - // here are matrices on each level, which - // implies that we also need sparsity - // patterns on each level. As outlined in - // the @ref mg_paper, the operators - // (matrices) that we need are actually - // twofold: one on the interior of each - // level, and one at the interface - // between each level and that part of - // the domain where the mesh is - // coarser. In fact, we will need the - // latter in two versions: for the - // direction from coarse to fine mesh and - // from fine to coarse. Fortunately, - // however, we here have a self-adjoint - // problem for which one of these is the - // transpose of the other, and so we only - // have to build one; we choose the one - // from coarse to fine. + // The following four objects are the + // only additional member variables, + // compared to step-6. They first three + // represent the + // operators that act on individual + // levels of the multilevel hierarchy, + // rather than on the finest mesh as do + // the objects above while the last object + // stores information about the boundary + // indices on each level and information + // about indices lying on a refinement + // edge between two different refinement + // levels. + // + // To facilitate having objects on each + // level of a multilevel hierarchy, + // deal.II has the MGLevelObject class + // template that provides storage for + // objects on each level. What we need + // here are matrices on each level, which + // implies that we also need sparsity + // patterns on each level. As outlined in + // the @ref mg_paper, the operators + // (matrices) that we need are actually + // twofold: one on the interior of each + // level, and one at the interface + // between each level and that part of + // the domain where the mesh is + // coarser. In fact, we will need the + // latter in two versions: for the + // direction from coarse to fine mesh and + // from fine to coarse. Fortunately, + // however, we here have a self-adjoint + // problem for which one of these is the + // transpose of the other, and so we only + // have to build one; we choose the one + // from coarse to fine. 
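
If MGLevelObject is new to you, the following self-contained sketch may help picture the storage pattern the comment describes: one object per level, addressed through an inclusive [min_level, max_level] range. The class LevelStorage and its interface are invented here purely for illustration; the real MGLevelObject is more general, but the usage below is the pattern the member variables declared next rely on.

#include <cassert>
#include <iostream>
#include <vector>

// A much simplified analogue of MGLevelObject<T>: one T per level,
// addressed with an inclusive level range [min_level, max_level].
template <typename T>
class LevelStorage
{
public:
  void resize(const unsigned int min_level, const unsigned int max_level)
  {
    min_level_ = min_level;
    objects_.assign(max_level - min_level + 1, T());  // inclusive range
  }

  T &operator[](const unsigned int level)
  {
    assert(level >= min_level_ && level < min_level_ + objects_.size());
    return objects_[level - min_level_];
  }

  unsigned int n_levels() const { return objects_.size(); }

private:
  unsigned int   min_level_ = 0;
  std::vector<T> objects_;
};

int main()
{
  // Pretend we have a four-level hierarchy and store one made-up number
  // (say, a dof count) per level.
  const unsigned int n_levels = 4;

  LevelStorage<unsigned int> dofs_per_level;
  dofs_per_level.resize(0, n_levels - 1);   // levels 0 .. n_levels-1, inclusive

  for (unsigned int level = 0; level < dofs_per_level.n_levels(); ++level)
    dofs_per_level[level] = 10 * (level + 1);

  for (unsigned int level = 0; level < dofs_per_level.n_levels(); ++level)
    std::cout << "level " << level << ": " << dofs_per_level[level] << " dofs\n";
  return 0;
}
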
MGLevelObject mg_sparsity_patterns; MGLevelObject > mg_matrices; MGLevelObject > mg_interface_matrices; @@ -185,11 +185,11 @@ namespace Step16 - // @sect3{Nonconstant coefficients} + // @sect3{Nonconstant coefficients} - // The implementation of nonconstant - // coefficients is copied verbatim - // from step-5 and step-6: + // The implementation of nonconstant + // coefficients is copied verbatim + // from step-5 and step-6: template class Coefficient : public Function @@ -198,18 +198,18 @@ namespace Step16 Coefficient () : Function() {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; virtual void value_list (const std::vector > &points, - std::vector &values, - const unsigned int component = 0) const; + std::vector &values, + const unsigned int component = 0) const; }; template double Coefficient::value (const Point &p, - const unsigned int) const + const unsigned int) const { if (p.square() < 0.5*0.5) return 20; @@ -221,112 +221,112 @@ namespace Step16 template void Coefficient::value_list (const std::vector > &points, - std::vector &values, - const unsigned int component) const + std::vector &values, + const unsigned int component) const { const unsigned int n_points = points.size(); Assert (values.size() == n_points, - ExcDimensionMismatch (values.size(), n_points)); + ExcDimensionMismatch (values.size(), n_points)); Assert (component == 0, - ExcIndexRange (component, 0, 1)); + ExcIndexRange (component, 0, 1)); for (unsigned int i=0; i::value (points[i]); } - // @sect3{The LaplaceProblem class implementation} - - // @sect4{LaplaceProblem::LaplaceProblem} - - // The constructor is left mostly - // unchanged. We take the polynomial degree - // of the finite elements to be used as a - // constructor argument and store it in a - // member variable. - // - // By convention, all adaptively refined - // triangulations in deal.II never change by - // more than one level across a face between - // cells. For our multigrid algorithms, - // however, we need a slightly stricter - // guarantee, namely that the mesh also does - // not change by more than refinement level - // across vertices that might connect two - // cells. In other words, we must prevent the - // following situation: - // - // @image html limit_level_difference_at_vertices.png "" - // - // This is achieved by passing the - // Triangulation::limit_level_difference_at_vertices - // flag to the constructor of the - // triangulation class. + // @sect3{The LaplaceProblem class implementation} + + // @sect4{LaplaceProblem::LaplaceProblem} + + // The constructor is left mostly + // unchanged. We take the polynomial degree + // of the finite elements to be used as a + // constructor argument and store it in a + // member variable. + // + // By convention, all adaptively refined + // triangulations in deal.II never change by + // more than one level across a face between + // cells. For our multigrid algorithms, + // however, we need a slightly stricter + // guarantee, namely that the mesh also does + // not change by more than refinement level + // across vertices that might connect two + // cells. In other words, we must prevent the + // following situation: + // + // @image html limit_level_difference_at_vertices.png "" + // + // This is achieved by passing the + // Triangulation::limit_level_difference_at_vertices + // flag to the constructor of the + // triangulation class. 
template LaplaceProblem::LaplaceProblem (const unsigned int degree) - : - triangulation (Triangulation:: - limit_level_difference_at_vertices), - fe (degree), - mg_dof_handler (triangulation), - degree(degree) + : + triangulation (Triangulation:: + limit_level_difference_at_vertices), + fe (degree), + mg_dof_handler (triangulation), + degree(degree) {} - // @sect4{LaplaceProblem::setup_system} + // @sect4{LaplaceProblem::setup_system} - // The following function extends what the - // corresponding one in step-6 did. The top - // part, apart from the additional output, - // does the same: + // The following function extends what the + // corresponding one in step-6 did. The top + // part, apart from the additional output, + // does the same: template void LaplaceProblem::setup_system () { mg_dof_handler.distribute_dofs (fe); - // Here we output not only the - // degrees of freedom on the finest - // level, but also in the - // multilevel structure + // Here we output not only the + // degrees of freedom on the finest + // level, but also in the + // multilevel structure deallog << "Number of degrees of freedom: " - << mg_dof_handler.n_dofs(); + << mg_dof_handler.n_dofs(); for (unsigned int l=0;l homogeneous_dirichlet_bc (1); dirichlet_boundary[0] = &homogeneous_dirichlet_bc; VectorTools::interpolate_boundary_values (static_cast&>(mg_dof_handler), - dirichlet_boundary, - constraints); + dirichlet_boundary, + constraints); constraints.close (); hanging_node_constraints.close (); constraints.condense (sparsity_pattern); sparsity_pattern.compress(); system_matrix.reinit (sparsity_pattern); - // The multigrid constraints have to be - // initialized. They need to know about - // the boundary values as well, so we - // pass the dirichlet_boundary - // here as well. + // The multigrid constraints have to be + // initialized. They need to know about + // the boundary values as well, so we + // pass the dirichlet_boundary + // here as well. mg_constrained_dofs.clear(); mg_constrained_dofs.initialize(mg_dof_handler, dirichlet_boundary); - // Now for the things that concern the - // multigrid data structures. First, we - // resize the multi-level objects to hold - // matrices and sparsity patterns for every - // level. The coarse level is zero (this is - // mandatory right now but may change in a - // future revision). Note that these - // functions take a complete, inclusive - // range here (not a starting index and - // size), so the finest level is - // n_levels-1. We first have - // to resize the container holding the - // SparseMatrix classes, since they have to - // release their SparsityPattern before the - // can be destroyed upon resizing. + // Now for the things that concern the + // multigrid data structures. First, we + // resize the multi-level objects to hold + // matrices and sparsity patterns for every + // level. The coarse level is zero (this is + // mandatory right now but may change in a + // future revision). Note that these + // functions take a complete, inclusive + // range here (not a starting index and + // size), so the finest level is + // n_levels-1. We first have + // to resize the container holding the + // SparseMatrix classes, since they have to + // release their SparsityPattern before the + // can be destroyed upon resizing. 
const unsigned int n_levels = triangulation.n_levels(); mg_interface_matrices.resize(0, n_levels-1); @@ -376,73 +376,73 @@ namespace Step16 mg_matrices.clear (); mg_sparsity_patterns.resize(0, n_levels-1); - // Now, we have to provide a matrix on each - // level. To this end, we first use the - // MGTools::make_sparsity_pattern function - // to first generate a preliminary - // compressed sparsity pattern on each - // level (see the @ref Sparsity module for - // more information on this topic) and then - // copy it over to the one we really - // want. The next step is to initialize - // both kinds of level matrices with these - // sparsity patterns. - // - // It may be worth pointing out that the - // interface matrices only have entries for - // degrees of freedom that sit at or next - // to the interface between coarser and - // finer levels of the mesh. They are - // therefore even sparser than the matrices - // on the individual levels of our - // multigrid hierarchy. If we were more - // concerned about memory usage (and - // possibly the speed with which we can - // multiply with these matrices), we should - // use separate and different sparsity - // patterns for these two kinds of - // matrices. + // Now, we have to provide a matrix on each + // level. To this end, we first use the + // MGTools::make_sparsity_pattern function + // to first generate a preliminary + // compressed sparsity pattern on each + // level (see the @ref Sparsity module for + // more information on this topic) and then + // copy it over to the one we really + // want. The next step is to initialize + // both kinds of level matrices with these + // sparsity patterns. + // + // It may be worth pointing out that the + // interface matrices only have entries for + // degrees of freedom that sit at or next + // to the interface between coarser and + // finer levels of the mesh. They are + // therefore even sparser than the matrices + // on the individual levels of our + // multigrid hierarchy. If we were more + // concerned about memory usage (and + // possibly the speed with which we can + // multiply with these matrices), we should + // use separate and different sparsity + // patterns for these two kinds of + // matrices. 
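
The two-stage idea referred to here -- collect couplings in a flexible, append-friendly structure first, then copy them into a fixed final pattern once nothing more will be added -- can be shown without any deal.II classes at all. In the sketch below the intermediate structure is a vector of sorted sets and the final one a pair of CSR arrays; the couplings are made up, and none of this reproduces what MGTools::make_sparsity_pattern actually does internally.

#include <iostream>
#include <set>
#include <vector>

int main()
{
  const unsigned int n_rows = 5;

  // Stage 1: a flexible, easily extensible pattern (one sorted set per row),
  // filled as couplings are discovered, e.g. while looping over cells.
  std::vector<std::set<unsigned int>> dynamic_pattern(n_rows);
  const unsigned int couplings[][2] = {{0,0},{0,1},{1,0},{1,1},{1,2},
                                       {2,2},{3,3},{3,4},{4,3},{4,4}};
  for (const auto &c : couplings)
    dynamic_pattern[c[0]].insert(c[1]);

  // Stage 2: copy into fixed CSR arrays once no further entries will be
  // added; a matrix could now be allocated with exactly this layout.
  std::vector<unsigned int> row_start(n_rows + 1, 0);
  std::vector<unsigned int> column_index;
  for (unsigned int row = 0; row < n_rows; ++row)
    {
      row_start[row + 1] = row_start[row] + dynamic_pattern[row].size();
      column_index.insert(column_index.end(),
                          dynamic_pattern[row].begin(),
                          dynamic_pattern[row].end());
    }

  for (unsigned int row = 0; row < n_rows; ++row)
    {
      std::cout << "row " << row << ":";
      for (unsigned int k = row_start[row]; k < row_start[row + 1]; ++k)
        std::cout << ' ' << column_index[k];
      std::cout << '\n';
    }
  return 0;
}
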
for (unsigned int level=0; level void LaplaceProblem::assemble_system () { const QGauss quadrature_formula(degree+1); FEValues fe_values (fe, quadrature_formula, - update_values | update_gradients | - update_quadrature_points | update_JxW_values); + update_values | update_gradients | + update_quadrature_points | update_JxW_values); const unsigned int dofs_per_cell = fe.dofs_per_cell; const unsigned int n_q_points = quadrature_formula.size(); @@ -460,61 +460,61 @@ namespace Step16 endc = mg_dof_handler.end(); for (; cell!=endc; ++cell) { - cell_matrix = 0; - cell_rhs = 0; - - fe_values.reinit (cell); - - coefficient.value_list (fe_values.get_quadrature_points(), - coefficient_values); - - for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); - constraints.distribute_local_to_global (cell_matrix, cell_rhs, - local_dof_indices, - system_matrix, system_rhs); + cell_matrix = 0; + cell_rhs = 0; + + fe_values.reinit (cell); + + coefficient.value_list (fe_values.get_quadrature_points(), + coefficient_values); + + for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); + constraints.distribute_local_to_global (cell_matrix, cell_rhs, + local_dof_indices, + system_matrix, system_rhs); } } - // @sect4{LaplaceProblem::assemble_multigrid} - - // The next function is the one that builds - // the linear operators (matrices) that - // define the multigrid method on each level - // of the mesh. The integration core is the - // same as above, but the loop below will go - // over all existing cells instead of just - // the active ones, and the results must be - // entered into the correct matrix. Note also - // that since we only do multi-level - // preconditioning, no right-hand side needs - // to be assembled here. - // - // Before we go there, however, we have to - // take care of a significant amount of book - // keeping: + // @sect4{LaplaceProblem::assemble_multigrid} + + // The next function is the one that builds + // the linear operators (matrices) that + // define the multigrid method on each level + // of the mesh. The integration core is the + // same as above, but the loop below will go + // over all existing cells instead of just + // the active ones, and the results must be + // entered into the correct matrix. Note also + // that since we only do multi-level + // preconditioning, no right-hand side needs + // to be assembled here. + // + // Before we go there, however, we have to + // take care of a significant amount of book + // keeping: template void LaplaceProblem::assemble_multigrid () { QGauss quadrature_formula(1+degree); FEValues fe_values (fe, quadrature_formula, - update_values | update_gradients | - update_quadrature_points | update_JxW_values); + update_values | update_gradients | + update_quadrature_points | update_JxW_values); const unsigned int dofs_per_cell = fe.dofs_per_cell; const unsigned int n_q_points = quadrature_formula.size(); @@ -526,251 +526,251 @@ namespace Step16 const Coefficient coefficient; std::vector coefficient_values (n_q_points); - // Next a few things that are specific to - // building the multigrid data structures - // (since we only need them in the current - // function, rather than also elsewhere, we - // build them here instead of the - // setup_system - // function). 
Some of the following may be - // a bit obscure if you're not familiar - // with the algorithm actually implemented - // in deal.II to support multilevel - // algorithms on adaptive meshes; if some - // of the things below seem strange, take a - // look at the @ref mg_paper. - // - // Our first job is to identify those - // degrees of freedom on each level that - // are located on interfaces between - // adaptively refined levels, and those - // that lie on the interface but also on - // the exterior boundary of the domain. As - // in many other parts of the library, we - // do this by using boolean masks, - // i.e. vectors of booleans each element of - // which indicates whether the - // corresponding degree of freedom index is - // an interface DoF or not. The MGConstraints - // already computed the information for us - // when we called initialize in setup_system(). + // Next a few things that are specific to + // building the multigrid data structures + // (since we only need them in the current + // function, rather than also elsewhere, we + // build them here instead of the + // setup_system + // function). Some of the following may be + // a bit obscure if you're not familiar + // with the algorithm actually implemented + // in deal.II to support multilevel + // algorithms on adaptive meshes; if some + // of the things below seem strange, take a + // look at the @ref mg_paper. + // + // Our first job is to identify those + // degrees of freedom on each level that + // are located on interfaces between + // adaptively refined levels, and those + // that lie on the interface but also on + // the exterior boundary of the domain. As + // in many other parts of the library, we + // do this by using boolean masks, + // i.e. vectors of booleans each element of + // which indicates whether the + // corresponding degree of freedom index is + // an interface DoF or not. The MGConstraints + // already computed the information for us + // when we called initialize in setup_system(). std::vector > interface_dofs = mg_constrained_dofs.get_refinement_edge_indices (); std::vector > boundary_interface_dofs = mg_constrained_dofs.get_refinement_edge_boundary_indices (); - // The indices just identified will later - // be used to decide where the assembled value - // has to be added into on each level. - // On the other hand, - // we also have to impose zero boundary - // conditions on the external boundary of - // each level. But this the MGConstraints - // knows it. So we simply ask for them by calling - // get_boundary_indices (). - // The third step is to construct - // constraints on all those degrees of - // freedom: their value should be zero - // after each application of the level - // operators. To this end, we construct - // ConstraintMatrix objects for each level, - // and add to each of these constraints for - // each degree of freedom. Due to the way - // the ConstraintMatrix stores its data, - // the function to add a constraint on a - // single degree of freedom and force it to - // be zero is called - // Constraintmatrix::add_line(); doing so - // for several degrees of freedom at once - // can be done using - // Constraintmatrix::add_lines(): + // The indices just identified will later + // be used to decide where the assembled value + // has to be added into on each level. + // On the other hand, + // we also have to impose zero boundary + // conditions on the external boundary of + // each level. But this the MGConstraints + // knows it. 
So we simply ask for them by calling + // get_boundary_indices (). + // The third step is to construct + // constraints on all those degrees of + // freedom: their value should be zero + // after each application of the level + // operators. To this end, we construct + // ConstraintMatrix objects for each level, + // and add to each of these constraints for + // each degree of freedom. Due to the way + // the ConstraintMatrix stores its data, + // the function to add a constraint on a + // single degree of freedom and force it to + // be zero is called + // Constraintmatrix::add_line(); doing so + // for several degrees of freedom at once + // can be done using + // Constraintmatrix::add_lines(): std::vector boundary_constraints (triangulation.n_levels()); std::vector boundary_interface_constraints (triangulation.n_levels()); for (unsigned int level=0; levelassemble_system, with two - // exceptions: (i) we don't need a right - // hand side, and more significantly (ii) we - // don't just loop over all active cells, - // but in fact all cells, active or - // not. Consequently, the correct iterator - // to use is MGDoFHandler::cell_iterator - // rather than - // MGDoFHandler::active_cell_iterator. Let's - // go about it: + // Now that we're done with most of our + // preliminaries, let's start the + // integration loop. It looks mostly like + // the loop in + // assemble_system, with two + // exceptions: (i) we don't need a right + // hand side, and more significantly (ii) we + // don't just loop over all active cells, + // but in fact all cells, active or + // not. Consequently, the correct iterator + // to use is MGDoFHandler::cell_iterator + // rather than + // MGDoFHandler::active_cell_iterator. Let's + // go about it: typename MGDoFHandler::cell_iterator cell = mg_dof_handler.begin(), - endc = mg_dof_handler.end(); + endc = mg_dof_handler.end(); for (; cell!=endc; ++cell) { - cell_matrix = 0; - fe_values.reinit (cell); - - coefficient.value_list (fe_values.get_quadrature_points(), - coefficient_values); - - for (unsigned int q_point=0; q_pointget_mg_dof_indices (local_dof_indices); - - // Next, we need to copy local - // contributions into the level - // objects. We can do this in the same - // way as in the global assembly, using - // a constraint object that takes care - // of constrained degrees (which here - // are only boundary nodes, as the - // individual levels have no hanging - // node constraints). Note that the - // boundary_constraints - // object makes sure that the level - // matrices contains no contributions - // from degrees of freedom at the - // interface between cells of different - // refinement level. - boundary_constraints[cell->level()] - .distribute_local_to_global (cell_matrix, - local_dof_indices, - mg_matrices[cell->level()]); - - // The next step is again slightly more - // obscure (but explained in the @ref - // mg_paper): We need the remainder of - // the operator that we just copied - // into the mg_matrices - // object, namely the part on the - // interface between cells at the - // current level and cells one level - // coarser. This matrix exists in two - // directions: for interior DoFs (index - // $i$) of the current level to those - // sitting on the interface (index - // $j$), and the other way around. Of - // course, since we have a symmetric - // operator, one of these matrices is - // the transpose of the other. 
- // - // The way we assemble these matrices - // is as follows: since the are formed - // from parts of the local - // contributions, we first delete all - // those parts of the local - // contributions that we are not - // interested in, namely all those - // elements of the local matrix for - // which not $i$ is an interface DoF - // and $j$ is not. The result is one of - // the two matrices that we are - // interested in, and we then copy it - // into the - // mg_interface_matrices - // object. The - // boundary_interface_constraints - // object at the same time makes sure - // that we delete contributions from - // all degrees of freedom that are not - // only on the interface but also on - // the external boundary of the domain. - // - // The last part to remember is how to - // get the other matrix. Since it is - // only the transpose, we will later - // (in the solve() - // function) be able to just pass the - // transpose matrix where necessary. - for (unsigned int i=0; ilevel()][local_dof_indices[i]]==true && - interface_dofs[cell->level()][local_dof_indices[j]]==false)) - cell_matrix(i,j) = 0; - - boundary_interface_constraints[cell->level()] - .distribute_local_to_global (cell_matrix, - local_dof_indices, - mg_interface_matrices[cell->level()]); + cell_matrix = 0; + fe_values.reinit (cell); + + coefficient.value_list (fe_values.get_quadrature_points(), + coefficient_values); + + for (unsigned int q_point=0; q_pointget_mg_dof_indices (local_dof_indices); + + // Next, we need to copy local + // contributions into the level + // objects. We can do this in the same + // way as in the global assembly, using + // a constraint object that takes care + // of constrained degrees (which here + // are only boundary nodes, as the + // individual levels have no hanging + // node constraints). Note that the + // boundary_constraints + // object makes sure that the level + // matrices contains no contributions + // from degrees of freedom at the + // interface between cells of different + // refinement level. + boundary_constraints[cell->level()] + .distribute_local_to_global (cell_matrix, + local_dof_indices, + mg_matrices[cell->level()]); + + // The next step is again slightly more + // obscure (but explained in the @ref + // mg_paper): We need the remainder of + // the operator that we just copied + // into the mg_matrices + // object, namely the part on the + // interface between cells at the + // current level and cells one level + // coarser. This matrix exists in two + // directions: for interior DoFs (index + // $i$) of the current level to those + // sitting on the interface (index + // $j$), and the other way around. Of + // course, since we have a symmetric + // operator, one of these matrices is + // the transpose of the other. + // + // The way we assemble these matrices + // is as follows: since the are formed + // from parts of the local + // contributions, we first delete all + // those parts of the local + // contributions that we are not + // interested in, namely all those + // elements of the local matrix for + // which not $i$ is an interface DoF + // and $j$ is not. The result is one of + // the two matrices that we are + // interested in, and we then copy it + // into the + // mg_interface_matrices + // object. The + // boundary_interface_constraints + // object at the same time makes sure + // that we delete contributions from + // all degrees of freedom that are not + // only on the interface but also on + // the external boundary of the domain. 
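
Because the masking rule above is easy to misread inside the big assembly loop, here it is once more in isolation, applied to a small dense matrix with hand-made interface flags: an entry (i,j) survives only if row i is an interface DoF and column j is not. The 4x4 matrix and the flags are invented for illustration; in the program the flags come, per level, from the MGConstraints object.

#include <iostream>
#include <vector>

int main()
{
  const unsigned int dofs_per_cell = 4;

  // An invented local cell matrix (stored row-major) and interface flags:
  // DoFs 1 and 3 are pretended to sit on a refinement-edge interface.
  std::vector<double> cell_matrix = {4, -1, -1,  0,
                                    -1,  4,  0, -1,
                                    -1,  0,  4, -1,
                                     0, -1, -1,  4};
  const std::vector<bool> is_interface = {false, true, false, true};

  // Keep entry (i,j) only if i is an interface DoF and j is not;
  // everything else belongs to the level matrix, not the interface matrix.
  for (unsigned int i = 0; i < dofs_per_cell; ++i)
    for (unsigned int j = 0; j < dofs_per_cell; ++j)
      if (!(is_interface[i] == true && is_interface[j] == false))
        cell_matrix[i * dofs_per_cell + j] = 0;

  for (unsigned int i = 0; i < dofs_per_cell; ++i)
    {
      for (unsigned int j = 0; j < dofs_per_cell; ++j)
        std::cout << cell_matrix[i * dofs_per_cell + j] << ' ';
      std::cout << '\n';
    }
  return 0;
}
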
+ // + // The last part to remember is how to + // get the other matrix. Since it is + // only the transpose, we will later + // (in the solve() + // function) be able to just pass the + // transpose matrix where necessary. + for (unsigned int i=0; ilevel()][local_dof_indices[i]]==true && + interface_dofs[cell->level()][local_dof_indices[j]]==false)) + cell_matrix(i,j) = 0; + + boundary_interface_constraints[cell->level()] + .distribute_local_to_global (cell_matrix, + local_dof_indices, + mg_interface_matrices[cell->level()]); } } - // @sect4{LaplaceProblem::solve} - - // This is the other function that is - // significantly different in support of the - // multigrid solver (or, in fact, the - // preconditioner for which we use the - // multigrid method). - // - // Let us start out by setting up two of the - // components of multilevel methods: transfer - // operators between levels, and a solver on - // the coarsest level. In finite element - // methods, the transfer operators are - // derived from the finite element function - // spaces involved and can often be computed - // in a generic way independent of the - // problem under consideration. In that case, - // we can use the MGTransferPrebuilt class - // that, given the constraints on the global - // level and an MGDoFHandler object computes - // the matrices corresponding to these - // transfer operators. - // - // The second part of the following lines - // deals with the coarse grid solver. Since - // our coarse grid is very coarse indeed, we - // decide for a direct solver (a Householder - // decomposition of the coarsest level - // matrix), even if its implementation is not - // particularly sophisticated. If our coarse - // mesh had many more cells than the five we - // have here, something better suited would - // obviously be necessary here. + // @sect4{LaplaceProblem::solve} + + // This is the other function that is + // significantly different in support of the + // multigrid solver (or, in fact, the + // preconditioner for which we use the + // multigrid method). + // + // Let us start out by setting up two of the + // components of multilevel methods: transfer + // operators between levels, and a solver on + // the coarsest level. In finite element + // methods, the transfer operators are + // derived from the finite element function + // spaces involved and can often be computed + // in a generic way independent of the + // problem under consideration. In that case, + // we can use the MGTransferPrebuilt class + // that, given the constraints on the global + // level and an MGDoFHandler object computes + // the matrices corresponding to these + // transfer operators. + // + // The second part of the following lines + // deals with the coarse grid solver. Since + // our coarse grid is very coarse indeed, we + // decide for a direct solver (a Householder + // decomposition of the coarsest level + // matrix), even if its implementation is not + // particularly sophisticated. If our coarse + // mesh had many more cells than the five we + // have here, something better suited would + // obviously be necessary here. template void LaplaceProblem::solve () { - // Create the object that deals with the transfer - // between different refinement levels. We need to - // pass it the hanging node constraints. + // Create the object that deals with the transfer + // between different refinement levels. We need to + // pass it the hanging node constraints. 
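
As a rough picture of what such a transfer object encodes, the sketch below does grid transfer by hand on a 1d array of nodal values: prolongation by linear interpolation from a coarse vector of length n to a fine vector of length 2n-1, and restriction as the transpose of that map (not its inverse). This is a schematic analogue under the assumption of uniform 1d refinement; MGTransferPrebuilt assembles the corresponding, much more general, matrices from the finite element spaces and the constraints passed to it.

#include <iostream>
#include <vector>

// Linear-interpolation prolongation: coarse node values are copied,
// new fine nodes get the average of their two coarse neighbors.
std::vector<double> prolongate(const std::vector<double> &coarse)
{
  std::vector<double> fine(2 * coarse.size() - 1, 0.0);
  for (std::size_t i = 0; i < coarse.size(); ++i)
    fine[2 * i] = coarse[i];
  for (std::size_t i = 0; i + 1 < coarse.size(); ++i)
    fine[2 * i + 1] = 0.5 * (coarse[i] + coarse[i + 1]);
  return fine;
}

// Restriction as the transpose of prolongation: each coarse node gathers
// its own fine value plus half of each neighboring midpoint value.
std::vector<double> restrict_to_coarse(const std::vector<double> &fine)
{
  const std::size_t n_coarse = (fine.size() + 1) / 2;
  std::vector<double> coarse(n_coarse, 0.0);
  for (std::size_t i = 0; i < n_coarse; ++i)
    {
      coarse[i] = fine[2 * i];
      if (i > 0)
        coarse[i] += 0.5 * fine[2 * i - 1];
      if (i + 1 < n_coarse)
        coarse[i] += 0.5 * fine[2 * i + 1];
    }
  return coarse;
}

int main()
{
  const std::vector<double> coarse = {0.0, 1.0, 4.0};
  const std::vector<double> fine   = prolongate(coarse);

  for (double v : fine)
    std::cout << v << ' ';
  std::cout << '\n';

  const std::vector<double> restricted = restrict_to_coarse(fine);
  for (double v : restricted)
    std::cout << v << ' ';
  std::cout << '\n';
  return 0;
}
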
MGTransferPrebuilt > mg_transfer(hanging_node_constraints, mg_constrained_dofs); - // Now the prolongation matrix has to be built. - // This matrix needs to take the boundary values on - // each level into account and needs to know about - // the indices at the refinement egdes. The - // MGConstraints knows about that so - // pass it as an argument. + // Now the prolongation matrix has to be built. + // This matrix needs to take the boundary values on + // each level into account and needs to know about + // the indices at the refinement egdes. The + // MGConstraints knows about that so + // pass it as an argument. mg_transfer.build_matrices(mg_dof_handler); FullMatrix coarse_matrix; @@ -778,60 +778,60 @@ namespace Step16 MGCoarseGridHouseholder<> coarse_grid_solver; coarse_grid_solver.initialize (coarse_matrix); - // The next component of a multilevel - // solver or preconditioner is that we need - // a smoother on each level. A common - // choice for this is to use the - // application of a relaxation method (such - // as the SOR, Jacobi or Richardson method) - // or a small number of iterations of a - // solver method (such as CG or GMRES). The - // MGSmootherRelaxation and - // MGSmootherPrecondition classes provide - // support for these two kinds of - // smoothers. Here, we opt for the - // application of a single SOR - // iteration. To this end, we define an - // appropriate typedef and - // then setup a smoother object. - // - // Since this smoother needs temporary - // vectors to store intermediate results, - // we need to provide a VectorMemory - // object. Since these vectors will be - // reused over and over, the - // GrowingVectorMemory is more time - // efficient than the PrimitiveVectorMemory - // class in the current case. - // - // The last step is to initialize the - // smoother object with our level matrices - // and to set some smoothing parameters. - // The initialize() function - // can optionally take additional arguments - // that will be passed to the smoother - // object on each level. In the current - // case for the SOR smoother, this could, - // for example, include a relaxation - // parameter. However, we here leave these - // at their default values. The call to - // set_steps() indicates that - // we will use two pre- and two - // post-smoothing steps on each level; to - // use a variable number of smoother steps - // on different levels, more options can be - // set in the constructor call to the - // mg_smoother object. - // - // The last step results from the fact that - // we use the SOR method as a smoother - - // which is not symmetric - but we use the - // conjugate gradient iteration (which - // requires a symmetric preconditioner) - // below, we need to let the multilevel - // preconditioner make sure that we get a - // symmetric operator even for nonsymmetric - // smoothers: + // The next component of a multilevel + // solver or preconditioner is that we need + // a smoother on each level. A common + // choice for this is to use the + // application of a relaxation method (such + // as the SOR, Jacobi or Richardson method) + // or a small number of iterations of a + // solver method (such as CG or GMRES). The + // MGSmootherRelaxation and + // MGSmootherPrecondition classes provide + // support for these two kinds of + // smoothers. Here, we opt for the + // application of a single SOR + // iteration. To this end, we define an + // appropriate typedef and + // then setup a smoother object. 
+ // + // Since this smoother needs temporary + // vectors to store intermediate results, + // we need to provide a VectorMemory + // object. Since these vectors will be + // reused over and over, the + // GrowingVectorMemory is more time + // efficient than the PrimitiveVectorMemory + // class in the current case. + // + // The last step is to initialize the + // smoother object with our level matrices + // and to set some smoothing parameters. + // The initialize() function + // can optionally take additional arguments + // that will be passed to the smoother + // object on each level. In the current + // case for the SOR smoother, this could, + // for example, include a relaxation + // parameter. However, we here leave these + // at their default values. The call to + // set_steps() indicates that + // we will use two pre- and two + // post-smoothing steps on each level; to + // use a variable number of smoother steps + // on different levels, more options can be + // set in the constructor call to the + // mg_smoother object. + // + // The last step results from the fact that + // we use the SOR method as a smoother - + // which is not symmetric - but we use the + // conjugate gradient iteration (which + // requires a symmetric preconditioner) + // below, we need to let the multilevel + // preconditioner make sure that we get a + // symmetric operator even for nonsymmetric + // smoothers: typedef PreconditionSOR > Smoother; GrowingVectorMemory<> vector_memory; MGSmootherRelaxation, Smoother, Vector > @@ -840,87 +840,87 @@ namespace Step16 mg_smoother.set_steps(2); mg_smoother.set_symmetric(true); - // The next preparatory step is that we - // must wrap our level and interface - // matrices in an object having the - // required multiplication functions. We - // will create two objects for the - // interface objects going from coarse to - // fine and the other way around; the - // multigrid algorithm will later use the - // transpose operator for the latter - // operation, allowing us to initialize - // both up and down versions of the - // operator with the matrices we already - // built: + // The next preparatory step is that we + // must wrap our level and interface + // matrices in an object having the + // required multiplication functions. We + // will create two objects for the + // interface objects going from coarse to + // fine and the other way around; the + // multigrid algorithm will later use the + // transpose operator for the latter + // operation, allowing us to initialize + // both up and down versions of the + // operator with the matrices we already + // built: MGMatrix<> mg_matrix(&mg_matrices); MGMatrix<> mg_interface_up(&mg_interface_matrices); MGMatrix<> mg_interface_down(&mg_interface_matrices); - // Now, we are ready to set up the - // V-cycle operator and the - // multilevel preconditioner. + // Now, we are ready to set up the + // V-cycle operator and the + // multilevel preconditioner. 
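
Before the Multigrid object is built from these pieces, it may help to see the V-cycle control flow it will execute written out by hand. The recursion below is purely schematic: smoother, residual, transfer and coarse solve are placeholder callables supplied from outside, and the tiny single-unknown instantiation in main() exists only so the code runs; the order of the calls is the part that reflects the actual algorithm.

#include <functional>
#include <iostream>
#include <vector>

using Vec = std::vector<double>;

// Schematic V-cycle: pre-smooth, restrict the residual, recurse (or solve
// exactly on the coarsest level), prolongate and add the correction,
// post-smooth. All operations are supplied from outside as callables.
struct VCycle
{
  std::function<void(unsigned int, Vec &, const Vec &)>      smooth;            // in-place smoothing of A_l u = b
  std::function<Vec(unsigned int, const Vec &, const Vec &)> residual;          // b - A_l u
  std::function<Vec(unsigned int, const Vec &)>              restrict_residual; // level l -> l-1
  std::function<Vec(unsigned int, const Vec &)>              prolongate;        // level l-1 -> l
  std::function<void(Vec &, const Vec &)>                    coarse_solve;      // exact solve on level 0

  void operator()(const unsigned int level, Vec &u, const Vec &b) const
  {
    if (level == 0)
      {
        coarse_solve(u, b);
        return;
      }
    smooth(level, u, b);                                   // pre-smoothing
    const Vec r_coarse = restrict_residual(level, residual(level, u, b));
    Vec correction(r_coarse.size(), 0.0);
    (*this)(level - 1, correction, r_coarse);              // recurse
    const Vec fine_correction = prolongate(level, correction);
    for (std::size_t i = 0; i < u.size(); ++i)             // add coarse-grid correction
      u[i] += fine_correction[i];
    smooth(level, u, b);                                   // post-smoothing
  }
};

int main()
{
  // A deliberately trivial instantiation (every level is the single
  // unknown of 2*u = b) just to exercise the control flow once.
  VCycle mg;
  mg.smooth            = [](unsigned int, Vec &u, const Vec &b) { u[0] += 0.25 * (b[0] - 2.0 * u[0]); };
  mg.residual          = [](unsigned int, const Vec &u, const Vec &b) { return Vec{b[0] - 2.0 * u[0]}; };
  mg.restrict_residual = [](unsigned int, const Vec &r) { return r; };
  mg.prolongate        = [](unsigned int, const Vec &c) { return c; };
  mg.coarse_solve      = [](Vec &u, const Vec &b) { u[0] = b[0] / 2.0; };

  Vec u{0.0}, b{4.0};
  for (unsigned int cycle = 0; cycle < 3; ++cycle)
    mg(2, u, b);                    // two levels above the coarsest one
  std::cout << "u = " << u[0] << "  (exact: 2)\n";
  return 0;
}
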
Multigrid > mg(mg_dof_handler, - mg_matrix, - coarse_grid_solver, - mg_transfer, - mg_smoother, - mg_smoother); + mg_matrix, + coarse_grid_solver, + mg_transfer, + mg_smoother, + mg_smoother); mg.set_edge_matrices(mg_interface_down, mg_interface_up); PreconditionMG, MGTransferPrebuilt > > preconditioner(mg_dof_handler, mg, mg_transfer); - // With all this together, we can finally - // get about solving the linear system in - // the usual way: + // With all this together, we can finally + // get about solving the linear system in + // the usual way: SolverControl solver_control (1000, 1e-12); SolverCG<> cg (solver_control); solution = 0; cg.solve (system_matrix, solution, system_rhs, - preconditioner); + preconditioner); constraints.distribute (solution); std::cout << " " << solver_control.last_step() - << " CG iterations needed to obtain convergence." - << std::endl; + << " CG iterations needed to obtain convergence." + << std::endl; } - // @sect4{Postprocessing} - - // The following two functions postprocess a - // solution once it is computed. In - // particular, the first one refines the mesh - // at the beginning of each cycle while the - // second one outputs results at the end of - // each such cycle. The functions are almost - // unchanged from those in step-6, with the - // exception of two minor differences: The - // KellyErrorEstimator::estimate function - // wants an argument of type DoFHandler, not - // MGDoFHandler, and so we have to cast from - // derived to base class; and we generate - // output in VTK format, to use the more - // modern visualization programs available - // today compared to those that were - // available when step-6 was written. + // @sect4{Postprocessing} + + // The following two functions postprocess a + // solution once it is computed. In + // particular, the first one refines the mesh + // at the beginning of each cycle while the + // second one outputs results at the end of + // each such cycle. The functions are almost + // unchanged from those in step-6, with the + // exception of two minor differences: The + // KellyErrorEstimator::estimate function + // wants an argument of type DoFHandler, not + // MGDoFHandler, and so we have to cast from + // derived to base class; and we generate + // output in VTK format, to use the more + // modern visualization programs available + // today compared to those that were + // available when step-6 was written. template void LaplaceProblem::refine_grid () { Vector estimated_error_per_cell (triangulation.n_active_cells()); KellyErrorEstimator::estimate (static_cast&>(mg_dof_handler), - QGauss(3), - typename FunctionMap::type(), - solution, - estimated_error_per_cell); + QGauss(3), + typename FunctionMap::type(), + solution, + estimated_error_per_cell); GridRefinement::refine_and_coarsen_fixed_number (triangulation, - estimated_error_per_cell, - 0.3, 0.03); + estimated_error_per_cell, + 0.3, 0.03); triangulation.execute_coarsening_and_refinement (); } @@ -937,73 +937,73 @@ namespace Step16 std::ostringstream filename; filename << "solution-" - << cycle - << ".vtk"; + << cycle + << ".vtk"; std::ofstream output (filename.str().c_str()); data_out.write_vtk (output); } - // @sect4{LaplaceProblem::run} + // @sect4{LaplaceProblem::run} - // Like several of the functions above, this - // is almost exactly a copy of of the - // corresponding function in step-6. 
The only - // difference is the call to - // assemble_multigrid that takes - // care of forming the matrices on every - // level that we need in the multigrid - // method. + // Like several of the functions above, this + // is almost exactly a copy of of the + // corresponding function in step-6. The only + // difference is the call to + // assemble_multigrid that takes + // care of forming the matrices on every + // level that we need in the multigrid + // method. template void LaplaceProblem::run () { for (unsigned int cycle=0; cycle<8; ++cycle) { - std::cout << "Cycle " << cycle << ':' << std::endl; + std::cout << "Cycle " << cycle << ':' << std::endl; - if (cycle == 0) - { - GridGenerator::hyper_ball (triangulation); + if (cycle == 0) + { + GridGenerator::hyper_ball (triangulation); - static const HyperBallBoundary boundary; - triangulation.set_boundary (0, boundary); + static const HyperBallBoundary boundary; + triangulation.set_boundary (0, boundary); - triangulation.refine_global (1); - } - else - refine_grid (); + triangulation.refine_global (1); + } + else + refine_grid (); - std::cout << " Number of active cells: " - << triangulation.n_active_cells() - << std::endl; + std::cout << " Number of active cells: " + << triangulation.n_active_cells() + << std::endl; - setup_system (); + setup_system (); - std::cout << " Number of degrees of freedom: " - << mg_dof_handler.n_dofs() - << " (by level: "; - for (unsigned int level=0; level - // We are going to query the number - // of processes and the number of the - // present process by calling the - // respective functions in the - // Utilities::MPI namespace. + // We are going to query the number + // of processes and the number of the + // present process by calling the + // respective functions in the + // Utilities::MPI namespace. #include - // Then, we are + // Then, we are // going to replace all linear algebra // components that involve the (global) // linear system by classes that wrap @@ -96,25 +96,25 @@ #include #include - // The last step is as in all - // previous programs: + // The last step is as in all + // previous programs: namespace Step17 { using namespace dealii; - // Now, here comes the declaration of the - // main class and of various other things - // below it. As mentioned in the - // introduction, almost all of this has been - // copied verbatim from step-8, so we only - // comment on the few things that are - // different. There is one (cosmetic) change - // in that we let solve return a value, - // namely the number of iterations it took to - // converge, so that we can output this to - // the screen at the appropriate place. In - // addition, we introduce a stream-like - // variable pcout, explained below: + // Now, here comes the declaration of the + // main class and of various other things + // below it. As mentioned in the + // introduction, almost all of this has been + // copied verbatim from step-8, so we only + // comment on the few things that are + // different. There is one (cosmetic) change + // in that we let solve return a value, + // namely the number of iterations it took to + // converge, so that we can output this to + // the screen at the appropriate place. 
In + // addition, we introduce a stream-like + // variable pcout, explained below: template class ElasticProblem { @@ -130,33 +130,33 @@ namespace Step17 void refine_grid (); void output_results (const unsigned int cycle) const; - // The first variable is basically only - // for convenience: in %parallel program, - // if each process outputs status - // information, then there quickly is a - // lot of clutter. Rather, we would want - // to only have one process output - // everything once, for example the one - // with process number - // zero. ConditionalOStream does - // exactly this: it acts as if it were a - // stream, but only forwards to a real, - // underlying stream if a flag is set. By - // setting this condition to - // this_mpi_process==0, we make sure - // that output is only generated from the - // first process and that we don't get - // the same lines of output over and over - // again, once per process. - // - // With this simple trick, we make sure - // that we don't have to guard each and - // every write to std::cout by a - // prefixed if(this_mpi_process==0). + // The first variable is basically only + // for convenience: in %parallel program, + // if each process outputs status + // information, then there quickly is a + // lot of clutter. Rather, we would want + // to only have one process output + // everything once, for example the one + // with process number + // zero. ConditionalOStream does + // exactly this: it acts as if it were a + // stream, but only forwards to a real, + // underlying stream if a flag is set. By + // setting this condition to + // this_mpi_process==0, we make sure + // that output is only generated from the + // first process and that we don't get + // the same lines of output over and over + // again, once per process. + // + // With this simple trick, we make sure + // that we don't have to guard each and + // every write to std::cout by a + // prefixed if(this_mpi_process==0). ConditionalOStream pcout; - // The next few variables are taken - // verbatim from step-8: + // The next few variables are taken + // verbatim from step-8: Triangulation triangulation; DoFHandler dof_handler; @@ -164,64 +164,64 @@ namespace Step17 ConstraintMatrix hanging_node_constraints; - // In step-8, this would have been the - // place where we would have declared the - // member variables for the sparsity - // pattern, the system matrix, right - // hand, and solution vector. We change - // these declarations to use %parallel - // PETSc objects instead (note that the - // fact that we use the %parallel versions - // is denoted the fact that we use the - // classes from the - // PETScWrappers::MPI namespace; - // sequential versions of these classes - // are in the PETScWrappers - // namespace, i.e. without the MPI - // part). Note also that we do not use a - // separate sparsity pattern, since PETSc - // manages that as part of its matrix - // data structures. + // In step-8, this would have been the + // place where we would have declared the + // member variables for the sparsity + // pattern, the system matrix, right + // hand, and solution vector. We change + // these declarations to use %parallel + // PETSc objects instead (note that the + // fact that we use the %parallel versions + // is denoted the fact that we use the + // classes from the + // PETScWrappers::MPI namespace; + // sequential versions of these classes + // are in the PETScWrappers + // namespace, i.e. without the MPI + // part). 
Note also that we do not use a + // separate sparsity pattern, since PETSc + // manages that as part of its matrix + // data structures. PETScWrappers::MPI::SparseMatrix system_matrix; PETScWrappers::MPI::Vector solution; PETScWrappers::MPI::Vector system_rhs; - // The next change is that we have to - // declare a variable that indicates the - // MPI communicator over which we are - // supposed to distribute our - // computations. Note that if this is a - // sequential job without support by MPI, - // then PETSc provides some dummy type - // for MPI_Comm, so we do not have to - // care here whether the job is really a - // %parallel one: + // The next change is that we have to + // declare a variable that indicates the + // MPI communicator over which we are + // supposed to distribute our + // computations. Note that if this is a + // sequential job without support by MPI, + // then PETSc provides some dummy type + // for MPI_Comm, so we do not have to + // care here whether the job is really a + // %parallel one: MPI_Comm mpi_communicator; - // Then we have two variables that tell - // us where in the %parallel world we - // are. The first of the following - // variables, n_mpi_processes tells - // us how many MPI processes there exist - // in total, while the second one, - // this_mpi_process, indicates which - // is the number of the present process - // within this space of processes. The - // latter variable will have a unique - // value for each process between zero - // and (less than) - // n_mpi_processes. If this program - // is run on a single machine without MPI - // support, then their values are 1 - // and 0, respectively. + // Then we have two variables that tell + // us where in the %parallel world we + // are. The first of the following + // variables, n_mpi_processes tells + // us how many MPI processes there exist + // in total, while the second one, + // this_mpi_process, indicates which + // is the number of the present process + // within this space of processes. The + // latter variable will have a unique + // value for each process between zero + // and (less than) + // n_mpi_processes. If this program + // is run on a single machine without MPI + // support, then their values are 1 + // and 0, respectively. 
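
The two numbers described here are nothing but the usual MPI rank and communicator size. The following plain MPI program (no deal.II, no PETSc) shows where they come from and what the "only the first process prints" convention looks like without a ConditionalOStream; compile with an MPI wrapper such as mpicxx and run it under mpirun.

#include <mpi.h>
#include <iostream>

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);

  int n_mpi_processes  = 1;
  int this_mpi_process = 0;
  MPI_Comm_size(MPI_COMM_WORLD, &n_mpi_processes);   // total number of processes
  MPI_Comm_rank(MPI_COMM_WORLD, &this_mpi_process);  // number of this process, 0 .. size-1

  // Only the (globally) first process produces output, so that the
  // same line does not appear once per process.
  if (this_mpi_process == 0)
    std::cout << "Running with " << n_mpi_processes
              << " process(es)." << std::endl;

  MPI_Finalize();
  return 0;
}
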
const unsigned int n_mpi_processes; const unsigned int this_mpi_process; }; - // The following is again taken from step-8 - // without change: + // The following is again taken from step-8 + // without change: template class RightHandSide : public Function { @@ -229,26 +229,26 @@ namespace Step17 RightHandSide (); virtual void vector_value (const Point &p, - Vector &values) const; + Vector &values) const; virtual void vector_value_list (const std::vector > &points, - std::vector > &value_list) const; + std::vector > &value_list) const; }; template RightHandSide::RightHandSide () : - Function (dim) + Function (dim) {} template inline void RightHandSide::vector_value (const Point &p, - Vector &values) const + Vector &values) const { Assert (values.size() == dim, - ExcDimensionMismatch (values.size(), dim)); + ExcDimensionMismatch (values.size(), dim)); Assert (dim >= 2, ExcInternalError()); Point point_1, point_2; @@ -256,7 +256,7 @@ namespace Step17 point_2(0) = -0.5; if (((p-point_1).square() < 0.2*0.2) || - ((p-point_2).square() < 0.2*0.2)) + ((p-point_2).square() < 0.2*0.2)) values(0) = 1; else values(0) = 0; @@ -271,50 +271,50 @@ namespace Step17 template void RightHandSide::vector_value_list (const std::vector > &points, - std::vector > &value_list) const + std::vector > &value_list) const { const unsigned int n_points = points.size(); Assert (value_list.size() == n_points, - ExcDimensionMismatch (value_list.size(), n_points)); + ExcDimensionMismatch (value_list.size(), n_points)); for (unsigned int p=0; p::vector_value (points[p], - value_list[p]); + value_list[p]); } - // The first step in the actual - // implementation of things is the - // constructor of the main class. Apart from - // initializing the same member variables - // that we already had in step-8, we here - // initialize the MPI communicator variable - // we shall use with the global MPI - // communicator linking all processes - // together (in more complex applications, - // one could here use a communicator object - // that only links a subset of all - // processes), and call the Utilities helper - // functions to determine the number of - // processes and where the present one fits - // into this picture. In addition, we make - // sure that output is only generated by the - // (globally) first process. As, - // this_mpi_process is determined after - // creation of pcout, we cannot set the - // condition through the constructor, i.e. by - // pcout(std::cout, this_mpi_process==0), but - // set the condition separately. + // The first step in the actual + // implementation of things is the + // constructor of the main class. Apart from + // initializing the same member variables + // that we already had in step-8, we here + // initialize the MPI communicator variable + // we shall use with the global MPI + // communicator linking all processes + // together (in more complex applications, + // one could here use a communicator object + // that only links a subset of all + // processes), and call the Utilities helper + // functions to determine the number of + // processes and where the present one fits + // into this picture. In addition, we make + // sure that output is only generated by the + // (globally) first process. As, + // this_mpi_process is determined after + // creation of pcout, we cannot set the + // condition through the constructor, i.e. by + // pcout(std::cout, this_mpi_process==0), but + // set the condition separately. 
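
To illustrate the pcout idea in isolation, here is a much reduced stand-in for ConditionalOStream: a wrapper that forwards output to an underlying stream only while its condition flag is set, with the flag settable after construction just as the constructor below does via set_condition(). The class name ConditionalStream and its interface are invented for this sketch and are not the deal.II class.

#include <iostream>
#include <ostream>

class ConditionalStream
{
public:
  explicit ConditionalStream(std::ostream &stream, const bool active = true)
    : stream_(stream), active_(active)
  {}

  void set_condition(const bool active) { active_ = active; }

  // Forward any value, but only while the condition is set.
  template <typename T>
  ConditionalStream &operator<<(const T &value)
  {
    if (active_)
      stream_ << value;
    return *this;
  }

  // Also accept manipulators such as std::endl.
  ConditionalStream &operator<<(std::ostream &(*manipulator)(std::ostream &))
  {
    if (active_)
      stream_ << manipulator;
    return *this;
  }

private:
  std::ostream &stream_;
  bool          active_;
};

int main()
{
  const int this_mpi_process = 0;   // pretend we are process 0 of several

  ConditionalStream pcout(std::cout);
  pcout.set_condition(this_mpi_process == 0);

  pcout << "This line appears only on process 0." << std::endl;
  return 0;
}
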
template ElasticProblem::ElasticProblem () - : - pcout (std::cout), - dof_handler (triangulation), - fe (FE_Q(1), dim), - mpi_communicator (MPI_COMM_WORLD), - n_mpi_processes (Utilities::MPI::n_mpi_processes(mpi_communicator)), - this_mpi_process (Utilities::MPI::this_mpi_process(mpi_communicator)) + : + pcout (std::cout), + dof_handler (triangulation), + fe (FE_Q(1), dim), + mpi_communicator (MPI_COMM_WORLD), + n_mpi_processes (Utilities::MPI::n_mpi_processes(mpi_communicator)), + this_mpi_process (Utilities::MPI::this_mpi_process(mpi_communicator)) { pcout.set_condition(this_mpi_process == 0); } @@ -328,194 +328,194 @@ namespace Step17 } - // The second step is the function in which - // we set up the various variables for the - // global linear system to be solved. + // The second step is the function in which + // we set up the various variables for the + // global linear system to be solved. template void ElasticProblem::setup_system () { - // Before we even start out setting up the - // system, there is one thing to do for a - // %parallel program: we need to assign - // cells to each of the processes. We do - // this by splitting (partitioning) the - // mesh cells into as many chunks - // (subdomains) as there are processes - // in this MPI job (if this is a sequential - // job, then there is only one job and all - // cells will get a zero as subdomain - // indicator). This is done using an - // interface to the METIS library that does - // this in a very efficient way, trying to - // minimize the number of nodes on the - // interfaces between subdomains. All this - // is hidden behind the following call to a - // deal.II library function: + // Before we even start out setting up the + // system, there is one thing to do for a + // %parallel program: we need to assign + // cells to each of the processes. We do + // this by splitting (partitioning) the + // mesh cells into as many chunks + // (subdomains) as there are processes + // in this MPI job (if this is a sequential + // job, then there is only one job and all + // cells will get a zero as subdomain + // indicator). This is done using an + // interface to the METIS library that does + // this in a very efficient way, trying to + // minimize the number of nodes on the + // interfaces between subdomains. All this + // is hidden behind the following call to a + // deal.II library function: GridTools::partition_triangulation (n_mpi_processes, triangulation); - // As for the linear system: First, we need - // to generate an enumeration for the - // degrees of freedom in our - // problem. Further below, we will show how - // we assign each cell to one of the MPI - // processes before we even get here. What - // we then need to do is to enumerate the - // degrees of freedom in a way so that all - // degrees of freedom associated with cells - // in subdomain zero (which resides on - // process zero) come before all DoFs - // associated with cells on subdomain one, - // before those on cells on process two, - // and so on. We need this since we have to - // split the global vectors for right hand - // side and solution, as well as the matrix - // into contiguous chunks of rows that live - // on each of the processors, and we will - // want to do this in a way that requires - // minimal communication. 
This is done - // using the following two functions, which - // first generates an initial ordering of - // all degrees of freedom, and then re-sort - // them according to above criterion: + // As for the linear system: First, we need + // to generate an enumeration for the + // degrees of freedom in our + // problem. Further below, we will show how + // we assign each cell to one of the MPI + // processes before we even get here. What + // we then need to do is to enumerate the + // degrees of freedom in a way so that all + // degrees of freedom associated with cells + // in subdomain zero (which resides on + // process zero) come before all DoFs + // associated with cells on subdomain one, + // before those on cells on process two, + // and so on. We need this since we have to + // split the global vectors for right hand + // side and solution, as well as the matrix + // into contiguous chunks of rows that live + // on each of the processors, and we will + // want to do this in a way that requires + // minimal communication. This is done + // using the following two functions, which + // first generates an initial ordering of + // all degrees of freedom, and then re-sort + // them according to above criterion: dof_handler.distribute_dofs (fe); DoFRenumbering::subdomain_wise (dof_handler); - // While we're at it, let us also count how - // many degrees of freedom there exist on - // the present process: + // While we're at it, let us also count how + // many degrees of freedom there exist on + // the present process: const unsigned int n_local_dofs = DoFTools::count_dofs_with_subdomain_association (dof_handler, - this_mpi_process); - - // Then we initialize the system matrix, - // solution, and right hand side - // vectors. Since they all need to work in - // %parallel, we have to pass them an MPI - // communication object, as well as their - // global sizes (both dimensions are equal - // to the number of degrees of freedom), - // and also how many rows out of this - // global size are to be stored locally - // (n_local_dofs). In addition, PETSc - // needs to know how to partition the - // columns in the chunk of the matrix that - // is stored locally; for square matrices, - // the columns should be partitioned in the - // same way as the rows (indicated by the - // second n_local_dofs in the call) but - // in the case of rectangular matrices one - // has to partition the columns in the same - // way as vectors are partitioned with - // which the matrix is multiplied, while - // rows have to partitioned in the same way - // as destination vectors of matrix-vector - // multiplications: + this_mpi_process); + + // Then we initialize the system matrix, + // solution, and right hand side + // vectors. Since they all need to work in + // %parallel, we have to pass them an MPI + // communication object, as well as their + // global sizes (both dimensions are equal + // to the number of degrees of freedom), + // and also how many rows out of this + // global size are to be stored locally + // (n_local_dofs). 
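Condensed into one helper, the subdomain-wise enumeration and the row partitioning just described look roughly as follows (the column partitioning is discussed right below); this is only a sketch, the helper name partition_linear_system is made up for illustration, and the includes and reinit signatures follow the PETScWrappers interface used by this program:

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/dofs/dof_renumbering.h>
#include <deal.II/fe/fe.h>
#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
#include <deal.II/lac/petsc_parallel_vector.h>

// Enumerate degrees of freedom subdomain by subdomain and size the
// distributed matrix and vectors so that each process stores exactly
// the rows associated with its own subdomain.
template <int dim>
void partition_linear_system (dealii::DoFHandler<dim>                  &dof_handler,
                              const dealii::FiniteElement<dim>         &fe,
                              dealii::PETScWrappers::MPI::SparseMatrix &system_matrix,
                              dealii::PETScWrappers::MPI::Vector       &solution,
                              dealii::PETScWrappers::MPI::Vector       &system_rhs,
                              const MPI_Comm                            mpi_communicator,
                              const unsigned int                        this_mpi_process)
{
  using namespace dealii;

  dof_handler.distribute_dofs (fe);
  DoFRenumbering::subdomain_wise (dof_handler);

  // Rows (and, for this square matrix, columns) owned by this process:
  const unsigned int n_local_dofs
    = DoFTools::count_dofs_with_subdomain_association (dof_handler,
                                                       this_mpi_process);

  system_matrix.reinit (mpi_communicator,
                        dof_handler.n_dofs(), dof_handler.n_dofs(),
                        n_local_dofs, n_local_dofs,
                        dof_handler.max_couplings_between_dofs());
  solution.reinit   (mpi_communicator, dof_handler.n_dofs(), n_local_dofs);
  system_rhs.reinit (mpi_communicator, dof_handler.n_dofs(), n_local_dofs);
}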
In addition, PETSc + // needs to know how to partition the + // columns in the chunk of the matrix that + // is stored locally; for square matrices, + // the columns should be partitioned in the + // same way as the rows (indicated by the + // second n_local_dofs in the call) but + // in the case of rectangular matrices one + // has to partition the columns in the same + // way as vectors are partitioned with + // which the matrix is multiplied, while + // rows have to partitioned in the same way + // as destination vectors of matrix-vector + // multiplications: system_matrix.reinit (mpi_communicator, - dof_handler.n_dofs(), - dof_handler.n_dofs(), - n_local_dofs, - n_local_dofs, - dof_handler.max_couplings_between_dofs()); + dof_handler.n_dofs(), + dof_handler.n_dofs(), + n_local_dofs, + n_local_dofs, + dof_handler.max_couplings_between_dofs()); solution.reinit (mpi_communicator, dof_handler.n_dofs(), n_local_dofs); system_rhs.reinit (mpi_communicator, dof_handler.n_dofs(), n_local_dofs); - // Finally, we need to initialize the - // objects denoting hanging node - // constraints for the present grid. Note - // that since PETSc handles the sparsity - // pattern internally to the matrix, there - // is no need to set up an independent - // sparsity pattern here, and to condense - // it for constraints, as we have done in - // all other example programs. + // Finally, we need to initialize the + // objects denoting hanging node + // constraints for the present grid. Note + // that since PETSc handles the sparsity + // pattern internally to the matrix, there + // is no need to set up an independent + // sparsity pattern here, and to condense + // it for constraints, as we have done in + // all other example programs. hanging_node_constraints.clear (); DoFTools::make_hanging_node_constraints (dof_handler, - hanging_node_constraints); + hanging_node_constraints); hanging_node_constraints.close (); } - // The third step is to actually assemble the - // matrix and right hand side of the - // problem. There are some things worth - // mentioning before we go into - // detail. First, we will be assembling the - // system in %parallel, i.e. each process will - // be responsible for assembling on cells - // that belong to this particular - // processor. Note that the degrees of - // freedom are split in a way such that all - // DoFs in the interior of cells and between - // cells belonging to the same subdomain - // belong to the process that owns the - // cell. However, even then we sometimes need - // to assemble on a cell with a neighbor that - // belongs to a different process, and in - // these cases when we write the local - // contributions into the global matrix or - // right hand side vector, we actually have - // to transfer these entries to the other - // process. Fortunately, we don't have to do - // this by hand, PETSc does all this for us - // by caching these elements locally, and - // sending them to the other processes as - // necessary when we call the compress() - // functions on the matrix and vector at the - // end of this function. - // - // The second point is that once we - // have handed over matrix and vector - // contributions to PETSc, it is a) - // hard, and b) very inefficient to - // get them back for - // modifications. This is not only - // the fault of PETSc, it is also a - // consequence of the distributed - // nature of this program: if an - // entry resides on another - // processor, then it is necessarily - // expensive to get it. 
The - // consequence of this is that where - // we previously first assembled the - // matrix and right hand side as if - // there were no hanging node - // constraints and boundary values, - // and then eliminated these in a - // second step, we should now try to - // do that while still assembling the - // local systems, and before handing - // these entries over to PETSc. At - // least as far as eliminating - // hanging nodes is concerned, this - // is actually possible, though - // removing boundary nodes isn't that - // simple. deal.II provides functions - // to do this first part: instead of - // copying elements by hand into the - // global matrix, we use the - // distribute_local_to_global - // functions below to take care of - // hanging nodes at the same - // time. The second step, elimination - // of boundary nodes, is then done in - // exactly the same way as in all - // previous example programs. - // - // So, here is the actual implementation: + // The third step is to actually assemble the + // matrix and right hand side of the + // problem. There are some things worth + // mentioning before we go into + // detail. First, we will be assembling the + // system in %parallel, i.e. each process will + // be responsible for assembling on cells + // that belong to this particular + // processor. Note that the degrees of + // freedom are split in a way such that all + // DoFs in the interior of cells and between + // cells belonging to the same subdomain + // belong to the process that owns the + // cell. However, even then we sometimes need + // to assemble on a cell with a neighbor that + // belongs to a different process, and in + // these cases when we write the local + // contributions into the global matrix or + // right hand side vector, we actually have + // to transfer these entries to the other + // process. Fortunately, we don't have to do + // this by hand, PETSc does all this for us + // by caching these elements locally, and + // sending them to the other processes as + // necessary when we call the compress() + // functions on the matrix and vector at the + // end of this function. + // + // The second point is that once we + // have handed over matrix and vector + // contributions to PETSc, it is a) + // hard, and b) very inefficient to + // get them back for + // modifications. This is not only + // the fault of PETSc, it is also a + // consequence of the distributed + // nature of this program: if an + // entry resides on another + // processor, then it is necessarily + // expensive to get it. The + // consequence of this is that where + // we previously first assembled the + // matrix and right hand side as if + // there were no hanging node + // constraints and boundary values, + // and then eliminated these in a + // second step, we should now try to + // do that while still assembling the + // local systems, and before handing + // these entries over to PETSc. At + // least as far as eliminating + // hanging nodes is concerned, this + // is actually possible, though + // removing boundary nodes isn't that + // simple. deal.II provides functions + // to do this first part: instead of + // copying elements by hand into the + // global matrix, we use the + // distribute_local_to_global + // functions below to take care of + // hanging nodes at the same + // time. The second step, elimination + // of boundary nodes, is then done in + // exactly the same way as in all + // previous example programs. 
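As a small sketch of the strategy just described, eliminating hanging-node constraints while copying a cell's contribution into the global PETSc objects can look like this; the helper name copy_cell_to_global is illustrative, and the exact class and index types taken by distribute_local_to_global may differ between deal.II versions:

#include <deal.II/lac/constraint_matrix.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
#include <deal.II/lac/petsc_parallel_vector.h>
#include <vector>

// Copy one cell's local matrix and right hand side into the global
// distributed objects, resolving hanging-node constraints on the fly
// instead of condensing them away in a second pass.
void copy_cell_to_global (const dealii::FullMatrix<double>           &cell_matrix,
                          const dealii::Vector<double>               &cell_rhs,
                          const std::vector<unsigned int>            &local_dof_indices,
                          const dealii::ConstraintMatrix             &hanging_node_constraints,
                          dealii::PETScWrappers::MPI::SparseMatrix   &system_matrix,
                          dealii::PETScWrappers::MPI::Vector         &system_rhs)
{
  hanging_node_constraints
    .distribute_local_to_global (cell_matrix, local_dof_indices, system_matrix);
  hanging_node_constraints
    .distribute_local_to_global (cell_rhs, local_dof_indices, system_rhs);
  // Entries destined for rows owned by other processes are cached by
  // PETSc and exchanged when compress() is called after the cell loop.
}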
+ // + // So, here is the actual implementation: template void ElasticProblem::assemble_system () { - // The infrastructure to assemble linear - // systems is the same as in all the other - // programs, and in particular unchanged - // from step-8. Note that we still use the - // deal.II full matrix and vector types for - // the local systems. + // The infrastructure to assemble linear + // systems is the same as in all the other + // programs, and in particular unchanged + // from step-8. Note that we still use the + // deal.II full matrix and vector types for + // the local systems. QGauss quadrature_formula(2); FEValues fe_values (fe, quadrature_formula, - update_values | update_gradients | - update_quadrature_points | update_JxW_values); + update_values | update_gradients | + update_quadrature_points | update_JxW_values); const unsigned int dofs_per_cell = fe.dofs_per_cell; const unsigned int n_q_points = quadrature_formula.size(); @@ -532,679 +532,679 @@ namespace Step17 RightHandSide right_hand_side; std::vector > rhs_values (n_q_points, - Vector(dim)); - - - // The next thing is the loop over all - // elements. Note that we do not have to do - // all the work: our job here is only to - // assemble the system on cells that - // actually belong to this MPI process, all - // other cells will be taken care of by - // other processes. This is what the - // if-clause immediately after the for-loop - // takes care of: it queries the subdomain - // identifier of each cell, which is a - // number associated with each cell that - // tells which process handles it. In more - // generality, the subdomain id is used to - // split a domain into several parts (we do - // this above, at the beginning of - // setup_system), and which allows to - // identify which subdomain a cell is - // living on. In this application, we have - // each process handle exactly one - // subdomain, so we identify the terms - // subdomain and MPI process with - // each other. - // - // Apart from this, assembling the local - // system is relatively uneventful if you - // have understood how this is done in - // step-8, and only becomes interesting - // again once we start distributing it into - // the global matrix and right hand sides. + Vector(dim)); + + + // The next thing is the loop over all + // elements. Note that we do not have to do + // all the work: our job here is only to + // assemble the system on cells that + // actually belong to this MPI process, all + // other cells will be taken care of by + // other processes. This is what the + // if-clause immediately after the for-loop + // takes care of: it queries the subdomain + // identifier of each cell, which is a + // number associated with each cell that + // tells which process handles it. In more + // generality, the subdomain id is used to + // split a domain into several parts (we do + // this above, at the beginning of + // setup_system), and which allows to + // identify which subdomain a cell is + // living on. In this application, we have + // each process handle exactly one + // subdomain, so we identify the terms + // subdomain and MPI process with + // each other. + // + // Apart from this, assembling the local + // system is relatively uneventful if you + // have understood how this is done in + // step-8, and only becomes interesting + // again once we start distributing it into + // the global matrix and right hand sides. 
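A minimal illustration of that ownership test is the following sketch, which simply counts the cells handled by the present process (the library's GridTools::count_cells_with_subdomain_association does the same thing, and the program's real assembly loop follows right below):

#include <deal.II/dofs/dof_handler.h>

// Count the active cells whose subdomain id matches this MPI process,
// i.e. the cells this process is responsible for assembling on.
template <int dim>
unsigned int count_owned_cells (const dealii::DoFHandler<dim> &dof_handler,
                                const unsigned int             this_mpi_process)
{
  unsigned int n_owned = 0;
  typename dealii::DoFHandler<dim>::active_cell_iterator
    cell = dof_handler.begin_active(),
    endc = dof_handler.end();
  for (; cell!=endc; ++cell)
    if (cell->subdomain_id() == this_mpi_process)
      ++n_owned;
  return n_owned;
}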
typename DoFHandler::active_cell_iterator cell = dof_handler.begin_active(), endc = dof_handler.end(); for (; cell!=endc; ++cell) if (cell->subdomain_id() == this_mpi_process) - { - cell_matrix = 0; - cell_rhs = 0; + { + cell_matrix = 0; + cell_rhs = 0; - fe_values.reinit (cell); + fe_values.reinit (cell); - lambda.value_list (fe_values.get_quadrature_points(), lambda_values); - mu.value_list (fe_values.get_quadrature_points(), mu_values); + lambda.value_list (fe_values.get_quadrature_points(), lambda_values); + mu.value_list (fe_values.get_quadrature_points(), mu_values); - for (unsigned int i=0; iget_dof_indices (local_dof_indices); - hanging_node_constraints - .distribute_local_to_global (cell_matrix, - local_dof_indices, - system_matrix); - - hanging_node_constraints - .distribute_local_to_global (cell_rhs, - local_dof_indices, - system_rhs); - } - - // The global matrix and right hand side - // vectors have now been formed. Note that - // since we took care of this already - // above, we do not have to condense away - // hanging node constraints any more. - // - // However, we still have to apply boundary - // values, in the same way as we always do: + * + fe_values.JxW(q_point); + } + } + } + + right_hand_side.vector_value_list (fe_values.get_quadrature_points(), + rhs_values); + for (unsigned int i=0; iget_dof_indices (local_dof_indices); + hanging_node_constraints + .distribute_local_to_global (cell_matrix, + local_dof_indices, + system_matrix); + + hanging_node_constraints + .distribute_local_to_global (cell_rhs, + local_dof_indices, + system_rhs); + } + + // The global matrix and right hand side + // vectors have now been formed. Note that + // since we took care of this already + // above, we do not have to condense away + // hanging node constraints any more. + // + // However, we still have to apply boundary + // values, in the same way as we always do: std::map boundary_values; VectorTools::interpolate_boundary_values (dof_handler, - 0, - ZeroFunction(dim), - boundary_values); + 0, + ZeroFunction(dim), + boundary_values); MatrixTools::apply_boundary_values (boundary_values, - system_matrix, solution, - system_rhs, false); - // The last argument to the call just - // performed allows for some - // optimizations. It controls - // whether we should also delete the - // column corresponding to a boundary - // node, or keep it (and passing - // true as above means: yes, do - // eliminate the column). If we do, - // then the resulting matrix will be - // symmetric again if it was before; - // if we don't, then it won't. The - // solution of the resulting system - // should be the same, though. The - // only reason why we may want to - // make the system symmetric again is - // that we would like to use the CG - // method, which only works with - // symmetric matrices. Experience - // tells that CG also works (and - // works almost as well) if we don't - // remove the columns associated with - // boundary nodes, which can be - // easily explained by the special - // structure of the - // non-symmetry. Since eliminating - // columns from dense matrices is not - // expensive, though, we let the - // function do it; not doing so is - // more important if the linear - // system is either non-symmetric - // anyway, or we are using the - // non-local version of this function - // (as in all the other example - // programs before) and want to save - // a few cycles during this - // operation. 
+ system_matrix, solution, + system_rhs, false); + // The last argument to the call just + // performed allows for some + // optimizations. It controls + // whether we should also delete the + // column corresponding to a boundary + // node, or keep it (and passing + // true as above means: yes, do + // eliminate the column). If we do, + // then the resulting matrix will be + // symmetric again if it was before; + // if we don't, then it won't. The + // solution of the resulting system + // should be the same, though. The + // only reason why we may want to + // make the system symmetric again is + // that we would like to use the CG + // method, which only works with + // symmetric matrices. Experience + // tells that CG also works (and + // works almost as well) if we don't + // remove the columns associated with + // boundary nodes, which can be + // easily explained by the special + // structure of the + // non-symmetry. Since eliminating + // columns from dense matrices is not + // expensive, though, we let the + // function do it; not doing so is + // more important if the linear + // system is either non-symmetric + // anyway, or we are using the + // non-local version of this function + // (as in all the other example + // programs before) and want to save + // a few cycles during this + // operation. } - // The fourth step is to solve the linear - // system, with its distributed matrix and - // vector objects. Fortunately, PETSc offers - // a variety of sequential and %parallel - // solvers, for which we have written - // wrappers that have almost the same - // interface as is used for the deal.II - // solvers used in all previous example - // programs. + // The fourth step is to solve the linear + // system, with its distributed matrix and + // vector objects. Fortunately, PETSc offers + // a variety of sequential and %parallel + // solvers, for which we have written + // wrappers that have almost the same + // interface as is used for the deal.II + // solvers used in all previous example + // programs. template unsigned int ElasticProblem::solve () { - // First, we have to set up a convergence - // monitor, and assign it the accuracy to - // which we would like to solve the linear - // system. Next, an actual solver object - // using PETSc's CG solver which also works - // with %parallel (distributed) vectors and - // matrices. And finally a preconditioner; - // we choose to use a block Jacobi - // preconditioner which works by computing - // an incomplete LU decomposition on each - // block (i.e. the chunk of matrix that is - // stored on each MPI process). That means - // that if you run the program with only - // one process, then you will use an ILU(0) - // as a preconditioner, while if it is run - // on many processes, then we will have a - // number of blocks on the diagonal and the - // preconditioner is the ILU(0) of each of - // these blocks. + // First, we have to set up a convergence + // monitor, and assign it the accuracy to + // which we would like to solve the linear + // system. Next, an actual solver object + // using PETSc's CG solver which also works + // with %parallel (distributed) vectors and + // matrices. And finally a preconditioner; + // we choose to use a block Jacobi + // preconditioner which works by computing + // an incomplete LU decomposition on each + // block (i.e. the chunk of matrix that is + // stored on each MPI process). 
That means + // that if you run the program with only + // one process, then you will use an ILU(0) + // as a preconditioner, while if it is run + // on many processes, then we will have a + // number of blocks on the diagonal and the + // preconditioner is the ILU(0) of each of + // these blocks. SolverControl solver_control (solution.size(), - 1e-8*system_rhs.l2_norm()); + 1e-8*system_rhs.l2_norm()); PETScWrappers::SolverCG cg (solver_control, - mpi_communicator); + mpi_communicator); PETScWrappers::PreconditionBlockJacobi preconditioner(system_matrix); - // Then solve the system: + // Then solve the system: cg.solve (system_matrix, solution, system_rhs, - preconditioner); - - // The next step is to distribute hanging - // node constraints. This is a little - // tricky, since to fill in the value of a - // constrained node you need access to the - // values of the nodes to which it is - // constrained (for example, for a Q1 - // element in 2d, we need access to the two - // nodes on the big side of a hanging node - // face, to compute the value of the - // constrained node in the middle). Since - // PETSc (and, for that matter, the MPI - // model on which it is built) does not - // allow to query the value of another node - // in a simple way if we should need it, - // what we do here is to get a copy of the - // distributed vector where we keep all - // elements locally. This is simple, since - // the deal.II wrappers have a conversion - // constructor for the non-MPI vector - // class: + preconditioner); + + // The next step is to distribute hanging + // node constraints. This is a little + // tricky, since to fill in the value of a + // constrained node you need access to the + // values of the nodes to which it is + // constrained (for example, for a Q1 + // element in 2d, we need access to the two + // nodes on the big side of a hanging node + // face, to compute the value of the + // constrained node in the middle). Since + // PETSc (and, for that matter, the MPI + // model on which it is built) does not + // allow to query the value of another node + // in a simple way if we should need it, + // what we do here is to get a copy of the + // distributed vector where we keep all + // elements locally. This is simple, since + // the deal.II wrappers have a conversion + // constructor for the non-MPI vector + // class: PETScWrappers::Vector localized_solution (solution); - // Then we distribute hanging node - // constraints on this local copy, i.e. we - // compute the values of all constrained - // nodes: + // Then we distribute hanging node + // constraints on this local copy, i.e. we + // compute the values of all constrained + // nodes: hanging_node_constraints.distribute (localized_solution); - // Then transfer everything back - // into the global vector. The - // following operation copies those - // elements of the localized - // solution that we store locally - // in the distributed solution, and - // does not touch the other - // ones. Since we do the same - // operation on all processors, we - // end up with a distributed vector - // that has all the constrained - // nodes fixed. + // Then transfer everything back + // into the global vector. The + // following operation copies those + // elements of the localized + // solution that we store locally + // in the distributed solution, and + // does not touch the other + // ones. Since we do the same + // operation on all processors, we + // end up with a distributed vector + // that has all the constrained + // nodes fixed. 
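Put together, the solve-then-distribute pattern described above can be sketched as a single helper; the function name is illustrative, and the no-argument compress() call follows the PETSc wrapper interface this program uses (newer deal.II versions require a VectorOperation argument):

#include <deal.II/lac/solver_control.h>
#include <deal.II/lac/constraint_matrix.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_vector.h>

// Solve the distributed system with PETSc's CG and a block Jacobi
// preconditioner, then fix up the hanging-node values: localize,
// distribute constraints, copy the local share back, compress.
unsigned int
solve_and_distribute (dealii::PETScWrappers::MPI::SparseMatrix &system_matrix,
                      dealii::PETScWrappers::MPI::Vector       &solution,
                      const dealii::PETScWrappers::MPI::Vector &system_rhs,
                      const dealii::ConstraintMatrix           &hanging_node_constraints,
                      const MPI_Comm                            mpi_communicator)
{
  using namespace dealii;

  SolverControl solver_control (solution.size(),
                                1e-8 * system_rhs.l2_norm());
  PETScWrappers::SolverCG cg (solver_control, mpi_communicator);
  PETScWrappers::PreconditionBlockJacobi preconditioner (system_matrix);

  cg.solve (system_matrix, solution, system_rhs, preconditioner);

  // Constrained nodes may need values stored on other processes, so
  // work on a fully localized copy and copy the result back:
  PETScWrappers::Vector localized_solution (solution);
  hanging_node_constraints.distribute (localized_solution);
  solution = localized_solution;
  solution.compress ();

  return solver_control.last_step();
}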
solution = localized_solution; - // After this has happened, flush the PETSc - // buffers. This may or may not be strictly - // necessary here (the PETSc documentation - // is not very verbose on these things), - // but certainly doesn't hurt either. + // After this has happened, flush the PETSc + // buffers. This may or may not be strictly + // necessary here (the PETSc documentation + // is not very verbose on these things), + // but certainly doesn't hurt either. solution.compress (); - // Finally return the number of iterations - // it took to converge, to allow for some - // output: + // Finally return the number of iterations + // it took to converge, to allow for some + // output: return solver_control.last_step(); } - // Step five is to output the results we - // computed in this iteration. This is - // actually the same as done in step-8 - // before, with two small differences. First, - // all processes call this function, but not - // all of them need to do the work associated - // with generating output. In fact, they - // shouldn't, since we would try to write to - // the same file multiple times at once. So - // we let only the first job do this, and all - // the other ones idle around during this - // time (or start their work for the next - // iteration, or simply yield their CPUs to - // other jobs that happen to run at the same - // time). The second thing is that we not - // only output the solution vector, but also - // a vector that indicates which subdomain - // each cell belongs to. This will make for - // some nice pictures of partitioned domains. - // - // In practice, the present implementation of - // the output function is a major bottleneck - // of this program, since generating - // graphical output is expensive and doing so - // only on one process does, of course, not - // scale if we significantly increase the - // number of processes. In effect, this - // function will consume most of the run-time - // if you go to very large numbers of - // unknowns and processes, and real - // applications should limit the number of - // times they generate output through this - // function. - // - // The solution to this is to have - // each process generate output data - // only for it's own local cells, and - // write them to separate files, one - // file per process. This would - // distribute the work of generating - // the output to all processes - // equally. In a second step, - // separate from running this - // program, we would then take all - // the output files for a given cycle - // and merge these parts into one - // single output file. This has to be - // done sequentially, but can be done - // on a different machine, and should - // be relatively cheap. However, the - // necessary functionality for this - // is not yet implemented in the - // library, and since we are too - // close to the next release, we do - // not want to do such major - // destabilizing changes any - // more. This has been fixed in the - // meantime, though, and a better way - // to do things is explained in the - // step-18 example program. + // Step five is to output the results we + // computed in this iteration. This is + // actually the same as done in step-8 + // before, with two small differences. First, + // all processes call this function, but not + // all of them need to do the work associated + // with generating output. In fact, they + // shouldn't, since we would try to write to + // the same file multiple times at once. 
So + // we let only the first job do this, and all + // the other ones idle around during this + // time (or start their work for the next + // iteration, or simply yield their CPUs to + // other jobs that happen to run at the same + // time). The second thing is that we not + // only output the solution vector, but also + // a vector that indicates which subdomain + // each cell belongs to. This will make for + // some nice pictures of partitioned domains. + // + // In practice, the present implementation of + // the output function is a major bottleneck + // of this program, since generating + // graphical output is expensive and doing so + // only on one process does, of course, not + // scale if we significantly increase the + // number of processes. In effect, this + // function will consume most of the run-time + // if you go to very large numbers of + // unknowns and processes, and real + // applications should limit the number of + // times they generate output through this + // function. + // + // The solution to this is to have + // each process generate output data + // only for it's own local cells, and + // write them to separate files, one + // file per process. This would + // distribute the work of generating + // the output to all processes + // equally. In a second step, + // separate from running this + // program, we would then take all + // the output files for a given cycle + // and merge these parts into one + // single output file. This has to be + // done sequentially, but can be done + // on a different machine, and should + // be relatively cheap. However, the + // necessary functionality for this + // is not yet implemented in the + // library, and since we are too + // close to the next release, we do + // not want to do such major + // destabilizing changes any + // more. This has been fixed in the + // meantime, though, and a better way + // to do things is explained in the + // step-18 example program. template void ElasticProblem::output_results (const unsigned int cycle) const { - // One point to realize is that when we - // want to generate output on process zero - // only, we need to have access to all - // elements of the solution vector. So we - // need to get a local copy of the - // distributed vector, which is in fact - // simple: + // One point to realize is that when we + // want to generate output on process zero + // only, we need to have access to all + // elements of the solution vector. So we + // need to get a local copy of the + // distributed vector, which is in fact + // simple: const PETScWrappers::Vector localized_solution (solution); - // The thing to notice, however, is that - // we do this localization operation on all - // processes, not only the one that - // actually needs the data. This can't be - // avoided, however, with the communication - // model of MPI: MPI does not have a way to - // query data on another process, both - // sides have to initiate a communication - // at the same time. So even though most of - // the processes do not need the localized - // solution, we have to place the call here - // so that all processes execute it. - // - // (In reality, part of this work can in - // fact be avoided. What we do is send the - // local parts of all processes to all - // other processes. What we would really - // need to do is to initiate an operation - // on all processes where each process - // simply sends its local chunk of data to - // process zero, since this is the only one - // that actually needs it, i.e. 
we need - // something like a gather operation. PETSc - // can do this, but for simplicity's sake - // we don't attempt to make use of this - // here. We don't, since what we do is not - // very expensive in the grand scheme of - // things: it is one vector communication - // among all processes , which has to be - // compared to the number of communications - // we have to do when solving the linear - // system, setting up the block-ILU for the - // preconditioner, and other operations.) - - // This being done, process zero goes ahead - // with setting up the output file as in - // step-8, and attaching the (localized) - // solution vector to the output - // object:. (The code to generate the output - // file name is stolen and slightly - // modified from step-5, since we expect - // that we can do a number of cycles - // greater than 10, which is the maximum of - // what the code in step-8 could handle.) + // The thing to notice, however, is that + // we do this localization operation on all + // processes, not only the one that + // actually needs the data. This can't be + // avoided, however, with the communication + // model of MPI: MPI does not have a way to + // query data on another process, both + // sides have to initiate a communication + // at the same time. So even though most of + // the processes do not need the localized + // solution, we have to place the call here + // so that all processes execute it. + // + // (In reality, part of this work can in + // fact be avoided. What we do is send the + // local parts of all processes to all + // other processes. What we would really + // need to do is to initiate an operation + // on all processes where each process + // simply sends its local chunk of data to + // process zero, since this is the only one + // that actually needs it, i.e. we need + // something like a gather operation. PETSc + // can do this, but for simplicity's sake + // we don't attempt to make use of this + // here. We don't, since what we do is not + // very expensive in the grand scheme of + // things: it is one vector communication + // among all processes , which has to be + // compared to the number of communications + // we have to do when solving the linear + // system, setting up the block-ILU for the + // preconditioner, and other operations.) + + // This being done, process zero goes ahead + // with setting up the output file as in + // step-8, and attaching the (localized) + // solution vector to the output + // object:. (The code to generate the output + // file name is stolen and slightly + // modified from step-5, since we expect + // that we can do a number of cycles + // greater than 10, which is the maximum of + // what the code in step-8 could handle.) 
if (this_mpi_process == 0) { - std::ostringstream filename; - filename << "solution-" << cycle << ".gmv"; - - std::ofstream output (filename.str().c_str()); - - DataOut data_out; - data_out.attach_dof_handler (dof_handler); - - std::vector solution_names; - switch (dim) - { - case 1: - solution_names.push_back ("displacement"); - break; - case 2: - solution_names.push_back ("x_displacement"); - solution_names.push_back ("y_displacement"); - break; - case 3: - solution_names.push_back ("x_displacement"); - solution_names.push_back ("y_displacement"); - solution_names.push_back ("z_displacement"); - break; - default: - Assert (false, ExcInternalError()); - } - - data_out.add_data_vector (localized_solution, solution_names); - - // The only thing we do here - // additionally is that we also output - // one value per cell indicating which - // subdomain (i.e. MPI process) it - // belongs to. This requires some - // conversion work, since the data the - // library provides us with is not the - // one the output class expects, but - // this is not difficult. First, set up - // a vector of integers, one per cell, - // that is then filled by the number of - // subdomain each cell is in: - std::vector partition_int (triangulation.n_active_cells()); - GridTools::get_subdomain_association (triangulation, partition_int); - - // Then convert this integer vector - // into a floating point vector just as - // the output functions want to see: - const Vector partitioning(partition_int.begin(), - partition_int.end()); - - // And finally add this vector as well: - data_out.add_data_vector (partitioning, "partitioning"); - - // This all being done, generate the - // intermediate format and write it out - // in GMV output format: - data_out.build_patches (); - data_out.write_gmv (output); + std::ostringstream filename; + filename << "solution-" << cycle << ".gmv"; + + std::ofstream output (filename.str().c_str()); + + DataOut data_out; + data_out.attach_dof_handler (dof_handler); + + std::vector solution_names; + switch (dim) + { + case 1: + solution_names.push_back ("displacement"); + break; + case 2: + solution_names.push_back ("x_displacement"); + solution_names.push_back ("y_displacement"); + break; + case 3: + solution_names.push_back ("x_displacement"); + solution_names.push_back ("y_displacement"); + solution_names.push_back ("z_displacement"); + break; + default: + Assert (false, ExcInternalError()); + } + + data_out.add_data_vector (localized_solution, solution_names); + + // The only thing we do here + // additionally is that we also output + // one value per cell indicating which + // subdomain (i.e. MPI process) it + // belongs to. This requires some + // conversion work, since the data the + // library provides us with is not the + // one the output class expects, but + // this is not difficult. 
First, set up + // a vector of integers, one per cell, + // that is then filled by the number of + // subdomain each cell is in: + std::vector partition_int (triangulation.n_active_cells()); + GridTools::get_subdomain_association (triangulation, partition_int); + + // Then convert this integer vector + // into a floating point vector just as + // the output functions want to see: + const Vector partitioning(partition_int.begin(), + partition_int.end()); + + // And finally add this vector as well: + data_out.add_data_vector (partitioning, "partitioning"); + + // This all being done, generate the + // intermediate format and write it out + // in GMV output format: + data_out.build_patches (); + data_out.write_gmv (output); } } - // The sixth step is to take the solution - // just computed, and evaluate some kind of - // refinement indicator to refine the - // mesh. The problem is basically the same as - // with distributing hanging node - // constraints: in order to compute the error - // indicator, we need access to all elements - // of the solution vector. We then compute - // the indicators for the cells that belong - // to the present process, but then we need - // to distribute the refinement indicators - // into a distributed vector so that all - // processes have the values of the - // refinement indicator for all cells. But - // then, in order for each process to refine - // its copy of the mesh, they need to have - // acces to all refinement indicators - // locally, so they have to copy the global - // vector back into a local one. That's a - // little convoluted, but thinking about it - // quite straightforward nevertheless. So - // here's how we do it: + // The sixth step is to take the solution + // just computed, and evaluate some kind of + // refinement indicator to refine the + // mesh. The problem is basically the same as + // with distributing hanging node + // constraints: in order to compute the error + // indicator, we need access to all elements + // of the solution vector. We then compute + // the indicators for the cells that belong + // to the present process, but then we need + // to distribute the refinement indicators + // into a distributed vector so that all + // processes have the values of the + // refinement indicator for all cells. But + // then, in order for each process to refine + // its copy of the mesh, they need to have + // acces to all refinement indicators + // locally, so they have to copy the global + // vector back into a local one. That's a + // little convoluted, but thinking about it + // quite straightforward nevertheless. So + // here's how we do it: template void ElasticProblem::refine_grid () { - // So, first part: get a local copy of the - // distributed solution vector. This is - // necessary since the error estimator - // needs to get at the value of neighboring - // cells even if they do not belong to the - // subdomain associated with the present - // MPI process: + // So, first part: get a local copy of the + // distributed solution vector. This is + // necessary since the error estimator + // needs to get at the value of neighboring + // cells even if they do not belong to the + // subdomain associated with the present + // MPI process: const PETScWrappers::Vector localized_solution (solution); - // Second part: set up a vector of error - // indicators for all cells and let the - // Kelly class compute refinement - // indicators for all cells belonging to - // the present subdomain/process. 
Note that - // the last argument of the call indicates - // which subdomain we are interested - // in. The three arguments before it are - // various other default arguments that one - // usually doesn't need (and doesn't state - // values for, but rather uses the - // defaults), but which we have to state - // here explicitly since we want to modify - // the value of a following argument - // (i.e. the one indicating the subdomain): + // Second part: set up a vector of error + // indicators for all cells and let the + // Kelly class compute refinement + // indicators for all cells belonging to + // the present subdomain/process. Note that + // the last argument of the call indicates + // which subdomain we are interested + // in. The three arguments before it are + // various other default arguments that one + // usually doesn't need (and doesn't state + // values for, but rather uses the + // defaults), but which we have to state + // here explicitly since we want to modify + // the value of a following argument + // (i.e. the one indicating the subdomain): Vector local_error_per_cell (triangulation.n_active_cells()); KellyErrorEstimator::estimate (dof_handler, - QGauss(2), - typename FunctionMap::type(), - localized_solution, - local_error_per_cell, - std::vector(), - 0, - multithread_info.n_default_threads, - this_mpi_process); - - // Now all processes have computed error - // indicators for their own cells and - // stored them in the respective elements - // of the local_error_per_cell - // vector. The elements of this vector for - // cells not on the present process are - // zero. However, since all processes have - // a copy of a copy of the entire - // triangulation and need to keep these - // copies in synch, they need the values of - // refinement indicators for all cells of - // the triangulation. Thus, we need to - // distribute our results. We do this by - // creating a distributed vector where each - // process has its share, and sets the - // elements it has computed. We will then - // later generate a local sequential copy - // of this distributed vector to allow each - // process to access all elements of this - // vector. - // - // So in the first step, we need to set up - // a %parallel vector. For simplicity, every - // process will own a chunk with as many - // elements as this process owns cells, so - // that the first chunk of elements is - // stored with process zero, the next chunk - // with process one, and so on. It is - // important to remark, however, that these - // elements are not necessarily the ones we - // will write to. This is so, since the - // order in which cells are arranged, - // i.e. the order in which the elements of - // the vector correspond to cells, is not - // ordered according to the subdomain these - // cells belong to. In other words, if on - // this process we compute indicators for - // cells of a certain subdomain, we may - // write the results to more or less random - // elements if the distributed vector, that - // do not necessarily lie within the chunk - // of vector we own on the present - // process. They will subsequently have to - // be copied into another process's memory - // space then, an operation that PETSc does - // for us when we call the compress - // function. This inefficiency could be - // avoided with some more code, but we - // refrain from it since it is not a major - // factor in the program's total runtime. 
- // - // So here's how we do it: count how many - // cells belong to this process, set up a - // distributed vector with that many - // elements to be stored locally, and copy - // over the elements we computed locally, - // then compress the result. In fact, we - // really only copy the elements that are - // nonzero, so we may miss a few that we - // computed to zero, but this won't hurt - // since the original values of the vector - // is zero anyway. + QGauss(2), + typename FunctionMap::type(), + localized_solution, + local_error_per_cell, + std::vector(), + 0, + multithread_info.n_default_threads, + this_mpi_process); + + // Now all processes have computed error + // indicators for their own cells and + // stored them in the respective elements + // of the local_error_per_cell + // vector. The elements of this vector for + // cells not on the present process are + // zero. However, since all processes have + // a copy of a copy of the entire + // triangulation and need to keep these + // copies in synch, they need the values of + // refinement indicators for all cells of + // the triangulation. Thus, we need to + // distribute our results. We do this by + // creating a distributed vector where each + // process has its share, and sets the + // elements it has computed. We will then + // later generate a local sequential copy + // of this distributed vector to allow each + // process to access all elements of this + // vector. + // + // So in the first step, we need to set up + // a %parallel vector. For simplicity, every + // process will own a chunk with as many + // elements as this process owns cells, so + // that the first chunk of elements is + // stored with process zero, the next chunk + // with process one, and so on. It is + // important to remark, however, that these + // elements are not necessarily the ones we + // will write to. This is so, since the + // order in which cells are arranged, + // i.e. the order in which the elements of + // the vector correspond to cells, is not + // ordered according to the subdomain these + // cells belong to. In other words, if on + // this process we compute indicators for + // cells of a certain subdomain, we may + // write the results to more or less random + // elements if the distributed vector, that + // do not necessarily lie within the chunk + // of vector we own on the present + // process. They will subsequently have to + // be copied into another process's memory + // space then, an operation that PETSc does + // for us when we call the compress + // function. This inefficiency could be + // avoided with some more code, but we + // refrain from it since it is not a major + // factor in the program's total runtime. + // + // So here's how we do it: count how many + // cells belong to this process, set up a + // distributed vector with that many + // elements to be stored locally, and copy + // over the elements we computed locally, + // then compress the result. In fact, we + // really only copy the elements that are + // nonzero, so we may miss a few that we + // computed to zero, but this won't hurt + // since the original values of the vector + // is zero anyway. 
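A condensed sketch of the scatter, compress, localize and refine sequence just described (the program's own code follows right below); the helper name and parameter list are illustrative only:

#include <deal.II/grid/tria.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/grid/grid_refinement.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/petsc_parallel_vector.h>

// Scatter locally computed refinement indicators into a distributed
// vector, localize the result on every process, and refine. The input
// vector is assumed to be zero for cells owned by other processes.
template <int dim>
void distribute_indicators_and_refine (dealii::Triangulation<dim>  &triangulation,
                                       const dealii::Vector<float> &local_error_per_cell,
                                       const MPI_Comm               mpi_communicator,
                                       const unsigned int           this_mpi_process)
{
  using namespace dealii;

  const unsigned int n_local_cells
    = GridTools::count_cells_with_subdomain_association (triangulation,
                                                         this_mpi_process);

  PETScWrappers::MPI::Vector distributed_all_errors (mpi_communicator,
                                                     triangulation.n_active_cells(),
                                                     n_local_cells);
  for (unsigned int i=0; i<local_error_per_cell.size(); ++i)
    if (local_error_per_cell(i) != 0)
      distributed_all_errors(i) = local_error_per_cell(i);
  distributed_all_errors.compress ();

  // Every process needs all indicators to refine its copy of the mesh:
  const Vector<float> localized_all_errors (distributed_all_errors);
  GridRefinement::refine_and_coarsen_fixed_number (triangulation,
                                                   localized_all_errors,
                                                   0.3, 0.03);
  triangulation.execute_coarsening_and_refinement ();
}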
const unsigned int n_local_cells = GridTools::count_cells_with_subdomain_association (triangulation, - this_mpi_process); + this_mpi_process); PETScWrappers::MPI::Vector distributed_all_errors (mpi_communicator, - triangulation.n_active_cells(), - n_local_cells); + triangulation.n_active_cells(), + n_local_cells); for (unsigned int i=0; i localized_all_errors (distributed_all_errors); - // ...which we can the subsequently use to - // finally refine the grid: + // ...which we can the subsequently use to + // finally refine the grid: GridRefinement::refine_and_coarsen_fixed_number (triangulation, - localized_all_errors, - 0.3, 0.03); + localized_all_errors, + 0.3, 0.03); triangulation.execute_coarsening_and_refinement (); } - // Lastly, here is the driver function. It is - // almost unchanged from step-8, with the - // exception that we replace std::cout by - // the pcout stream. Apart from this, the - // only other cosmetic change is that we - // output how many degrees of freedom there - // are per process, and how many iterations - // it took for the linear solver to converge: + // Lastly, here is the driver function. It is + // almost unchanged from step-8, with the + // exception that we replace std::cout by + // the pcout stream. Apart from this, the + // only other cosmetic change is that we + // output how many degrees of freedom there + // are per process, and how many iterations + // it took for the linear solver to converge: template void ElasticProblem::run () { for (unsigned int cycle=0; cycle<10; ++cycle) { - pcout << "Cycle " << cycle << ':' << std::endl; - - if (cycle == 0) - { - GridGenerator::hyper_cube (triangulation, -1, 1); - triangulation.refine_global (3); - } - else - refine_grid (); - - pcout << " Number of active cells: " - << triangulation.n_active_cells() - << std::endl; - - setup_system (); - - pcout << " Number of degrees of freedom: " - << dof_handler.n_dofs() - << " (by partition:"; - for (unsigned int p=0; p #include - // As mentioned in the first few tutorial - // programs, all names in deal.II are - // declared in a namespace - // dealii. To make using these - // function and class names simpler, we - // import the entire content of that - // namespace into the global scope. As done - // for all previous programs already, we'll - // also place everything we do here into a - // namespace of its own: + // As mentioned in the first few tutorial + // programs, all names in deal.II are + // declared in a namespace + // dealii. To make using these + // function and class names simpler, we + // import the entire content of that + // namespace into the global scope. As done + // for all previous programs already, we'll + // also place everything we do here into a + // namespace of its own: namespace Step19 { using namespace dealii; - // Before we start with the actual program, - // let us declare a few global variables that - // will be used to hold the parameters this - // program is going to use. Usually, global - // variables are frowned upon for a good - // reason, but since we have such a short - // program here that does only a single - // thing, we may stray from our usual line - // and make these variables global, rather - // than passing them around to all functions - // or encapsulating them into a class. 
- // - // The variables we have are: first, an - // object that will hold parameters of - // operation, such as output format (unless - // given on the command line); second, the - // names of input and output files; and third, - // the format in which the output is to be - // written: + // Before we start with the actual program, + // let us declare a few global variables that + // will be used to hold the parameters this + // program is going to use. Usually, global + // variables are frowned upon for a good + // reason, but since we have such a short + // program here that does only a single + // thing, we may stray from our usual line + // and make these variables global, rather + // than passing them around to all functions + // or encapsulating them into a class. + // + // The variables we have are: first, an + // object that will hold parameters of + // operation, such as output format (unless + // given on the command line); second, the + // names of input and output files; and third, + // the format in which the output is to be + // written: ParameterHandler prm; std::vector input_file_names; std::string output_file; std::string output_format; - // All the stuff this program does can be - // done from here on. As described in the - // introduction, what we have to do is - // declare what values the parameter file can - // have, parse the command line, read the - // input files, then write the output. We - // will do this in this order of operation, - // but before that let us declare a function - // that prints a message about how this - // program is to be used; the function first - // prints a general message, and then goes on - // to list the parameters that are allowed in - // the parameter file (the - // ParameterHandler class has a function - // to do exactly this; see the results - // section for what it prints): + // All the stuff this program does can be + // done from here on. As described in the + // introduction, what we have to do is + // declare what values the parameter file can + // have, parse the command line, read the + // input files, then write the output. We + // will do this in this order of operation, + // but before that let us declare a function + // that prints a message about how this + // program is to be used; the function first + // prints a general message, and then goes on + // to list the parameters that are allowed in + // the parameter file (the + // ParameterHandler class has a function + // to do exactly this; see the results + // section for what it prints): void print_usage_message () { @@ -106,383 +106,383 @@ namespace Step19 } - // @sect4{Declaring parameters for the input file} - - // The second function is used to declare the - // parameters this program accepts from the - // input file. While we don't actually take - // many parameters from the input file except - // for, possibly, the output file name and - // format, we nevertheless want to show how - // to work with parameter files. - // - // In short, the ParameterHandler class - // works as follows: one declares the entries - // of parameters that can be given in input - // files together, and later on one can read - // an input file in which these parameters - // are set to their values. If a parameter is - // not listed in the input file, the default - // value specified in the declaration of that - // parameter is used. After that, the program - // can query the values assigned to certain - // parameters from the ParameterHandler - // object. 
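In code, that declare, read and query cycle looks roughly as follows; reading from a string stream is only for the sake of a self-contained sketch, and the reading function is called read_input() in the deal.II version this patch is based on (later releases renamed it parse_input()):

#include <deal.II/base/parameter_handler.h>
#include <iostream>
#include <sstream>

int main ()
{
  using namespace dealii;
  ParameterHandler prm;

  // Declare what may appear in a parameter file, with a default value
  // and a pattern constraining admissible values:
  prm.declare_entry ("Output file", "output.gmv",
                     Patterns::Anything(),
                     "The name of the output file to be generated");

  // Read parameters; here from a string rather than a file so the
  // sketch needs no external input. An entry not set in the input
  // keeps the default declared above.
  std::istringstream in ("set Output file = solution.vtk\n");
  prm.read_input (in);

  // Query the value that is now in effect:
  std::cout << "Output file: " << prm.get ("Output file") << std::endl;
  return 0;
}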
- // - // Declaring parameters can be done using the - // ParameterHandler::declare_entry - // function. It's arguments are the name of a - // parameter, a default value (given as a - // string, even if the parameter is numeric - // in nature, and thirdly an object that - // describes constraints on values that may - // be passed to this parameter. In the - // example below, we use an object of type - // Patterns::Anything to denote that - // there are no constraints on file names - // (this is, of course, not true -- the - // operating system does have constraints, - // but from an application standpoint, almost - // all names are valid). In other cases, one - // may, for example, use - // Patterns::Integer to make sure that - // only parameters are accepted that can be - // interpreted as integer values (it is also - // possible to specify bounds for integer - // values, and all values outside this range - // are rejected), Patterns::Double for - // floating point values, classes that make - // sure that the given parameter value is a - // comma separated list of things, etc. Take - // a look at the Patterns namespace to - // see what is possible. - // - // The fourth argument to declare_entry - // is a help string that can be printed to - // document what this parameter is meant to - // be used for and other information you may - // consider important when declaring this - // parameter. The default value of this - // fourth argument is the empty string. - // - // I always wanted to have an example program - // describing the ParameterHandler class, - // because it is so particularly useful. It - // would have been useful in a number of - // previous example programs (for example, in - // order to let the tolerance for linear - // solvers, or the number of refinement steps - // be determined by a run-time parameter, - // rather than hard-coding them into the - // program), but it turned out that trying to - // explain this class there would have - // overloaded them with things that would - // have distracted from the main - // purpose. However, while writing this - // program, I realized that there aren't all - // that many parameters this program can - // usefully ask for, or better, it turned - // out: declaring and querying these - // parameters was already done centralized in - // one place of the libray, namely the - // DataOutInterface class that handles - // exactly this -- managing parameters for - // input and output. - // - // So the second function call in this - // function is to let the - // DataOutInterface declare a good number - // of parameters that control everything from - // the output format to what kind of output - // should be generated if output is written - // in a specific graphical format. For - // example, when writing data in encapsulated - // postscript (EPS) format, the result is - // just a 2d projection, not data that can be - // viewed and rotated with a - // viewer. Therefore, one has to choose the - // viewing angle and a number of other - // options up front, when output is - // generated, rather than playing around with - // them later on. The call to - // DataOutInterface::declare_parameters - // declares entries that allow to specify - // them in the parameter input file during - // run-time. If the parameter file does not - // contain entries for them, defaults are - // taken. - // - // As a final note: DataOutInterface is a - // template, because it is usually used to - // write output for a specific space - // dimension. 
However, this program is - // supposed to be used for all dimensions at - // the same time, so we don't know at compile - // time what the right dimension is when - // specifying the template - // parameter. Fortunately, declaring - // parameters is something that is space - // dimension independent, so we can just pick - // one arbitrarily. We pick 1, but it - // could have been any other number as well. + // @sect4{Declaring parameters for the input file} + + // The second function is used to declare the + // parameters this program accepts from the + // input file. While we don't actually take + // many parameters from the input file except + // for, possibly, the output file name and + // format, we nevertheless want to show how + // to work with parameter files. + // + // In short, the ParameterHandler class + // works as follows: one declares the entries + // of parameters that can be given in input + // files together, and later on one can read + // an input file in which these parameters + // are set to their values. If a parameter is + // not listed in the input file, the default + // value specified in the declaration of that + // parameter is used. After that, the program + // can query the values assigned to certain + // parameters from the ParameterHandler + // object. + // + // Declaring parameters can be done using the + // ParameterHandler::declare_entry + // function. It's arguments are the name of a + // parameter, a default value (given as a + // string, even if the parameter is numeric + // in nature, and thirdly an object that + // describes constraints on values that may + // be passed to this parameter. In the + // example below, we use an object of type + // Patterns::Anything to denote that + // there are no constraints on file names + // (this is, of course, not true -- the + // operating system does have constraints, + // but from an application standpoint, almost + // all names are valid). In other cases, one + // may, for example, use + // Patterns::Integer to make sure that + // only parameters are accepted that can be + // interpreted as integer values (it is also + // possible to specify bounds for integer + // values, and all values outside this range + // are rejected), Patterns::Double for + // floating point values, classes that make + // sure that the given parameter value is a + // comma separated list of things, etc. Take + // a look at the Patterns namespace to + // see what is possible. + // + // The fourth argument to declare_entry + // is a help string that can be printed to + // document what this parameter is meant to + // be used for and other information you may + // consider important when declaring this + // parameter. The default value of this + // fourth argument is the empty string. + // + // I always wanted to have an example program + // describing the ParameterHandler class, + // because it is so particularly useful. It + // would have been useful in a number of + // previous example programs (for example, in + // order to let the tolerance for linear + // solvers, or the number of refinement steps + // be determined by a run-time parameter, + // rather than hard-coding them into the + // program), but it turned out that trying to + // explain this class there would have + // overloaded them with things that would + // have distracted from the main + // purpose. 
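Before moving on, here is how the Patterns classes discussed above translate into concrete calls. The entry names, bounds and defaults below are invented purely for illustration, and the snippet assumes a ParameterHandler object like the global prm of this program:

void declare_more_entries (ParameterHandler &prm)
{
  prm.declare_entry ("Refinement steps", "5",
                     Patterns::Integer (0, 20),
                     "How many global refinement steps to perform");
  prm.declare_entry ("Solver tolerance", "1e-12",
                     Patterns::Double (0),
                     "Absolute tolerance of the linear solver");
  prm.declare_entry ("Output format", "vtk",
                     Patterns::Selection ("vtk|gnuplot|eps"),
                     "One of a fixed set of graphical output formats");
}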
However, while writing this + // program, I realized that there aren't all + // that many parameters this program can + // usefully ask for, or better, it turned + // out: declaring and querying these + // parameters was already done centralized in + // one place of the libray, namely the + // DataOutInterface class that handles + // exactly this -- managing parameters for + // input and output. + // + // So the second function call in this + // function is to let the + // DataOutInterface declare a good number + // of parameters that control everything from + // the output format to what kind of output + // should be generated if output is written + // in a specific graphical format. For + // example, when writing data in encapsulated + // postscript (EPS) format, the result is + // just a 2d projection, not data that can be + // viewed and rotated with a + // viewer. Therefore, one has to choose the + // viewing angle and a number of other + // options up front, when output is + // generated, rather than playing around with + // them later on. The call to + // DataOutInterface::declare_parameters + // declares entries that allow to specify + // them in the parameter input file during + // run-time. If the parameter file does not + // contain entries for them, defaults are + // taken. + // + // As a final note: DataOutInterface is a + // template, because it is usually used to + // write output for a specific space + // dimension. However, this program is + // supposed to be used for all dimensions at + // the same time, so we don't know at compile + // time what the right dimension is when + // specifying the template + // parameter. Fortunately, declaring + // parameters is something that is space + // dimension independent, so we can just pick + // one arbitrarily. We pick 1, but it + // could have been any other number as well. void declare_parameters () { prm.declare_entry ("Output file", "", - Patterns::Anything(), - "The name of the output file to be generated"); + Patterns::Anything(), + "The name of the output file to be generated"); DataOutInterface<1>::declare_parameters (prm); - // Since everything that this program can - // usefully request in terms of input - // parameters is already handled by now, - // let us nevertheless show how to use - // input parameters in other - // circumstances. First, parameters are - // like files in a directory tree: they can - // be in the top-level directory, but you - // can also group them into subdirectories - // to make it easier to find them or to be - // able to use the same parameter name in - // different contexts. - // - // Let us first declare a dummy parameter - // in the top-level section; we assume that - // it will denote the number of iterations, - // and that useful numbers of iterations - // that a user should be able to specify - // are in the range 1...1000, with a - // default value of 42: + // Since everything that this program can + // usefully request in terms of input + // parameters is already handled by now, + // let us nevertheless show how to use + // input parameters in other + // circumstances. First, parameters are + // like files in a directory tree: they can + // be in the top-level directory, but you + // can also group them into subdirectories + // to make it easier to find them or to be + // able to use the same parameter name in + // different contexts. 
+ // + // Let us first declare a dummy parameter + // in the top-level section; we assume that + // it will denote the number of iterations, + // and that useful numbers of iterations + // that a user should be able to specify + // are in the range 1...1000, with a + // default value of 42: prm.declare_entry ("Dummy iterations", "42", - Patterns::Integer (1,1000), - "A dummy parameter asking for an integer"); - - // Next, let us declare a sub-section (the - // equivalent to a subdirectory). When - // entered, all following parameter - // declarations will be within this - // subsection. To also visually group these - // declarations with the subsection name, I - // like to use curly braces to force my - // editor to indent everything that goes - // into this sub-section by one level of - // indentation. In this sub-section, we - // shall have two entries, one that takes a - // boolean parameter and one that takes a - // selection list of values, separated by - // the '|' character: + Patterns::Integer (1,1000), + "A dummy parameter asking for an integer"); + + // Next, let us declare a sub-section (the + // equivalent to a subdirectory). When + // entered, all following parameter + // declarations will be within this + // subsection. To also visually group these + // declarations with the subsection name, I + // like to use curly braces to force my + // editor to indent everything that goes + // into this sub-section by one level of + // indentation. In this sub-section, we + // shall have two entries, one that takes a + // boolean parameter and one that takes a + // selection list of values, separated by + // the '|' character: prm.enter_subsection ("Dummy subsection"); { prm.declare_entry ("Dummy generate output", "true", - Patterns::Bool(), - "A dummy parameter that can be fed with either " - "'true' or 'false'"); + Patterns::Bool(), + "A dummy parameter that can be fed with either " + "'true' or 'false'"); prm.declare_entry ("Dummy color of output", "red", - Patterns::Selection("red|black|blue"), - "A dummy parameter that shows how one can define a " - "parameter that can be assigned values from a finite " - "set of values"); + Patterns::Selection("red|black|blue"), + "A dummy parameter that shows how one can define a " + "parameter that can be assigned values from a finite " + "set of values"); } prm.leave_subsection (); - // After this, we have left the subsection - // again. You should have gotten the idea - // by now how one can nest subsections to - // separate parameters. There are a number - // of other possible patterns describing - // possible values of parameters; in all - // cases, if you try to pass a parameter to - // the program that does not match the - // expectations of the pattern, it will - // reject the parameter file and ask you to - // fix it. After all, it does not make much - // sense if you had an entry that contained - // the entry "red" for the parameter - // "Generate output". + // After this, we have left the subsection + // again. You should have gotten the idea + // by now how one can nest subsections to + // separate parameters. There are a number + // of other possible patterns describing + // possible values of parameters; in all + // cases, if you try to pass a parameter to + // the program that does not match the + // expectations of the pattern, it will + // reject the parameter file and ask you to + // fix it. 
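To see these declarations in action end to end, the following sketch shows what a matching parameter file would look like and how it is read back, fed through a string stream so the example needs no file on disk. The concrete values are made up:

#include <deal.II/base/parameter_handler.h>
#include <iostream>
#include <sstream>

int main ()
{
  using namespace dealii;

  ParameterHandler prm;
  prm.declare_entry ("Dummy iterations", "42",
                     Patterns::Integer (1,1000), "");
  prm.enter_subsection ("Dummy subsection");
  {
    prm.declare_entry ("Dummy generate output", "true",
                       Patterns::Bool(), "");
    prm.declare_entry ("Dummy color of output", "red",
                       Patterns::Selection ("red|black|blue"), "");
  }
  prm.leave_subsection ();

  // the text below is exactly what a .prm file would contain
  std::istringstream in ("set Dummy iterations = 100\n"
                         "subsection Dummy subsection\n"
                         "  set Dummy color of output = blue\n"
                         "end\n");
  prm.read_input (in);

  prm.enter_subsection ("Dummy subsection");
  std::cout << prm.get ("Dummy color of output")        // "blue"
            << ' '
            << prm.get_bool ("Dummy generate output")   // not set: default true
            << std::endl;
  prm.leave_subsection ();
}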
After all, it does not make much + // sense if you had an entry that contained + // the entry "red" for the parameter + // "Generate output". } - // @sect4{Parsing the command line} - - // Our next task is to see what information - // has been provided on the command - // line. First, we need to be sure that there - // is at least one parameter: an input - // file. The format and the output file can - // be specified in the parameter file, but - // the list of input files can't, so at least - // one parameter needs to be there. Together - // with the name of the program (the zeroth - // parameter), argc must therefore be at - // least 2. If this is not the case, we print - // an error message and exit: + // @sect4{Parsing the command line} + + // Our next task is to see what information + // has been provided on the command + // line. First, we need to be sure that there + // is at least one parameter: an input + // file. The format and the output file can + // be specified in the parameter file, but + // the list of input files can't, so at least + // one parameter needs to be there. Together + // with the name of the program (the zeroth + // parameter), argc must therefore be at + // least 2. If this is not the case, we print + // an error message and exit: void parse_command_line (const int argc, - char *const * argv) + char *const * argv) { if (argc < 2) { - print_usage_message (); - exit (1); + print_usage_message (); + exit (1); } - // Next, collect all parameters in a list - // that will be somewhat simpler to handle - // than the argc/argv mechanism. We - // omit the name of the executable at the - // zeroth index: + // Next, collect all parameters in a list + // that will be somewhat simpler to handle + // than the argc/argv mechanism. We + // omit the name of the executable at the + // zeroth index: std::list args; for (int i=1; i-p, then there must be a - // parameter file following (which - // we should then read), in case of - // -x it is the name of an - // output format. Finally, for - // -o it is the name of the - // output file. In all cases, once - // we've treated a parameter, we - // remove it from the list of - // parameters: + // Then process all these + // parameters. If the parameter is + // -p, then there must be a + // parameter file following (which + // we should then read), in case of + // -x it is the name of an + // output format. Finally, for + // -o it is the name of the + // output file. In all cases, once + // we've treated a parameter, we + // remove it from the list of + // parameters: while (args.size()) { - if (args.front() == std::string("-p")) - { - if (args.size() == 1) - { - std::cerr << "Error: flag '-p' must be followed by the " - << "name of a parameter file." - << std::endl; - print_usage_message (); - exit (1); - } - args.pop_front (); - const std::string parameter_file = args.front (); - args.pop_front (); - - // Now read the input file: - prm.read_input (parameter_file); - - // Both the output file name as - // well as the format can be - // specified on the command - // line. We have therefore given - // them global variables that hold - // their values, but they can also - // be set in the parameter file. 
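Stepping back to the argument-collection loop a few lines further up: written out in full it presumably reads as follows, copying each command line argument into a list of strings while skipping the executable name. The element type std::string is my assumption; the surrounding prose only says the arguments are collected into a list:

#include <list>
#include <string>

int main (int argc, char **argv)
{
  std::list<std::string> args;
  for (int i=1; i<argc; ++i)      // omit the executable name at index 0
    args.push_back (argv[i]);

  // args can now be consumed front to back with pop_front(), which is
  // exactly what the while loop in parse_command_line() does
  return 0;
}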
We - // therefore need to extract them - // from the parameter file here, - // because they may be overridden - // by later command line - // parameters: - if (output_file == "") - output_file = prm.get ("Output file"); - - if (output_format == "") - output_format = prm.get ("Output format"); - - // Finally, let us note that if we - // were interested in the values of - // the parameters declared above in - // the dummy subsection, we would - // write something like this to - // extract the value of the boolean - // flag (the prm.get function - // returns the value of a parameter - // as a string, whereas the - // prm.get_X functions return a - // value already converted to a - // different type): - prm.enter_subsection ("Dummy subsection"); - { - prm.get_bool ("Dummy generate output"); - } - prm.leave_subsection (); - // We would assign the result to a - // variable, or course, but don't - // here in order not to generate an - // unused variable that the - // compiler might warn about. - // - // Alas, let's move on to handling - // of output formats: - } - else if (args.front() == std::string("-x")) - { - if (args.size() == 1) - { - std::cerr << "Error: flag '-x' must be followed by the " - << "name of an output format." - << std::endl; - print_usage_message (); - exit (1); - } - args.pop_front (); - output_format = args.front(); - args.pop_front (); - } - else if (args.front() == std::string("-o")) - { - if (args.size() == 1) - { - std::cerr << "Error: flag '-o' must be followed by the " - << "name of an output file." - << std::endl; - print_usage_message (); - exit (1); - } - args.pop_front (); - output_file = args.front(); - args.pop_front (); - } - - // Otherwise, this is not a parameter - // that starts with a known minus - // sequence, and we should consider it - // to be the name of an input file. Let - // us therefore add this file to the - // list of input files: - else - { - input_file_names.push_back (args.front()); - args.pop_front (); - } + if (args.front() == std::string("-p")) + { + if (args.size() == 1) + { + std::cerr << "Error: flag '-p' must be followed by the " + << "name of a parameter file." + << std::endl; + print_usage_message (); + exit (1); + } + args.pop_front (); + const std::string parameter_file = args.front (); + args.pop_front (); + + // Now read the input file: + prm.read_input (parameter_file); + + // Both the output file name as + // well as the format can be + // specified on the command + // line. We have therefore given + // them global variables that hold + // their values, but they can also + // be set in the parameter file. 
We + // therefore need to extract them + // from the parameter file here, + // because they may be overridden + // by later command line + // parameters: + if (output_file == "") + output_file = prm.get ("Output file"); + + if (output_format == "") + output_format = prm.get ("Output format"); + + // Finally, let us note that if we + // were interested in the values of + // the parameters declared above in + // the dummy subsection, we would + // write something like this to + // extract the value of the boolean + // flag (the prm.get function + // returns the value of a parameter + // as a string, whereas the + // prm.get_X functions return a + // value already converted to a + // different type): + prm.enter_subsection ("Dummy subsection"); + { + prm.get_bool ("Dummy generate output"); + } + prm.leave_subsection (); + // We would assign the result to a + // variable, or course, but don't + // here in order not to generate an + // unused variable that the + // compiler might warn about. + // + // Alas, let's move on to handling + // of output formats: + } + else if (args.front() == std::string("-x")) + { + if (args.size() == 1) + { + std::cerr << "Error: flag '-x' must be followed by the " + << "name of an output format." + << std::endl; + print_usage_message (); + exit (1); + } + args.pop_front (); + output_format = args.front(); + args.pop_front (); + } + else if (args.front() == std::string("-o")) + { + if (args.size() == 1) + { + std::cerr << "Error: flag '-o' must be followed by the " + << "name of an output file." + << std::endl; + print_usage_message (); + exit (1); + } + args.pop_front (); + output_file = args.front(); + args.pop_front (); + } + + // Otherwise, this is not a parameter + // that starts with a known minus + // sequence, and we should consider it + // to be the name of an input file. Let + // us therefore add this file to the + // list of input files: + else + { + input_file_names.push_back (args.front()); + args.pop_front (); + } } - // Next check a few things and create - // errors if the checks fail. Firstly, - // there must be at least one input file + // Next check a few things and create + // errors if the checks fail. Firstly, + // there must be at least one input file if (input_file_names.size() == 0) { - std::cerr << "Error: No input file specified." << std::endl; - print_usage_message (); - exit (1); + std::cerr << "Error: No input file specified." << std::endl; + print_usage_message (); + exit (1); } } - // @sect4{Generating output} - - // Now that we have all the information, we - // need to read all the input files, merge - // them, and generate a single output - // file. This, after all, was the motivation, - // borne from the necessity encountered in - // the step-18 tutorial program, to write - // this program in the first place. - // - // So what we do first is to declare an - // object into which we will merge the data - // from all the input file, and read in the - // first file through a stream. Note that - // every time we open a file, we use the - // AssertThrow macro to check whether the - // file is really readable -- if it isn't - // then this will trigger an exception and - // corresponding output will be generated - // from the exception handler in main(): + // @sect4{Generating output} + + // Now that we have all the information, we + // need to read all the input files, merge + // them, and generate a single output + // file. 
This, after all, was the motivation, + // borne from the necessity encountered in + // the step-18 tutorial program, to write + // this program in the first place. + // + // So what we do first is to declare an + // object into which we will merge the data + // from all the input file, and read in the + // first file through a stream. Note that + // every time we open a file, we use the + // AssertThrow macro to check whether the + // file is really readable -- if it isn't + // then this will trigger an exception and + // corresponding output will be generated + // from the exception handler in main(): template void do_convert () { @@ -495,85 +495,85 @@ namespace Step19 merged_data.read (input); } - // For all the other input files, we read - // their data into an intermediate object, - // and then merge that into the first - // object declared above: + // For all the other input files, we read + // their data into an intermediate object, + // and then merge that into the first + // object declared above: for (unsigned int i=1; i additional_data; - additional_data.read (input); - merged_data.merge (additional_data); + DataOutReader additional_data; + additional_data.read (input); + merged_data.merge (additional_data); } - // Once we have this, let us open an output - // stream, and parse what we got as the - // name of the output format into an - // identifier. Fortunately, the - // DataOutBase class has a function - // that does this parsing for us, i.e. it - // knows about all the presently supported - // output formats and makes sure that they - // can be specified in the parameter file - // or on the command line. Note that this - // ensures that if the library acquires the - // ability to output in other output - // formats, this program will be able to - // make use of this ability without having - // to be changed! + // Once we have this, let us open an output + // stream, and parse what we got as the + // name of the output format into an + // identifier. Fortunately, the + // DataOutBase class has a function + // that does this parsing for us, i.e. it + // knows about all the presently supported + // output formats and makes sure that they + // can be specified in the parameter file + // or on the command line. Note that this + // ensures that if the library acquires the + // ability to output in other output + // formats, this program will be able to + // make use of this ability without having + // to be changed! std::ofstream output_stream (output_file.c_str()); AssertThrow (output_stream, ExcIO()); const DataOutBase::OutputFormat format = DataOutBase::parse_output_format (output_format); - // Finally, write the merged data to the - // output: + // Finally, write the merged data to the + // output: merged_data.write(output_stream, format); } - // @sect4{Dispatching output generation} - - // The function above takes template - // parameters relating to the space dimension - // of the output, and the dimension of the - // objects to be output. (For example, when - // outputting whole cells, these two - // dimensions are the same, but the - // intermediate files may contain only data - // pertaining to the faces of cells, in which - // case the first parameter will be one less - // than the space dimension.) - // - // The problem is: at compile time, we of - // course don't know the dimensions used in - // the input files. We have to plan for all - // cases, therefore. 
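A side note on the format parsing used in do_convert() above, before turning to the dimension problem: the same mechanism can be exercised on its own. The "vtk" literal is just an example string, and default_suffix is, to my understanding, the companion function that yields the matching file extension:

const DataOutBase::OutputFormat format
  = DataOutBase::parse_output_format ("vtk");
const std::string suffix = DataOutBase::default_suffix (format);   // ".vtk"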
This is a little clumsy, - // since we need to specify the dimensions - // statically at compile time, even though we - // will only know about them at run time. - // - // So here is what we do: from the first - // input file, we determine (using a function - // in DataOutBase that exists for this - // purpose) these dimensions. We then have a - // series of switches that dispatch, - // statically, to the do_convert - // functions with different template - // arguments. Not pretty, but works. Apart - // from this, the function does nothing -- - // except making sure that it covered the - // dimensions for which it was called, using - // the AssertThrow macro at places in the - // code that shouldn't be reached: + // @sect4{Dispatching output generation} + + // The function above takes template + // parameters relating to the space dimension + // of the output, and the dimension of the + // objects to be output. (For example, when + // outputting whole cells, these two + // dimensions are the same, but the + // intermediate files may contain only data + // pertaining to the faces of cells, in which + // case the first parameter will be one less + // than the space dimension.) + // + // The problem is: at compile time, we of + // course don't know the dimensions used in + // the input files. We have to plan for all + // cases, therefore. This is a little clumsy, + // since we need to specify the dimensions + // statically at compile time, even though we + // will only know about them at run time. + // + // So here is what we do: from the first + // input file, we determine (using a function + // in DataOutBase that exists for this + // purpose) these dimensions. We then have a + // series of switches that dispatch, + // statically, to the do_convert + // functions with different template + // arguments. Not pretty, but works. Apart + // from this, the function does nothing -- + // except making sure that it covered the + // dimensions for which it was called, using + // the AssertThrow macro at places in the + // code that shouldn't be reached: void convert () { AssertThrow (input_file_names.size() > 0, - ExcMessage ("No input files specified.")); + ExcMessage ("No input files specified.")); std::ifstream input(input_file_names[0].c_str()); AssertThrow (input, ExcIO()); @@ -583,40 +583,40 @@ namespace Step19 switch (dimensions.first) { - case 1: - switch (dimensions.second) - { - case 1: - do_convert <1,1> (); - return; - - case 2: - do_convert <1,2> (); - return; - } - AssertThrow (false, ExcNotImplemented()); - - case 2: - switch (dimensions.second) - { - case 2: - do_convert <2,2> (); - return; - - case 3: - do_convert <2,3> (); - return; - } - AssertThrow (false, ExcNotImplemented()); - - case 3: - switch (dimensions.second) - { - case 3: - do_convert <3,3> (); - return; - } - AssertThrow (false, ExcNotImplemented()); + case 1: + switch (dimensions.second) + { + case 1: + do_convert <1,1> (); + return; + + case 2: + do_convert <1,2> (); + return; + } + AssertThrow (false, ExcNotImplemented()); + + case 2: + switch (dimensions.second) + { + case 2: + do_convert <2,2> (); + return; + + case 3: + do_convert <2,3> (); + return; + } + AssertThrow (false, ExcNotImplemented()); + + case 3: + switch (dimensions.second) + { + case 3: + do_convert <3,3> (); + return; + } + AssertThrow (false, ExcNotImplemented()); } AssertThrow (false, ExcNotImplemented()); @@ -625,18 +625,18 @@ namespace Step19 - // @sect4{main()} + // @sect4{main()} - // Finally, the main program. 
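Before the main program, one more remark on the dispatch above: the "function in DataOutBase that exists for this purpose" is, if I read the library correctly, determine_intermediate_format_dimensions. Treat the function name and the file name below as assumptions; the sketch only illustrates how the pair feeding the nested switch is obtained:

std::ifstream input ("solution-0001.d2");        // made-up intermediate file
const std::pair<unsigned int, unsigned int> dimensions
  = DataOutBase::determine_intermediate_format_dimensions (input);
// dimensions.first is the dimension of the output objects,
// dimensions.second the space dimension; the switches dispatch on these two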
There is not - // much more to do than to make sure - // parameters are declared, the command line - // is parsed (which includes reading - // parameter files), and finally making sure - // the input files are read and output is - // generated. Everything else just has to do - // with handling exceptions and making sure - // that appropriate output is generated if - // one is thrown. + // Finally, the main program. There is not + // much more to do than to make sure + // parameters are declared, the command line + // is parsed (which includes reading + // parameter files), and finally making sure + // the input files are read and output is + // generated. Everything else just has to do + // with handling exceptions and making sure + // that appropriate output is generated if + // one is thrown. int main (int argc, char ** argv) { try @@ -651,25 +651,25 @@ int main (int argc, char ** argv) catch (std::exception &exc) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Exception on processing: " << std::endl - << exc.what() << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << exc.what() << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } catch (...) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Unknown exception!" << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; }; diff --git a/deal.II/examples/step-2/step-2.cc b/deal.II/examples/step-2/step-2.cc index c9e00366fb..ead86f462b 100644 --- a/deal.II/examples/step-2/step-2.cc +++ b/deal.II/examples/step-2/step-2.cc @@ -9,77 +9,77 @@ /* to the file deal.II/doc/license.html for the text and */ /* further information on this license. */ - // The first few includes are just - // like in the previous program, so - // do not require additional comments: + // The first few includes are just + // like in the previous program, so + // do not require additional comments: #include #include #include #include #include - // However, the next file is new. We need - // this include file for the association of - // degrees of freedom ("DoF"s) to vertices, - // lines, and cells: + // However, the next file is new. We need + // this include file for the association of + // degrees of freedom ("DoF"s) to vertices, + // lines, and cells: #include - // The following include contains the - // description of the bilinear finite - // element, including the facts that - // it has one degree of freedom on - // each vertex of the triangulation, - // but none on faces and none in the - // interior of the cells. - // - // (In fact, the file contains the - // description of Lagrange elements in - // general, i.e. also the quadratic, cubic, - // etc versions, and not only for 2d but also - // 1d and 3d.) 
+ // The following include contains the + // description of the bilinear finite + // element, including the facts that + // it has one degree of freedom on + // each vertex of the triangulation, + // but none on faces and none in the + // interior of the cells. + // + // (In fact, the file contains the + // description of Lagrange elements in + // general, i.e. also the quadratic, cubic, + // etc versions, and not only for 2d but also + // 1d and 3d.) #include - // In the following file, several - // tools for manipulating degrees of - // freedom can be found: + // In the following file, several + // tools for manipulating degrees of + // freedom can be found: #include - // We will use a sparse matrix to - // visualize the pattern of nonzero - // entries resulting from the - // distribution of degrees of freedom - // on the grid. That class can be - // found here: + // We will use a sparse matrix to + // visualize the pattern of nonzero + // entries resulting from the + // distribution of degrees of freedom + // on the grid. That class can be + // found here: #include - // We will also need to use an - // intermediate sparsity patter - // structure, which is found in this - // file: + // We will also need to use an + // intermediate sparsity patter + // structure, which is found in this + // file: #include - // We will want to use a special - // algorithm to renumber degrees of - // freedom. It is declared here: + // We will want to use a special + // algorithm to renumber degrees of + // freedom. It is declared here: #include - // And this is again needed for C++ output: + // And this is again needed for C++ output: #include - // Finally, as in step-1, we import - // the deal.II namespace into the - // global scope: + // Finally, as in step-1, we import + // the deal.II namespace into the + // global scope: using namespace dealii; // @sect3{Mesh generation} - // This is the function that produced the - // circular grid in the previous step-1 - // example program. The sole difference is - // that it returns the grid it produces via - // its argument. - // - // The details of what the function does are - // explained in step-1. The only thing we - // would like to comment on is this: - // + // This is the function that produced the + // circular grid in the previous step-1 + // example program. The sole difference is + // that it returns the grid it produces via + // its argument. + // + // The details of what the function does are + // explained in step-1. 
The only thing we + // would like to comment on is this: + // // Since we want to export the triangulation // through this function's parameter, we need // to make sure that the boundary object @@ -98,9 +98,9 @@ void make_grid (Triangulation<2> &triangulation) { const Point<2> center (1,0); const double inner_radius = 0.5, - outer_radius = 1.0; + outer_radius = 1.0; GridGenerator::hyper_shell (triangulation, - center, inner_radius, outer_radius, + center, inner_radius, outer_radius, 10); static const HyperShellBoundary<2> boundary_description(center); @@ -109,23 +109,23 @@ void make_grid (Triangulation<2> &triangulation) for (unsigned int step=0; step<5; ++step) { Triangulation<2>::active_cell_iterator - cell = triangulation.begin_active(), - endc = triangulation.end(); + cell = triangulation.begin_active(), + endc = triangulation.end(); for (; cell!=endc; ++cell) - for (unsigned int v=0; - v < GeometryInfo<2>::vertices_per_cell; - ++v) - { + for (unsigned int v=0; + v < GeometryInfo<2>::vertices_per_cell; + ++v) + { const double distance_from_center = center.distance (cell->vertex(v)); - if (std::fabs(distance_from_center - inner_radius) < 1e-10) - { - cell->set_refine_flag (); - break; - } - } + if (std::fabs(distance_from_center - inner_radius) < 1e-10) + { + cell->set_refine_flag (); + break; + } + } triangulation.execute_coarsening_and_refinement (); } @@ -133,20 +133,20 @@ void make_grid (Triangulation<2> &triangulation) // @sect3{Creation of a DoFHandler} - // Up to now, we only have a grid, i.e. some - // geometrical (the position of the vertices) - // and some topological information (how - // vertices are connected to lines, and lines - // to cells, as well as which cells neighbor - // which other cells). To use numerical - // algorithms, one needs some logic - // information in addition to that: we would - // like to associate degree of freedom - // numbers to each vertex (or line, or cell, - // in case we were using higher order - // elements) to later generate matrices and - // vectors which describe a finite element - // field on the triangulation. + // Up to now, we only have a grid, i.e. some + // geometrical (the position of the vertices) + // and some topological information (how + // vertices are connected to lines, and lines + // to cells, as well as which cells neighbor + // which other cells). To use numerical + // algorithms, one needs some logic + // information in addition to that: we would + // like to associate degree of freedom + // numbers to each vertex (or line, or cell, + // in case we were using higher order + // elements) to later generate matrices and + // vectors which describe a finite element + // field on the triangulation. // // This function shows how to do this. The // object to consider is the DoFHandler @@ -203,193 +203,193 @@ void distribute_dofs (DoFHandler<2> &dof_handler) static const FE_Q<2> finite_element(1); dof_handler.distribute_dofs (finite_element); - // Now that we have associated a degree of - // freedom with a global number to each - // vertex, we wonder how to visualize this? - // There is no simple way to directly - // visualize the DoF number associated with - // each vertex. However, such information - // would hardly ever be truly important, - // since the numbering itself is more or - // less arbitrary. There are more important - // factors, of which we will demonstrate - // one in the following. - // - // Associated with each vertex of the - // triangulation is a shape - // function. 
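Since distribute_dofs() above attaches exactly one unknown to each vertex (that is what the bilinear FE_Q<2>(1) element does), a quick sanity check is possible. A sketch, assuming the triangulation and dof_handler set up by the two functions above:

std::cout << "used vertices: " << triangulation.n_used_vertices () << '\n'
          << "dofs:          " << dof_handler.n_dofs ()            << std::endl;
// for FE_Q<2>(1) the two numbers coincide: one DoF per vertex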
Assume we want to solve - // something like Laplace's equation, then - // the different matrix entries will be the - // integrals over the gradient of each pair - // of such shape functions. Obviously, - // since the shape functions are nonzero - // only on the cells adjacent to the vertex - // they are associated with, matrix entries - // will be nonzero only if the supports of - // the shape functions associated to that - // column and row %numbers intersect. This - // is only the case for adjacent shape - // functions, and therefore only for - // adjacent vertices. Now, since the - // vertices are numbered more or less - // randomly by the above function - // (DoFHandler::distribute_dofs), the - // pattern of nonzero entries in the matrix - // will be somewhat ragged, and we will - // take a look at it now. - // - // First we have to create a - // structure which we use to store - // the places of nonzero - // elements. This can then later be - // used by one or more sparse - // matrix objects that store the - // values of the entries in the - // locations stored by this - // sparsity pattern. The class that - // stores the locations is the - // SparsityPattern class. As it - // turns out, however, this class - // has some drawbacks when we try - // to fill it right away: its data - // structures are set up in such a - // way that we need to have an - // estimate for the maximal number - // of entries we may wish to have - // in each row. In two space - // dimensions, reasonable values - // for this estimate are available - // through the - // DoFHandler::max_couplings_between_dofs() - // function, but in three - // dimensions the function almost - // always severely overestimates - // the true number, leading to a - // lot of wasted memory, sometimes - // too much for the machine used, - // even if the unused memory can be - // released immediately after - // computing the sparsity - // pattern. In order to avoid this, - // we use an intermediate object of - // type CompressedSparsityPattern - // that uses a different %internal - // data structure and that we can - // later copy into the - // SparsityPattern object without - // much overhead. (Some more - // information on these data - // structures can be found in the - // @ref Sparsity module.) In order - // to initialize this intermediate - // data structure, we have to give - // it the size of the matrix, which - // in our case will be square with - // as many rows and columns as - // there are degrees of freedom on - // the grid: + // Now that we have associated a degree of + // freedom with a global number to each + // vertex, we wonder how to visualize this? + // There is no simple way to directly + // visualize the DoF number associated with + // each vertex. However, such information + // would hardly ever be truly important, + // since the numbering itself is more or + // less arbitrary. There are more important + // factors, of which we will demonstrate + // one in the following. + // + // Associated with each vertex of the + // triangulation is a shape + // function. Assume we want to solve + // something like Laplace's equation, then + // the different matrix entries will be the + // integrals over the gradient of each pair + // of such shape functions. Obviously, + // since the shape functions are nonzero + // only on the cells adjacent to the vertex + // they are associated with, matrix entries + // will be nonzero only if the supports of + // the shape functions associated to that + // column and row %numbers intersect. 
This + // is only the case for adjacent shape + // functions, and therefore only for + // adjacent vertices. Now, since the + // vertices are numbered more or less + // randomly by the above function + // (DoFHandler::distribute_dofs), the + // pattern of nonzero entries in the matrix + // will be somewhat ragged, and we will + // take a look at it now. + // + // First we have to create a + // structure which we use to store + // the places of nonzero + // elements. This can then later be + // used by one or more sparse + // matrix objects that store the + // values of the entries in the + // locations stored by this + // sparsity pattern. The class that + // stores the locations is the + // SparsityPattern class. As it + // turns out, however, this class + // has some drawbacks when we try + // to fill it right away: its data + // structures are set up in such a + // way that we need to have an + // estimate for the maximal number + // of entries we may wish to have + // in each row. In two space + // dimensions, reasonable values + // for this estimate are available + // through the + // DoFHandler::max_couplings_between_dofs() + // function, but in three + // dimensions the function almost + // always severely overestimates + // the true number, leading to a + // lot of wasted memory, sometimes + // too much for the machine used, + // even if the unused memory can be + // released immediately after + // computing the sparsity + // pattern. In order to avoid this, + // we use an intermediate object of + // type CompressedSparsityPattern + // that uses a different %internal + // data structure and that we can + // later copy into the + // SparsityPattern object without + // much overhead. (Some more + // information on these data + // structures can be found in the + // @ref Sparsity module.) In order + // to initialize this intermediate + // data structure, we have to give + // it the size of the matrix, which + // in our case will be square with + // as many rows and columns as + // there are degrees of freedom on + // the grid: CompressedSparsityPattern compressed_sparsity_pattern(dof_handler.n_dofs(), - dof_handler.n_dofs()); + dof_handler.n_dofs()); - // We then fill this object with the - // places where nonzero elements will be - // located given the present numbering of - // degrees of freedom: + // We then fill this object with the + // places where nonzero elements will be + // located given the present numbering of + // degrees of freedom: DoFTools::make_sparsity_pattern (dof_handler, compressed_sparsity_pattern); - // Now we are ready to create the actual - // sparsity pattern that we could later use - // for our matrix. It will just contain the - // data already assembled in the - // CompressedSparsityPattern. + // Now we are ready to create the actual + // sparsity pattern that we could later use + // for our matrix. It will just contain the + // data already assembled in the + // CompressedSparsityPattern. SparsityPattern sparsity_pattern; sparsity_pattern.copy_from (compressed_sparsity_pattern); - // With this, we can now write the results - // to a file: + // With this, we can now write the results + // to a file: std::ofstream out ("sparsity_pattern.1"); sparsity_pattern.print_gnuplot (out); - // The result is in GNUPLOT format, - // where in each line of the output - // file, the coordinates of one - // nonzero entry are listed. The - // output will be shown below. - // - // If you look at it, you will note that - // the sparsity pattern is symmetric. 
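For comparison, the direct route that the discussion above argues against would look roughly like this. It is fine in 2d but wasteful in 3d, because the per-row estimate severely overestimates the true number of entries; the sketch again uses the dof_handler from above:

SparsityPattern direct_pattern (dof_handler.n_dofs (),
                                dof_handler.n_dofs (),
                                dof_handler.max_couplings_between_dofs ());
DoFTools::make_sparsity_pattern (dof_handler, direct_pattern);
direct_pattern.compress ();    // drop the over-allocated, unused entries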
This - // should not come as a surprise, since we - // have not given the - // DoFTools::make_sparsity_pattern any - // information that would indicate that our - // bilinear form may couple shape functions - // in a non-symmetric way. You will also - // note that it has several distinct - // region, which stem from the fact that - // the numbering starts from the coarsest - // cells and moves on to the finer ones; - // since they are all distributed - // symmetrically around the origin, this - // shows up again in the sparsity pattern. + // The result is in GNUPLOT format, + // where in each line of the output + // file, the coordinates of one + // nonzero entry are listed. The + // output will be shown below. + // + // If you look at it, you will note that + // the sparsity pattern is symmetric. This + // should not come as a surprise, since we + // have not given the + // DoFTools::make_sparsity_pattern any + // information that would indicate that our + // bilinear form may couple shape functions + // in a non-symmetric way. You will also + // note that it has several distinct + // region, which stem from the fact that + // the numbering starts from the coarsest + // cells and moves on to the finer ones; + // since they are all distributed + // symmetrically around the origin, this + // shows up again in the sparsity pattern. } // @sect3{Renumbering of DoFs} - // In the sparsity pattern produced above, - // the nonzero entries extended quite far off - // from the diagonal. For some algorithms, - // for example for incomplete LU - // decompositions or Gauss-Seidel - // preconditioners, this is unfavorable, and - // we will show a simple way how to improve - // this situation. - // - // Remember that for an entry $(i,j)$ - // in the matrix to be nonzero, the - // supports of the shape functions i - // and j needed to intersect - // (otherwise in the integral, the - // integrand would be zero everywhere - // since either the one or the other - // shape function is zero at some - // point). However, the supports of - // shape functions intersected only - // if they were adjacent to each - // other, so in order to have the - // nonzero entries clustered around - // the diagonal (where $i$ equals $j$), - // we would like to have adjacent - // shape functions to be numbered - // with indices (DoF numbers) that - // differ not too much. - // - // This can be accomplished by a - // simple front marching algorithm, - // where one starts at a given vertex - // and gives it the index zero. Then, - // its neighbors are numbered - // successively, making their indices - // close to the original one. Then, - // their neighbors, if not yet - // numbered, are numbered, and so - // on. - // - // One algorithm that adds a little bit of - // sophistication along these lines is the - // one by Cuthill and McKee. We will use it - // in the following function to renumber the - // degrees of freedom such that the resulting - // sparsity pattern is more localized around - // the diagonal. The only interesting part of - // the function is the first call to - // DoFRenumbering::Cuthill_McKee, the - // rest is essentially as before: + // In the sparsity pattern produced above, + // the nonzero entries extended quite far off + // from the diagonal. For some algorithms, + // for example for incomplete LU + // decompositions or Gauss-Seidel + // preconditioners, this is unfavorable, and + // we will show a simple way how to improve + // this situation. 
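A number that makes this "improvement" measurable is the bandwidth of the pattern, i.e. the largest distance of any nonzero entry from the diagonal. A sketch, using the sparsity_pattern object built above:

std::cout << "bandwidth: " << sparsity_pattern.bandwidth () << std::endl;
// printing this once before and once after DoFRenumbering::Cuthill_McKee()
// shows the effect of the renumbering as a single number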
+ // + // Remember that for an entry $(i,j)$ + // in the matrix to be nonzero, the + // supports of the shape functions i + // and j needed to intersect + // (otherwise in the integral, the + // integrand would be zero everywhere + // since either the one or the other + // shape function is zero at some + // point). However, the supports of + // shape functions intersected only + // if they were adjacent to each + // other, so in order to have the + // nonzero entries clustered around + // the diagonal (where $i$ equals $j$), + // we would like to have adjacent + // shape functions to be numbered + // with indices (DoF numbers) that + // differ not too much. + // + // This can be accomplished by a + // simple front marching algorithm, + // where one starts at a given vertex + // and gives it the index zero. Then, + // its neighbors are numbered + // successively, making their indices + // close to the original one. Then, + // their neighbors, if not yet + // numbered, are numbered, and so + // on. + // + // One algorithm that adds a little bit of + // sophistication along these lines is the + // one by Cuthill and McKee. We will use it + // in the following function to renumber the + // degrees of freedom such that the resulting + // sparsity pattern is more localized around + // the diagonal. The only interesting part of + // the function is the first call to + // DoFRenumbering::Cuthill_McKee, the + // rest is essentially as before: void renumber_dofs (DoFHandler<2> &dof_handler) { DoFRenumbering::Cuthill_McKee (dof_handler); CompressedSparsityPattern compressed_sparsity_pattern(dof_handler.n_dofs(), - dof_handler.n_dofs()); + dof_handler.n_dofs()); DoFTools::make_sparsity_pattern (dof_handler, compressed_sparsity_pattern); SparsityPattern sparsity_pattern; @@ -431,12 +431,12 @@ void renumber_dofs (DoFHandler<2> &dof_handler) // @sect3{The main function} - // Finally, this is the main program. The - // only thing it does is to allocate and - // create the triangulation, then create a - // DoFHandler object and associate it to - // the triangulation, and finally call above - // two functions on it: + // Finally, this is the main program. The + // only thing it does is to allocate and + // create the triangulation, then create a + // DoFHandler object and associate it to + // the triangulation, and finally call above + // two functions on it: int main () { Triangulation<2> triangulation; diff --git a/deal.II/examples/step-20/step-20.cc b/deal.II/examples/step-20/step-20.cc index 4f1e083786..b0553e0126 100644 --- a/deal.II/examples/step-20/step-20.cc +++ b/deal.II/examples/step-20/step-20.cc @@ -11,14 +11,14 @@ // @sect3{Include files} - // Since this program is only an - // adaptation of step-4, there is not - // much new stuff in terms of header - // files. In deal.II, we usually list - // include files in the order - // base-lac-grid-dofs-fe-numerics, - // followed by C++ standard include - // files: + // Since this program is only an + // adaptation of step-4, there is not + // much new stuff in terms of header + // files. In deal.II, we usually list + // include files in the order + // base-lac-grid-dofs-fe-numerics, + // followed by C++ standard include + // files: #include #include #include @@ -27,11 +27,11 @@ #include #include #include - // For our Schur complement solver, - // we need two new objects. One is a - // matrix object which acts as the - // inverse of a matrix by calling an - // iterative solver. + // For our Schur complement solver, + // we need two new objects. 
One is a + // matrix object which acts as the + // inverse of a matrix by calling an + // iterative solver. #include #include @@ -52,47 +52,47 @@ #include #include - // This is the only significant new - // header, namely the one in which - // the Raviart-Thomas finite element - // is declared: + // This is the only significant new + // header, namely the one in which + // the Raviart-Thomas finite element + // is declared: #include - // Finally, as a bonus in this - // program, we will use a tensorial - // coefficient. Since it may have a - // spatial dependence, we consider it - // a tensor-valued function. The - // following include file provides - // the TensorFunction class that - // offers such functionality: + // Finally, as a bonus in this + // program, we will use a tensorial + // coefficient. Since it may have a + // spatial dependence, we consider it + // a tensor-valued function. The + // following include file provides + // the TensorFunction class that + // offers such functionality: #include - // The last step is as in all - // previous programs: + // The last step is as in all + // previous programs: namespace Step20 { using namespace dealii; - // @sect3{The MixedLaplaceProblem class template} - - // Again, since this is an adaptation - // of step-6, the main class is - // almost the same as the one in that - // tutorial program. In terms of - // member functions, the main - // differences are that the - // constructor takes the degree of - // the Raviart-Thomas element as an - // argument (and that there is a - // corresponding member variable to - // store this value) and the addition - // of the compute_error function - // in which, no surprise, we will - // compute the difference between the - // exact and the numerical solution - // to determine convergence of our - // computations: + // @sect3{The MixedLaplaceProblem class template} + + // Again, since this is an adaptation + // of step-6, the main class is + // almost the same as the one in that + // tutorial program. In terms of + // member functions, the main + // differences are that the + // constructor takes the degree of + // the Raviart-Thomas element as an + // argument (and that there is a + // corresponding member variable to + // store this value) and the addition + // of the compute_error function + // in which, no surprise, we will + // compute the difference between the + // exact and the numerical solution + // to determine convergence of our + // computations: template class MixedLaplaceProblem { @@ -113,19 +113,19 @@ namespace Step20 FESystem fe; DoFHandler dof_handler; - // The second difference is that - // the sparsity pattern, the - // system matrix, and solution - // and right hand side vectors - // are now blocked. What this - // means and what one can do with - // such objects is explained in - // the introduction to this - // program as well as further - // down below when we explain the - // linear solvers and - // preconditioners for this - // problem: + // The second difference is that + // the sparsity pattern, the + // system matrix, and solution + // and right hand side vectors + // are now blocked. 
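To make "blocked" concrete: such a vector is built from the sizes of its blocks, and each block can afterwards be used as a vector in its own right. A sketch, where n_u and n_p stand for the (assumed, to be computed later) numbers of velocity and pressure unknowns:

BlockVector<double> solution;
solution.reinit (2);
solution.block(0).reinit (n_u);    // velocity part
solution.block(1).reinit (n_p);    // pressure part
solution.collect_sizes ();

solution.block(1) = 0;             // e.g. address only the pressure block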
What this + // means and what one can do with + // such objects is explained in + // the introduction to this + // program as well as further + // down below when we explain the + // linear solvers and + // preconditioners for this + // problem: BlockSparsityPattern sparsity_pattern; BlockSparseMatrix system_matrix; @@ -134,29 +134,29 @@ namespace Step20 }; - // @sect3{Right hand side, boundary values, and exact solution} - - // Our next task is to define the - // right hand side of our problem - // (i.e., the scalar right hand side - // for the pressure in the original - // Laplace equation), boundary values - // for the pressure, as well as a - // function that describes both the - // pressure and the velocity of the - // exact solution for later - // computations of the error. Note - // that these functions have one, - // one, and dim+1 components, - // respectively, and that we pass the - // number of components down to the - // Function@ base class. For - // the exact solution, we only - // declare the function that actually - // returns the entire solution vector - // (i.e. all components of it) at - // once. Here are the respective - // declarations: + // @sect3{Right hand side, boundary values, and exact solution} + + // Our next task is to define the + // right hand side of our problem + // (i.e., the scalar right hand side + // for the pressure in the original + // Laplace equation), boundary values + // for the pressure, as well as a + // function that describes both the + // pressure and the velocity of the + // exact solution for later + // computations of the error. Note + // that these functions have one, + // one, and dim+1 components, + // respectively, and that we pass the + // number of components down to the + // Function@ base class. For + // the exact solution, we only + // declare the function that actually + // returns the entire solution vector + // (i.e. all components of it) at + // once. Here are the respective + // declarations: template class RightHandSide : public Function { @@ -164,7 +164,7 @@ namespace Step20 RightHandSide () : Function(1) {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; }; @@ -176,7 +176,7 @@ namespace Step20 PressureBoundaryValues () : Function(1) {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; }; @@ -187,20 +187,20 @@ namespace Step20 ExactSolution () : Function(dim+1) {} virtual void vector_value (const Point &p, - Vector &value) const; + Vector &value) const; }; - // And then we also have to define - // these respective functions, of - // course. Given our discussion in - // the introduction of how the - // solution should look like, the - // following computations should be - // straightforward: + // And then we also have to define + // these respective functions, of + // course. 
Given our discussion in + // the introduction of how the + // solution should look like, the + // following computations should be + // straightforward: template double RightHandSide::value (const Point &/*p*/, - const unsigned int /*component*/) const + const unsigned int /*component*/) const { return 0; } @@ -209,7 +209,7 @@ namespace Step20 template double PressureBoundaryValues::value (const Point &p, - const unsigned int /*component*/) const + const unsigned int /*component*/) const { const double alpha = 0.3; const double beta = 1; @@ -221,10 +221,10 @@ namespace Step20 template void ExactSolution::vector_value (const Point &p, - Vector &values) const + Vector &values) const { Assert (values.size() == dim+1, - ExcDimensionMismatch (values.size(), dim+1)); + ExcDimensionMismatch (values.size(), dim+1)); const double alpha = 0.3; const double beta = 1; @@ -236,54 +236,54 @@ namespace Step20 - // @sect3{The inverse permeability tensor} - - // In addition to the other equation - // data, we also want to use a - // permeability tensor, or better -- - // because this is all that appears - // in the weak form -- the inverse of - // the permeability tensor, - // KInverse. For the purpose of - // verifying the exactness of the - // solution and determining - // convergence orders, this tensor is - // more in the way than helpful. We - // will therefore simply set it to - // the identity matrix. - // - // However, a spatially varying - // permeability tensor is - // indispensable in real-life porous - // media flow simulations, and we - // would like to use the opportunity - // to demonstrate the technique to - // use tensor valued functions. - // - // Possibly unsurprising, deal.II - // also has a base class not only for - // scalar and generally vector-valued - // functions (the Function base - // class) but also for functions that - // return tensors of fixed dimension - // and rank, the TensorFunction - // template. Here, the function under - // consideration returns a dim-by-dim - // matrix, i.e. a tensor of rank 2 - // and dimension dim. We then - // choose the template arguments of - // the base class appropriately. - // - // The interface that the - // TensorFunction class provides - // is essentially equivalent to the - // Function class. In particular, - // there exists a value_list - // function that takes a list of - // points at which to evaluate the - // function, and returns the values - // of the function in the second - // argument, a list of tensors: + // @sect3{The inverse permeability tensor} + + // In addition to the other equation + // data, we also want to use a + // permeability tensor, or better -- + // because this is all that appears + // in the weak form -- the inverse of + // the permeability tensor, + // KInverse. For the purpose of + // verifying the exactness of the + // solution and determining + // convergence orders, this tensor is + // more in the way than helpful. We + // will therefore simply set it to + // the identity matrix. + // + // However, a spatially varying + // permeability tensor is + // indispensable in real-life porous + // media flow simulations, and we + // would like to use the opportunity + // to demonstrate the technique to + // use tensor valued functions. + // + // Possibly unsurprising, deal.II + // also has a base class not only for + // scalar and generally vector-valued + // functions (the Function base + // class) but also for functions that + // return tensors of fixed dimension + // and rank, the TensorFunction + // template. 
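Before the identity-valued KInverse below, here is a sketch of what the spatially variable case alluded to above could look like; the particular coefficient, growing with the distance from the origin, is invented purely for illustration:

template <int dim>
class VariableKInverse : public TensorFunction<2,dim>
{
  public:
    virtual void value_list (const std::vector<Point<dim> > &points,
                             std::vector<Tensor<2,dim> >    &values) const
    {
      Assert (points.size() == values.size(),
              ExcDimensionMismatch (points.size(), values.size()));

      for (unsigned int p=0; p<points.size(); ++p)
        {
          values[p].clear ();
          // a made-up, position dependent (inverse) permeability
          const double k_inverse = 1. + points[p].square ();
          for (unsigned int d=0; d<dim; ++d)
            values[p][d][d] = k_inverse;
        }
    }
};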
Here, the function under + // consideration returns a dim-by-dim + // matrix, i.e. a tensor of rank 2 + // and dimension dim. We then + // choose the template arguments of + // the base class appropriately. + // + // The interface that the + // TensorFunction class provides + // is essentially equivalent to the + // Function class. In particular, + // there exists a value_list + // function that takes a list of + // points at which to evaluate the + // function, and returns the values + // of the function in the second + // argument, a list of tensors: template class KInverse : public TensorFunction<2,dim> { @@ -291,117 +291,117 @@ namespace Step20 KInverse () : TensorFunction<2,dim>() {} virtual void value_list (const std::vector > &points, - std::vector > &values) const; + std::vector > &values) const; }; - // The implementation is less - // interesting. As in previous - // examples, we add a check to the - // beginning of the class to make - // sure that the sizes of input and - // output parameters are the same - // (see step-5 for a discussion of - // this technique). Then we loop over - // all evaluation points, and for - // each one first clear the output - // tensor and then set all its - // diagonal elements to one - // (i.e. fill the tensor with the - // identity matrix): + // The implementation is less + // interesting. As in previous + // examples, we add a check to the + // beginning of the class to make + // sure that the sizes of input and + // output parameters are the same + // (see step-5 for a discussion of + // this technique). Then we loop over + // all evaluation points, and for + // each one first clear the output + // tensor and then set all its + // diagonal elements to one + // (i.e. fill the tensor with the + // identity matrix): template void KInverse::value_list (const std::vector > &points, - std::vector > &values) const + std::vector > &values) const { Assert (points.size() == values.size(), - ExcDimensionMismatch (points.size(), values.size())); + ExcDimensionMismatch (points.size(), values.size())); for (unsigned int p=0; pfe variable. The - // FESystem class to which this - // variable belongs has a number of - // different constructors that all - // refer to binding simpler elements - // together into one larger - // element. In the present case, we - // want to couple a single RT(degree) - // element with a single DQ(degree) - // element. The constructor to - // FESystem that does this - // requires us to specity first the - // first base element (the - // FE_RaviartThomas object of - // given degree) and then the number - // of copies for this base element, - // and then similarly the kind and - // number of FE_DGQ - // elements. Note that the Raviart - // Thomas element already has dim - // vector components, so that the - // coupled element will have - // dim+1 vector components, the - // first dim of which correspond - // to the velocity variable whereas the - // last one corresponds to the - // pressure. - // - // It is also worth comparing the way - // we constructed this element from - // its base elements, with the way we - // have done so in step-8: there, we - // have built it as fe - // (FE_Q@(1), dim), i.e. we - // have simply used dim copies of - // the FE_Q(1) element, one copy - // for the displacement in each - // coordinate direction. 
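  // As a side-by-side illustration of the two ways of building
  // vector-valued elements compared in the paragraph above, the
  // following minimal, self-contained sketch spells out both FESystem
  // constructor calls. The choices dim=2 and degree=0 are made purely
  // for the purpose of this sketch, and the include directives assume
  // the usual deal.II header layout:

#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/fe_raviart_thomas.h>
#include <deal.II/fe/fe_dgq.h>
#include <deal.II/fe/fe_q.h>

using namespace dealii;

// The mixed element of this program: one RT(0) element (which by itself
// already has dim vector components) paired with one DG(0) element, for
// a total of dim+1 components:
FESystem<2> mixed_fe (FE_RaviartThomas<2>(0), 1,
                      FE_DGQ<2>(0),           1);

// The step-8 style element: dim identical copies of FE_Q(1), one copy
// per displacement component, for a total of dim components:
FESystem<2> vector_fe (FE_Q<2>(1), 2);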
+ // @sect3{MixedLaplaceProblem class implementation} + + // @sect4{MixedLaplaceProblem::MixedLaplaceProblem} + + // In the constructor of this class, + // we first store the value that was + // passed in concerning the degree of + // the finite elements we shall use + // (a degree of zero, for example, + // means to use RT(0) and DG(0)), and + // then construct the vector valued + // element belonging to the space X_h + // described in the introduction. The + // rest of the constructor is as in + // the early tutorial programs. + // + // The only thing worth describing + // here is the constructor call of + // the fe variable. The + // FESystem class to which this + // variable belongs has a number of + // different constructors that all + // refer to binding simpler elements + // together into one larger + // element. In the present case, we + // want to couple a single RT(degree) + // element with a single DQ(degree) + // element. The constructor to + // FESystem that does this + // requires us to specity first the + // first base element (the + // FE_RaviartThomas object of + // given degree) and then the number + // of copies for this base element, + // and then similarly the kind and + // number of FE_DGQ + // elements. Note that the Raviart + // Thomas element already has dim + // vector components, so that the + // coupled element will have + // dim+1 vector components, the + // first dim of which correspond + // to the velocity variable whereas the + // last one corresponds to the + // pressure. + // + // It is also worth comparing the way + // we constructed this element from + // its base elements, with the way we + // have done so in step-8: there, we + // have built it as fe + // (FE_Q@(1), dim), i.e. we + // have simply used dim copies of + // the FE_Q(1) element, one copy + // for the displacement in each + // coordinate direction. template MixedLaplaceProblem::MixedLaplaceProblem (const unsigned int degree) - : - degree (degree), - fe (FE_RaviartThomas(degree), 1, - FE_DGQ(degree), 1), - dof_handler (triangulation) + : + degree (degree), + fe (FE_RaviartThomas(degree), 1, + FE_DGQ(degree), 1), + dof_handler (triangulation) {} - // @sect4{MixedLaplaceProblem::make_grid_and_dofs} + // @sect4{MixedLaplaceProblem::make_grid_and_dofs} - // This next function starts out with - // well-known functions calls that - // create and refine a mesh, and then - // associate degrees of freedom with - // it: + // This next function starts out with + // well-known functions calls that + // create and refine a mesh, and then + // associate degrees of freedom with + // it: template void MixedLaplaceProblem::make_grid_and_dofs () { @@ -410,100 +410,100 @@ namespace Step20 dof_handler.distribute_dofs (fe); - // However, then things become - // different. As mentioned in the - // introduction, we want to - // subdivide the matrix into blocks - // corresponding to the two - // different kinds of variables, - // velocity and pressure. To this end, - // we first have to make sure that - // the indices corresponding to - // velocities and pressures are not - // intermingled: First all velocity - // degrees of freedom, then all - // pressure DoFs. This way, the - // global matrix separates nicely - // into a 2x2 system. To achieve - // this, we have to renumber - // degrees of freedom base on their - // vector component, an operation - // that conveniently is already - // implemented: + // However, then things become + // different. 
As mentioned in the + // introduction, we want to + // subdivide the matrix into blocks + // corresponding to the two + // different kinds of variables, + // velocity and pressure. To this end, + // we first have to make sure that + // the indices corresponding to + // velocities and pressures are not + // intermingled: First all velocity + // degrees of freedom, then all + // pressure DoFs. This way, the + // global matrix separates nicely + // into a 2x2 system. To achieve + // this, we have to renumber + // degrees of freedom base on their + // vector component, an operation + // that conveniently is already + // implemented: DoFRenumbering::component_wise (dof_handler); - // The next thing is that we want - // to figure out the sizes of these - // blocks, so that we can allocate - // an appropriate amount of - // space. To this end, we call the - // DoFTools::count_dofs_per_component - // function that counts how many - // shape functions are non-zero for - // a particular vector - // component. We have dim+1 - // vector components, and we have - // to use the knowledge that for - // Raviart-Thomas elements all - // shape functions are nonzero in - // all components. In other words, - // the number of velocity shape - // functions equals the number of - // overall shape functions that are - // nonzero in the zeroth vector - // component. On the other hand, - // the number of pressure variables - // equals the number of shape - // functions that are nonzero in - // the dim-th component. Let us - // compute these numbers and then - // create some nice output with - // that: + // The next thing is that we want + // to figure out the sizes of these + // blocks, so that we can allocate + // an appropriate amount of + // space. To this end, we call the + // DoFTools::count_dofs_per_component + // function that counts how many + // shape functions are non-zero for + // a particular vector + // component. We have dim+1 + // vector components, and we have + // to use the knowledge that for + // Raviart-Thomas elements all + // shape functions are nonzero in + // all components. In other words, + // the number of velocity shape + // functions equals the number of + // overall shape functions that are + // nonzero in the zeroth vector + // component. On the other hand, + // the number of pressure variables + // equals the number of shape + // functions that are nonzero in + // the dim-th component. Let us + // compute these numbers and then + // create some nice output with + // that: std::vector dofs_per_component (dim+1); DoFTools::count_dofs_per_component (dof_handler, dofs_per_component); const unsigned int n_u = dofs_per_component[0], - n_p = dofs_per_component[dim]; + n_p = dofs_per_component[dim]; std::cout << "Number of active cells: " - << triangulation.n_active_cells() - << std::endl - << "Total number of cells: " - << triangulation.n_cells() - << std::endl - << "Number of degrees of freedom: " - << dof_handler.n_dofs() - << " (" << n_u << '+' << n_p << ')' - << std::endl; - - // The next task is to allocate a - // sparsity pattern for the matrix - // that we will create. The way - // this works is that we first - // obtain a guess for the maximal - // number of nonzero entries per - // row (this could be done more - // efficiently in this case, but we - // only want to solve relatively - // small problems for which this is - // not so important). 
In the second - // step, we allocate a 2x2 block - // pattern and then reinitialize - // each of the blocks to its - // correct size using the n_u - // and n_p variables defined - // above that hold the number of - // velocity and pressure - // variables. In this second step, - // we only operate on the - // individual blocks of the - // system. In the third step, we - // therefore have to instruct the - // overlying block system to update - // its knowledge about the sizes of - // the blocks it manages; this - // happens with the - // sparsity_pattern.collect_sizes() - // call: + << triangulation.n_active_cells() + << std::endl + << "Total number of cells: " + << triangulation.n_cells() + << std::endl + << "Number of degrees of freedom: " + << dof_handler.n_dofs() + << " (" << n_u << '+' << n_p << ')' + << std::endl; + + // The next task is to allocate a + // sparsity pattern for the matrix + // that we will create. The way + // this works is that we first + // obtain a guess for the maximal + // number of nonzero entries per + // row (this could be done more + // efficiently in this case, but we + // only want to solve relatively + // small problems for which this is + // not so important). In the second + // step, we allocate a 2x2 block + // pattern and then reinitialize + // each of the blocks to its + // correct size using the n_u + // and n_p variables defined + // above that hold the number of + // velocity and pressure + // variables. In this second step, + // we only operate on the + // individual blocks of the + // system. In the third step, we + // therefore have to instruct the + // overlying block system to update + // its knowledge about the sizes of + // the blocks it manages; this + // happens with the + // sparsity_pattern.collect_sizes() + // call: const unsigned int n_couplings = dof_handler.max_couplings_between_dofs(); @@ -514,22 +514,22 @@ namespace Step20 sparsity_pattern.block(1,1).reinit (n_p, n_p, n_couplings); sparsity_pattern.collect_sizes(); - // Now that the sparsity pattern - // and its blocks have the correct - // sizes, we actually need to - // construct the content of this - // pattern, and as usual compress - // it, before we also initialize a - // block matrix with this block - // sparsity pattern: + // Now that the sparsity pattern + // and its blocks have the correct + // sizes, we actually need to + // construct the content of this + // pattern, and as usual compress + // it, before we also initialize a + // block matrix with this block + // sparsity pattern: DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); sparsity_pattern.compress(); system_matrix.reinit (sparsity_pattern); - // Then we have to resize the - // solution and right hand side - // vectors in exactly the same way: + // Then we have to resize the + // solution and right hand side + // vectors in exactly the same way: solution.reinit (2); solution.block(0).reinit (n_u); solution.block(1).reinit (n_p); @@ -542,25 +542,25 @@ namespace Step20 } - // @sect4{MixedLaplaceProblem::assemble_system} - // Similarly, the function that - // assembles the linear system has - // mostly been discussed already in - // the introduction to this - // example. At its top, what happens - // are all the usual steps, with the - // addition that we do not only - // allocate quadrature and - // FEValues objects for the cell - // terms, but also for face - // terms. 
After that, we define the - // usual abbreviations for variables, - // and the allocate space for the - // local matrix and right hand side - // contributions, and the array that - // holds the global numbers of the - // degrees of freedom local to the - // present cell. + // @sect4{MixedLaplaceProblem::assemble_system} + // Similarly, the function that + // assembles the linear system has + // mostly been discussed already in + // the introduction to this + // example. At its top, what happens + // are all the usual steps, with the + // addition that we do not only + // allocate quadrature and + // FEValues objects for the cell + // terms, but also for face + // terms. After that, we define the + // usual abbreviations for variables, + // and the allocate space for the + // local matrix and right hand side + // contributions, and the array that + // holds the global numbers of the + // degrees of freedom local to the + // present cell. template void MixedLaplaceProblem::assemble_system () { @@ -568,11 +568,11 @@ namespace Step20 QGauss face_quadrature_formula(degree+2); FEValues fe_values (fe, quadrature_formula, - update_values | update_gradients | - update_quadrature_points | update_JxW_values); + update_values | update_gradients | + update_quadrature_points | update_JxW_values); FEFaceValues fe_face_values (fe, face_quadrature_formula, - update_values | update_normal_vectors | - update_quadrature_points | update_JxW_values); + update_values | update_normal_vectors | + update_quadrature_points | update_JxW_values); const unsigned int dofs_per_cell = fe.dofs_per_cell; const unsigned int n_q_points = quadrature_formula.size(); @@ -583,20 +583,20 @@ namespace Step20 std::vector local_dof_indices (dofs_per_cell); - // The next step is to declare - // objects that represent the - // source term, pressure boundary - // value, and coefficient in the - // equation. In addition to these - // objects that represent - // continuous functions, we also - // need arrays to hold their values - // at the quadrature points of - // individual cells (or faces, for - // the boundary values). Note that - // in the case of the coefficient, - // the array has to be one of - // matrices. + // The next step is to declare + // objects that represent the + // source term, pressure boundary + // value, and coefficient in the + // equation. In addition to these + // objects that represent + // continuous functions, we also + // need arrays to hold their values + // at the quadrature points of + // individual cells (or faces, for + // the boundary values). Note that + // in the case of the coefficient, + // the array has to be one of + // matrices. const RightHandSide right_hand_side; const PressureBoundaryValues pressure_boundary_values; const KInverse k_inverse; @@ -605,180 +605,180 @@ namespace Step20 std::vector boundary_values (n_face_q_points); std::vector > k_inverse_values (n_q_points); - // Finally, we need a couple of extractors - // that we will use to get at the velocity - // and pressure components of vector-valued - // shape functions. Their function and use - // is described in detail in the @ref - // vector_valued report. 
Essentially, we - // will use them as subscripts on the - // FEValues objects below: the FEValues - // object describes all vector components - // of shape functions, while after - // subscription, it will only refer to the - // velocities (a set of dim - // components starting at component zero) - // or the pressure (a scalar component - // located at position dim): + // Finally, we need a couple of extractors + // that we will use to get at the velocity + // and pressure components of vector-valued + // shape functions. Their function and use + // is described in detail in the @ref + // vector_valued report. Essentially, we + // will use them as subscripts on the + // FEValues objects below: the FEValues + // object describes all vector components + // of shape functions, while after + // subscription, it will only refer to the + // velocities (a set of dim + // components starting at component zero) + // or the pressure (a scalar component + // located at position dim): const FEValuesExtractors::Vector velocities (0); const FEValuesExtractors::Scalar pressure (dim); - // With all this in place, we can - // go on with the loop over all - // cells. The body of this loop has - // been discussed in the - // introduction, and will not be - // commented any further here: + // With all this in place, we can + // go on with the loop over all + // cells. The body of this loop has + // been discussed in the + // introduction, and will not be + // commented any further here: typename DoFHandler::active_cell_iterator cell = dof_handler.begin_active(), endc = dof_handler.end(); for (; cell!=endc; ++cell) { - fe_values.reinit (cell); - local_matrix = 0; - local_rhs = 0; - - right_hand_side.value_list (fe_values.get_quadrature_points(), - rhs_values); - k_inverse.value_list (fe_values.get_quadrature_points(), - k_inverse_values); - - for (unsigned int q=0; q phi_i_u = fe_values[velocities].value (i, q); - const double div_phi_i_u = fe_values[velocities].divergence (i, q); - const double phi_i_p = fe_values[pressure].value (i, q); - - for (unsigned int j=0; j phi_j_u = fe_values[velocities].value (j, q); - const double div_phi_j_u = fe_values[velocities].divergence (j, q); - const double phi_j_p = fe_values[pressure].value (j, q); - - local_matrix(i,j) += (phi_i_u * k_inverse_values[q] * phi_j_u - - div_phi_i_u * phi_j_p - - phi_i_p * div_phi_j_u) - * fe_values.JxW(q); - } - - local_rhs(i) += -phi_i_p * - rhs_values[q] * - fe_values.JxW(q); - } - - for (unsigned int face_no=0; - face_no::faces_per_cell; - ++face_no) - if (cell->at_boundary(face_no)) - { - fe_face_values.reinit (cell, face_no); - - pressure_boundary_values - .value_list (fe_face_values.get_quadrature_points(), - boundary_values); - - for (unsigned int q=0; qget_dof_indices (local_dof_indices); - for (unsigned int i=0; i phi_i_u = fe_values[velocities].value (i, q); + const double div_phi_i_u = fe_values[velocities].divergence (i, q); + const double phi_i_p = fe_values[pressure].value (i, q); + + for (unsigned int j=0; j phi_j_u = fe_values[velocities].value (j, q); + const double div_phi_j_u = fe_values[velocities].divergence (j, q); + const double phi_j_p = fe_values[pressure].value (j, q); + + local_matrix(i,j) += (phi_i_u * k_inverse_values[q] * phi_j_u + - div_phi_i_u * phi_j_p + - phi_i_p * div_phi_j_u) + * fe_values.JxW(q); + } + + local_rhs(i) += -phi_i_p * + rhs_values[q] * + fe_values.JxW(q); + } + + for (unsigned int face_no=0; + face_no::faces_per_cell; + ++face_no) + if (cell->at_boundary(face_no)) + { + 
fe_face_values.reinit (cell, face_no); + + pressure_boundary_values + .value_list (fe_face_values.get_quadrature_points(), + boundary_values); + + for (unsigned int q=0; qget_dof_indices (local_dof_indices); + for (unsigned int i=0; iSchurComplement class template} - - // The next class is the Schur - // complement class. Its rationale - // has also been discussed in length - // in the introduction. The only - // things we would like to note is - // that the class, too, is derived - // from the Subscriptor class and - // that as mentioned above it stores - // pointers to the entire block - // matrix and the inverse of the mass - // matrix block using - // SmartPointer objects. - // - // The vmult function requires - // two temporary vectors that we do - // not want to re-allocate and free - // every time we call this - // function. Since here, we have full - // control over the use of these - // vectors (unlike above, where a - // class called by the vmult - // function required these vectors, - // not the vmult function - // itself), we allocate them - // directly, rather than going - // through the VectorMemory - // mechanism. However, again, these - // member variables do not carry any - // state between successive calls to - // the member functions of this class - // (i.e., we never care what values - // they were set to the last time a - // member function was called), we - // mark these vectors as mutable. - // - // The rest of the (short) - // implementation of this class is - // straightforward if you know the - // order of matrix-vector - // multiplications performed by the - // vmult function: + // @sect3{Linear solvers and preconditioners} + + // The linear solvers and + // preconditioners we use in this + // example have been discussed in + // significant detail already in the + // introduction. We will therefore + // not discuss the rationale for + // these classes here any more, but + // rather only comment on + // implementational aspects. + + + // @sect4{The SchurComplement class template} + + // The next class is the Schur + // complement class. Its rationale + // has also been discussed in length + // in the introduction. The only + // things we would like to note is + // that the class, too, is derived + // from the Subscriptor class and + // that as mentioned above it stores + // pointers to the entire block + // matrix and the inverse of the mass + // matrix block using + // SmartPointer objects. + // + // The vmult function requires + // two temporary vectors that we do + // not want to re-allocate and free + // every time we call this + // function. Since here, we have full + // control over the use of these + // vectors (unlike above, where a + // class called by the vmult + // function required these vectors, + // not the vmult function + // itself), we allocate them + // directly, rather than going + // through the VectorMemory + // mechanism. However, again, these + // member variables do not carry any + // state between successive calls to + // the member functions of this class + // (i.e., we never care what values + // they were set to the last time a + // member function was called), we + // mark these vectors as mutable. 
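  // As a brief aside on what the class below represents: in the block
  // notation suggested by the comments in the solve() function further
  // down (first equation $MU = -B^TP+F$, second equation $BU = G$), the
  // SchurComplement class implements the operator
  //   $S = B M^{-1} B^T$,
  // which results from eliminating the velocity $U$ from the two
  // equations. Its vmult() function applies the three factors in
  // exactly this order: multiply by $B^T$ (the (0,1) block of the
  // system matrix), apply $M^{-1}$ (through the m_inverse object),
  // then multiply by $B$ (the (1,0) block).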
+ // + // The rest of the (short) + // implementation of this class is + // straightforward if you know the + // order of matrix-vector + // multiplications performed by the + // vmult function: class SchurComplement : public Subscriptor { public: SchurComplement (const BlockSparseMatrix &A, - const IterativeInverse > &Minv); + const IterativeInverse > &Minv); void vmult (Vector &dst, - const Vector &src) const; + const Vector &src) const; private: const SmartPointer > system_matrix; @@ -789,17 +789,17 @@ namespace Step20 SchurComplement::SchurComplement (const BlockSparseMatrix &A, - const IterativeInverse > &Minv) - : - system_matrix (&A), - m_inverse (&Minv), - tmp1 (A.block(0,0).m()), - tmp2 (A.block(0,0).m()) + const IterativeInverse > &Minv) + : + system_matrix (&A), + m_inverse (&Minv), + tmp1 (A.block(0,0).m()), + tmp2 (A.block(0,0).m()) {} void SchurComplement::vmult (Vector &dst, - const Vector &src) const + const Vector &src) const { system_matrix->block(0,1).vmult (tmp1, src); m_inverse->vmult (tmp2, tmp1); @@ -807,39 +807,39 @@ namespace Step20 } - // @sect4{The ApproximateSchurComplement class template} - - // The third component of our solver - // and preconditioner system is the - // class that approximates the Schur - // complement so we can form a - // an InverseIterate - // object that approximates the - // inverse of the Schur - // complement. It follows the same - // pattern as the Schur complement - // class, with the only exception - // that we do not multiply with the - // inverse mass matrix in vmult, - // but rather just do a single Jacobi - // step. Consequently, the class also - // does not have to store a pointer - // to an inverse mass matrix object. - // - // Since InverseIterate follows the - // standard convention for matrices, - // we need to provide a - // Tvmult function here as - // well. + // @sect4{The ApproximateSchurComplement class template} + + // The third component of our solver + // and preconditioner system is the + // class that approximates the Schur + // complement so we can form a + // an InverseIterate + // object that approximates the + // inverse of the Schur + // complement. It follows the same + // pattern as the Schur complement + // class, with the only exception + // that we do not multiply with the + // inverse mass matrix in vmult, + // but rather just do a single Jacobi + // step. Consequently, the class also + // does not have to store a pointer + // to an inverse mass matrix object. + // + // Since InverseIterate follows the + // standard convention for matrices, + // we need to provide a + // Tvmult function here as + // well. 
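  // One more remark to connect the approximation made here to a
  // formula: a single Jacobi step for the mass matrix, started from a
  // zero vector, amounts to multiplying by the inverse of the diagonal
  // of $M$. In the same block notation as above, the class below
  // therefore represents the operator
  //   $\tilde S = B\, \textrm{diag}(M)^{-1} B^T$,
  // an inexpensive approximation of $S$ itself; the solve() function
  // further down wraps it into an IterativeInverse object whose
  // approximate inverse then serves as the preconditioner for the
  // outer CG iteration on $S$.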
class ApproximateSchurComplement : public Subscriptor { public: ApproximateSchurComplement (const BlockSparseMatrix &A); void vmult (Vector &dst, - const Vector &src) const; + const Vector &src) const; void Tvmult (Vector &dst, - const Vector &src) const; + const Vector &src) const; private: const SmartPointer > system_matrix; @@ -849,15 +849,15 @@ namespace Step20 ApproximateSchurComplement::ApproximateSchurComplement (const BlockSparseMatrix &A) - : - system_matrix (&A), - tmp1 (A.block(0,0).m()), - tmp2 (A.block(0,0).m()) + : + system_matrix (&A), + tmp1 (A.block(0,0).m()), + tmp2 (A.block(0,0).m()) {} void ApproximateSchurComplement::vmult (Vector &dst, - const Vector &src) const + const Vector &src) const { system_matrix->block(0,1).vmult (tmp1, src); system_matrix->block(0,0).precondition_Jacobi (tmp2, tmp1); @@ -866,7 +866,7 @@ namespace Step20 void ApproximateSchurComplement::Tvmult (Vector &dst, - const Vector &src) const + const Vector &src) const { system_matrix->block(1,0).Tvmult (dst, tmp2); system_matrix->block(0,0).precondition_Jacobi (tmp2, tmp1); @@ -875,23 +875,23 @@ namespace Step20 - // @sect4{MixedLaplace::solve} - - // After all these preparations, we - // can finally write the function - // that actually solves the linear - // problem. We will go through the - // two parts it has that each solve - // one of the two equations, the - // first one for the pressure - // (component 1 of the solution), - // then the velocities (component 0 - // of the solution). Both parts need - // an object representing the inverse - // mass matrix and an auxiliary - // vector, and we therefore declare - // these objects at the beginning of - // this function. + // @sect4{MixedLaplace::solve} + + // After all these preparations, we + // can finally write the function + // that actually solves the linear + // problem. We will go through the + // two parts it has that each solve + // one of the two equations, the + // first one for the pressure + // (component 1 of the solution), + // then the velocities (component 0 + // of the solution). Both parts need + // an object representing the inverse + // mass matrix and an auxiliary + // vector, and we therefore declare + // these objects at the beginning of + // this function. template void MixedLaplaceProblem::solve () { @@ -905,19 +905,19 @@ namespace Step20 Vector tmp (solution.block(0).size()); - // Now on to the first - // equation. The right hand side of - // it is BM^{-1}F-G, which is what - // we compute in the first few - // lines. We then declare the - // objects representing the Schur - // complement, its approximation, - // and the inverse of the - // approximation. Finally, we - // declare a solver object and hand - // off all these matrices and - // vectors to it to compute block 1 - // (the pressure) of the solution: + // Now on to the first + // equation. The right hand side of + // it is BM^{-1}F-G, which is what + // we compute in the first few + // lines. We then declare the + // objects representing the Schur + // complement, its approximation, + // and the inverse of the + // approximation. 
Finally, we + // declare a solver object and hand + // off all these matrices and + // vectors to it to compute block 1 + // (the pressure) of the solution: { Vector schur_rhs (solution.block(1).size()); @@ -927,38 +927,38 @@ namespace Step20 SchurComplement - schur_complement (system_matrix, m_inverse); + schur_complement (system_matrix, m_inverse); ApproximateSchurComplement - approximate_schur_complement (system_matrix); + approximate_schur_complement (system_matrix); IterativeInverse > - preconditioner; + preconditioner; preconditioner.initialize(approximate_schur_complement, identity); preconditioner.solver.select("cg"); preconditioner.solver.set_control(inner_control); SolverControl solver_control (solution.block(1).size(), - 1e-12*schur_rhs.l2_norm()); + 1e-12*schur_rhs.l2_norm()); SolverCG<> cg (solver_control); cg.solve (schur_complement, solution.block(1), schur_rhs, - preconditioner); + preconditioner); std::cout << solver_control.last_step() - << " CG Schur complement iterations to obtain convergence." - << std::endl; + << " CG Schur complement iterations to obtain convergence." + << std::endl; } - // After we have the pressure, we - // can compute the velocity. The - // equation reads MU=-B^TP+F, and - // we solve it by first computing - // the right hand side, and then - // multiplying it with the object - // that represents the inverse of - // the mass matrix: + // After we have the pressure, we + // can compute the velocity. The + // equation reads MU=-B^TP+F, and + // we solve it by first computing + // the right hand side, and then + // multiplying it with the object + // that represents the inverse of + // the mass matrix: { system_matrix.block(0,1).vmult (tmp, solution.block(1)); tmp *= -1; @@ -969,77 +969,77 @@ namespace Step20 } - // @sect3{MixedLaplaceProblem class implementation (continued)} - - // @sect4{MixedLaplace::compute_errors} - - // After we have dealt with the - // linear solver and preconditioners, - // we continue with the - // implementation of our main - // class. In particular, the next - // task is to compute the errors in - // our numerical solution, in both - // the pressures as well as - // velocities. - // - // To compute errors in the solution, - // we have already introduced the - // VectorTools::integrate_difference - // function in step-7 and - // step-11. However, there we only - // dealt with scalar solutions, - // whereas here we have a - // vector-valued solution with - // components that even denote - // different quantities and may have - // different orders of convergence - // (this isn't the case here, by - // choice of the used finite - // elements, but is frequently the - // case in mixed finite element - // applications). What we therefore - // have to do is to `mask' the - // components that we are interested - // in. This is easily done: the - // VectorTools::integrate_difference - // function takes as its last - // argument a pointer to a weight - // function (the parameter defaults - // to the null pointer, meaning unit - // weights). What we simply have to - // do is to pass a function object - // that equals one in the components - // we are interested in, and zero in - // the other ones. For example, to - // compute the pressure error, we - // should pass a function that - // represents the constant vector - // with a unit value in component - // dim, whereas for the velocity - // the constant vector should be one - // in the first dim components, - // and zero in the location of the - // pressure. 
- // - // In deal.II, the - // ComponentSelectFunction does - // exactly this: it wants to know how - // many vector components the - // function it is to represent should - // have (in our case this would be - // dim+1, for the joint - // velocity-pressure space) and which - // individual or range of components - // should be equal to one. We - // therefore define two such masks at - // the beginning of the function, - // following by an object - // representing the exact solution - // and a vector in which we will - // store the cellwise errors as - // computed by - // integrate_difference: + // @sect3{MixedLaplaceProblem class implementation (continued)} + + // @sect4{MixedLaplace::compute_errors} + + // After we have dealt with the + // linear solver and preconditioners, + // we continue with the + // implementation of our main + // class. In particular, the next + // task is to compute the errors in + // our numerical solution, in both + // the pressures as well as + // velocities. + // + // To compute errors in the solution, + // we have already introduced the + // VectorTools::integrate_difference + // function in step-7 and + // step-11. However, there we only + // dealt with scalar solutions, + // whereas here we have a + // vector-valued solution with + // components that even denote + // different quantities and may have + // different orders of convergence + // (this isn't the case here, by + // choice of the used finite + // elements, but is frequently the + // case in mixed finite element + // applications). What we therefore + // have to do is to `mask' the + // components that we are interested + // in. This is easily done: the + // VectorTools::integrate_difference + // function takes as its last + // argument a pointer to a weight + // function (the parameter defaults + // to the null pointer, meaning unit + // weights). What we simply have to + // do is to pass a function object + // that equals one in the components + // we are interested in, and zero in + // the other ones. For example, to + // compute the pressure error, we + // should pass a function that + // represents the constant vector + // with a unit value in component + // dim, whereas for the velocity + // the constant vector should be one + // in the first dim components, + // and zero in the location of the + // pressure. + // + // In deal.II, the + // ComponentSelectFunction does + // exactly this: it wants to know how + // many vector components the + // function it is to represent should + // have (in our case this would be + // dim+1, for the joint + // velocity-pressure space) and which + // individual or range of components + // should be equal to one. We + // therefore define two such masks at + // the beginning of the function, + // following by an object + // representing the exact solution + // and a vector in which we will + // store the cellwise errors as + // computed by + // integrate_difference: template void MixedLaplaceProblem::compute_errors () const { @@ -1051,123 +1051,123 @@ namespace Step20 ExactSolution exact_solution; Vector cellwise_errors (triangulation.n_active_cells()); - // As already discussed in step-7, - // we have to realize that it is - // impossible to integrate the - // errors exactly. All we can do is - // approximate this integral using - // quadrature. 
This actually - // presents a slight twist here: if - // we naively chose an object of - // type QGauss@(degree+1) - // as one may be inclined to do - // (this is what we used for - // integrating the linear system), - // one realizes that the error is - // very small and does not follow - // the expected convergence curves - // at all. What is happening is - // that for the mixed finite - // elements used here, the Gauss - // points happen to be - // superconvergence points in which - // the pointwise error is much - // smaller (and converges with - // higher order) than anywhere - // else. These are therefore not - // particularly good points for - // ingration. To avoid this - // problem, we simply use a - // trapezoidal rule and iterate it - // degree+2 times in each - // coordinate direction (again as - // explained in step-7): + // As already discussed in step-7, + // we have to realize that it is + // impossible to integrate the + // errors exactly. All we can do is + // approximate this integral using + // quadrature. This actually + // presents a slight twist here: if + // we naively chose an object of + // type QGauss@(degree+1) + // as one may be inclined to do + // (this is what we used for + // integrating the linear system), + // one realizes that the error is + // very small and does not follow + // the expected convergence curves + // at all. What is happening is + // that for the mixed finite + // elements used here, the Gauss + // points happen to be + // superconvergence points in which + // the pointwise error is much + // smaller (and converges with + // higher order) than anywhere + // else. These are therefore not + // particularly good points for + // ingration. To avoid this + // problem, we simply use a + // trapezoidal rule and iterate it + // degree+2 times in each + // coordinate direction (again as + // explained in step-7): QTrapez<1> q_trapez; QIterated quadrature (q_trapez, degree+2); - // With this, we can then let the - // library compute the errors and - // output them to the screen: + // With this, we can then let the + // library compute the errors and + // output them to the screen: VectorTools::integrate_difference (dof_handler, solution, exact_solution, - cellwise_errors, quadrature, - VectorTools::L2_norm, - &pressure_mask); + cellwise_errors, quadrature, + VectorTools::L2_norm, + &pressure_mask); const double p_l2_error = cellwise_errors.l2_norm(); VectorTools::integrate_difference (dof_handler, solution, exact_solution, - cellwise_errors, quadrature, - VectorTools::L2_norm, - &velocity_mask); + cellwise_errors, quadrature, + VectorTools::L2_norm, + &velocity_mask); const double u_l2_error = cellwise_errors.l2_norm(); std::cout << "Errors: ||e_p||_L2 = " << p_l2_error - << ", ||e_u||_L2 = " << u_l2_error - << std::endl; + << ", ||e_u||_L2 = " << u_l2_error + << std::endl; } - // @sect4{MixedLaplace::output_results} - - // The last interesting function is - // the one in which we generate - // graphical output. Everything here - // looks obvious and familiar. Note - // how we construct unique names for - // all the solution variables at the - // beginning, like we did in step-8 - // and other programs later on. The - // only thing worth mentioning is - // that for higher order elements, in - // seems inappropriate to only show a - // single bilinear quadrilateral per - // cell in the graphical output. We - // therefore generate patches of size - // (degree+1)x(degree+1) to capture - // the full information content of - // the solution. 
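  // (In code, this subdivision is requested through the argument of
  // DataOut::build_patches; with the degree variable of this program
  // the call reads, in sketch form,
  //   data_out.build_patches (degree+1);
  // so that each cell is subdivided degree+1 times in each coordinate
  // direction for output purposes.)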
See the step-7 - // tutorial program for more - // information on this. - // - // Note that we output the dim+1 - // components of the solution vector as a - // collection of individual scalars - // here. Most visualization programs will - // then only offer to visualize them - // individually, rather than allowing us to - // plot the flow field as a vector - // field. However, as explained in the - // corresponding function of step-22 or the - // @ref VVOutput "Generating graphical output" - // section of the @ref vector_valued module, - // instructing the DataOut class to identify - // components of the FESystem object as - // elements of a dim-dimensional - // vector is not actually very difficult and - // will then allow us to show results as - // vector plots. We skip this here for - // simplicity and refer to the links above - // for more information. + // @sect4{MixedLaplace::output_results} + + // The last interesting function is + // the one in which we generate + // graphical output. Everything here + // looks obvious and familiar. Note + // how we construct unique names for + // all the solution variables at the + // beginning, like we did in step-8 + // and other programs later on. The + // only thing worth mentioning is + // that for higher order elements, in + // seems inappropriate to only show a + // single bilinear quadrilateral per + // cell in the graphical output. We + // therefore generate patches of size + // (degree+1)x(degree+1) to capture + // the full information content of + // the solution. See the step-7 + // tutorial program for more + // information on this. + // + // Note that we output the dim+1 + // components of the solution vector as a + // collection of individual scalars + // here. Most visualization programs will + // then only offer to visualize them + // individually, rather than allowing us to + // plot the flow field as a vector + // field. However, as explained in the + // corresponding function of step-22 or the + // @ref VVOutput "Generating graphical output" + // section of the @ref vector_valued module, + // instructing the DataOut class to identify + // components of the FESystem object as + // elements of a dim-dimensional + // vector is not actually very difficult and + // will then allow us to show results as + // vector plots. We skip this here for + // simplicity and refer to the links above + // for more information. template void MixedLaplaceProblem::output_results () const { std::vector solution_names; switch (dim) { - case 2: - solution_names.push_back ("u"); - solution_names.push_back ("v"); - solution_names.push_back ("p"); - break; - - case 3: - solution_names.push_back ("u"); - solution_names.push_back ("v"); - solution_names.push_back ("w"); - solution_names.push_back ("p"); - break; - - default: - Assert (false, ExcNotImplemented()); + case 2: + solution_names.push_back ("u"); + solution_names.push_back ("v"); + solution_names.push_back ("p"); + break; + + case 3: + solution_names.push_back ("u"); + solution_names.push_back ("v"); + solution_names.push_back ("w"); + solution_names.push_back ("p"); + break; + + default: + Assert (false, ExcNotImplemented()); } @@ -1184,12 +1184,12 @@ namespace Step20 - // @sect4{MixedLaplace::run} + // @sect4{MixedLaplace::run} - // This is the final function of our - // main class. It's only job is to - // call the other functions in their - // natural order: + // This is the final function of our + // main class. 
It's only job is to + // call the other functions in their + // natural order: template void MixedLaplaceProblem::run () { @@ -1204,16 +1204,16 @@ namespace Step20 // @sect3{The main function} - // The main function we stole from - // step-6 instead of step-4. It is - // almost equal to the one in step-6 - // (apart from the changed class - // names, of course), the only - // exception is that we pass the - // degree of the finite element space - // to the constructor of the mixed - // laplace problem (here, we use - // zero-th order elements). + // The main function we stole from + // step-6 instead of step-4. It is + // almost equal to the one in step-6 + // (apart from the changed class + // names, of course), the only + // exception is that we pass the + // degree of the finite element space + // to the constructor of the mixed + // laplace problem (here, we use + // zero-th order elements). int main () { try @@ -1229,25 +1229,25 @@ int main () catch (std::exception &exc) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Exception on processing: " << std::endl - << exc.what() << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << exc.what() << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } catch (...) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Unknown exception!" << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } diff --git a/deal.II/examples/step-21/step-21.cc b/deal.II/examples/step-21/step-21.cc index 57cafb4db3..ac1853b7c8 100644 --- a/deal.II/examples/step-21/step-21.cc +++ b/deal.II/examples/step-21/step-21.cc @@ -70,37 +70,37 @@ namespace Step21 using namespace dealii; - // @sect3{The TwoPhaseFlowProblem class} - - // This is the main class of the program. It - // is close to the one of step-20, but with a - // few additional functions: - // - //
    - //
  • assemble_rhs_S assembles the - // right hand side of the saturation - // equation. As explained in the - // introduction, this can't be integrated - // into assemble_rhs since it depends - // on the velocity that is computed in the - // first part of the time step. - // - //
  • get_maximal_velocity does as its - // name suggests. This function is used in - // the computation of the time step size. - // - //
  • project_back_saturation resets - // all saturation degrees of freedom with - // values less than zero to zero, and all - // those with saturations greater than one - // to one. - //
- // - // The rest of the class should be pretty - // much obvious. The viscosity variable - // stores the viscosity $\mu$ that enters - // several of the formulas in the nonlinear - // equations. + // @sect3{The TwoPhaseFlowProblem class} + + // This is the main class of the program. It + // is close to the one of step-20, but with a + // few additional functions: + // + //
    + //
  • assemble_rhs_S assembles the + // right hand side of the saturation + // equation. As explained in the + // introduction, this can't be integrated + // into assemble_rhs since it depends + // on the velocity that is computed in the + // first part of the time step. + // + //
  • get_maximal_velocity does as its + // name suggests. This function is used in + // the computation of the time step size. + // + //
  • project_back_saturation resets + // all saturation degrees of freedom with + // values less than zero to zero, and all + // those with saturations greater than one + // to one. + //
+ // + // The rest of the class should be pretty + // much obvious. The viscosity variable + // stores the viscosity $\mu$ that enters + // several of the formulas in the nonlinear + // equations. template class TwoPhaseFlowProblem { @@ -138,14 +138,14 @@ namespace Step21 }; - // @sect3{Equation data} + // @sect3{Equation data} - // @sect4{Pressure right hand side} - // At present, the right hand side of the - // pressure equation is simply the zero - // function. However, the rest of the program - // is fully equipped to deal with anything - // else, if this is desired: + // @sect4{Pressure right hand side} + // At present, the right hand side of the + // pressure equation is simply the zero + // function. However, the rest of the program + // is fully equipped to deal with anything + // else, if this is desired: template class PressureRightHandSide : public Function { @@ -153,7 +153,7 @@ namespace Step21 PressureRightHandSide () : Function(1) {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; }; @@ -161,16 +161,16 @@ namespace Step21 template double PressureRightHandSide::value (const Point &/*p*/, - const unsigned int /*component*/) const + const unsigned int /*component*/) const { return 0; } - // @sect4{Pressure boundary values} - // The next are pressure boundary values. As - // mentioned in the introduction, we choose a - // linear pressure field: + // @sect4{Pressure boundary values} + // The next are pressure boundary values. As + // mentioned in the introduction, we choose a + // linear pressure field: template class PressureBoundaryValues : public Function { @@ -178,29 +178,29 @@ namespace Step21 PressureBoundaryValues () : Function(1) {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; }; template double PressureBoundaryValues::value (const Point &p, - const unsigned int /*component*/) const + const unsigned int /*component*/) const { return 1-p[0]; } - // @sect4{Saturation boundary values} + // @sect4{Saturation boundary values} - // Then we also need boundary values on the - // inflow portions of the boundary. The - // question whether something is an inflow - // part is decided when assembling the right - // hand side, we only have to provide a - // functional description of the boundary - // values. This is as explained in the - // introduction: + // Then we also need boundary values on the + // inflow portions of the boundary. The + // question whether something is an inflow + // part is decided when assembling the right + // hand side, we only have to provide a + // functional description of the boundary + // values. This is as explained in the + // introduction: template class SaturationBoundaryValues : public Function { @@ -208,7 +208,7 @@ namespace Step21 SaturationBoundaryValues () : Function(1) {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; }; @@ -216,7 +216,7 @@ namespace Step21 template double SaturationBoundaryValues::value (const Point &p, - const unsigned int /*component*/) const + const unsigned int /*component*/) const { if (p[0] == 0) return 1; @@ -226,25 +226,25 @@ namespace Step21 - // @sect4{Initial data} - - // Finally, we need initial data. 
In reality, - // we only need initial data for the - // saturation, but we are lazy, so we will - // later, before the first time step, simply - // interpolate the entire solution for the - // previous time step from a function that - // contains all vector components. - // - // We therefore simply create a function that - // returns zero in all components. We do that - // by simply forward every function to the - // ZeroFunction class. Why not use that right - // away in the places of this program where - // we presently use the InitialValues - // class? Because this way it is simpler to - // later go back and choose a different - // function for initial values. + // @sect4{Initial data} + + // Finally, we need initial data. In reality, + // we only need initial data for the + // saturation, but we are lazy, so we will + // later, before the first time step, simply + // interpolate the entire solution for the + // previous time step from a function that + // contains all vector components. + // + // We therefore simply create a function that + // returns zero in all components. We do that + // by simply forward every function to the + // ZeroFunction class. Why not use that right + // away in the places of this program where + // we presently use the InitialValues + // class? Because this way it is simpler to + // later go back and choose a different + // function for initial values. template class InitialValues : public Function { @@ -252,10 +252,10 @@ namespace Step21 InitialValues () : Function(dim+2) {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; virtual void vector_value (const Point &p, - Vector &value) const; + Vector &value) const; }; @@ -263,7 +263,7 @@ namespace Step21 template double InitialValues::value (const Point &p, - const unsigned int component) const + const unsigned int component) const { return ZeroFunction(dim+2).value (p, component); } @@ -272,7 +272,7 @@ namespace Step21 template void InitialValues::vector_value (const Point &p, - Vector &values) const + Vector &values) const { ZeroFunction(dim+2).vector_value (p, values); } @@ -280,141 +280,141 @@ namespace Step21 - // @sect3{The inverse permeability tensor} + // @sect3{The inverse permeability tensor} - // As announced in the introduction, we - // implement two different permeability - // tensor fields. Each of them we put into a - // namespace of its own, so that it will be - // easy later to replace use of one by the - // other in the code. + // As announced in the introduction, we + // implement two different permeability + // tensor fields. Each of them we put into a + // namespace of its own, so that it will be + // easy later to replace use of one by the + // other in the code. - // @sect4{Single curving crack permeability} + // @sect4{Single curving crack permeability} - // The first function for the - // permeability was the one that - // models a single curving crack. It - // was already used at the end of - // step-20, and its functional form - // is given in the introduction of - // the present tutorial program. As - // in some previous programs, we have - // to declare a (seemingly - // unnecessary) default constructor - // of the KInverse class to avoid - // warnings from some compilers: + // The first function for the + // permeability was the one that + // models a single curving crack. 
It + // was already used at the end of + // step-20, and its functional form + // is given in the introduction of + // the present tutorial program. As + // in some previous programs, we have + // to declare a (seemingly + // unnecessary) default constructor + // of the KInverse class to avoid + // warnings from some compilers: namespace SingleCurvingCrack { template class KInverse : public TensorFunction<2,dim> { public: - KInverse () - : - TensorFunction<2,dim> () - {} + KInverse () + : + TensorFunction<2,dim> () + {} - virtual void value_list (const std::vector > &points, - std::vector > &values) const; + virtual void value_list (const std::vector > &points, + std::vector > &values) const; }; template void KInverse::value_list (const std::vector > &points, - std::vector > &values) const + std::vector > &values) const { Assert (points.size() == values.size(), - ExcDimensionMismatch (points.size(), values.size())); + ExcDimensionMismatch (points.size(), values.size())); for (unsigned int p=0; pget_centers that computes the list of - // center points when called. - // - // Note that this class works just fine in - // both 2d and 3d, with the only difference - // being that we use more points in 3d: by - // experimenting we find that we need more - // exponentials in 3d than in 2d (we have - // more ground to cover, after all, if we - // want to keep the distance between centers - // roughly equal), so we choose 40 in 2d and - // 100 in 3d. For any other dimension, the - // function does presently not know what to - // do so simply throws an exception - // indicating exactly this. + // @sect4{Random medium permeability} + + // This function does as announced in the + // introduction, i.e. it creates an overlay + // of exponentials at random places. There is + // one thing worth considering for this + // class. The issue centers around the + // problem that the class creates the centers + // of the exponentials using a random + // function. If we therefore created the + // centers each time we create an object of + // the present type, we would get a different + // list of centers each time. That's not what + // we expect from classes of this type: they + // should reliably represent the same + // function. + // + // The solution to this problem is to make + // the list of centers a static member + // variable of this class, i.e. there exists + // exactly one such variable for the entire + // program, rather than for each object of + // this type. That's exactly what we are + // going to do. + // + // The next problem, however, is that we need + // a way to initialize this variable. Since + // this variable is initialized at the + // beginning of the program, we can't use a + // regular member function for that since + // there may not be an object of this type + // around at the time. The C++ standard + // therefore says that only non-member and + // static member functions can be used to + // initialize a static variable. We use the + // latter possibility by defining a function + // get_centers that computes the list of + // center points when called. + // + // Note that this class works just fine in + // both 2d and 3d, with the only difference + // being that we use more points in 3d: by + // experimenting we find that we need more + // exponentials in 3d than in 2d (we have + // more ground to cover, after all, if we + // want to keep the distance between centers + // roughly equal), so we choose 40 in 2d and + // 100 in 3d. 
For any other dimension, the + // function does presently not know what to + // do so simply throws an exception + // indicating exactly this. namespace RandomMedium { template class KInverse : public TensorFunction<2,dim> { public: - KInverse () - : - TensorFunction<2,dim> () - {} + KInverse () + : + TensorFunction<2,dim> () + {} - virtual void value_list (const std::vector > &points, - std::vector > &values) const; + virtual void value_list (const std::vector > &points, + std::vector > &values) const; private: - static std::vector > centers; + static std::vector > centers; - static std::vector > get_centers (); + static std::vector > get_centers (); }; @@ -429,15 +429,15 @@ namespace Step21 KInverse::get_centers () { const unsigned int N = (dim == 2 ? - 40 : - (dim == 3 ? - 100 : - throw ExcNotImplemented())); + 40 : + (dim == 3 ? + 100 : + throw ExcNotImplemented())); std::vector > centers_list (N); for (unsigned int i=0; i(rand())/RAND_MAX; + for (unsigned int d=0; d(rand())/RAND_MAX; return centers_list; } @@ -447,46 +447,46 @@ namespace Step21 template void KInverse::value_list (const std::vector > &points, - std::vector > &values) const + std::vector > &values) const { Assert (points.size() == values.size(), - ExcDimensionMismatch (points.size(), values.size())); + ExcDimensionMismatch (points.size(), values.size())); for (unsigned int p=0; psrc.size() CG - // iterations before the solver in - // the vmult() function - // converges. (This is, of course, a - // result of numerical round-off, - // since we know that on paper, the - // CG method converges in at most - // src.size() steps.) As - // a consequence, we set the maximum - // number of iterations equal to the - // maximum of the size of the linear - // system and 200. + // @sect3{Linear solvers and preconditioners} + + // The linear solvers we use are also + // completely analogous to the ones + // used in step-20. The following + // classes are therefore copied + // verbatim from there. There is a + // single change: if the size of a + // linear system is small, i.e. when + // the mesh is very coarse, then it + // is sometimes not sufficient to set + // a maximum of + // src.size() CG + // iterations before the solver in + // the vmult() function + // converges. (This is, of course, a + // result of numerical round-off, + // since we know that on paper, the + // CG method converges in at most + // src.size() steps.) As + // a consequence, we set the maximum + // number of iterations equal to the + // maximum of the size of the linear + // system and 200. 
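  // The stopping criterion just described can be spelled out in isolation
  // as a small helper (a stand-alone sketch; StoppingCriterion and
  // make_criterion are made-up names, while the constants 200 and 1e-8 are
  // the ones used in the solver calls below):

#include <algorithm>
#include <cstddef>

struct StoppingCriterion
{
  std::size_t max_iterations;
  double      tolerance;
};

StoppingCriterion make_criterion (const std::size_t system_size,
                                  const double      rhs_norm)
{
  StoppingCriterion criterion;
  criterion.max_iterations = std::max<std::size_t> (system_size, 200);
  criterion.tolerance      = 1e-8 * rhs_norm;  // relative to the right hand side
  return criterion;
}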
template class InverseMatrix : public Subscriptor { @@ -526,7 +526,7 @@ namespace Step21 InverseMatrix (const Matrix &m); void vmult (Vector &dst, - const Vector &src) const; + const Vector &src) const; private: const SmartPointer matrix; @@ -535,18 +535,18 @@ namespace Step21 template InverseMatrix::InverseMatrix (const Matrix &m) - : - matrix (&m) + : + matrix (&m) {} template void InverseMatrix::vmult (Vector &dst, - const Vector &src) const + const Vector &src) const { SolverControl solver_control (std::max(src.size(), 200U), - 1e-8*src.l2_norm()); + 1e-8*src.l2_norm()); SolverCG<> cg (solver_control); dst = 0; @@ -560,10 +560,10 @@ namespace Step21 { public: SchurComplement (const BlockSparseMatrix &A, - const InverseMatrix > &Minv); + const InverseMatrix > &Minv); void vmult (Vector &dst, - const Vector &src) const; + const Vector &src) const; private: const SmartPointer > system_matrix; @@ -576,17 +576,17 @@ namespace Step21 SchurComplement:: SchurComplement (const BlockSparseMatrix &A, - const InverseMatrix > &Minv) - : - system_matrix (&A), - m_inverse (&Minv), - tmp1 (A.block(0,0).m()), - tmp2 (A.block(0,0).m()) + const InverseMatrix > &Minv) + : + system_matrix (&A), + m_inverse (&Minv), + tmp1 (A.block(0,0).m()), + tmp2 (A.block(0,0).m()) {} void SchurComplement::vmult (Vector &dst, - const Vector &src) const + const Vector &src) const { system_matrix->block(0,1).vmult (tmp1, src); m_inverse->vmult (tmp2, tmp1); @@ -601,7 +601,7 @@ namespace Step21 ApproximateSchurComplement (const BlockSparseMatrix &A); void vmult (Vector &dst, - const Vector &src) const; + const Vector &src) const; private: const SmartPointer > system_matrix; @@ -612,15 +612,15 @@ namespace Step21 ApproximateSchurComplement:: ApproximateSchurComplement (const BlockSparseMatrix &A) - : - system_matrix (&A), - tmp1 (A.block(0,0).m()), - tmp2 (A.block(0,0).m()) + : + system_matrix (&A), + tmp1 (A.block(0,0).m()), + tmp2 (A.block(0,0).m()) {} void ApproximateSchurComplement::vmult (Vector &dst, - const Vector &src) const + const Vector &src) const { system_matrix->block(0,1).vmult (tmp1, src); system_matrix->block(0,0).precondition_Jacobi (tmp2, tmp1); @@ -631,45 +631,45 @@ namespace Step21 - // @sect3{TwoPhaseFlowProblem class implementation} + // @sect3{TwoPhaseFlowProblem class implementation} - // Here now the implementation of the main - // class. Much of it is actually copied from - // step-20, so we won't comment on it in much - // detail. You should try to get familiar - // with that program first, then most of what - // is happening here should be mostly clear. + // Here now the implementation of the main + // class. Much of it is actually copied from + // step-20, so we won't comment on it in much + // detail. You should try to get familiar + // with that program first, then most of what + // is happening here should be mostly clear. - // @sect4{TwoPhaseFlowProblem::TwoPhaseFlowProblem} - // First for the constructor. We use $RT_k - // \times DQ_k \times DQ_k$ spaces. The time - // step is set to zero initially, but will be - // computed before it is needed first, as - // described in a subsection of the - // introduction. + // @sect4{TwoPhaseFlowProblem::TwoPhaseFlowProblem} + // First for the constructor. We use $RT_k + // \times DQ_k \times DQ_k$ spaces. The time + // step is set to zero initially, but will be + // computed before it is needed first, as + // described in a subsection of the + // introduction. 
template TwoPhaseFlowProblem::TwoPhaseFlowProblem (const unsigned int degree) - : - degree (degree), - fe (FE_RaviartThomas(degree), 1, - FE_DGQ(degree), 1, - FE_DGQ(degree), 1), - dof_handler (triangulation), - n_refinement_steps (5), - time_step (0), - viscosity (0.2) + : + degree (degree), + fe (FE_RaviartThomas(degree), 1, + FE_DGQ(degree), 1, + FE_DGQ(degree), 1), + dof_handler (triangulation), + n_refinement_steps (5), + time_step (0), + viscosity (0.2) {} - // @sect4{TwoPhaseFlowProblem::make_grid_and_dofs} + // @sect4{TwoPhaseFlowProblem::make_grid_and_dofs} - // This next function starts out with - // well-known functions calls that create and - // refine a mesh, and then associate degrees - // of freedom with it. It does all the same - // things as in step-20, just now for three - // components instead of two. + // This next function starts out with + // well-known functions calls that create and + // refine a mesh, and then associate degrees + // of freedom with it. It does all the same + // things as in step-20, just now for three + // components instead of two. template void TwoPhaseFlowProblem::make_grid_and_dofs () { @@ -682,17 +682,17 @@ namespace Step21 std::vector dofs_per_component (dim+2); DoFTools::count_dofs_per_component (dof_handler, dofs_per_component); const unsigned int n_u = dofs_per_component[0], - n_p = dofs_per_component[dim], - n_s = dofs_per_component[dim+1]; + n_p = dofs_per_component[dim], + n_s = dofs_per_component[dim+1]; std::cout << "Number of active cells: " - << triangulation.n_active_cells() - << std::endl - << "Number of degrees of freedom: " - << dof_handler.n_dofs() - << " (" << n_u << '+' << n_p << '+'<< n_s <<')' - << std::endl - << std::endl; + << triangulation.n_active_cells() + << std::endl + << "Number of degrees of freedom: " + << dof_handler.n_dofs() + << " (" << n_u << '+' << n_p << '+'<< n_s <<')' + << std::endl + << std::endl; const unsigned int n_couplings = dof_handler.max_couplings_between_dofs(); @@ -737,30 +737,30 @@ namespace Step21 } - // @sect4{TwoPhaseFlowProblem::assemble_system} - - // This is the function that assembles the - // linear system, or at least everything - // except the (1,3) block that depends on the - // still-unknown velocity computed during - // this time step (we deal with this in - // assemble_rhs_S). Much of it - // is again as in step-20, but we have to - // deal with some nonlinearity this time. - // However, the top of the function is pretty - // much as usual (note that we set matrix and - // right hand side to zero at the beginning - // — something we didn't have to do for - // stationary problems since there we use - // each matrix object only once and it is - // empty at the beginning anyway). - // - // Note that in its present form, the - // function uses the permeability implemented - // in the RandomMedium::KInverse - // class. Switching to the single curved - // crack permeability function is as simple - // as just changing the namespace name. + // @sect4{TwoPhaseFlowProblem::assemble_system} + + // This is the function that assembles the + // linear system, or at least everything + // except the (1,3) block that depends on the + // still-unknown velocity computed during + // this time step (we deal with this in + // assemble_rhs_S). Much of it + // is again as in step-20, but we have to + // deal with some nonlinearity this time. 
+ // However, the top of the function is pretty + // much as usual (note that we set matrix and + // right hand side to zero at the beginning + // — something we didn't have to do for + // stationary problems since there we use + // each matrix object only once and it is + // empty at the beginning anyway). + // + // Note that in its present form, the + // function uses the permeability implemented + // in the RandomMedium::KInverse + // class. Switching to the single curved + // crack permeability function is as simple + // as just changing the namespace name. template void TwoPhaseFlowProblem::assemble_system () { @@ -771,11 +771,11 @@ namespace Step21 QGauss face_quadrature_formula(degree+2); FEValues fe_values (fe, quadrature_formula, - update_values | update_gradients | - update_quadrature_points | update_JxW_values); + update_values | update_gradients | + update_quadrature_points | update_JxW_values); FEFaceValues fe_face_values (fe, face_quadrature_formula, - update_values | update_normal_vectors | - update_quadrature_points | update_JxW_values); + update_values | update_normal_vectors | + update_quadrature_points | update_JxW_values); const unsigned int dofs_per_cell = fe.dofs_per_cell; @@ -797,7 +797,7 @@ namespace Step21 std::vector > old_solution_values(n_q_points, Vector(dim+2)); std::vector > > old_solution_grads(n_q_points, - std::vector > (dim+2)); + std::vector > (dim+2)); const FEValuesExtractors::Vector velocities (0); const FEValuesExtractors::Scalar pressure (dim); @@ -808,150 +808,150 @@ namespace Step21 endc = dof_handler.end(); for (; cell!=endc; ++cell) { - fe_values.reinit (cell); - local_matrix = 0; - local_rhs = 0; - - // Here's the first significant - // difference: We have to get the - // values of the saturation function of - // the previous time step at the - // quadrature points. To this end, we - // can use the - // FEValues::get_function_values - // (previously already used in step-9, - // step-14 and step-15), a function - // that takes a solution vector and - // returns a list of function values at - // the quadrature points of the present - // cell. In fact, it returns the - // complete vector-valued solution at - // each quadrature point, i.e. not only - // the saturation but also the - // velocities and pressure: - fe_values.get_function_values (old_solution, old_solution_values); - - // Then we also have to get the values - // of the pressure right hand side and - // of the inverse permeability tensor - // at the quadrature points: - pressure_right_hand_side.value_list (fe_values.get_quadrature_points(), - pressure_rhs_values); - k_inverse.value_list (fe_values.get_quadrature_points(), - k_inverse_values); - - // With all this, we can now loop over - // all the quadrature points and shape - // functions on this cell and assemble - // those parts of the matrix and right - // hand side that we deal with in this - // function. 
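  // (Spelled out, the cell contribution accumulated in the double loop over
  // i and j below is
  // $\sum_q \big(\varphi_i^u \cdot K^{-1}\,\lambda^{-1}(S^n)\,\varphi_j^u
  //   - (\nabla\cdot\varphi_i^u)\,\varphi_j^p
  //   - \varphi_i^p\,(\nabla\cdot\varphi_j^u)
  //   + \varphi_i^s\,\varphi_j^s\big)\,w_q$
  // for the matrix and $-\sum_q \varphi_i^p\, f_q\, w_q$ for the right hand
  // side, where $w_q$ denotes the JxW quadrature weights, $\lambda^{-1}$ is
  // the value returned by mobility_inverse(), and $f_q$ is the pressure
  // right hand side at the quadrature point.)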
The individual terms in - // the contributions should be - // self-explanatory given the explicit - // form of the bilinear form stated in - // the introduction: - for (unsigned int q=0; q phi_i_u = fe_values[velocities].value (i, q); - const double div_phi_i_u = fe_values[velocities].divergence (i, q); - const double phi_i_p = fe_values[pressure].value (i, q); - const double phi_i_s = fe_values[saturation].value (i, q); - - for (unsigned int j=0; j phi_j_u = fe_values[velocities].value (j, q); - const double div_phi_j_u = fe_values[velocities].divergence (j, q); - const double phi_j_p = fe_values[pressure].value (j, q); - const double phi_j_s = fe_values[saturation].value (j, q); - - local_matrix(i,j) += (phi_i_u * k_inverse_values[q] * - mobility_inverse(old_s,viscosity) * phi_j_u - - div_phi_i_u * phi_j_p - - phi_i_p * div_phi_j_u - + phi_i_s * phi_j_s) - * fe_values.JxW(q); - } - - local_rhs(i) += (-phi_i_p * pressure_rhs_values[q])* - fe_values.JxW(q); - } - - - // Next, we also have to deal with the - // pressure boundary values. This, - // again is as in step-20: - for (unsigned int face_no=0; - face_no::faces_per_cell; - ++face_no) - if (cell->at_boundary(face_no)) - { - fe_face_values.reinit (cell, face_no); - - pressure_boundary_values - .value_list (fe_face_values.get_quadrature_points(), - boundary_values); - - for (unsigned int q=0; q - phi_i_u = fe_face_values[velocities].value (i, q); - - local_rhs(i) += -(phi_i_u * - fe_face_values.normal_vector(q) * - boundary_values[q] * - fe_face_values.JxW(q)); - } - } - - // The final step in the loop - // over all cells is to - // transfer local contributions - // into the global matrix and - // right hand side vector: - cell->get_dof_indices (local_dof_indices); - for (unsigned int i=0; i phi_i_u = fe_values[velocities].value (i, q); + const double div_phi_i_u = fe_values[velocities].divergence (i, q); + const double phi_i_p = fe_values[pressure].value (i, q); + const double phi_i_s = fe_values[saturation].value (i, q); + + for (unsigned int j=0; j phi_j_u = fe_values[velocities].value (j, q); + const double div_phi_j_u = fe_values[velocities].divergence (j, q); + const double phi_j_p = fe_values[pressure].value (j, q); + const double phi_j_s = fe_values[saturation].value (j, q); + + local_matrix(i,j) += (phi_i_u * k_inverse_values[q] * + mobility_inverse(old_s,viscosity) * phi_j_u + - div_phi_i_u * phi_j_p + - phi_i_p * div_phi_j_u + + phi_i_s * phi_j_s) + * fe_values.JxW(q); + } + + local_rhs(i) += (-phi_i_p * pressure_rhs_values[q])* + fe_values.JxW(q); + } + + + // Next, we also have to deal with the + // pressure boundary values. 
This, + // again is as in step-20: + for (unsigned int face_no=0; + face_no::faces_per_cell; + ++face_no) + if (cell->at_boundary(face_no)) + { + fe_face_values.reinit (cell, face_no); + + pressure_boundary_values + .value_list (fe_face_values.get_quadrature_points(), + boundary_values); + + for (unsigned int q=0; q + phi_i_u = fe_face_values[velocities].value (i, q); + + local_rhs(i) += -(phi_i_u * + fe_face_values.normal_vector(q) * + boundary_values[q] * + fe_face_values.JxW(q)); + } + } + + // The final step in the loop + // over all cells is to + // transfer local contributions + // into the global matrix and + // right hand side vector: + cell->get_dof_indices (local_dof_indices); + for (unsigned int i=0; i void TwoPhaseFlowProblem::assemble_rhs_S () { QGauss quadrature_formula(degree+2); QGauss face_quadrature_formula(degree+2); FEValues fe_values (fe, quadrature_formula, - update_values | update_gradients | - update_quadrature_points | update_JxW_values); + update_values | update_gradients | + update_quadrature_points | update_JxW_values); FEFaceValues fe_face_values (fe, face_quadrature_formula, - update_values | update_normal_vectors | - update_quadrature_points | update_JxW_values); + update_values | update_normal_vectors | + update_quadrature_points | update_JxW_values); FEFaceValues fe_face_values_neighbor (fe, face_quadrature_formula, - update_values); + update_values); const unsigned int dofs_per_cell = fe.dofs_per_cell; const unsigned int n_q_points = quadrature_formula.size(); @@ -977,125 +977,125 @@ namespace Step21 endc = dof_handler.end(); for (; cell!=endc; ++cell) { - local_rhs = 0; - fe_values.reinit (cell); - - fe_values.get_function_values (old_solution, old_solution_values); - fe_values.get_function_values (solution, present_solution_values); - - // First for the cell terms. These are, - // following the formulas in the - // introduction, $(S^n,\sigma)-(F(S^n) - // \mathbf{v}^{n+1},\nabla \sigma)$, - // where $\sigma$ is the saturation - // component of the test function: - for (unsigned int q=0; q present_u; - for (unsigned int d=0; d grad_phi_i_s = fe_values[saturation].gradient (i, q); - - local_rhs(i) += (time_step * - fractional_flow(old_s,viscosity) * - present_u * - grad_phi_i_s - + - old_s * phi_i_s) - * - fe_values.JxW(q); - } - - // Secondly, we have to deal with the - // flux parts on the face - // boundaries. This was a bit more - // involved because we first have to - // determine which are the influx and - // outflux parts of the cell - // boundary. If we have an influx - // boundary, we need to evaluate the - // saturation on the other side of the - // face (or the boundary values, if we - // are at the boundary of the domain). - // - // All this is a bit tricky, but has - // been explained in some detail - // already in step-9. Take a look there - // how this is supposed to work! 
- for (unsigned int face_no=0; face_no::faces_per_cell; - ++face_no) - { - fe_face_values.reinit (cell, face_no); - - fe_face_values.get_function_values (old_solution, old_solution_values_face); - fe_face_values.get_function_values (solution, present_solution_values_face); - - if (cell->at_boundary(face_no)) - saturation_boundary_values - .value_list (fe_face_values.get_quadrature_points(), - neighbor_saturation); - else - { - const typename DoFHandler::active_cell_iterator - neighbor = cell->neighbor(face_no); - const unsigned int - neighbor_face = cell->neighbor_of_neighbor(face_no); - - fe_face_values_neighbor.reinit (neighbor, neighbor_face); - - fe_face_values_neighbor - .get_function_values (old_solution, - old_solution_values_face_neighbor); - - for (unsigned int q=0; q present_u_face; - for (unsigned int d=0; d= 0); - - for (unsigned int i=0; iget_dof_indices (local_dof_indices); - for (unsigned int i=0; i present_u; + for (unsigned int d=0; d grad_phi_i_s = fe_values[saturation].gradient (i, q); + + local_rhs(i) += (time_step * + fractional_flow(old_s,viscosity) * + present_u * + grad_phi_i_s + + + old_s * phi_i_s) + * + fe_values.JxW(q); + } + + // Secondly, we have to deal with the + // flux parts on the face + // boundaries. This was a bit more + // involved because we first have to + // determine which are the influx and + // outflux parts of the cell + // boundary. If we have an influx + // boundary, we need to evaluate the + // saturation on the other side of the + // face (or the boundary values, if we + // are at the boundary of the domain). + // + // All this is a bit tricky, but has + // been explained in some detail + // already in step-9. Take a look there + // how this is supposed to work! + for (unsigned int face_no=0; face_no::faces_per_cell; + ++face_no) + { + fe_face_values.reinit (cell, face_no); + + fe_face_values.get_function_values (old_solution, old_solution_values_face); + fe_face_values.get_function_values (solution, present_solution_values_face); + + if (cell->at_boundary(face_no)) + saturation_boundary_values + .value_list (fe_face_values.get_quadrature_points(), + neighbor_saturation); + else + { + const typename DoFHandler::active_cell_iterator + neighbor = cell->neighbor(face_no); + const unsigned int + neighbor_face = cell->neighbor_of_neighbor(face_no); + + fe_face_values_neighbor.reinit (neighbor, neighbor_face); + + fe_face_values_neighbor + .get_function_values (old_solution, + old_solution_values_face_neighbor); + + for (unsigned int q=0; q present_u_face; + for (unsigned int d=0; d= 0); + + for (unsigned int i=0; iget_dof_indices (local_dof_indices); + for (unsigned int i=0; i void TwoPhaseFlowProblem::solve () { @@ -1106,9 +1106,9 @@ namespace Step21 Vector tmp2 (solution.block(2).size()); - // First the pressure, using the pressure - // Schur complement of the first two - // equations: + // First the pressure, using the pressure + // Schur complement of the first two + // equations: { m_inverse.vmult (tmp, system_rhs.block(0)); system_matrix.block(1,0).vmult (schur_rhs, tmp); @@ -1116,29 +1116,29 @@ namespace Step21 SchurComplement - schur_complement (system_matrix, m_inverse); + schur_complement (system_matrix, m_inverse); ApproximateSchurComplement - approximate_schur_complement (system_matrix); + approximate_schur_complement (system_matrix); InverseMatrix - preconditioner (approximate_schur_complement); + preconditioner (approximate_schur_complement); SolverControl solver_control (solution.block(1).size(), - 1e-12*schur_rhs.l2_norm()); + 
1e-12*schur_rhs.l2_norm()); SolverCG<> cg (solver_control); cg.solve (schur_complement, solution.block(1), schur_rhs, - preconditioner); + preconditioner); std::cout << " " - << solver_control.last_step() - << " CG Schur complement iterations for pressure." - << std::endl; + << solver_control.last_step() + << " CG Schur complement iterations for pressure." + << std::endl; } - // Now the velocity: + // Now the velocity: { system_matrix.block(0,1).vmult (tmp, solution.block(1)); tmp *= -1; @@ -1147,51 +1147,51 @@ namespace Step21 m_inverse.vmult (solution.block(0), tmp); } - // Finally, we have to take care of the - // saturation equation. The first business - // we have here is to determine the time - // step using the formula in the - // introduction. Knowing the shape of our - // domain and that we created the mesh by - // regular subdivision of cells, we can - // compute the diameter of each of our - // cells quite easily (in fact we use the - // linear extensions in coordinate - // directions of the cells, not the - // diameter). Note that we will learn a - // more general way to do this in step-24, - // where we use the - // GridTools::minimal_cell_diameter - // function. - // - // The maximal velocity we compute using a - // helper function to compute the maximal - // velocity defined below, and with all - // this we can evaluate our new time step - // length: + // Finally, we have to take care of the + // saturation equation. The first business + // we have here is to determine the time + // step using the formula in the + // introduction. Knowing the shape of our + // domain and that we created the mesh by + // regular subdivision of cells, we can + // compute the diameter of each of our + // cells quite easily (in fact we use the + // linear extensions in coordinate + // directions of the cells, not the + // diameter). Note that we will learn a + // more general way to do this in step-24, + // where we use the + // GridTools::minimal_cell_diameter + // function. + // + // The maximal velocity we compute using a + // helper function to compute the maximal + // velocity defined below, and with all + // this we can evaluate our new time step + // length: time_step = std::pow(0.5, double(n_refinement_steps)) / - get_maximal_velocity(); + get_maximal_velocity(); - // The next step is to assemble the right - // hand side, and then to pass everything - // on for solution. At the end, we project - // back saturations onto the physically - // reasonable range: + // The next step is to assemble the right + // hand side, and then to pass everything + // on for solution. At the end, we project + // back saturations onto the physically + // reasonable range: assemble_rhs_S (); { SolverControl solver_control (system_matrix.block(2,2).m(), - 1e-8*system_rhs.block(2).l2_norm()); + 1e-8*system_rhs.block(2).l2_norm()); SolverCG<> cg (solver_control); cg.solve (system_matrix.block(2,2), solution.block(2), system_rhs.block(2), - PreconditionIdentity()); + PreconditionIdentity()); project_back_saturation (); std::cout << " " - << solver_control.last_step() - << " CG iterations for saturation." - << std::endl; + << solver_control.last_step() + << " CG iterations for saturation." + << std::endl; } @@ -1199,12 +1199,12 @@ namespace Step21 } - // @sect4{TwoPhaseFlowProblem::output_results} + // @sect4{TwoPhaseFlowProblem::output_results} - // There is nothing surprising here. Since - // the program will do a lot of time steps, - // we create an output file only every fifth - // time step. 
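  // (For the record, the arithmetic behind the time step chosen in solve()
  // above: with n_refinement_steps set to 5 in the constructor, the factor
  // std::pow(0.5, 5) equals $2^{-5} = 1/32 \approx 0.031$, the linear
  // extension of the cells of the regularly refined mesh, so the formula
  // amounts to the CFL-type restriction
  // $\Delta t = h_{\text{cell}} / \max\|\mathbf{u}\|$.)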
+ // There is nothing surprising here. Since + // the program will do a lot of time steps, + // we create an output file only every fifth + // time step. template void TwoPhaseFlowProblem::output_results () const { @@ -1214,23 +1214,23 @@ namespace Step21 std::vector solution_names; switch (dim) { - case 2: - solution_names.push_back ("u"); - solution_names.push_back ("v"); - solution_names.push_back ("p"); - solution_names.push_back ("S"); - break; - - case 3: - solution_names.push_back ("u"); - solution_names.push_back ("v"); - solution_names.push_back ("w"); - solution_names.push_back ("p"); - solution_names.push_back ("S"); - break; - - default: - Assert (false, ExcNotImplemented()); + case 2: + solution_names.push_back ("u"); + solution_names.push_back ("v"); + solution_names.push_back ("p"); + solution_names.push_back ("S"); + break; + + case 3: + solution_names.push_back ("u"); + solution_names.push_back ("v"); + solution_names.push_back ("w"); + solution_names.push_back ("p"); + solution_names.push_back ("S"); + break; + + default: + Assert (false, ExcNotImplemented()); } DataOut data_out; @@ -1249,53 +1249,53 @@ namespace Step21 - // @sect4{TwoPhaseFlowProblem::project_back_saturation} - - // In this function, we simply run over all - // saturation degrees of freedom and make - // sure that if they should have left the - // physically reasonable range, that they be - // reset to the interval $[0,1]$. To do this, - // we only have to loop over all saturation - // components of the solution vector; these - // are stored in the block 2 (block 0 are the - // velocities, block 1 are the pressures). - // - // It may be instructive to note that this - // function almost never triggers when the - // time step is chosen as mentioned in the - // introduction. However, if we choose the - // timestep only slightly larger, we get - // plenty of values outside the proper - // range. Strictly speaking, the function is - // therefore unnecessary if we choose the - // time step small enough. In a sense, the - // function is therefore only a safety device - // to avoid situations where our entire - // solution becomes unphysical because - // individual degrees of freedom have become - // unphysical a few time steps earlier. + // @sect4{TwoPhaseFlowProblem::project_back_saturation} + + // In this function, we simply run over all + // saturation degrees of freedom and make + // sure that if they should have left the + // physically reasonable range, that they be + // reset to the interval $[0,1]$. To do this, + // we only have to loop over all saturation + // components of the solution vector; these + // are stored in the block 2 (block 0 are the + // velocities, block 1 are the pressures). + // + // It may be instructive to note that this + // function almost never triggers when the + // time step is chosen as mentioned in the + // introduction. However, if we choose the + // timestep only slightly larger, we get + // plenty of values outside the proper + // range. Strictly speaking, the function is + // therefore unnecessary if we choose the + // time step small enough. In a sense, the + // function is therefore only a safety device + // to avoid situations where our entire + // solution becomes unphysical because + // individual degrees of freedom have become + // unphysical a few time steps earlier. 
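  // Stripped of the deal.II specifics, the operation applied below to every
  // saturation degree of freedom is nothing but a clamp to the unit
  // interval (a stand-alone sketch with a made-up name):

double clamp_to_unit_interval (const double s)
{
  return (s < 0 ? 0 : (s > 1 ? 1 : s));
}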
template void TwoPhaseFlowProblem::project_back_saturation () { for (unsigned int i=0; i 1) - solution.block(2)(i) = 1; + if (solution.block(2)(i) > 1) + solution.block(2)(i) = 1; } - // @sect4{TwoPhaseFlowProblem::get_maximal_velocity} + // @sect4{TwoPhaseFlowProblem::get_maximal_velocity} - // The following function is used in - // determining the maximal allowable time - // step. What it does is to loop over all - // quadrature points in the domain and find - // what the maximal magnitude of the velocity - // is. + // The following function is used in + // determining the maximal allowable time + // step. What it does is to loop over all + // quadrature points in the domain and find + // what the maximal magnitude of the velocity + // is. template double TwoPhaseFlowProblem::get_maximal_velocity () const @@ -1305,9 +1305,9 @@ namespace Step21 = quadrature_formula.size(); FEValues fe_values (fe, quadrature_formula, - update_values); + update_values); std::vector > solution_values(n_q_points, - Vector(dim+2)); + Vector(dim+2)); double max_velocity = 0; typename DoFHandler::active_cell_iterator @@ -1315,56 +1315,56 @@ namespace Step21 endc = dof_handler.end(); for (; cell!=endc; ++cell) { - fe_values.reinit (cell); - fe_values.get_function_values (solution, solution_values); - - for (unsigned int q=0; q velocity; - for (unsigned int i=0; i velocity; + for (unsigned int i=0; iConstraintMatrix() as the - // second argument. - // - // The second point worth mentioning is that - // we only compute the length of the present - // time step in the middle of solving the - // linear system corresponding to each time - // step. We can therefore output the present - // end time of a time step only at the end of - // the time step. + // @sect4{TwoPhaseFlowProblem::run} + + // This is the final function of our main + // class. Its brevity speaks for + // itself. There are only two points worth + // noting: First, the function projects the + // initial values onto the finite element + // space at the beginning; the + // VectorTools::project function doing this + // requires an argument indicating the + // hanging node constraints. We have none in + // this program (we compute on a uniformly + // refined mesh), but the function requires + // the argument anyway, of course. So we have + // to create a constraint object. In its + // original state, constraint objects are + // unsorted, and have to be sorted (using the + // ConstraintMatrix::close function) before + // they can be used. This is what we do here, + // and which is why we can't simply call the + // VectorTools::project function with an + // anonymous temporary object + // ConstraintMatrix() as the + // second argument. + // + // The second point worth mentioning is that + // we only compute the length of the present + // time step in the middle of solving the + // linear system corresponding to each time + // step. We can therefore output the present + // end time of a time step only at the end of + // the time step. 
template void TwoPhaseFlowProblem::run () { @@ -1375,10 +1375,10 @@ namespace Step21 constraints.close(); VectorTools::project (dof_handler, - constraints, - QGauss(degree+2), - InitialValues(), - old_solution); + constraints, + QGauss(degree+2), + InitialValues(), + old_solution); } timestep_number = 1; @@ -1386,21 +1386,21 @@ namespace Step21 do { - std::cout << "Timestep " << timestep_number - << std::endl; + std::cout << "Timestep " << timestep_number + << std::endl; - assemble_system (); + assemble_system (); - solve (); + solve (); - output_results (); + output_results (); - time += time_step; - ++timestep_number; - std::cout << " Now at t=" << time - << ", dt=" << time_step << '.' - << std::endl - << std::endl; + time += time_step; + ++timestep_number; + std::cout << " Now at t=" << time + << ", dt=" << time_step << '.' + << std::endl + << std::endl; } while (time <= 250); } diff --git a/deal.II/examples/step-22/step-22.cc b/deal.II/examples/step-22/step-22.cc index 4f7f5340ef..83a6f0cf26 100644 --- a/deal.II/examples/step-22/step-22.cc +++ b/deal.II/examples/step-22/step-22.cc @@ -92,8 +92,8 @@ namespace Step22 typedef SparseDirectUMFPACK type; }; - // And the ILU preconditioning in 3D, called - // by SparseILU: + // And the ILU preconditioning in 3D, called + // by SparseILU: template <> struct InnerPreconditioner<3> { @@ -101,20 +101,20 @@ namespace Step22 }; - // @sect3{The StokesProblem class template} - - // This is an adaptation of step-20, so the - // main class and the data types are the - // same as used there. In this example we - // also use adaptive grid refinement, which - // is handled in analogy to - // step-6. According to the discussion in - // the introduction, we are also going to - // use the ConstraintMatrix for - // implementing Dirichlet boundary - // conditions. Hence, we change the name - // hanging_node_constraints - // into constraints. + // @sect3{The StokesProblem class template} + + // This is an adaptation of step-20, so the + // main class and the data types are the + // same as used there. In this example we + // also use adaptive grid refinement, which + // is handled in analogy to + // step-6. According to the discussion in + // the introduction, we are also going to + // use the ConstraintMatrix for + // implementing Dirichlet boundary + // conditions. Hence, we change the name + // hanging_node_constraints + // into constraints. template class StokesProblem { @@ -143,68 +143,68 @@ namespace Step22 BlockVector solution; BlockVector system_rhs; - // This one is new: We shall use a - // so-called shared pointer structure to - // access the preconditioner. Shared - // pointers are essentially just a - // convenient form of pointers. Several - // shared pointers can point to the same - // object (just like regular pointers), - // but when the last shared pointer - // object to point to a preconditioner - // object is deleted (for example if a - // shared pointer object goes out of - // scope, if the class of which it is a - // member is destroyed, or if the pointer - // is assigned a different preconditioner - // object) then the preconditioner object - // pointed to is also destroyed. This - // ensures that we don't have to manually - // track in how many places a - // preconditioner object is still - // referenced, it can never create a - // memory leak, and can never produce a - // dangling pointer to an already - // destroyed object: + // This one is new: We shall use a + // so-called shared pointer structure to + // access the preconditioner. 
Shared + // pointers are essentially just a + // convenient form of pointers. Several + // shared pointers can point to the same + // object (just like regular pointers), + // but when the last shared pointer + // object to point to a preconditioner + // object is deleted (for example if a + // shared pointer object goes out of + // scope, if the class of which it is a + // member is destroyed, or if the pointer + // is assigned a different preconditioner + // object) then the preconditioner object + // pointed to is also destroyed. This + // ensures that we don't have to manually + // track in how many places a + // preconditioner object is still + // referenced, it can never create a + // memory leak, and can never produce a + // dangling pointer to an already + // destroyed object: std_cxx1x::shared_ptr::type> A_preconditioner; }; - // @sect3{Boundary values and right hand side} - - // As in step-20 and most other - // example programs, the next task is - // to define the data for the PDE: - // For the Stokes problem, we are - // going to use natural boundary - // values on parts of the boundary - // (i.e. homogenous Neumann-type) for - // which we won't have to do anything - // special (the homogeneity implies - // that the corresponding terms in - // the weak form are simply zero), - // and boundary conditions on the - // velocity (Dirichlet-type) on the - // rest of the boundary, as described - // in the introduction. - // - // In order to enforce the Dirichlet - // boundary values on the velocity, - // we will use the - // VectorTools::interpolate_boundary_values - // function as usual which requires - // us to write a function object with - // as many components as the finite - // element has. In other words, we - // have to define the function on the - // $(u,p)$-space, but we are going to - // filter out the pressure component - // when interpolating the boundary - // values. - - // The following function object is a - // representation of the boundary - // values described in the - // introduction: + // @sect3{Boundary values and right hand side} + + // As in step-20 and most other + // example programs, the next task is + // to define the data for the PDE: + // For the Stokes problem, we are + // going to use natural boundary + // values on parts of the boundary + // (i.e. homogenous Neumann-type) for + // which we won't have to do anything + // special (the homogeneity implies + // that the corresponding terms in + // the weak form are simply zero), + // and boundary conditions on the + // velocity (Dirichlet-type) on the + // rest of the boundary, as described + // in the introduction. + // + // In order to enforce the Dirichlet + // boundary values on the velocity, + // we will use the + // VectorTools::interpolate_boundary_values + // function as usual which requires + // us to write a function object with + // as many components as the finite + // element has. In other words, we + // have to define the function on the + // $(u,p)$-space, but we are going to + // filter out the pressure component + // when interpolating the boundary + // values. 
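  // Concretely, the "filtering" just mentioned will be done with a
  // component mask, a vector of booleans with one entry per vector
  // component in which only the velocity entries are left set. Written out
  // for the 2d case it reads as follows (a sketch; the setup_dofs function
  // further down does the same with dim instead of the literal 2):

#include <vector>

std::vector<bool> velocity_mask ()
{
  std::vector<bool> component_mask (2+1, true);  // two velocity components plus pressure
  component_mask[2] = false;                     // exclude the pressure from interpolation
  return component_mask;
}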
+ + // The following function object is a + // representation of the boundary + // values described in the + // introduction: template class BoundaryValues : public Function { @@ -212,20 +212,20 @@ namespace Step22 BoundaryValues () : Function(dim+1) {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; virtual void vector_value (const Point &p, - Vector &value) const; + Vector &value) const; }; template double BoundaryValues::value (const Point &p, - const unsigned int component) const + const unsigned int component) const { Assert (component < this->n_components, - ExcIndexRange (component, 0, this->n_components)); + ExcIndexRange (component, 0, this->n_components)); if (component == 0) return (p[0] < 0 ? -1 : (p[0] > 0 ? 1 : 0)); @@ -236,7 +236,7 @@ namespace Step22 template void BoundaryValues::vector_value (const Point &p, - Vector &values) const + Vector &values) const { for (unsigned int c=0; cn_components; ++c) values(c) = BoundaryValues::value (p, c); @@ -244,9 +244,9 @@ namespace Step22 - // We implement similar functions for - // the right hand side which for the - // current example is simply zero: + // We implement similar functions for + // the right hand side which for the + // current example is simply zero: template class RightHandSide : public Function { @@ -254,10 +254,10 @@ namespace Step22 RightHandSide () : Function(dim+1) {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; virtual void vector_value (const Point &p, - Vector &value) const; + Vector &value) const; }; @@ -265,7 +265,7 @@ namespace Step22 template double RightHandSide::value (const Point &/*p*/, - const unsigned int /*component*/) const + const unsigned int /*component*/) const { return 0; } @@ -274,56 +274,56 @@ namespace Step22 template void RightHandSide::vector_value (const Point &p, - Vector &values) const + Vector &values) const { for (unsigned int c=0; cn_components; ++c) values(c) = RightHandSide::value (p, c); } - // @sect3{Linear solvers and preconditioners} - - // The linear solvers and preconditioners are - // discussed extensively in the - // introduction. Here, we create the - // respective objects that will be used. - - // @sect4{The InverseMatrix class template} - - // The InverseMatrix - // class represents the data - // structure for an inverse - // matrix. It is derived from the one - // in step-20. The only difference is - // that we now do include a - // preconditioner to the matrix since - // we will apply this class to - // different kinds of matrices that - // will require different - // preconditioners (in step-20 we did - // not use a preconditioner in this - // class at all). The types of matrix - // and preconditioner are passed to - // this class via template - // parameters, and matrix and - // preconditioner objects of these - // types will then be passed to the - // constructor when an - // InverseMatrix object - // is created. The member function - // vmult is, as in - // step-20, a multiplication with a - // vector, obtained by solving a - // linear system: + // @sect3{Linear solvers and preconditioners} + + // The linear solvers and preconditioners are + // discussed extensively in the + // introduction. Here, we create the + // respective objects that will be used. + + // @sect4{The InverseMatrix class template} + + // The InverseMatrix + // class represents the data + // structure for an inverse + // matrix. 
It is derived from the one + // in step-20. The only difference is + // that we now do include a + // preconditioner to the matrix since + // we will apply this class to + // different kinds of matrices that + // will require different + // preconditioners (in step-20 we did + // not use a preconditioner in this + // class at all). The types of matrix + // and preconditioner are passed to + // this class via template + // parameters, and matrix and + // preconditioner objects of these + // types will then be passed to the + // constructor when an + // InverseMatrix object + // is created. The member function + // vmult is, as in + // step-20, a multiplication with a + // vector, obtained by solving a + // linear system: template class InverseMatrix : public Subscriptor { public: InverseMatrix (const Matrix &m, - const Preconditioner &preconditioner); + const Preconditioner &preconditioner); void vmult (Vector &dst, - const Vector &src) const; + const Vector &src) const; private: const SmartPointer matrix; @@ -333,33 +333,33 @@ namespace Step22 template InverseMatrix::InverseMatrix (const Matrix &m, - const Preconditioner &preconditioner) - : - matrix (&m), - preconditioner (&preconditioner) + const Preconditioner &preconditioner) + : + matrix (&m), + preconditioner (&preconditioner) {} - // This is the implementation of the - // vmult function. - - // In this class we use a rather large - // tolerance for the solver control. The - // reason for this is that the function is - // used very frequently, and hence, any - // additional effort to make the residual - // in the CG solve smaller makes the - // solution more expensive. Note that we do - // not only use this class as a - // preconditioner for the Schur complement, - // but also when forming the inverse of the - // Laplace matrix – which is hence - // directly responsible for the accuracy of - // the solution itself, so we can't choose - // a too large tolerance, either. + // This is the implementation of the + // vmult function. + + // In this class we use a rather large + // tolerance for the solver control. The + // reason for this is that the function is + // used very frequently, and hence, any + // additional effort to make the residual + // in the CG solve smaller makes the + // solution more expensive. Note that we do + // not only use this class as a + // preconditioner for the Schur complement, + // but also when forming the inverse of the + // Laplace matrix – which is hence + // directly responsible for the accuracy of + // the solution itself, so we can't choose + // a too large tolerance, either. template void InverseMatrix::vmult (Vector &dst, - const Vector &src) const + const Vector &src) const { SolverControl solver_control (src.size(), 1e-6*src.l2_norm()); SolverCG<> cg (solver_control); @@ -370,30 +370,30 @@ namespace Step22 } - // @sect4{The SchurComplement class template} - - // This class implements the Schur complement - // discussed in the introduction. It is in - // analogy to step-20. Though, we now call - // it with a template parameter - // Preconditioner in order to - // access that when specifying the respective - // type of the inverse matrix class. As a - // consequence of the definition above, the - // declaration InverseMatrix now - // contains the second template parameter - // for a preconditioner class as above, which - // affects the SmartPointer - // object m_inverse as well. 
+ // @sect4{The SchurComplement class template} + + // This class implements the Schur complement + // discussed in the introduction. It is in + // analogy to step-20. Though, we now call + // it with a template parameter + // Preconditioner in order to + // access that when specifying the respective + // type of the inverse matrix class. As a + // consequence of the definition above, the + // declaration InverseMatrix now + // contains the second template parameter + // for a preconditioner class as above, which + // affects the SmartPointer + // object m_inverse as well. template class SchurComplement : public Subscriptor { public: SchurComplement (const BlockSparseMatrix &system_matrix, - const InverseMatrix, Preconditioner> &A_inverse); + const InverseMatrix, Preconditioner> &A_inverse); void vmult (Vector &dst, - const Vector &src) const; + const Vector &src) const; private: const SmartPointer > system_matrix; @@ -407,18 +407,18 @@ namespace Step22 template SchurComplement:: SchurComplement (const BlockSparseMatrix &system_matrix, - const InverseMatrix,Preconditioner> &A_inverse) - : - system_matrix (&system_matrix), - A_inverse (&A_inverse), - tmp1 (system_matrix.block(0,0).m()), - tmp2 (system_matrix.block(0,0).m()) + const InverseMatrix,Preconditioner> &A_inverse) + : + system_matrix (&system_matrix), + A_inverse (&A_inverse), + tmp1 (system_matrix.block(0,0).m()), + tmp2 (system_matrix.block(0,0).m()) {} template void SchurComplement::vmult (Vector &dst, - const Vector &src) const + const Vector &src) const { system_matrix->block(0,1).vmult (tmp1, src); A_inverse->vmult (tmp2, tmp1); @@ -426,137 +426,137 @@ namespace Step22 } - // @sect3{StokesProblem class implementation} - - // @sect4{StokesProblem::StokesProblem} - - // The constructor of this class - // looks very similar to the one of - // step-20. The constructor - // initializes the variables for the - // polynomial degree, triangulation, - // finite element system and the dof - // handler. The underlying polynomial - // functions are of order - // degree+1 for the - // vector-valued velocity components - // and of order degree - // for the pressure. This gives the - // LBB-stable element pair - // $Q_{degree+1}^d\times Q_{degree}$, - // often referred to as the - // Taylor-Hood element. - // - // Note that we initialize the triangulation - // with a MeshSmoothing argument, which - // ensures that the refinement of cells is - // done in a way that the approximation of - // the PDE solution remains well-behaved - // (problems arise if grids are too - // unstructered), see the documentation of - // Triangulation::MeshSmoothing - // for details. + // @sect3{StokesProblem class implementation} + + // @sect4{StokesProblem::StokesProblem} + + // The constructor of this class + // looks very similar to the one of + // step-20. The constructor + // initializes the variables for the + // polynomial degree, triangulation, + // finite element system and the dof + // handler. The underlying polynomial + // functions are of order + // degree+1 for the + // vector-valued velocity components + // and of order degree + // for the pressure. This gives the + // LBB-stable element pair + // $Q_{degree+1}^d\times Q_{degree}$, + // often referred to as the + // Taylor-Hood element. 
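  // For instance, for degree==1 this is the classical lowest-order
  // Taylor-Hood pair: biquadratic ($Q_2$) velocities and a bilinear
  // ($Q_1$) pressure. Written out for 2d, the element built by the
  // constructor below is then equivalent to the following (a sketch only):

FESystem<2> taylor_hood_element (FE_Q<2>(2), 2,   // dim velocity components of degree+1 = 2
                                 FE_Q<2>(1), 1);  // one pressure component of degree 1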
+ // + // Note that we initialize the triangulation + // with a MeshSmoothing argument, which + // ensures that the refinement of cells is + // done in a way that the approximation of + // the PDE solution remains well-behaved + // (problems arise if grids are too + // unstructered), see the documentation of + // Triangulation::MeshSmoothing + // for details. template StokesProblem::StokesProblem (const unsigned int degree) - : - degree (degree), - triangulation (Triangulation::maximum_smoothing), - fe (FE_Q(degree+1), dim, - FE_Q(degree), 1), - dof_handler (triangulation) + : + degree (degree), + triangulation (Triangulation::maximum_smoothing), + fe (FE_Q(degree+1), dim, + FE_Q(degree), 1), + dof_handler (triangulation) {} - // @sect4{StokesProblem::setup_dofs} - - // Given a mesh, this function - // associates the degrees of freedom - // with it and creates the - // corresponding matrices and - // vectors. At the beginning it also - // releases the pointer to the - // preconditioner object (if the - // shared pointer pointed at anything - // at all at this point) since it - // will definitely not be needed any - // more after this point and will - // have to be re-computed after - // assembling the matrix, and unties - // the sparse matrix from its - // sparsity pattern object. - // - // We then proceed with distributing - // degrees of freedom and renumbering - // them: In order to make the ILU - // preconditioner (in 3D) work - // efficiently, it is important to - // enumerate the degrees of freedom - // in such a way that it reduces the - // bandwidth of the matrix, or maybe - // more importantly: in such a way - // that the ILU is as close as - // possible to a real LU - // decomposition. On the other hand, - // we need to preserve the block - // structure of velocity and pressure - // already seen in in step-20 and - // step-21. This is done in two - // steps: First, all dofs are - // renumbered to improve the ILU and - // then we renumber once again by - // components. Since - // DoFRenumbering::component_wise - // does not touch the renumbering - // within the individual blocks, the - // basic renumbering from the first - // step remains. As for how the - // renumber degrees of freedom to - // improve the ILU: deal.II has a - // number of algorithms that attempt - // to find orderings to improve ILUs, - // or reduce the bandwidth of - // matrices, or optimize some other - // aspect. The DoFRenumbering - // namespace shows a comparison of - // the results we obtain with several - // of these algorithms based on the - // testcase discussed here in this - // tutorial program. Here, we will - // use the traditional Cuthill-McKee - // algorithm already used in some of - // the previous tutorial programs. - // In the - // section on improved ILU - // we're going to discuss this issue - // in more detail. - - // There is one more change compared - // to previous tutorial programs: - // There is no reason in sorting the - // dim velocity - // components individually. In fact, - // rather than first enumerating all - // $x$-velocities, then all - // $y$-velocities, etc, we would like - // to keep all velocities at the same - // location together and only - // separate between velocities (all - // components) and pressures. 
By - // default, this is not what the - // DoFRenumbering::component_wise - // function does: it treats each - // vector component separately; what - // we have to do is group several - // components into "blocks" and pass - // this block structure to that - // function. Consequently, we - // allocate a vector - // block_component with - // as many elements as there are - // components and describe all - // velocity components to correspond - // to block 0, while the pressure - // component will form block 1: + // @sect4{StokesProblem::setup_dofs} + + // Given a mesh, this function + // associates the degrees of freedom + // with it and creates the + // corresponding matrices and + // vectors. At the beginning it also + // releases the pointer to the + // preconditioner object (if the + // shared pointer pointed at anything + // at all at this point) since it + // will definitely not be needed any + // more after this point and will + // have to be re-computed after + // assembling the matrix, and unties + // the sparse matrix from its + // sparsity pattern object. + // + // We then proceed with distributing + // degrees of freedom and renumbering + // them: In order to make the ILU + // preconditioner (in 3D) work + // efficiently, it is important to + // enumerate the degrees of freedom + // in such a way that it reduces the + // bandwidth of the matrix, or maybe + // more importantly: in such a way + // that the ILU is as close as + // possible to a real LU + // decomposition. On the other hand, + // we need to preserve the block + // structure of velocity and pressure + // already seen in in step-20 and + // step-21. This is done in two + // steps: First, all dofs are + // renumbered to improve the ILU and + // then we renumber once again by + // components. Since + // DoFRenumbering::component_wise + // does not touch the renumbering + // within the individual blocks, the + // basic renumbering from the first + // step remains. As for how the + // renumber degrees of freedom to + // improve the ILU: deal.II has a + // number of algorithms that attempt + // to find orderings to improve ILUs, + // or reduce the bandwidth of + // matrices, or optimize some other + // aspect. The DoFRenumbering + // namespace shows a comparison of + // the results we obtain with several + // of these algorithms based on the + // testcase discussed here in this + // tutorial program. Here, we will + // use the traditional Cuthill-McKee + // algorithm already used in some of + // the previous tutorial programs. + // In the + // section on improved ILU + // we're going to discuss this issue + // in more detail. + + // There is one more change compared + // to previous tutorial programs: + // There is no reason in sorting the + // dim velocity + // components individually. In fact, + // rather than first enumerating all + // $x$-velocities, then all + // $y$-velocities, etc, we would like + // to keep all velocities at the same + // location together and only + // separate between velocities (all + // components) and pressures. By + // default, this is not what the + // DoFRenumbering::component_wise + // function does: it treats each + // vector component separately; what + // we have to do is group several + // components into "blocks" and pass + // this block structure to that + // function. 
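  // Written out for the 2d case, the grouping described here amounts to the
  // following (a sketch; the code in setup_dofs below uses dim instead of
  // the literal 2):

#include <vector>

std::vector<unsigned int> velocity_pressure_blocks ()
{
  std::vector<unsigned int> block_component (2+1, 0);  // both velocity components -> block 0
  block_component[2] = 1;                              // the pressure             -> block 1
  return block_component;
}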
Consequently, we + // allocate a vector + // block_component with + // as many elements as there are + // components and describe all + // velocity components to correspond + // to block 0, while the pressure + // component will form block 1: template void StokesProblem::setup_dofs () { @@ -570,161 +570,161 @@ namespace Step22 block_component[dim] = 1; DoFRenumbering::component_wise (dof_handler, block_component); - // Now comes the implementation of - // Dirichlet boundary conditions, which - // should be evident after the discussion - // in the introduction. All that changed is - // that the function already appears in the - // setup functions, whereas we were used to - // see it in some assembly routine. Further - // down below where we set up the mesh, we - // will associate the top boundary where we - // impose Dirichlet boundary conditions - // with boundary indicator 1. We will have - // to pass this boundary indicator as - // second argument to the function below - // interpolating boundary values. There is - // one more thing, though. The function - // describing the Dirichlet conditions was - // defined for all components, both - // velocity and pressure. However, the - // Dirichlet conditions are to be set for - // the velocity only. To this end, we use - // a component_mask that - // filters out the pressure component, so - // that the condensation is performed on - // velocity degrees of freedom only. Since - // we use adaptively refined grids the - // constraint matrix needs to be first - // filled with hanging node constraints - // generated from the DoF handler. Note the - // order of the two functions — we - // first compute the hanging node - // constraints, and then insert the - // boundary values into the constraint - // matrix. This makes sure that we respect - // H1 conformity on boundaries - // with hanging nodes (in three space - // dimensions), where the hanging node - // needs to dominate the Dirichlet boundary - // values. + // Now comes the implementation of + // Dirichlet boundary conditions, which + // should be evident after the discussion + // in the introduction. All that changed is + // that the function already appears in the + // setup functions, whereas we were used to + // see it in some assembly routine. Further + // down below where we set up the mesh, we + // will associate the top boundary where we + // impose Dirichlet boundary conditions + // with boundary indicator 1. We will have + // to pass this boundary indicator as + // second argument to the function below + // interpolating boundary values. There is + // one more thing, though. The function + // describing the Dirichlet conditions was + // defined for all components, both + // velocity and pressure. However, the + // Dirichlet conditions are to be set for + // the velocity only. To this end, we use + // a component_mask that + // filters out the pressure component, so + // that the condensation is performed on + // velocity degrees of freedom only. Since + // we use adaptively refined grids the + // constraint matrix needs to be first + // filled with hanging node constraints + // generated from the DoF handler. Note the + // order of the two functions — we + // first compute the hanging node + // constraints, and then insert the + // boundary values into the constraint + // matrix. This makes sure that we respect + // H1 conformity on boundaries + // with hanging nodes (in three space + // dimensions), where the hanging node + // needs to dominate the Dirichlet boundary + // values. 
{ constraints.clear (); std::vector component_mask (dim+1, true); component_mask[dim] = false; DoFTools::make_hanging_node_constraints (dof_handler, - constraints); + constraints); VectorTools::interpolate_boundary_values (dof_handler, - 1, - BoundaryValues(), - constraints, - component_mask); + 1, + BoundaryValues(), + constraints, + component_mask); } constraints.close (); - // In analogy to step-20, we count the dofs - // in the individual components. We could - // do this in the same way as there, but we - // want to operate on the block structure - // we used already for the renumbering: The - // function - // DoFTools::count_dofs_per_block - // does the same as - // DoFTools::count_dofs_per_component, - // but now grouped as velocity and pressure - // block via block_component. + // In analogy to step-20, we count the dofs + // in the individual components. We could + // do this in the same way as there, but we + // want to operate on the block structure + // we used already for the renumbering: The + // function + // DoFTools::count_dofs_per_block + // does the same as + // DoFTools::count_dofs_per_component, + // but now grouped as velocity and pressure + // block via block_component. std::vector dofs_per_block (2); DoFTools::count_dofs_per_block (dof_handler, dofs_per_block, block_component); const unsigned int n_u = dofs_per_block[0], - n_p = dofs_per_block[1]; + n_p = dofs_per_block[1]; std::cout << " Number of active cells: " - << triangulation.n_active_cells() - << std::endl - << " Number of degrees of freedom: " - << dof_handler.n_dofs() - << " (" << n_u << '+' << n_p << ')' - << std::endl; - - // The next task is to allocate a - // sparsity pattern for the system matrix - // we will create. We could do this in - // the same way as in step-20, - // i.e. directly build an object of type - // SparsityPattern through - // DoFTools::make_sparsity_pattern. However, - // there is a major reason not to do so: - // In 3D, the function - // DoFTools::max_couplings_between_dofs - // yields a conservative but rather large - // number for the coupling between the - // individual dofs, so that the memory - // initially provided for the creation of - // the sparsity pattern of the matrix is - // far too much -- so much actually that - // the initial sparsity pattern won't - // even fit into the physical memory of - // most systems already for - // moderately-sized 3D problems, see also - // the discussion in step-18. Instead, - // we first build a temporary object that - // uses a different data structure that - // doesn't require allocating more memory - // than necessary but isn't suitable for - // use as a basis of SparseMatrix or - // BlockSparseMatrix objects; in a second - // step we then copy this object into an - // object of BlockSparsityPattern. This - // is entirely analgous to what we - // already did in step-11 and step-18. - // - // There is one snag again here, though: - // it turns out that using the - // CompressedSparsityPattern (or the - // block version - // BlockCompressedSparsityPattern we - // would use here) has a bottleneck that - // makes the algorithm to build the - // sparsity pattern be quadratic in the - // number of degrees of freedom. This - // doesn't become noticable until we get - // well into the range of several 100,000 - // degrees of freedom, but eventually - // dominates the setup of the linear - // system when we get to more than a - // million degrees of freedom. 
This is - // due to the data structures used in the - // CompressedSparsityPattern class, - // nothing that can easily be - // changed. Fortunately, there is an easy - // solution: the - // CompressedSimpleSparsityPattern class - // (and its block variant - // BlockCompressedSimpleSparsityPattern) - // has exactly the same interface, uses a - // different %internal data structure and - // is linear in the number of degrees of - // freedom and therefore much more - // efficient for large problems. As - // another alternative, we could also - // have chosen the class - // BlockCompressedSetSparsityPattern that - // uses yet another strategy for %internal - // memory management. Though, that class - // turns out to be more memory-demanding - // than - // BlockCompressedSimpleSparsityPattern - // for this example. - // - // Consequently, this is the class that - // we will use for our intermediate - // sparsity representation. All this is - // done inside a new scope, which means - // that the memory of csp - // will be released once the information - // has been copied to - // sparsity_pattern. + << triangulation.n_active_cells() + << std::endl + << " Number of degrees of freedom: " + << dof_handler.n_dofs() + << " (" << n_u << '+' << n_p << ')' + << std::endl; + + // The next task is to allocate a + // sparsity pattern for the system matrix + // we will create. We could do this in + // the same way as in step-20, + // i.e. directly build an object of type + // SparsityPattern through + // DoFTools::make_sparsity_pattern. However, + // there is a major reason not to do so: + // In 3D, the function + // DoFTools::max_couplings_between_dofs + // yields a conservative but rather large + // number for the coupling between the + // individual dofs, so that the memory + // initially provided for the creation of + // the sparsity pattern of the matrix is + // far too much -- so much actually that + // the initial sparsity pattern won't + // even fit into the physical memory of + // most systems already for + // moderately-sized 3D problems, see also + // the discussion in step-18. Instead, + // we first build a temporary object that + // uses a different data structure that + // doesn't require allocating more memory + // than necessary but isn't suitable for + // use as a basis of SparseMatrix or + // BlockSparseMatrix objects; in a second + // step we then copy this object into an + // object of BlockSparsityPattern. This + // is entirely analgous to what we + // already did in step-11 and step-18. + // + // There is one snag again here, though: + // it turns out that using the + // CompressedSparsityPattern (or the + // block version + // BlockCompressedSparsityPattern we + // would use here) has a bottleneck that + // makes the algorithm to build the + // sparsity pattern be quadratic in the + // number of degrees of freedom. This + // doesn't become noticable until we get + // well into the range of several 100,000 + // degrees of freedom, but eventually + // dominates the setup of the linear + // system when we get to more than a + // million degrees of freedom. This is + // due to the data structures used in the + // CompressedSparsityPattern class, + // nothing that can easily be + // changed. 
Fortunately, there is an easy + // solution: the + // CompressedSimpleSparsityPattern class + // (and its block variant + // BlockCompressedSimpleSparsityPattern) + // has exactly the same interface, uses a + // different %internal data structure and + // is linear in the number of degrees of + // freedom and therefore much more + // efficient for large problems. As + // another alternative, we could also + // have chosen the class + // BlockCompressedSetSparsityPattern that + // uses yet another strategy for %internal + // memory management. Though, that class + // turns out to be more memory-demanding + // than + // BlockCompressedSimpleSparsityPattern + // for this example. + // + // Consequently, this is the class that + // we will use for our intermediate + // sparsity representation. All this is + // done inside a new scope, which means + // that the memory of csp + // will be released once the information + // has been copied to + // sparsity_pattern. { BlockCompressedSimpleSparsityPattern csp (2,2); @@ -739,10 +739,10 @@ namespace Step22 sparsity_pattern.copy_from (csp); } - // Finally, the system matrix, - // solution and right hand side are - // created from the block - // structure as in step-20: + // Finally, the system matrix, + // solution and right hand side are + // created from the block + // structure as in step-20: system_matrix.reinit (sparsity_pattern); solution.reinit (2); @@ -757,16 +757,16 @@ namespace Step22 } - // @sect4{StokesProblem::assemble_system} + // @sect4{StokesProblem::assemble_system} - // The assembly process follows the - // discussion in step-20 and in the - // introduction. We use the well-known - // abbreviations for the data structures - // that hold the local matrix, right - // hand side, and global - // numbering of the degrees of freedom - // for the present cell. + // The assembly process follows the + // discussion in step-20 and in the + // introduction. We use the well-known + // abbreviations for the data structures + // that hold the local matrix, right + // hand side, and global + // numbering of the degrees of freedom + // for the present cell. template void StokesProblem::assemble_system () { @@ -776,10 +776,10 @@ namespace Step22 QGauss quadrature_formula(degree+2); FEValues fe_values (fe, quadrature_formula, - update_values | - update_quadrature_points | - update_JxW_values | - update_gradients); + update_values | + update_quadrature_points | + update_JxW_values | + update_gradients); const unsigned int dofs_per_cell = fe.dofs_per_cell; @@ -792,65 +792,65 @@ namespace Step22 const RightHandSide right_hand_side; std::vector > rhs_values (n_q_points, - Vector(dim+1)); + Vector(dim+1)); - // Next, we need two objects that work as - // extractors for the FEValues - // object. Their use is explained in detail - // in the report on @ref vector_valued : + // Next, we need two objects that work as + // extractors for the FEValues + // object. Their use is explained in detail + // in the report on @ref vector_valued : const FEValuesExtractors::Vector velocities (0); const FEValuesExtractors::Scalar pressure (dim); - // As an extension over step-20 and - // step-21, we include a few - // optimizations that make assembly - // much faster for this particular - // problem. 
The improvements are - // based on the observation that we - // do a few calculations too many - // times when we do as in step-20: - // The symmetric gradient actually - // has dofs_per_cell - // different values per quadrature - // point, but we extract it - // dofs_per_cell*dofs_per_cell - // times from the FEValues object - - // for both the loop over - // i and the inner - // loop over j. In 3d, - // that means evaluating it - // $89^2=7921$ instead of $89$ - // times, a not insignificant - // difference. - // - // So what we're - // going to do here is to avoid - // such repeated calculations by - // getting a vector of rank-2 - // tensors (and similarly for - // the divergence and the basis - // function value on pressure) - // at the quadrature point prior - // to starting the loop over the - // dofs on the cell. First, we - // create the respective objects - // that will hold these - // values. Then, we start the - // loop over all cells and the loop - // over the quadrature points, - // where we first extract these - // values. There is one more - // optimization we implement here: - // the local matrix (as well as - // the global one) is going to - // be symmetric, since all - // the operations involved are - // symmetric with respect to $i$ - // and $j$. This is implemented by - // simply running the inner loop - // not to dofs_per_cell, - // but only up to i, - // the index of the outer loop. + // As an extension over step-20 and + // step-21, we include a few + // optimizations that make assembly + // much faster for this particular + // problem. The improvements are + // based on the observation that we + // do a few calculations too many + // times when we do as in step-20: + // The symmetric gradient actually + // has dofs_per_cell + // different values per quadrature + // point, but we extract it + // dofs_per_cell*dofs_per_cell + // times from the FEValues object - + // for both the loop over + // i and the inner + // loop over j. In 3d, + // that means evaluating it + // $89^2=7921$ instead of $89$ + // times, a not insignificant + // difference. + // + // So what we're + // going to do here is to avoid + // such repeated calculations by + // getting a vector of rank-2 + // tensors (and similarly for + // the divergence and the basis + // function value on pressure) + // at the quadrature point prior + // to starting the loop over the + // dofs on the cell. First, we + // create the respective objects + // that will hold these + // values. Then, we start the + // loop over all cells and the loop + // over the quadrature points, + // where we first extract these + // values. There is one more + // optimization we implement here: + // the local matrix (as well as + // the global one) is going to + // be symmetric, since all + // the operations involved are + // symmetric with respect to $i$ + // and $j$. This is implemented by + // simply running the inner loop + // not to dofs_per_cell, + // but only up to i, + // the index of the outer loop. 
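                                   // [Editorial sketch, not part of the patch:
                                   // the two optimizations described above --
                                   // extracting shape function data once per
                                   // quadrature point and filling only half of
                                   // the symmetric local matrix -- shown with
                                   // plain arrays instead of FEValues. The shape
                                   // values and weights below are made up and
                                   // only serve to show the loop structure.]
#include <iostream>
#include <vector>

int main ()
{
  const unsigned int dofs_per_cell = 4, n_q_points = 3;
  std::vector<std::vector<double> > local_matrix
    (dofs_per_cell, std::vector<double> (dofs_per_cell, 0.));

                                   // Stand-ins for the cached per-q-point data
                                   // (phi_grads_u, div_phi_u, phi_p, ...):
  std::vector<double> phi (dofs_per_cell);
  std::vector<double> JxW (n_q_points, 0.5);

  for (unsigned int q=0; q<n_q_points; ++q)
    {
                                   // Extract shape function data once per
                                   // quadrature point, not once per (i,j) pair...
      for (unsigned int k=0; k<dofs_per_cell; ++k)
        phi[k] = 1. + 0.1*k + 0.01*q;

                                   // ... and only build the lower triangle of
                                   // the symmetric local matrix (j <= i).
      for (unsigned int i=0; i<dofs_per_cell; ++i)
        for (unsigned int j=0; j<=i; ++j)
          local_matrix[i][j] += phi[i] * phi[j] * JxW[q];
    }

                                   // Copy the lower triangle into the so far
                                   // untouched upper triangle ("flipping the
                                   // indices" as described above).
  for (unsigned int i=0; i<dofs_per_cell; ++i)
    for (unsigned int j=i+1; j<dofs_per_cell; ++j)
      local_matrix[i][j] = local_matrix[j][i];

  for (unsigned int i=0; i<dofs_per_cell; ++i)
    {
      for (unsigned int j=0; j<dofs_per_cell; ++j)
        std::cout << local_matrix[i][j] << ' ';
      std::cout << std::endl;
    }
}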
std::vector > phi_grads_u (dofs_per_cell); std::vector div_phi_u (dofs_per_cell); std::vector phi_p (dofs_per_cell); @@ -860,134 +860,134 @@ namespace Step22 endc = dof_handler.end(); for (; cell!=endc; ++cell) { - fe_values.reinit (cell); - local_matrix = 0; - local_rhs = 0; - - right_hand_side.vector_value_list(fe_values.get_quadrature_points(), - rhs_values); - - for (unsigned int q=0; q phi_p[i] - // * phi_p[j] , yielding a - // pressure mass matrix in the - // $(1,1)$ block of the matrix as - // discussed in the - // introduction. That this term only - // ends up in the $(1,1)$ block stems - // from the fact that both of the - // factors in phi_p[i] * - // phi_p[j] are only non-zero - // when all the other terms vanish - // (and the other way around). - // - // Note also that operator* is - // overloaded for symmetric - // tensors, yielding the scalar - // product between the two - // tensors in the first line of - // the local matrix - // contribution. - - // Before we can write the local data - // into the global matrix (and - // simultaneously use the - // ConstraintMatrix object to apply - // Dirichlet boundary conditions and - // eliminate hanging node - // constraints, as we discussed in - // the introduction), we have to be - // careful about one thing, - // though. We have only build up half - // of the local matrix because of - // symmetry, but we're going to save - // the full system matrix in order to - // use the standard functions for - // solution. This is done by flipping - // the indices in case we are - // pointing into the empty part of - // the local matrix. - for (unsigned int i=0; iget_dof_indices (local_dof_indices); - constraints.distribute_local_to_global (local_matrix, local_rhs, - local_dof_indices, - system_matrix, system_rhs); + fe_values.reinit (cell); + local_matrix = 0; + local_rhs = 0; + + right_hand_side.vector_value_list(fe_values.get_quadrature_points(), + rhs_values); + + for (unsigned int q=0; q phi_p[i] + // * phi_p[j] , yielding a + // pressure mass matrix in the + // $(1,1)$ block of the matrix as + // discussed in the + // introduction. That this term only + // ends up in the $(1,1)$ block stems + // from the fact that both of the + // factors in phi_p[i] * + // phi_p[j] are only non-zero + // when all the other terms vanish + // (and the other way around). + // + // Note also that operator* is + // overloaded for symmetric + // tensors, yielding the scalar + // product between the two + // tensors in the first line of + // the local matrix + // contribution. + + // Before we can write the local data + // into the global matrix (and + // simultaneously use the + // ConstraintMatrix object to apply + // Dirichlet boundary conditions and + // eliminate hanging node + // constraints, as we discussed in + // the introduction), we have to be + // careful about one thing, + // though. We have only build up half + // of the local matrix because of + // symmetry, but we're going to save + // the full system matrix in order to + // use the standard functions for + // solution. This is done by flipping + // the indices in case we are + // pointing into the empty part of + // the local matrix. + for (unsigned int i=0; iget_dof_indices (local_dof_indices); + constraints.distribute_local_to_global (local_matrix, local_rhs, + local_dof_indices, + system_matrix, system_rhs); } - // Before we're going to solve this - // linear system, we generate a - // preconditioner for the - // velocity-velocity matrix, i.e., - // block(0,0) in the - // system matrix. 
As mentioned - // above, this depends on the - // spatial dimension. Since the two - // classes described by the - // InnerPreconditioner::type - // typedef have the same interface, - // we do not have to do anything - // different whether we want to use - // a sparse direct solver or an - // ILU: + // Before we're going to solve this + // linear system, we generate a + // preconditioner for the + // velocity-velocity matrix, i.e., + // block(0,0) in the + // system matrix. As mentioned + // above, this depends on the + // spatial dimension. Since the two + // classes described by the + // InnerPreconditioner::type + // typedef have the same interface, + // we do not have to do anything + // different whether we want to use + // a sparse direct solver or an + // ILU: std::cout << " Computing preconditioner..." << std::endl << std::flush; A_preconditioner = std_cxx1x::shared_ptr::type>(new typename InnerPreconditioner::type()); A_preconditioner->initialize (system_matrix.block(0,0), - typename InnerPreconditioner::type::AdditionalData()); + typename InnerPreconditioner::type::AdditionalData()); } - // @sect4{StokesProblem::solve} + // @sect4{StokesProblem::solve} - // After the discussion in the introduction - // and the definition of the respective - // classes above, the implementation of the - // solve function is rather - // straigt-forward and done in a similar way - // as in step-20. To start with, we need an - // object of the InverseMatrix - // class that represents the inverse of the - // matrix A. As described in the - // introduction, the inverse is generated - // with the help of an inner preconditioner - // of type - // InnerPreconditioner::type. + // After the discussion in the introduction + // and the definition of the respective + // classes above, the implementation of the + // solve function is rather + // straigt-forward and done in a similar way + // as in step-20. To start with, we need an + // object of the InverseMatrix + // class that represents the inverse of the + // matrix A. As described in the + // introduction, the inverse is generated + // with the help of an inner preconditioner + // of type + // InnerPreconditioner::type. template void StokesProblem::solve () { @@ -996,14 +996,14 @@ namespace Step22 A_inverse (system_matrix.block(0,0), *A_preconditioner); Vector tmp (solution.block(0).size()); - // This is as in step-20. We generate the - // right hand side $B A^{-1} F - G$ for the - // Schur complement and an object that - // represents the respective linear - // operation $B A^{-1} B^T$, now with a - // template parameter indicating the - // preconditioner - in accordance with the - // definition of the class. + // This is as in step-20. We generate the + // right hand side $B A^{-1} F - G$ for the + // Schur complement and an object that + // represents the respective linear + // operation $B A^{-1} B^T$, now with a + // template parameter indicating the + // preconditioner - in accordance with the + // definition of the class. { Vector schur_rhs (solution.block(1).size()); A_inverse.vmult (tmp, system_rhs.block(0)); @@ -1011,98 +1011,98 @@ namespace Step22 schur_rhs -= system_rhs.block(1); SchurComplement::type> - schur_complement (system_matrix, A_inverse); + schur_complement (system_matrix, A_inverse); - // The usual control structures for - // the solver call are created... + // The usual control structures for + // the solver call are created... 
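                                   // [Editorial sketch, not part of the patch:
                                   // the Schur complement procedure described
                                   // above, carried out by hand on a tiny block
                                   // system with A = diag(2,4) and B = (1 1),
                                   // using explicit inverses instead of the
                                   // InverseMatrix and SchurComplement classes.
                                   // The numbers are arbitrary and only chosen
                                   // so the arithmetic can be checked by hand.]
#include <iostream>

int main ()
{
                                   // Block system  [ A  B^T ] [u]   [F]
                                   //               [ B   0  ] [p] = [G]
  const double A[2] = {2., 4.};    // diagonal of the velocity block A
  const double B[2] = {1., 1.};
  const double F[2] = {3., 8.};
  const double G    = 1.;

                                   // Schur complement right hand side
                                   // B A^{-1} F - G ...
  const double schur_rhs = B[0]*F[0]/A[0] + B[1]*F[1]/A[1] - G;
                                   // ... and the (here scalar) Schur complement
                                   // S = B A^{-1} B^T.
  const double S = B[0]*B[0]/A[0] + B[1]*B[1]/A[1];

                                   // First solve for the pressure, then
                                   // back-substitute for the velocity:
  const double p = schur_rhs / S;
  const double u[2] = {(F[0] - B[0]*p)/A[0],
                       (F[1] - B[1]*p)/A[1]};

  std::cout << "p = " << p
            << ",  u = (" << u[0] << ", " << u[1] << ")"
            << ",  B u = " << B[0]*u[0] + B[1]*u[1]
            << " (should equal G = " << G << ")" << std::endl;
}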
SolverControl solver_control (solution.block(1).size(), - 1e-6*schur_rhs.l2_norm()); + 1e-6*schur_rhs.l2_norm()); SolverCG<> cg (solver_control); - // Now to the preconditioner to the - // Schur complement. As explained in - // the introduction, the - // preconditioning is done by a mass - // matrix in the pressure variable. It - // is stored in the $(1,1)$ block of - // the system matrix (that is not used - // anywhere else but in - // preconditioning). - // - // Actually, the solver needs to have - // the preconditioner in the form - // $P^{-1}$, so we need to create an - // inverse operation. Once again, we - // use an object of the class - // InverseMatrix, which - // implements the vmult - // operation that is needed by the - // solver. In this case, we have to - // invert the pressure mass matrix. As - // it already turned out in earlier - // tutorial programs, the inversion of - // a mass matrix is a rather cheap and - // straight-forward operation (compared - // to, e.g., a Laplace matrix). The CG - // method with ILU preconditioning - // converges in 5-10 steps, - // independently on the mesh size. - // This is precisely what we do here: - // We choose another ILU preconditioner - // and take it along to the - // InverseMatrix object via the - // corresponding template parameter. A - // CG solver is then called within the - // vmult operation of the inverse - // matrix. - // - // An alternative that is cheaper to - // build, but needs more iterations - // afterwards, would be to choose a - // SSOR preconditioner with factor - // 1.2. It needs about twice the number - // of iterations, but the costs for its - // generation are almost neglible. + // Now to the preconditioner to the + // Schur complement. As explained in + // the introduction, the + // preconditioning is done by a mass + // matrix in the pressure variable. It + // is stored in the $(1,1)$ block of + // the system matrix (that is not used + // anywhere else but in + // preconditioning). + // + // Actually, the solver needs to have + // the preconditioner in the form + // $P^{-1}$, so we need to create an + // inverse operation. Once again, we + // use an object of the class + // InverseMatrix, which + // implements the vmult + // operation that is needed by the + // solver. In this case, we have to + // invert the pressure mass matrix. As + // it already turned out in earlier + // tutorial programs, the inversion of + // a mass matrix is a rather cheap and + // straight-forward operation (compared + // to, e.g., a Laplace matrix). The CG + // method with ILU preconditioning + // converges in 5-10 steps, + // independently on the mesh size. + // This is precisely what we do here: + // We choose another ILU preconditioner + // and take it along to the + // InverseMatrix object via the + // corresponding template parameter. A + // CG solver is then called within the + // vmult operation of the inverse + // matrix. + // + // An alternative that is cheaper to + // build, but needs more iterations + // afterwards, would be to choose a + // SSOR preconditioner with factor + // 1.2. It needs about twice the number + // of iterations, but the costs for its + // generation are almost neglible. 
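                                   // [Editorial sketch, not part of the patch:
                                   // what the InverseMatrix class does
                                   // conceptually -- exposing the action of
                                   // A^{-1} on a vector by running a conjugate
                                   // gradient iteration inside vmult(). The 3x3
                                   // matrix, the class name and the tolerance
                                   // are made up; the real class additionally
                                   // hands a preconditioner such as the
                                   // SparseILU above to the inner solver.]
#include <cmath>
#include <iostream>
#include <vector>

typedef std::vector<double> Vec;

struct InverseBySolvingCG
{
  double A[3][3];                  // dense SPD example matrix

  Vec multiply (const Vec &x) const
  {
    Vec y (3, 0.);
    for (int i=0; i<3; ++i)
      for (int j=0; j<3; ++j)
        y[i] += A[i][j]*x[j];
    return y;
  }

  static double dot (const Vec &a, const Vec &b)
  {
    double s = 0.;
    for (unsigned int i=0; i<a.size(); ++i)
      s += a[i]*b[i];
    return s;
  }

                                   // "vmult": dst = A^{-1} src, computed by an
                                   // (unpreconditioned) CG iteration.
  void vmult (Vec &dst, const Vec &src) const
  {
    dst.assign (3, 0.);
    Vec r = src, p = r;
    double rs_old = dot (r, r);
    for (unsigned int iter=0; iter<100 && std::sqrt(rs_old) > 1e-12; ++iter)
      {
        const Vec Ap = multiply (p);
        const double alpha = rs_old / dot (p, Ap);
        for (int i=0; i<3; ++i) { dst[i] += alpha*p[i]; r[i] -= alpha*Ap[i]; }
        const double rs_new = dot (r, r);
        for (int i=0; i<3; ++i) p[i] = r[i] + (rs_new/rs_old)*p[i];
        rs_old = rs_new;
      }
  }
};

int main ()
{
  InverseBySolvingCG Minv = {{{4.,1.,0.},{1.,3.,1.},{0.,1.,2.}}};
  Vec b (3); b[0] = 1.; b[1] = 2.; b[2] = 3.;
  Vec x;
  Minv.vmult (x, b);                    // x now approximates A^{-1} b
  const Vec check = Minv.multiply (x);  // applying A should give back b
  std::cout << "A*x = " << check[0] << ' ' << check[1] << ' ' << check[2]
            << std::endl;
}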
SparseILU preconditioner; preconditioner.initialize (system_matrix.block(1,1), - SparseILU::AdditionalData()); + SparseILU::AdditionalData()); InverseMatrix,SparseILU > - m_inverse (system_matrix.block(1,1), preconditioner); - - // With the Schur complement and an - // efficient preconditioner at hand, we - // can solve the respective equation - // for the pressure (i.e. block 0 in - // the solution vector) in the usual - // way: + m_inverse (system_matrix.block(1,1), preconditioner); + + // With the Schur complement and an + // efficient preconditioner at hand, we + // can solve the respective equation + // for the pressure (i.e. block 0 in + // the solution vector) in the usual + // way: cg.solve (schur_complement, solution.block(1), schur_rhs, - m_inverse); + m_inverse); - // After this first solution step, the - // hanging node constraints have to be - // distributed to the solution in order - // to achieve a consistent pressure - // field. + // After this first solution step, the + // hanging node constraints have to be + // distributed to the solution in order + // to achieve a consistent pressure + // field. constraints.distribute (solution); std::cout << " " - << solver_control.last_step() - << " outer CG Schur complement iterations for pressure" - << std::endl; + << solver_control.last_step() + << " outer CG Schur complement iterations for pressure" + << std::endl; } - // As in step-20, we finally need to - // solve for the velocity equation where - // we plug in the solution to the - // pressure equation. This involves only - // objects we already know - so we simply - // multiply $p$ by $B^T$, subtract the - // right hand side and multiply by the - // inverse of $A$. At the end, we need to - // distribute the constraints from - // hanging nodes in order to obtain a - // constistent flow field: + // As in step-20, we finally need to + // solve for the velocity equation where + // we plug in the solution to the + // pressure equation. This involves only + // objects we already know - so we simply + // multiply $p$ by $B^T$, subtract the + // right hand side and multiply by the + // inverse of $A$. At the end, we need to + // distribute the constraints from + // hanging nodes in order to obtain a + // constistent flow field: { system_matrix.block(0,1).vmult (tmp, solution.block(1)); tmp *= -1; @@ -1115,48 +1115,48 @@ namespace Step22 } - // @sect4{StokesProblem::output_results} - - // The next function generates graphical - // output. In this example, we are going to - // use the VTK file format. We attach - // names to the individual variables in the - // problem: velocity to the - // dim components of velocity - // and pressure to the - // pressure. - // - // Not all visualization programs have the - // ability to group individual vector - // components into a vector to provide - // vector plots; in particular, this holds - // for some VTK-based visualization - // programs. In this case, the logical - // grouping of components into vectors - // should already be described in the file - // containing the data. In other words, - // what we need to do is provide our output - // writers with a way to know which of the - // components of the finite element - // logically form a vector (with $d$ - // components in $d$ space dimensions) - // rather than letting them assume that we - // simply have a bunch of scalar fields. 
- // This is achieved using the members of - // the - // DataComponentInterpretation - // namespace: as with the filename, we - // create a vector in which the first - // dim components refer to the - // velocities and are given the tag - // DataComponentInterpretation::component_is_part_of_vector; - // we finally push one tag - // DataComponentInterpretation::component_is_scalar - // to describe the grouping of the pressure - // variable. - - // The rest of the function is then - // the same as in step-20. + // @sect4{StokesProblem::output_results} + + // The next function generates graphical + // output. In this example, we are going to + // use the VTK file format. We attach + // names to the individual variables in the + // problem: velocity to the + // dim components of velocity + // and pressure to the + // pressure. + // + // Not all visualization programs have the + // ability to group individual vector + // components into a vector to provide + // vector plots; in particular, this holds + // for some VTK-based visualization + // programs. In this case, the logical + // grouping of components into vectors + // should already be described in the file + // containing the data. In other words, + // what we need to do is provide our output + // writers with a way to know which of the + // components of the finite element + // logically form a vector (with $d$ + // components in $d$ space dimensions) + // rather than letting them assume that we + // simply have a bunch of scalar fields. + // This is achieved using the members of + // the + // DataComponentInterpretation + // namespace: as with the filename, we + // create a vector in which the first + // dim components refer to the + // velocities and are given the tag + // DataComponentInterpretation::component_is_part_of_vector; + // we finally push one tag + // DataComponentInterpretation::component_is_scalar + // to describe the grouping of the pressure + // variable. + + // The rest of the function is then + // the same as in step-20. template void StokesProblem::output_results (const unsigned int refinement_cycle) const @@ -1173,34 +1173,34 @@ namespace Step22 DataOut data_out; data_out.attach_dof_handler (dof_handler); data_out.add_data_vector (solution, solution_names, - DataOut::type_dof_data, - data_component_interpretation); + DataOut::type_dof_data, + data_component_interpretation); data_out.build_patches (); std::ostringstream filename; filename << "solution-" - << Utilities::int_to_string (refinement_cycle, 2) - << ".vtk"; + << Utilities::int_to_string (refinement_cycle, 2) + << ".vtk"; std::ofstream output (filename.str().c_str()); data_out.write_vtk (output); } - // @sect4{StokesProblem::refine_mesh} - - // This is the last interesting function of - // the StokesProblem class. - // As indicated by its name, it takes the - // solution to the problem and refines the - // mesh where this is needed. The procedure - // is the same as in the respective step in - // step-6, with the exception that we base - // the refinement only on the change in - // pressure, i.e., we call the Kelly error - // estimator with a mask - // object. Additionally, we do not coarsen - // the grid again: + // @sect4{StokesProblem::refine_mesh} + + // This is the last interesting function of + // the StokesProblem class. + // As indicated by its name, it takes the + // solution to the problem and refines the + // mesh where this is needed. 
The procedure + // is the same as in the respective step in + // step-6, with the exception that we base + // the refinement only on the change in + // pressure, i.e., we call the Kelly error + // estimator with a mask + // object. Additionally, we do not coarsen + // the grid again: template void StokesProblem::refine_mesh () @@ -1210,40 +1210,40 @@ namespace Step22 std::vector component_mask (dim+1, false); component_mask[dim] = true; KellyErrorEstimator::estimate (dof_handler, - QGauss(degree+1), - typename FunctionMap::type(), - solution, - estimated_error_per_cell, - component_mask); + QGauss(degree+1), + typename FunctionMap::type(), + solution, + estimated_error_per_cell, + component_mask); GridRefinement::refine_and_coarsen_fixed_number (triangulation, - estimated_error_per_cell, - 0.3, 0.0); + estimated_error_per_cell, + 0.3, 0.0); triangulation.execute_coarsening_and_refinement (); } - // @sect4{StokesProblem::run} - - // The last step in the Stokes class is, as - // usual, the function that generates the - // initial grid and calls the other - // functions in the respective order. - // - // We start off with a rectangle of size $4 - // \times 1$ (in 2d) or $4 \times 1 \times - // 1$ (in 3d), placed in $R^2/R^3$ as - // $(-2,2)\times(-1,0)$ or - // $(-2,2)\times(0,1)\times(-1,0)$, - // respectively. It is natural to start - // with equal mesh size in each direction, - // so we subdivide the initial rectangle - // four times in the first coordinate - // direction. To limit the scope of the - // variables involved in the creation of - // the mesh to the range where we actually - // need them, we put the entire block - // between a pair of braces: + // @sect4{StokesProblem::run} + + // The last step in the Stokes class is, as + // usual, the function that generates the + // initial grid and calls the other + // functions in the respective order. + // + // We start off with a rectangle of size $4 + // \times 1$ (in 2d) or $4 \times 1 \times + // 1$ (in 3d), placed in $R^2/R^3$ as + // $(-2,2)\times(-1,0)$ or + // $(-2,2)\times(0,1)\times(-1,0)$, + // respectively. It is natural to start + // with equal mesh size in each direction, + // so we subdivide the initial rectangle + // four times in the first coordinate + // direction. To limit the scope of the + // variables involved in the creation of + // the mesh to the range where we actually + // need them, we put the entire block + // between a pair of braces: template void StokesProblem::run () { @@ -1252,74 +1252,74 @@ namespace Step22 subdivisions[0] = 4; const Point bottom_left = (dim == 2 ? - Point(-2,-1) : - Point(-2,0,-1)); + Point(-2,-1) : + Point(-2,0,-1)); const Point top_right = (dim == 2 ? - Point(2,0) : - Point(2,1,0)); + Point(2,0) : + Point(2,1,0)); GridGenerator::subdivided_hyper_rectangle (triangulation, - subdivisions, - bottom_left, - top_right); + subdivisions, + bottom_left, + top_right); } - // A boundary indicator of 1 is set to all - // boundaries that are subject to Dirichlet - // boundary conditions, i.e. to faces that - // are located at 0 in the last coordinate - // direction. See the example description - // above for details. + // A boundary indicator of 1 is set to all + // boundaries that are subject to Dirichlet + // boundary conditions, i.e. to faces that + // are located at 0 in the last coordinate + // direction. See the example description + // above for details. 
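                                   // [Editorial sketch, not part of the patch:
                                   // the "refine the 30% of cells with the
                                   // largest indicator, coarsen none" strategy
                                   // used in refine_mesh() above, acting on a
                                   // plain vector of made-up error indicators
                                   // instead of a Triangulation; it mimics what
                                   // GridRefinement::refine_and_coarsen_fixed_number
                                   // does internally, in simplified form.]
#include <algorithm>
#include <functional>
#include <iostream>
#include <vector>

int main ()
{
  const double indicators[10] = {0.02, 0.15, 0.01, 0.40, 0.07,
                                 0.22, 0.03, 0.11, 0.05, 0.30};
  std::vector<double> estimated_error_per_cell
    (indicators, indicators + 10);

  const double top_fraction = 0.3;   // same fraction as in the program
  const unsigned int n_cells  = estimated_error_per_cell.size();
  const unsigned int n_refine =
    static_cast<unsigned int> (top_fraction * n_cells);

                                   // The threshold is the n_refine-th largest
                                   // indicator value ...
  std::vector<double> sorted = estimated_error_per_cell;
  std::sort (sorted.begin(), sorted.end(), std::greater<double>());
  const double threshold = sorted[n_refine-1];

                                   // ... and every cell reaching it would be
                                   // flagged for refinement.
  for (unsigned int cell=0; cell<n_cells; ++cell)
    if (estimated_error_per_cell[cell] >= threshold)
      std::cout << "cell " << cell << " flagged for refinement (indicator "
                << estimated_error_per_cell[cell] << ")" << std::endl;
}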
for (typename Triangulation::active_cell_iterator - cell = triangulation.begin_active(); - cell != triangulation.end(); ++cell) + cell = triangulation.begin_active(); + cell != triangulation.end(); ++cell) for (unsigned int f=0; f::faces_per_cell; ++f) - if (cell->face(f)->center()[dim-1] == 0) - cell->face(f)->set_all_boundary_indicators(1); + if (cell->face(f)->center()[dim-1] == 0) + cell->face(f)->set_all_boundary_indicators(1); - // We then apply an initial refinement - // before solving for the first time. In - // 3D, there are going to be more degrees - // of freedom, so we refine less there: + // We then apply an initial refinement + // before solving for the first time. In + // 3D, there are going to be more degrees + // of freedom, so we refine less there: triangulation.refine_global (4-dim); - // As first seen in step-6, we cycle over - // the different refinement levels and - // refine (except for the first cycle), - // setup the degrees of freedom and - // matrices, assemble, solve and create - // output: + // As first seen in step-6, we cycle over + // the different refinement levels and + // refine (except for the first cycle), + // setup the degrees of freedom and + // matrices, assemble, solve and create + // output: for (unsigned int refinement_cycle = 0; refinement_cycle<6; - ++refinement_cycle) + ++refinement_cycle) { - std::cout << "Refinement cycle " << refinement_cycle << std::endl; + std::cout << "Refinement cycle " << refinement_cycle << std::endl; - if (refinement_cycle > 0) - refine_mesh (); + if (refinement_cycle > 0) + refine_mesh (); - setup_dofs (); + setup_dofs (); - std::cout << " Assembling..." << std::endl << std::flush; - assemble_system (); + std::cout << " Assembling..." << std::endl << std::flush; + assemble_system (); - std::cout << " Solving..." << std::flush; - solve (); + std::cout << " Solving..." << std::flush; + solve (); - output_results (refinement_cycle); + output_results (refinement_cycle); - std::cout << std::endl; + std::cout << std::endl; } } } - // @sect3{The main function} + // @sect3{The main function} - // The main function is the same as in - // step-20. We pass the element degree as a - // parameter and choose the space dimension - // at the well-known template slot. + // The main function is the same as in + // step-20. We pass the element degree as a + // parameter and choose the space dimension + // at the well-known template slot. int main () { try diff --git a/deal.II/examples/step-23/step-23.cc b/deal.II/examples/step-23/step-23.cc index c92a5ed653..fb10bb96e0 100644 --- a/deal.II/examples/step-23/step-23.cc +++ b/deal.II/examples/step-23/step-23.cc @@ -11,11 +11,11 @@ /* further information on this license. */ - // @sect3{Include files} + // @sect3{Include files} - // We start with the usual assortment - // of include files that we've seen - // in so many of the previous tests: + // We start with the usual assortment + // of include files that we've seen + // in so many of the previous tests: #include #include #include @@ -44,113 +44,113 @@ #include #include - // Here are the only three include - // files of some new interest: The - // first one is already used, for - // example, for the - // VectorTools::interpolate_boundary_values - // and - // VectorTools::apply_boundary_values - // functions. However, we here use - // another function in that class, - // VectorTools::project to compute - // our initial values as the $L^2$ - // projection of the continuous - // initial values. 
Furthermore, we - // use - // VectorTools::create_right_hand_side - // to generate the integrals - // $(f^n,\phi^n_i)$. These were - // previously always generated by - // hand in - // assemble_system or - // similar functions in application - // code. However, we're too lazy to - // do that here, so simply use a - // library function: + // Here are the only three include + // files of some new interest: The + // first one is already used, for + // example, for the + // VectorTools::interpolate_boundary_values + // and + // VectorTools::apply_boundary_values + // functions. However, we here use + // another function in that class, + // VectorTools::project to compute + // our initial values as the $L^2$ + // projection of the continuous + // initial values. Furthermore, we + // use + // VectorTools::create_right_hand_side + // to generate the integrals + // $(f^n,\phi^n_i)$. These were + // previously always generated by + // hand in + // assemble_system or + // similar functions in application + // code. However, we're too lazy to + // do that here, so simply use a + // library function: #include - // In a very similar vein, we are - // also too lazy to write the code to - // assemble mass and Laplace - // matrices, although it would have - // only taken copying the relevant - // code from any number of previous - // tutorial programs. Rather, we want - // to focus on the things that are - // truly new to this program and - // therefore use the - // MatrixTools::create_mass_matrix - // and - // MatrixTools::create_laplace_matrix - // functions. They are declared here: + // In a very similar vein, we are + // also too lazy to write the code to + // assemble mass and Laplace + // matrices, although it would have + // only taken copying the relevant + // code from any number of previous + // tutorial programs. Rather, we want + // to focus on the things that are + // truly new to this program and + // therefore use the + // MatrixTools::create_mass_matrix + // and + // MatrixTools::create_laplace_matrix + // functions. They are declared here: #include - // Finally, here is an include file - // that contains all sorts of tool - // functions that one sometimes - // needs. In particular, we need the - // Utilities::int_to_string class - // that, given an integer argument, - // returns a string representation of - // it. It is particularly useful - // since it allows for a second - // parameter indicating the number of - // digits to which we want the result - // padded with leading zeros. We will - // use this to write output files - // that have the form - // solution-XXX.gnuplot - // where XXX denotes the - // number of the time step and always - // consists of three digits even if - // we are still in the single or - // double digit time steps. + // Finally, here is an include file + // that contains all sorts of tool + // functions that one sometimes + // needs. In particular, we need the + // Utilities::int_to_string class + // that, given an integer argument, + // returns a string representation of + // it. It is particularly useful + // since it allows for a second + // parameter indicating the number of + // digits to which we want the result + // padded with leading zeros. We will + // use this to write output files + // that have the form + // solution-XXX.gnuplot + // where XXX denotes the + // number of the time step and always + // consists of three digits even if + // we are still in the single or + // double digit time steps. 
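                                   // [Editorial sketch, not part of the patch:
                                   // the zero-padded file names described above,
                                   // using only the standard library
                                   // (std::setw/std::setfill) in place of
                                   // Utilities::int_to_string; the helper name
                                   // and the time step numbers are made up.]
#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>

std::string time_step_filename (const unsigned int timestep_number)
{
  std::ostringstream name;
  name << "solution-"
       << std::setw(3) << std::setfill('0') << timestep_number
       << ".gnuplot";
  return name.str();
}

int main ()
{
  std::cout << time_step_filename(7)   << std::endl    // solution-007.gnuplot
            << time_step_filename(42)  << std::endl    // solution-042.gnuplot
            << time_step_filename(123) << std::endl;   // solution-123.gnuplot
}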
#include - // The last step is as in all - // previous programs: + // The last step is as in all + // previous programs: namespace Step23 { using namespace dealii; - // @sect3{The WaveEquation class} - - // Next comes the declaration of the main - // class. It's public interface of functions - // is like in most of the other tutorial - // programs. Worth mentioning is that we now - // have to store four matrices instead of - // one: the mass matrix $M$, the Laplace - // matrix $A$, the matrix $M+k^2\theta^2A$ - // used for solving for $U^n$, and a copy of - // the mass matrix with boundary conditions - // applied used for solving for $V^n$. Note - // that it is a bit wasteful to have an - // additional copy of the mass matrix - // around. We will discuss strategies for how - // to avoid this in the section on possible - // improvements. - // - // Likewise, we need solution vectors for - // $U^n,V^n$ as well as for the corresponding - // vectors at the previous time step, - // $U^{n-1},V^{n-1}$. The - // system_rhs will be used for - // whatever right hand side vector we have - // when solving one of the two linear systems - // in each time step. These will be solved in - // the two functions solve_u and - // solve_v. - // - // Finally, the variable - // theta is used to - // indicate the parameter $\theta$ - // that is used to define which time - // stepping scheme to use, as - // explained in the introduction. The - // rest is self-explanatory. + // @sect3{The WaveEquation class} + + // Next comes the declaration of the main + // class. It's public interface of functions + // is like in most of the other tutorial + // programs. Worth mentioning is that we now + // have to store four matrices instead of + // one: the mass matrix $M$, the Laplace + // matrix $A$, the matrix $M+k^2\theta^2A$ + // used for solving for $U^n$, and a copy of + // the mass matrix with boundary conditions + // applied used for solving for $V^n$. Note + // that it is a bit wasteful to have an + // additional copy of the mass matrix + // around. We will discuss strategies for how + // to avoid this in the section on possible + // improvements. + // + // Likewise, we need solution vectors for + // $U^n,V^n$ as well as for the corresponding + // vectors at the previous time step, + // $U^{n-1},V^{n-1}$. The + // system_rhs will be used for + // whatever right hand side vector we have + // when solving one of the two linear systems + // in each time step. These will be solved in + // the two functions solve_u and + // solve_v. + // + // Finally, the variable + // theta is used to + // indicate the parameter $\theta$ + // that is used to define which time + // stepping scheme to use, as + // explained in the introduction. The + // rest is self-explanatory. template class WaveEquation { @@ -187,26 +187,26 @@ namespace Step23 - // @sect3{Equation data} - - // Before we go on filling in the - // details of the main class, let us - // define the equation data - // corresponding to the problem, - // i.e. initial and boundary values - // for both the solution $u$ and its - // time derivative $v$, as well as a - // right hand side class. We do so - // using classes derived from the - // Function class template that has - // been used many times before, so - // the following should not be a - // surprise. 
- // - // Let's start with initial values - // and choose zero for both the value - // $u$ as well as its time - // derivative, the velocity $v$: + // @sect3{Equation data} + + // Before we go on filling in the + // details of the main class, let us + // define the equation data + // corresponding to the problem, + // i.e. initial and boundary values + // for both the solution $u$ and its + // time derivative $v$, as well as a + // right hand side class. We do so + // using classes derived from the + // Function class template that has + // been used many times before, so + // the following should not be a + // surprise. + // + // Let's start with initial values + // and choose zero for both the value + // $u$ as well as its time + // derivative, the velocity $v$: template class InitialValuesU : public Function { @@ -214,7 +214,7 @@ namespace Step23 InitialValuesU () : Function() {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; }; @@ -225,14 +225,14 @@ namespace Step23 InitialValuesV () : Function() {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; }; template double InitialValuesU::value (const Point &/*p*/, - const unsigned int component) const + const unsigned int component) const { Assert (component == 0, ExcInternalError()); return 0; @@ -242,7 +242,7 @@ namespace Step23 template double InitialValuesV::value (const Point &/*p*/, - const unsigned int component) const + const unsigned int component) const { Assert (component == 0, ExcInternalError()); return 0; @@ -250,9 +250,9 @@ namespace Step23 - // Secondly, we have the right hand - // side forcing term. Boring as we - // are, we choose zero here as well: + // Secondly, we have the right hand + // side forcing term. Boring as we + // are, we choose zero here as well: template class RightHandSide : public Function { @@ -260,14 +260,14 @@ namespace Step23 RightHandSide () : Function() {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; }; template double RightHandSide::value (const Point &/*p*/, - const unsigned int component) const + const unsigned int component) const { Assert (component == 0, ExcInternalError()); return 0; @@ -275,10 +275,10 @@ namespace Step23 - // Finally, we have boundary values for $u$ - // and $v$. They are as described in the - // introduction, one being the time - // derivative of the other: + // Finally, we have boundary values for $u$ + // and $v$. 
They are as described in the + // introduction, one being the time + // derivative of the other: template class BoundaryValuesU : public Function { @@ -286,7 +286,7 @@ namespace Step23 BoundaryValuesU () : Function() {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; }; @@ -299,7 +299,7 @@ namespace Step23 BoundaryValuesV () : Function() {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; }; @@ -307,14 +307,14 @@ namespace Step23 template double BoundaryValuesU::value (const Point &p, - const unsigned int component) const + const unsigned int component) const { Assert (component == 0, ExcInternalError()); if ((this->get_time() <= 0.5) && - (p[0] < 0) && - (p[1] < 1./3) && - (p[1] > -1./3)) + (p[0] < 0) && + (p[1] < 1./3) && + (p[1] > -1./3)) return std::sin (this->get_time() * 4 * numbers::PI); else return 0; @@ -324,16 +324,16 @@ namespace Step23 template double BoundaryValuesV::value (const Point &p, - const unsigned int component) const + const unsigned int component) const { Assert (component == 0, ExcInternalError()); if ((this->get_time() <= 0.5) && - (p[0] < 0) && - (p[1] < 1./3) && - (p[1] > -1./3)) + (p[0] < 0) && + (p[1] < 1./3) && + (p[1] > -1./3)) return (std::cos (this->get_time() * 4 * numbers::PI) * - 4 * numbers::PI); + 4 * numbers::PI); else return 0; } @@ -341,44 +341,44 @@ namespace Step23 - // @sect3{Implementation of the WaveEquation class} + // @sect3{Implementation of the WaveEquation class} - // The implementation of the actual logic is - // actually fairly short, since we relegate - // things like assembling the matrices and - // right hand side vectors to the - // library. The rest boils down to not much - // more than 130 lines of actual code, a - // significant fraction of which is - // boilerplate code that can be taken from - // previous example programs (e.g. the - // functions that solve linear systems, or - // that generate output). - // - // Let's start with the constructor (for an - // explanation of the choice of time step, - // see the section on Courant, Friedrichs, - // and Lewy in the introduction): + // The implementation of the actual logic is + // actually fairly short, since we relegate + // things like assembling the matrices and + // right hand side vectors to the + // library. The rest boils down to not much + // more than 130 lines of actual code, a + // significant fraction of which is + // boilerplate code that can be taken from + // previous example programs (e.g. the + // functions that solve linear systems, or + // that generate output). + // + // Let's start with the constructor (for an + // explanation of the choice of time step, + // see the section on Courant, Friedrichs, + // and Lewy in the introduction): template WaveEquation::WaveEquation () : - fe (1), - dof_handler (triangulation), - time_step (1./64), - theta (0.5) + fe (1), + dof_handler (triangulation), + time_step (1./64), + theta (0.5) {} - // @sect4{WaveEquation::setup_system} + // @sect4{WaveEquation::setup_system} - // The next function is the one that - // sets up the mesh, DoFHandler, and - // matrices and vectors at the - // beginning of the program, - // i.e. before the first time - // step. 
The first few lines are - // pretty much standard if you've - // read through the tutorial programs - // at least up to step-6: + // The next function is the one that + // sets up the mesh, DoFHandler, and + // matrices and vectors at the + // beginning of the program, + // i.e. before the first time + // step. The first few lines are + // pretty much standard if you've + // read through the tutorial programs + // at least up to step-6: template void WaveEquation::setup_system () { @@ -386,81 +386,81 @@ namespace Step23 triangulation.refine_global (7); std::cout << "Number of active cells: " - << triangulation.n_active_cells() - << std::endl; + << triangulation.n_active_cells() + << std::endl; dof_handler.distribute_dofs (fe); std::cout << "Number of degrees of freedom: " - << dof_handler.n_dofs() - << std::endl - << std::endl; + << dof_handler.n_dofs() + << std::endl + << std::endl; sparsity_pattern.reinit (dof_handler.n_dofs(), - dof_handler.n_dofs(), - dof_handler.max_couplings_between_dofs()); + dof_handler.n_dofs(), + dof_handler.max_couplings_between_dofs()); DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); sparsity_pattern.compress(); - // Then comes a block where we have to - // initialize the 3 matrices we need in the - // course of the program: the mass matrix, - // the laplace matrix, and the matrix - // $M+k^2\theta^2A$ used when solving for - // $U^n$ in each time step. - // - // When setting up these matrices, note - // that they all make use of the same - // sparsity pattern object. Finally, the - // reason why matrices and sparsity - // patterns are separate objects in deal.II - // (unlike in many other finite element or - // linear algebra classes) becomes clear: - // in a significant fraction of - // applications, one has to hold several - // matrices that happen to have the same - // sparsity pattern, and there is no reason - // for them not to share this information, - // rather than re-building and wasting - // memory on it several times. - // - // After initializing all of these - // matrices, we call library functions that - // build the Laplace and mass matrices. All - // they need is a DoFHandler object and a - // quadrature formula object that is to be - // used for numerical integration. Note - // that in many respects these functions - // are better than what we would usually do - // in application programs, for example - // because they automatically parallelize - // building the matrices if multiple - // processors are available in a - // machine. The matrices for solving linear - // systems will be filled in the run() - // method because we need to re-apply - // boundary conditions every time step. + // Then comes a block where we have to + // initialize the 3 matrices we need in the + // course of the program: the mass matrix, + // the laplace matrix, and the matrix + // $M+k^2\theta^2A$ used when solving for + // $U^n$ in each time step. + // + // When setting up these matrices, note + // that they all make use of the same + // sparsity pattern object. Finally, the + // reason why matrices and sparsity + // patterns are separate objects in deal.II + // (unlike in many other finite element or + // linear algebra classes) becomes clear: + // in a significant fraction of + // applications, one has to hold several + // matrices that happen to have the same + // sparsity pattern, and there is no reason + // for them not to share this information, + // rather than re-building and wasting + // memory on it several times. 
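                                   // [Editorial sketch, not part of the patch:
                                   // why separating the sparsity pattern from
                                   // the matrices pays off, as discussed above.
                                   // One compressed-row pattern (row starts plus
                                   // column indices) is built once and shared by
                                   // several matrices that only store their own
                                   // value arrays; the 3x3 pattern and the class
                                   // names are made up, not the deal.II classes.]
#include <iostream>
#include <vector>

struct Pattern                     // CSR layout: built once, then shared
{
  std::vector<unsigned int> row_start;
  std::vector<unsigned int> column;
};

struct Matrix                      // per-matrix data: only the values
{
  const Pattern *pattern;
  std::vector<double> values;

  explicit Matrix (const Pattern &p)
    : pattern (&p), values (p.column.size(), 0.) {}
};

int main ()
{
                                   // Nonzero entries (0,0) (0,1) (1,1) (2,1)
                                   // (2,2) of a 3x3 matrix:
  Pattern pattern;
  const unsigned int rs[4] = {0, 2, 3, 5};
  const unsigned int ci[5] = {0, 1, 1, 1, 2};
  pattern.row_start.assign (rs, rs+4);
  pattern.column.assign (ci, ci+5);

                                   // Mass matrix, Laplace matrix, ... all reuse
                                   // the very same pattern object.
  Matrix mass_matrix (pattern), laplace_matrix (pattern);
  mass_matrix.values[0]    = 1.;   // entry (0,0) of the mass matrix
  laplace_matrix.values[0] = 2.;   // entry (0,0) of the Laplace matrix

  std::cout << "rows: " << pattern.row_start.size()-1
            << ",  shared nonzeros per matrix: " << pattern.column.size()
            << ",  mass(0,0)=" << mass_matrix.values[0]
            << ",  laplace(0,0)=" << laplace_matrix.values[0] << std::endl;
}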
+ // + // After initializing all of these + // matrices, we call library functions that + // build the Laplace and mass matrices. All + // they need is a DoFHandler object and a + // quadrature formula object that is to be + // used for numerical integration. Note + // that in many respects these functions + // are better than what we would usually do + // in application programs, for example + // because they automatically parallelize + // building the matrices if multiple + // processors are available in a + // machine. The matrices for solving linear + // systems will be filled in the run() + // method because we need to re-apply + // boundary conditions every time step. mass_matrix.reinit (sparsity_pattern); laplace_matrix.reinit (sparsity_pattern); matrix_u.reinit (sparsity_pattern); matrix_v.reinit (sparsity_pattern); MatrixCreator::create_mass_matrix (dof_handler, QGauss(3), - mass_matrix); + mass_matrix); MatrixCreator::create_laplace_matrix (dof_handler, QGauss(3), - laplace_matrix); - - // The rest of the function is spent on - // setting vector sizes to the correct - // value. The final line closes the hanging - // node constraints object. Since we work - // on a uniformly refined mesh, no - // constraints exist or have been computed - // (i.e. there was no need to call - // DoFTools::make_hanging_node_constraints - // as in other programs), but we need a - // constraints object in one place further - // down below anyway. + laplace_matrix); + + // The rest of the function is spent on + // setting vector sizes to the correct + // value. The final line closes the hanging + // node constraints object. Since we work + // on a uniformly refined mesh, no + // constraints exist or have been computed + // (i.e. there was no need to call + // DoFTools::make_hanging_node_constraints + // as in other programs), but we need a + // constraints object in one place further + // down below anyway. solution_u.reinit (dof_handler.n_dofs()); solution_v.reinit (dof_handler.n_dofs()); old_solution_u.reinit (dof_handler.n_dofs()); @@ -471,26 +471,26 @@ namespace Step23 } - // @sect4{WaveEquation::solve_u and WaveEquation::solve_v} - - // The next two functions deal with solving - // the linear systems associated with the - // equations for $U^n$ and $V^n$. Both are - // not particularly interesting as they - // pretty much follow the scheme used in all - // the previous tutorial programs. - // - // One can make little experiments with - // preconditioners for the two matrices we - // have to invert. As it turns out, however, - // for the matrices at hand here, using - // Jacobi or SSOR preconditioners reduces the - // number of iterations necessary to solve - // the linear system slightly, but due to the - // cost of applying the preconditioner it is - // no win in terms of run-time. It is not - // much of a loss either, but let's keep it - // simple and just do without: + // @sect4{WaveEquation::solve_u and WaveEquation::solve_v} + + // The next two functions deal with solving + // the linear systems associated with the + // equations for $U^n$ and $V^n$. Both are + // not particularly interesting as they + // pretty much follow the scheme used in all + // the previous tutorial programs. + // + // One can make little experiments with + // preconditioners for the two matrices we + // have to invert. 
As it turns out, however, + // for the matrices at hand here, using + // Jacobi or SSOR preconditioners reduces the + // number of iterations necessary to solve + // the linear system slightly, but due to the + // cost of applying the preconditioner it is + // no win in terms of run-time. It is not + // much of a loss either, but let's keep it + // simple and just do without: template void WaveEquation::solve_u () { @@ -498,11 +498,11 @@ namespace Step23 SolverCG<> cg (solver_control); cg.solve (matrix_u, solution_u, system_rhs, - PreconditionIdentity()); + PreconditionIdentity()); std::cout << " u-equation: " << solver_control.last_step() - << " CG iterations." - << std::endl; + << " CG iterations." + << std::endl; } @@ -513,25 +513,25 @@ namespace Step23 SolverCG<> cg (solver_control); cg.solve (matrix_v, solution_v, system_rhs, - PreconditionIdentity()); + PreconditionIdentity()); std::cout << " v-equation: " << solver_control.last_step() - << " CG iterations." - << std::endl; + << " CG iterations." + << std::endl; } - // @sect4{WaveEquation::output_results} + // @sect4{WaveEquation::output_results} - // Likewise, the following function is pretty - // much what we've done before. The only - // thing worth mentioning is how here we - // generate a string representation of the - // time step number padded with leading zeros - // to 3 character length using the - // Utilities::int_to_string function's second - // argument. + // Likewise, the following function is pretty + // much what we've done before. The only + // thing worth mentioning is how here we + // generate a string representation of the + // time step number padded with leading zeros + // to 3 character length using the + // Utilities::int_to_string function's second + // argument. template void WaveEquation::output_results () const { @@ -544,8 +544,8 @@ namespace Step23 data_out.build_patches (); const std::string filename = "solution-" + - Utilities::int_to_string (timestep_number, 3) + - ".gnuplot"; + Utilities::int_to_string (timestep_number, 3) + + ".gnuplot"; std::ofstream output (filename.c_str()); data_out.write_gnuplot (output); } @@ -553,229 +553,229 @@ namespace Step23 - // @sect4{WaveEquation::run} + // @sect4{WaveEquation::run} - // The following is really the only - // interesting function of the program. It - // contains the loop over all time steps, but - // before we get to that we have to set up - // the grid, DoFHandler, and matrices. In - // addition, we have to somehow get started - // with initial values. To this end, we use - // the VectorTools::project function that - // takes an object that describes a - // continuous function and computes the $L^2$ - // projection of this function onto the - // finite element space described by the - // DoFHandler object. Can't be any simpler - // than that: + // The following is really the only + // interesting function of the program. It + // contains the loop over all time steps, but + // before we get to that we have to set up + // the grid, DoFHandler, and matrices. In + // addition, we have to somehow get started + // with initial values. To this end, we use + // the VectorTools::project function that + // takes an object that describes a + // continuous function and computes the $L^2$ + // projection of this function onto the + // finite element space described by the + // DoFHandler object. 
Can't be any simpler + // than that: template void WaveEquation::run () { setup_system(); VectorTools::project (dof_handler, constraints, QGauss(3), - InitialValuesU(), - old_solution_u); + InitialValuesU(), + old_solution_u); VectorTools::project (dof_handler, constraints, QGauss(3), - InitialValuesV(), - old_solution_v); - - // The next thing is to loop over all the - // time steps until we reach the end time - // ($T=5$ in this case). In each time step, - // we first have to solve for $U^n$, using - // the equation $(M^n + k^2\theta^2 A^n)U^n - // =$ $(M^{n,n-1} - k^2\theta(1-\theta) - // A^{n,n-1})U^{n-1} + kM^{n,n-1}V^{n-1} +$ - // $k\theta \left[k \theta F^n + k(1-\theta) - // F^{n-1} \right]$. Note that we use the - // same mesh for all time steps, so that - // $M^n=M^{n,n-1}=M$ and - // $A^n=A^{n,n-1}=A$. What we therefore - // have to do first is to add up $MU^{n-1} - // - k^2\theta(1-\theta) AU^{n-1} + kMV^{n-1}$ and - // the forcing terms, and put the result - // into the system_rhs - // vector. (For these additions, we need a - // temporary vector that we declare before - // the loop to avoid repeated memory - // allocations in each time step.) - // - // The one thing to realize here is how we - // communicate the time variable to the - // object describing the right hand side: - // each object derived from the Function - // class has a time field that can be set - // using the Function::set_time and read by - // Function::get_time. In essence, using - // this mechanism, all functions of space - // and time are therefore considered - // functions of space evaluated at a - // particular time. This matches well what - // we typically need in finite element - // programs, where we almost always work on - // a single time step at a time, and where - // it never happens that, for example, one - // would like to evaluate a space-time - // function for all times at any given - // spatial location. + InitialValuesV(), + old_solution_v); + + // The next thing is to loop over all the + // time steps until we reach the end time + // ($T=5$ in this case). In each time step, + // we first have to solve for $U^n$, using + // the equation $(M^n + k^2\theta^2 A^n)U^n + // =$ $(M^{n,n-1} - k^2\theta(1-\theta) + // A^{n,n-1})U^{n-1} + kM^{n,n-1}V^{n-1} +$ + // $k\theta \left[k \theta F^n + k(1-\theta) + // F^{n-1} \right]$. Note that we use the + // same mesh for all time steps, so that + // $M^n=M^{n,n-1}=M$ and + // $A^n=A^{n,n-1}=A$. What we therefore + // have to do first is to add up $MU^{n-1} + // - k^2\theta(1-\theta) AU^{n-1} + kMV^{n-1}$ and + // the forcing terms, and put the result + // into the system_rhs + // vector. (For these additions, we need a + // temporary vector that we declare before + // the loop to avoid repeated memory + // allocations in each time step.) + // + // The one thing to realize here is how we + // communicate the time variable to the + // object describing the right hand side: + // each object derived from the Function + // class has a time field that can be set + // using the Function::set_time and read by + // Function::get_time. In essence, using + // this mechanism, all functions of space + // and time are therefore considered + // functions of space evaluated at a + // particular time. 
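As a small illustration of this mechanism (the class PulsedSource and its formula are invented for this sketch and are not part of the program): a time-dependent source is written as a spatial function that reads the current time through FunctionTime::get_time(), while the caller advances that time with set_time() before evaluating it:

  template <int dim>
  class PulsedSource : public Function<dim>
  {
  public:
    virtual double value (const Point<dim> &p,
                          const unsigned int /*component*/ = 0) const
    {
      // "space evaluated at a particular time": the time is state of the object
      const double t = this->get_time ();
      return std::exp (-5.*t*t) * std::exp (-p.square ());
    }
  };

  // usage inside a time loop:
  PulsedSource<2> source;
  source.set_time (time);
  const double value_at_origin = source.value (Point<2>());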
This matches well what + // we typically need in finite element + // programs, where we almost always work on + // a single time step at a time, and where + // it never happens that, for example, one + // would like to evaluate a space-time + // function for all times at any given + // spatial location. Vector tmp (solution_u.size()); Vector forcing_terms (solution_u.size()); for (timestep_number=1, time=time_step; - time<=5; - time+=time_step, ++timestep_number) + time<=5; + time+=time_step, ++timestep_number) { - std::cout << "Time step " << timestep_number - << " at t=" << time - << std::endl; - - mass_matrix.vmult (system_rhs, old_solution_u); - - mass_matrix.vmult (tmp, old_solution_v); - system_rhs.add (time_step, tmp); - - laplace_matrix.vmult (tmp, old_solution_u); - system_rhs.add (-theta * (1-theta) * time_step * time_step, tmp); - - RightHandSide rhs_function; - rhs_function.set_time (time); - VectorTools::create_right_hand_side (dof_handler, QGauss(2), - rhs_function, tmp); - forcing_terms = tmp; - forcing_terms *= theta * time_step; - - rhs_function.set_time (time-time_step); - VectorTools::create_right_hand_side (dof_handler, QGauss(2), - rhs_function, tmp); - - forcing_terms.add ((1-theta) * time_step, tmp); - - system_rhs.add (theta * time_step, forcing_terms); - - // After so constructing the right hand - // side vector of the first equation, - // all we have to do is apply the - // correct boundary values. As for the - // right hand side, this is a - // space-time function evaluated at a - // particular time, which we - // interpolate at boundary nodes and - // then use the result to apply - // boundary values as we usually - // do. The result is then handed off to - // the solve_u() function: - { - BoundaryValuesU boundary_values_u_function; - boundary_values_u_function.set_time (time); - - std::map boundary_values; - VectorTools::interpolate_boundary_values (dof_handler, - 0, - boundary_values_u_function, - boundary_values); - - // The matrix for solve_u() is the same in - // every time steps, so one could think - // that it is enough to do this only once - // at the beginning of the - // simulation. However, since we need to - // apply boundary values to the linear - // system (which eliminate some matrix rows - // and columns and give contributions to - // the right hand side), we have to refill - // the matrix in every time steps before we - // actually apply boundary data. The actual - // content is very simple: it is the sum of - // the mass matrix and a weighted Laplace - // matrix: - matrix_u.copy_from (mass_matrix); - matrix_u.add (theta * theta * time_step * time_step, laplace_matrix); - MatrixTools::apply_boundary_values (boundary_values, - matrix_u, - solution_u, - system_rhs); - } - solve_u (); - - - // The second step, i.e. solving for - // $V^n$, works similarly, except that - // this time the matrix on the left is - // the mass matrix (which we copy again - // in order to be able to apply - // boundary conditions, and the right - // hand side is $MV^{n-1} - k\left[ - // \theta A U^n + (1-\theta) - // AU^{n-1}\right]$ plus forcing - // terms. 
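Written out as a single formula (simply restating the comment above, read off from the lines that follow), the second equation assembled and solved in each step is
$MV^n = MV^{n-1} - k\left[\theta A U^n + (1-\theta) A U^{n-1}\right] + k\left[\theta F^n + (1-\theta) F^{n-1}\right]$.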
%Boundary values are applied - // in the same way as before, except - // that now we have to use the - // BoundaryValuesV class: - laplace_matrix.vmult (system_rhs, solution_u); - system_rhs *= -theta * time_step; - - mass_matrix.vmult (tmp, old_solution_v); - system_rhs += tmp; - - laplace_matrix.vmult (tmp, old_solution_u); - system_rhs.add (-time_step * (1-theta), tmp); - - system_rhs += forcing_terms; - - { - BoundaryValuesV boundary_values_v_function; - boundary_values_v_function.set_time (time); - - std::map boundary_values; - VectorTools::interpolate_boundary_values (dof_handler, - 0, - boundary_values_v_function, - boundary_values); - matrix_v.copy_from (mass_matrix); - MatrixTools::apply_boundary_values (boundary_values, - matrix_v, - solution_v, - system_rhs); - } - solve_v (); - - // Finally, after both solution - // components have been computed, we - // output the result, compute the - // energy in the solution, and go on to - // the next time step after shifting - // the present solution into the - // vectors that hold the solution at - // the previous time step. Note the - // function - // SparseMatrix::matrix_norm_square - // that can compute - // $\left$ and - // $\left$ in one step, - // saving us the expense of a temporary - // vector and several lines of code: - output_results (); - - std::cout << " Total energy: " - << (mass_matrix.matrix_norm_square (solution_v) + - laplace_matrix.matrix_norm_square (solution_u)) / 2 - << std::endl; - - old_solution_u = solution_u; - old_solution_v = solution_v; + std::cout << "Time step " << timestep_number + << " at t=" << time + << std::endl; + + mass_matrix.vmult (system_rhs, old_solution_u); + + mass_matrix.vmult (tmp, old_solution_v); + system_rhs.add (time_step, tmp); + + laplace_matrix.vmult (tmp, old_solution_u); + system_rhs.add (-theta * (1-theta) * time_step * time_step, tmp); + + RightHandSide rhs_function; + rhs_function.set_time (time); + VectorTools::create_right_hand_side (dof_handler, QGauss(2), + rhs_function, tmp); + forcing_terms = tmp; + forcing_terms *= theta * time_step; + + rhs_function.set_time (time-time_step); + VectorTools::create_right_hand_side (dof_handler, QGauss(2), + rhs_function, tmp); + + forcing_terms.add ((1-theta) * time_step, tmp); + + system_rhs.add (theta * time_step, forcing_terms); + + // After so constructing the right hand + // side vector of the first equation, + // all we have to do is apply the + // correct boundary values. As for the + // right hand side, this is a + // space-time function evaluated at a + // particular time, which we + // interpolate at boundary nodes and + // then use the result to apply + // boundary values as we usually + // do. The result is then handed off to + // the solve_u() function: + { + BoundaryValuesU boundary_values_u_function; + boundary_values_u_function.set_time (time); + + std::map boundary_values; + VectorTools::interpolate_boundary_values (dof_handler, + 0, + boundary_values_u_function, + boundary_values); + + // The matrix for solve_u() is the same in + // every time steps, so one could think + // that it is enough to do this only once + // at the beginning of the + // simulation. However, since we need to + // apply boundary values to the linear + // system (which eliminate some matrix rows + // and columns and give contributions to + // the right hand side), we have to refill + // the matrix in every time steps before we + // actually apply boundary data. 
The actual + // content is very simple: it is the sum of + // the mass matrix and a weighted Laplace + // matrix: + matrix_u.copy_from (mass_matrix); + matrix_u.add (theta * theta * time_step * time_step, laplace_matrix); + MatrixTools::apply_boundary_values (boundary_values, + matrix_u, + solution_u, + system_rhs); + } + solve_u (); + + + // The second step, i.e. solving for + // $V^n$, works similarly, except that + // this time the matrix on the left is + // the mass matrix (which we copy again + // in order to be able to apply + // boundary conditions, and the right + // hand side is $MV^{n-1} - k\left[ + // \theta A U^n + (1-\theta) + // AU^{n-1}\right]$ plus forcing + // terms. %Boundary values are applied + // in the same way as before, except + // that now we have to use the + // BoundaryValuesV class: + laplace_matrix.vmult (system_rhs, solution_u); + system_rhs *= -theta * time_step; + + mass_matrix.vmult (tmp, old_solution_v); + system_rhs += tmp; + + laplace_matrix.vmult (tmp, old_solution_u); + system_rhs.add (-time_step * (1-theta), tmp); + + system_rhs += forcing_terms; + + { + BoundaryValuesV boundary_values_v_function; + boundary_values_v_function.set_time (time); + + std::map boundary_values; + VectorTools::interpolate_boundary_values (dof_handler, + 0, + boundary_values_v_function, + boundary_values); + matrix_v.copy_from (mass_matrix); + MatrixTools::apply_boundary_values (boundary_values, + matrix_v, + solution_v, + system_rhs); + } + solve_v (); + + // Finally, after both solution + // components have been computed, we + // output the result, compute the + // energy in the solution, and go on to + // the next time step after shifting + // the present solution into the + // vectors that hold the solution at + // the previous time step. Note the + // function + // SparseMatrix::matrix_norm_square + // that can compute + // $\left$ and + // $\left$ in one step, + // saving us the expense of a temporary + // vector and several lines of code: + output_results (); + + std::cout << " Total energy: " + << (mass_matrix.matrix_norm_square (solution_v) + + laplace_matrix.matrix_norm_square (solution_u)) / 2 + << std::endl; + + old_solution_u = solution_u; + old_solution_v = solution_v; } } } - // @sect3{The main function} + // @sect3{The main function} - // What remains is the main function of the - // program. There is nothing here that hasn't - // been shown in several of the previous - // programs: + // What remains is the main function of the + // program. There is nothing here that hasn't + // been shown in several of the previous + // programs: int main () { try @@ -791,25 +791,25 @@ int main () catch (std::exception &exc) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Exception on processing: " << std::endl - << exc.what() << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << exc.what() << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } catch (...) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Unknown exception!" << std::endl - << "Aborting!" 
<< std::endl - << "----------------------------------------------------" - << std::endl; + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } diff --git a/deal.II/examples/step-24/step-24.cc b/deal.II/examples/step-24/step-24.cc index 95807feee6..f959cadbba 100644 --- a/deal.II/examples/step-24/step-24.cc +++ b/deal.II/examples/step-24/step-24.cc @@ -9,10 +9,10 @@ /* further information on this license. */ - // @sect3{Include files} + // @sect3{Include files} - // The following have all been covered - // previously: + // The following have all been covered + // previously: #include #include #include @@ -45,23 +45,23 @@ #include #include - // This is the only new one: We will need a - // library function defined in a class - // GridTools that computes the minimal cell - // diameter. + // This is the only new one: We will need a + // library function defined in a class + // GridTools that computes the minimal cell + // diameter. #include - // The last step is as in all - // previous programs: + // The last step is as in all + // previous programs: namespace Step24 { using namespace dealii; - // @sect3{The "forward problem" class template} + // @sect3{The "forward problem" class template} - // The first part of the main class is - // exactly as in step-23 - // (except for the name): + // The first part of the main class is + // exactly as in step-23 + // (except for the name): template class TATForwardProblem { @@ -94,238 +94,238 @@ namespace Step24 unsigned int timestep_number; const double theta; - // Here's what's new: first, we need - // that boundary mass matrix $B$ that - // came out of the absorbing boundary - // condition. Likewise, since this time - // we consider a realistic medium, we - // must have a measure of the wave speed - // $c_0$ that will enter all the - // formulas with the Laplace matrix - // (which we still define as $(\nabla - // \phi_i,\nabla \phi_j)$): + // Here's what's new: first, we need + // that boundary mass matrix $B$ that + // came out of the absorbing boundary + // condition. Likewise, since this time + // we consider a realistic medium, we + // must have a measure of the wave speed + // $c_0$ that will enter all the + // formulas with the Laplace matrix + // (which we still define as $(\nabla + // \phi_i,\nabla \phi_j)$): SparseMatrix boundary_matrix; const double wave_speed; - // The last thing we have to take care of - // is that we wanted to evaluate the - // solution at a certain number of - // detector locations. We need an array - // to hold these locations, declared here - // and filled in the constructor: + // The last thing we have to take care of + // is that we wanted to evaluate the + // solution at a certain number of + // detector locations. We need an array + // to hold these locations, declared here + // and filled in the constructor: std::vector > detector_locations; }; - // @sect3{Equation data} - - // As usual, we have to define our - // initial values, boundary - // conditions, and right hand side - // functions. Except things are a bit - // simpler this time: we are to - // consider a problem that is driven - // by initial conditions, so there is - // no right hand side function - // (though you could look up in - // step-23 to see how this can be - // done. Secondly, there are no - // boundary conditions: the entire - // boundary of the domain consists of - // absorbing boundary - // conditions. 
That only leaves - // initial conditions, and there - // things are simple too since for - // this particular application only - // nonzero initial conditions for the - // pressure are prescribed, not for - // the velocity (which is zero at the - // initial time). - // - // So this is all we need: a class that - // specifies initial conditions for the - // pressure. In the physical setting - // considered in this program, these are - // small absorbers, which we model as a - // series of little circles where we assume - // that the pressure surplus is one, whereas - // no absorption and therefore no pressure - // surplus is anywhere else. This is how we - // do things (note that if we wanted to - // expand this program to not only compile - // but also to run, we would have to - // initialize the sources with - // three-dimensional source locations): + // @sect3{Equation data} + + // As usual, we have to define our + // initial values, boundary + // conditions, and right hand side + // functions. Except things are a bit + // simpler this time: we are to + // consider a problem that is driven + // by initial conditions, so there is + // no right hand side function + // (though you could look up in + // step-23 to see how this can be + // done. Secondly, there are no + // boundary conditions: the entire + // boundary of the domain consists of + // absorbing boundary + // conditions. That only leaves + // initial conditions, and there + // things are simple too since for + // this particular application only + // nonzero initial conditions for the + // pressure are prescribed, not for + // the velocity (which is zero at the + // initial time). + // + // So this is all we need: a class that + // specifies initial conditions for the + // pressure. In the physical setting + // considered in this program, these are + // small absorbers, which we model as a + // series of little circles where we assume + // that the pressure surplus is one, whereas + // no absorption and therefore no pressure + // surplus is anywhere else. This is how we + // do things (note that if we wanted to + // expand this program to not only compile + // but also to run, we would have to + // initialize the sources with + // three-dimensional source locations): template class InitialValuesP : public Function { public: InitialValuesP () - : - Function() - {} + : + Function() + {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; private: struct Source { - Source (const Point &l, - const double r) - : - location (l), - radius (r) - {} - - const Point location; - const double radius; + Source (const Point &l, + const double r) + : + location (l), + radius (r) + {} + + const Point location; + const double radius; }; }; template double InitialValuesP::value (const Point &p, - const unsigned int /*component*/) const + const unsigned int /*component*/) const { static const Source sources[] = {Source (Point (0, 0), 0.025), - Source (Point (-0.135, 0), 0.05), - Source (Point (0.17, 0), 0.03), - Source (Point (-0.25, 0), 0.02), - Source (Point (-0.05, -0.15), 0.015)}; + Source (Point (-0.135, 0), 0.05), + Source (Point (0.17, 0), 0.03), + Source (Point (-0.25, 0), 0.02), + Source (Point (-0.05, -0.15), 0.015)}; static const unsigned int n_sources = sizeof(sources)/sizeof(sources[0]); for (unsigned int i=0; iTATForwardProblem class} - - // Let's start again with the - // constructor. Setting the member variables - // is straightforward. 
We use the acoustic - // wave speed of mineral oil (in millimeters - // per microsecond, a common unit in - // experimental biomedical imaging) since - // this is where many of the experiments we - // want to compare the output with are made - // in. The Crank-Nicolson scheme is used - // again, i.e. theta is set to 0.5. The time - // step is later selected to satisfy $k = - // \frac hc$ + // @sect3{Implementation of the TATForwardProblem class} + + // Let's start again with the + // constructor. Setting the member variables + // is straightforward. We use the acoustic + // wave speed of mineral oil (in millimeters + // per microsecond, a common unit in + // experimental biomedical imaging) since + // this is where many of the experiments we + // want to compare the output with are made + // in. The Crank-Nicolson scheme is used + // again, i.e. theta is set to 0.5. The time + // step is later selected to satisfy $k = + // \frac hc$ template TATForwardProblem::TATForwardProblem () - : - fe (1), - dof_handler (triangulation), - theta (0.5), - wave_speed (1.437) + : + fe (1), + dof_handler (triangulation), + theta (0.5), + wave_speed (1.437) { - // The second task in the constructor is to - // initialize the array that holds the - // detector locations. The results of this - // program were compared with experiments - // in which the step size of the detector - // spacing is 2.25 degree, corresponding to - // 160 detector locations. The radius of - // the scanning circle is selected to be - // half way between the center and the - // boundary to avoid that the remaining - // reflections from the imperfect boundary - // condition spoils our numerical results. - // - // The locations of the detectors are then - // calculated in clockwise order. Note that - // the following of course only works if we - // are computing in 2d, a condition that we - // guard with an assertion. If we later - // wanted to run the same program in 3d, we - // would have to add code here for the - // initialization of detector locations in - // 3d. Due to the assertion, there is no - // way we can forget to do this. + // The second task in the constructor is to + // initialize the array that holds the + // detector locations. The results of this + // program were compared with experiments + // in which the step size of the detector + // spacing is 2.25 degree, corresponding to + // 160 detector locations. The radius of + // the scanning circle is selected to be + // half way between the center and the + // boundary to avoid that the remaining + // reflections from the imperfect boundary + // condition spoils our numerical results. + // + // The locations of the detectors are then + // calculated in clockwise order. Note that + // the following of course only works if we + // are computing in 2d, a condition that we + // guard with an assertion. If we later + // wanted to run the same program in 3d, we + // would have to add code here for the + // initialization of detector locations in + // 3d. Due to the assertion, there is no + // way we can forget to do this. 
Assert (dim == 2, ExcNotImplemented()); const double detector_step_angle = 2.25; const double detector_radius = 0.5; for (double detector_angle = 2*numbers::PI; - detector_angle >= 0; - detector_angle -= detector_step_angle/360*2*numbers::PI) + detector_angle >= 0; + detector_angle -= detector_step_angle/360*2*numbers::PI) detector_locations.push_back (Point (std::cos(detector_angle), - std::sin(detector_angle)) * - detector_radius); + std::sin(detector_angle)) * + detector_radius); } - // @sect4{TATForwardProblem::setup_system} - - // The following system is pretty much what - // we've already done in - // step-23, but with two important - // differences. First, we have to create a - // circular (or spherical) mesh around the - // origin, with a radius of 1. This nothing - // new: we've done so before in - // step-6, step-10, and - // step-11, where we also explain - // how to attach a boundary object to a - // triangulation to be used whenever the - // triangulation needs to know where new - // boundary points lie when a cell is - // refined. Following this, the mesh is - // refined a number of times. - // - // One thing we had to make sure is that the - // time step satisfies the CFL condition - // discussed in the introduction of - // step-23. Back in that program, - // we ensured this by hand by setting a - // timestep that matches the mesh width, but - // that was error prone because if we refined - // the mesh once more we would also have to - // make sure the time step is changed. Here, - // we do that automatically: we ask a library - // function for the minimal diameter of any - // cell. Then we set $k=\frac h{c_0}$. The - // only problem is: what exactly is $h$? The - // point is that there is really no good - // theory on this question for the wave - // equation. It is known that for uniformly - // refined meshes consisting of rectangles, - // $h$ is the minimal edge length. But for - // meshes on general quadrilaterals, the - // exact relationship appears to be unknown, - // i.e. it is unknown what properties of - // cells are relevant for the CFL - // condition. The problem is that the CFL - // condition follows from knowledge of the - // smallest eigenvalue of the Laplace matrix, - // and that can only be computed analytically - // for simply structured meshes. - // - // The upshot of all this is that we're not - // quite sure what exactly we should take for - // $h$. The function - // GridTools::minimal_cell_diameter computes - // the minimal diameter of all cells. If the - // cells were all squares or cubes, then the - // minimal edge length would be the minimal - // diameter divided by - // std::sqrt(dim). We simply - // generalize this, without theoretical - // justification, to the case of non-uniform - // meshes. - // - // The only other significant change is that - // we need to build the boundary mass - // matrix. We will comment on this further - // down below. + // @sect4{TATForwardProblem::setup_system} + + // The following system is pretty much what + // we've already done in + // step-23, but with two important + // differences. First, we have to create a + // circular (or spherical) mesh around the + // origin, with a radius of 1. This nothing + // new: we've done so before in + // step-6, step-10, and + // step-11, where we also explain + // how to attach a boundary object to a + // triangulation to be used whenever the + // triangulation needs to know where new + // boundary points lie when a cell is + // refined. 
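For reference, attaching such a boundary object follows the pattern of those earlier programs; a sketch with the deal.II classes of this era (the grid-creation lines of the present program lie outside the context shown in this hunk):

  GridGenerator::hyper_ball (triangulation);
  static const HyperBallBoundary<dim> boundary_description;
  triangulation.set_boundary (0, boundary_description);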
Following this, the mesh is + // refined a number of times. + // + // One thing we had to make sure is that the + // time step satisfies the CFL condition + // discussed in the introduction of + // step-23. Back in that program, + // we ensured this by hand by setting a + // timestep that matches the mesh width, but + // that was error prone because if we refined + // the mesh once more we would also have to + // make sure the time step is changed. Here, + // we do that automatically: we ask a library + // function for the minimal diameter of any + // cell. Then we set $k=\frac h{c_0}$. The + // only problem is: what exactly is $h$? The + // point is that there is really no good + // theory on this question for the wave + // equation. It is known that for uniformly + // refined meshes consisting of rectangles, + // $h$ is the minimal edge length. But for + // meshes on general quadrilaterals, the + // exact relationship appears to be unknown, + // i.e. it is unknown what properties of + // cells are relevant for the CFL + // condition. The problem is that the CFL + // condition follows from knowledge of the + // smallest eigenvalue of the Laplace matrix, + // and that can only be computed analytically + // for simply structured meshes. + // + // The upshot of all this is that we're not + // quite sure what exactly we should take for + // $h$. The function + // GridTools::minimal_cell_diameter computes + // the minimal diameter of all cells. If the + // cells were all squares or cubes, then the + // minimal edge length would be the minimal + // diameter divided by + // std::sqrt(dim). We simply + // generalize this, without theoretical + // justification, to the case of non-uniform + // meshes. + // + // The only other significant change is that + // we need to build the boundary mass + // matrix. We will comment on this further + // down below. template void TATForwardProblem::setup_system () { @@ -336,23 +336,23 @@ namespace Step24 triangulation.refine_global (7); time_step = GridTools::minimal_cell_diameter(triangulation) / - wave_speed / - std::sqrt (1.*dim); + wave_speed / + std::sqrt (1.*dim); std::cout << "Number of active cells: " - << triangulation.n_active_cells() - << std::endl; + << triangulation.n_active_cells() + << std::endl; dof_handler.distribute_dofs (fe); std::cout << "Number of degrees of freedom: " - << dof_handler.n_dofs() - << std::endl - << std::endl; + << dof_handler.n_dofs() + << std::endl + << std::endl; sparsity_pattern.reinit (dof_handler.n_dofs(), - dof_handler.n_dofs(), - dof_handler.max_couplings_between_dofs()); + dof_handler.n_dofs(), + dof_handler.max_couplings_between_dofs()); DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); sparsity_pattern.compress(); @@ -361,82 +361,82 @@ namespace Step24 laplace_matrix.reinit (sparsity_pattern); MatrixCreator::create_mass_matrix (dof_handler, QGauss(3), - mass_matrix); + mass_matrix); MatrixCreator::create_laplace_matrix (dof_handler, QGauss(3), - laplace_matrix); - - // The second difference, as mentioned, to - // step-23 is that we need - // to build the boundary mass matrix that - // grew out of the absorbing boundary - // conditions. - // - // A first observation would be that this - // matrix is much sparser than the regular - // mass matrix, since none of the shape - // functions with purely interior support - // contributes to this matrix. 
We could - // therefore optimize the storage pattern - // to this situation and build up a second - // sparsity pattern that only contains the - // nonzero entries that we need. There is a - // trade-off to make here: first, we would - // have to have a second sparsity pattern - // object, so that costs memory. Secondly, - // the matrix attached to this sparsity - // pattern is going to be smaller and - // therefore requires less memory; it would - // also be faster to perform matrix-vector - // multiplications with it. The final - // argument, however, is the one that tips - // the scale: we are not primarily - // interested in performing matrix-vector - // with the boundary matrix alone (though - // we need to do that for the right hand - // side vector once per time step), but - // mostly wish to add it up to the other - // matrices used in the first of the two - // equations since this is the one that is - // going to be multiplied with once per - // iteration of the CG method, - // i.e. significantly more often. It is now - // the case that the SparseMatrix::add - // class allows to add one matrix to - // another, but only if they use the same - // sparsity pattern (the reason being that - // we can't add nonzero entries to a matrix - // after the sparsity pattern has been - // created, so we simply require that the - // two matrices have the same sparsity - // pattern). - // - // So let's go with that: + laplace_matrix); + + // The second difference, as mentioned, to + // step-23 is that we need + // to build the boundary mass matrix that + // grew out of the absorbing boundary + // conditions. + // + // A first observation would be that this + // matrix is much sparser than the regular + // mass matrix, since none of the shape + // functions with purely interior support + // contributes to this matrix. We could + // therefore optimize the storage pattern + // to this situation and build up a second + // sparsity pattern that only contains the + // nonzero entries that we need. There is a + // trade-off to make here: first, we would + // have to have a second sparsity pattern + // object, so that costs memory. Secondly, + // the matrix attached to this sparsity + // pattern is going to be smaller and + // therefore requires less memory; it would + // also be faster to perform matrix-vector + // multiplications with it. The final + // argument, however, is the one that tips + // the scale: we are not primarily + // interested in performing matrix-vector + // with the boundary matrix alone (though + // we need to do that for the right hand + // side vector once per time step), but + // mostly wish to add it up to the other + // matrices used in the first of the two + // equations since this is the one that is + // going to be multiplied with once per + // iteration of the CG method, + // i.e. significantly more often. It is now + // the case that the SparseMatrix::add + // class allows to add one matrix to + // another, but only if they use the same + // sparsity pattern (the reason being that + // we can't add nonzero entries to a matrix + // after the sparsity pattern has been + // created, so we simply require that the + // two matrices have the same sparsity + // pattern). + // + // So let's go with that: boundary_matrix.reinit (sparsity_pattern); - // The second thing to do is to actually - // build the matrix. Here, we need to - // integrate over faces of cells, so first - // we need a quadrature object that works - // on dim-1 dimensional - // objects. 
Secondly, the FEFaceValues - // variant of FEValues that works on faces, - // as its name suggest. And finally, the - // other variables that are part of the - // assembly machinery. All of this we put - // between curly braces to limit the scope - // of these variables to where we actually - // need them. - // - // The actual act of assembling the matrix - // is then fairly straightforward: we loop - // over all cells, over all faces of each - // of these cells, and then do something - // only if that particular face is at the - // boundary of the domain. Like this: + // The second thing to do is to actually + // build the matrix. Here, we need to + // integrate over faces of cells, so first + // we need a quadrature object that works + // on dim-1 dimensional + // objects. Secondly, the FEFaceValues + // variant of FEValues that works on faces, + // as its name suggest. And finally, the + // other variables that are part of the + // assembly machinery. All of this we put + // between curly braces to limit the scope + // of these variables to where we actually + // need them. + // + // The actual act of assembling the matrix + // is then fairly straightforward: we loop + // over all cells, over all faces of each + // of these cells, and then do something + // only if that particular face is at the + // boundary of the domain. Like this: { const QGauss quadrature_formula(3); FEFaceValues fe_values (fe, quadrature_formula, - update_values | update_JxW_values); + update_values | update_JxW_values); const unsigned int dofs_per_cell = fe.dofs_per_cell; const unsigned int n_q_points = quadrature_formula.size(); @@ -448,37 +448,37 @@ namespace Step24 typename DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(), - endc = dof_handler.end(); + cell = dof_handler.begin_active(), + endc = dof_handler.end(); for (; cell!=endc; ++cell) - for (unsigned int f=0; f::faces_per_cell; ++f) - if (cell->at_boundary(f)) - { - cell_matrix = 0; - - fe_values.reinit (cell, f); - - for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); - for (unsigned int i=0; i::faces_per_cell; ++f) + if (cell->at_boundary(f)) + { + cell_matrix = 0; + + fe_values.reinit (cell, f); + + for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); + for (unsigned int i=0; i void TATForwardProblem::solve_p () { @@ -509,11 +509,11 @@ namespace Step24 SolverCG<> cg (solver_control); cg.solve (system_matrix, solution_p, system_rhs_p, - PreconditionIdentity()); + PreconditionIdentity()); std::cout << " p-equation: " << solver_control.last_step() - << " CG iterations." - << std::endl; + << " CG iterations." + << std::endl; } @@ -524,19 +524,19 @@ namespace Step24 SolverCG<> cg (solver_control); cg.solve (mass_matrix, solution_v, system_rhs_v, - PreconditionIdentity()); + PreconditionIdentity()); std::cout << " v-equation: " << solver_control.last_step() - << " CG iterations." - << std::endl; + << " CG iterations." + << std::endl; } - // @sect4{TATForwardProblem::output_results} + // @sect4{TATForwardProblem::output_results} - // The same holds here: the function is from - // step-23. + // The same holds here: the function is from + // step-23. 
template void TATForwardProblem::output_results () const { @@ -549,43 +549,43 @@ namespace Step24 data_out.build_patches (); const std::string filename = "solution-" + - Utilities::int_to_string (timestep_number, 3) + - ".gnuplot"; + Utilities::int_to_string (timestep_number, 3) + + ".gnuplot"; std::ofstream output (filename.c_str()); data_out.write_gnuplot (output); } - // @sect4{TATForwardProblem::run} - - // This function that does most of the work - // is pretty much again like in step-23, - // though we make things a bit clearer by - // using the vectors G1 and G2 mentioned in - // the introduction. Compared to the overall - // memory consumption of the program, the - // introduction of a few temporary vectors - // isn't doing much harm. - // - // The only changes to this function are: - // First, that we do not have to project - // initial values for the velocity $v$, since - // we know that it is zero. And second that - // we evaluate the solution at the detector - // locations computed in the - // constructor. This is done using the - // VectorTools::point_value function. These - // values are then written to a file that we - // open at the beginning of the function. + // @sect4{TATForwardProblem::run} + + // This function that does most of the work + // is pretty much again like in step-23, + // though we make things a bit clearer by + // using the vectors G1 and G2 mentioned in + // the introduction. Compared to the overall + // memory consumption of the program, the + // introduction of a few temporary vectors + // isn't doing much harm. + // + // The only changes to this function are: + // First, that we do not have to project + // initial values for the velocity $v$, since + // we know that it is zero. And second that + // we evaluate the solution at the detector + // locations computed in the + // constructor. This is done using the + // VectorTools::point_value function. These + // values are then written to a file that we + // open at the beginning of the function. 
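Evaluating the solution at one detector location is a single library call per point; a minimal sketch of that pattern, using the member names introduced above:

  for (unsigned int i=0; i<detector_locations.size(); ++i)
    detector_data << " "
                  << VectorTools::point_value (dof_handler,
                                               solution_p,
                                               detector_locations[i]);
  detector_data << std::endl;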
template void TATForwardProblem::run () { setup_system(); VectorTools::project (dof_handler, constraints, - QGauss(3), InitialValuesP(), - old_solution_p); + QGauss(3), InitialValuesP(), + old_solution_p); old_solution_v = 0; @@ -597,65 +597,65 @@ namespace Step24 const double end_time = 0.7; for (timestep_number=1, time=time_step; - time<=end_time; - time+=time_step, ++timestep_number) + time<=end_time; + time+=time_step, ++timestep_number) { - std::cout << std::endl; - std::cout<< "time_step " << timestep_number << " @ t=" << time << std::endl; + std::cout << std::endl; + std::cout<< "time_step " << timestep_number << " @ t=" << time << std::endl; - mass_matrix.vmult (G1, old_solution_p); - mass_matrix.vmult (tmp, old_solution_v); - G1.add(time_step * (1-theta), tmp); + mass_matrix.vmult (G1, old_solution_p); + mass_matrix.vmult (tmp, old_solution_v); + G1.add(time_step * (1-theta), tmp); - mass_matrix.vmult (G2, old_solution_v); - laplace_matrix.vmult (tmp, old_solution_p); - G2.add (-wave_speed * wave_speed * time_step * (1-theta), tmp); + mass_matrix.vmult (G2, old_solution_v); + laplace_matrix.vmult (tmp, old_solution_p); + G2.add (-wave_speed * wave_speed * time_step * (1-theta), tmp); - boundary_matrix.vmult (tmp, old_solution_p); - G2.add (wave_speed, tmp); + boundary_matrix.vmult (tmp, old_solution_p); + G2.add (wave_speed, tmp); - system_rhs_p = G1; - system_rhs_p.add(time_step * theta , G2); + system_rhs_p = G1; + system_rhs_p.add(time_step * theta , G2); - solve_p (); + solve_p (); - system_rhs_v = G2; - laplace_matrix.vmult (tmp, solution_p); - system_rhs_v.add (-time_step * theta * wave_speed * wave_speed, tmp); + system_rhs_v = G2; + laplace_matrix.vmult (tmp, solution_p); + system_rhs_v.add (-time_step * theta * wave_speed * wave_speed, tmp); - boundary_matrix.vmult (tmp, solution_p); - system_rhs_v.add (-wave_speed, tmp); + boundary_matrix.vmult (tmp, solution_p); + system_rhs_v.add (-wave_speed, tmp); - solve_v (); + solve_v (); - output_results (); + output_results (); - detector_data << time; - for (unsigned int i=0 ; imain function} + // @sect3{The main function} - // What remains is the main function of the - // program. There is nothing here that hasn't - // been shown in several of the previous - // programs: + // What remains is the main function of the + // program. There is nothing here that hasn't + // been shown in several of the previous + // programs: int main () { try @@ -671,25 +671,25 @@ int main () catch (std::exception &exc) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Exception on processing: " << std::endl - << exc.what() << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << exc.what() << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } catch (...) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Unknown exception!" << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << "Aborting!" 
<< std::endl + << "----------------------------------------------------" + << std::endl; return 1; } diff --git a/deal.II/examples/step-25/step-25.cc b/deal.II/examples/step-25/step-25.cc index 71bbdb39d4..8f912012ef 100644 --- a/deal.II/examples/step-25/step-25.cc +++ b/deal.II/examples/step-25/step-25.cc @@ -9,23 +9,23 @@ /* further information on this license. */ - // @sect3{Include files and global variables} - - // For an explanation of the include - // files, the reader should refer to - // the example programs step-1 - // through step-4. They are in the - // standard order, which is - // base -- - // lac -- - // grid -- - // dofs -- - // fe -- - // numerics (since each - // of these categories roughly builds - // upon previous ones), then a few - // C++ headers for file input/output - // and string streams. + // @sect3{Include files and global variables} + + // For an explanation of the include + // files, the reader should refer to + // the example programs step-1 + // through step-4. They are in the + // standard order, which is + // base -- + // lac -- + // grid -- + // dofs -- + // fe -- + // numerics (since each + // of these categories roughly builds + // upon previous ones), then a few + // C++ headers for file input/output + // and string streams. #include #include #include @@ -53,70 +53,70 @@ #include - // The last step is as in all - // previous programs: + // The last step is as in all + // previous programs: namespace Step25 { using namespace dealii; - // @sect3{The SineGordonProblem class template} - - // The entire algorithm for solving the - // problem is encapsulated in this class. As - // in previous example programs, the class is - // declared with a template parameter, which - // is the spatial dimension, so that we can - // solve the sine-Gordon equation in one, two - // or three spatial dimensions. For more on - // the dimension-independent - // class-encapsulation of the problem, the - // reader should consult step-3 and step-4. - // - // Compared to step-23 and step-24, there - // isn't anything newsworthy in the general - // structure of the program (though there is - // of course in the inner workings of the - // various functions!). The most notable - // difference is the presence of the two new - // functions compute_nl_term and - // compute_nl_matrix that - // compute the nonlinear contributions to the - // system matrix and right-hand side of the first - // equation, as discussed in the - // Introduction. In addition, we have to have - // a vector solution_update that - // contains the nonlinear update to the - // solution vector in each Newton step. - // - // As also mentioned in the introduction, we - // do not store the velocity variable in this - // program, but the mass matrix times the - // velocity. This is done in the - // M_x_velocity variable (the - // "x" is intended to stand for - // "times"). - // - // Finally, the - // output_timestep_skip - // variable stores the number of time - // steps to be taken each time before - // graphical output is to be - // generated. This is of importance - // when using fine meshes (and - // consequently small time steps) - // where we would run lots of time - // steps and create lots of output - // files of solutions that look - // almost the same in subsequent - // files. This only clogs up our - // visualization procedures and we - // should avoid creating more output - // than we are really interested - // in. 
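Inside the time stepping loop further down, this boils down to a guard of roughly the following form (a sketch, not a literal excerpt):

  if (timestep_number % output_timestep_skip == 0)
    output_results (timestep_number);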
Therefore, if this variable is - // set to a value $n$ bigger than one, - // output is generated only every - // $n$th time step. + // @sect3{The SineGordonProblem class template} + + // The entire algorithm for solving the + // problem is encapsulated in this class. As + // in previous example programs, the class is + // declared with a template parameter, which + // is the spatial dimension, so that we can + // solve the sine-Gordon equation in one, two + // or three spatial dimensions. For more on + // the dimension-independent + // class-encapsulation of the problem, the + // reader should consult step-3 and step-4. + // + // Compared to step-23 and step-24, there + // isn't anything newsworthy in the general + // structure of the program (though there is + // of course in the inner workings of the + // various functions!). The most notable + // difference is the presence of the two new + // functions compute_nl_term and + // compute_nl_matrix that + // compute the nonlinear contributions to the + // system matrix and right-hand side of the first + // equation, as discussed in the + // Introduction. In addition, we have to have + // a vector solution_update that + // contains the nonlinear update to the + // solution vector in each Newton step. + // + // As also mentioned in the introduction, we + // do not store the velocity variable in this + // program, but the mass matrix times the + // velocity. This is done in the + // M_x_velocity variable (the + // "x" is intended to stand for + // "times"). + // + // Finally, the + // output_timestep_skip + // variable stores the number of time + // steps to be taken each time before + // graphical output is to be + // generated. This is of importance + // when using fine meshes (and + // consequently small time steps) + // where we would run lots of time + // steps and create lots of output + // files of solutions that look + // almost the same in subsequent + // files. This only clogs up our + // visualization procedures and we + // should avoid creating more output + // than we are really interested + // in. Therefore, if this variable is + // set to a value $n$ bigger than one, + // output is generated only every + // $n$th time step. template class SineGordonProblem { @@ -128,11 +128,11 @@ namespace Step25 void make_grid_and_dofs (); void assemble_system (); void compute_nl_term (const Vector &old_data, - const Vector &new_data, - Vector &nl_term) const; + const Vector &new_data, + Vector &nl_term) const; void compute_nl_matrix (const Vector &old_data, - const Vector &new_data, - SparseMatrix &nl_matrix) const; + const Vector &new_data, + SparseMatrix &nl_matrix) const; unsigned int solve (); void output_results (const unsigned int timestep_number) const; @@ -159,197 +159,197 @@ namespace Step25 }; - // @sect3{Initial conditions} - - // In the following two classes, we first - // implement the exact solution for 1D, 2D, - // and 3D mentioned in the introduction to - // this program. This space-time solution may - // be of independent interest if one wanted - // to test the accuracy of the program by - // comparing the numerical against the - // analytic solution (note however that the - // program uses a finite domain, whereas - // these are analytic solutions for an - // unbounded domain). This may, for example, - // be done using the - // VectorTools::integrate_difference - // function. 
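Such an accuracy check is not part of this program, but a sketch of it would follow the usual pattern (the quadrature order 3 is an arbitrary choice here):

  Vector<float> difference_per_cell (triangulation.n_active_cells());
  VectorTools::integrate_difference (dof_handler,
                                     solution,
                                     ExactSolution<dim>(1, time),
                                     difference_per_cell,
                                     QGauss<dim>(3),
                                     VectorTools::L2_norm);
  const double L2_error = difference_per_cell.l2_norm();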
Note, again (as was already - // discussed in step-23), how we describe - // space-time functions as spatial functions - // that depend on a time variable that can be - // set and queried using the - // FunctionTime::set_time() and - // FunctionTime::get_time() member functions - // of the FunctionTime base class of the - // Function class. + // @sect3{Initial conditions} + + // In the following two classes, we first + // implement the exact solution for 1D, 2D, + // and 3D mentioned in the introduction to + // this program. This space-time solution may + // be of independent interest if one wanted + // to test the accuracy of the program by + // comparing the numerical against the + // analytic solution (note however that the + // program uses a finite domain, whereas + // these are analytic solutions for an + // unbounded domain). This may, for example, + // be done using the + // VectorTools::integrate_difference + // function. Note, again (as was already + // discussed in step-23), how we describe + // space-time functions as spatial functions + // that depend on a time variable that can be + // set and queried using the + // FunctionTime::set_time() and + // FunctionTime::get_time() member functions + // of the FunctionTime base class of the + // Function class. template class ExactSolution : public Function { public: ExactSolution (const unsigned int n_components = 1, - const double time = 0.) : Function(n_components, time) {} + const double time = 0.) : Function(n_components, time) {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; }; template double ExactSolution::value (const Point &p, - const unsigned int /*component*/) const + const unsigned int /*component*/) const { double t = this->get_time (); switch (dim) { - case 1: - { - const double m = 0.5; - const double c1 = 0.; - const double c2 = 0.; - return -4.*std::atan (m / - std::sqrt(1.-m*m) * - std::sin(std::sqrt(1.-m*m)*t+c2) / - std::cosh(m*p[0]+c1)); - } - - case 2: - { - const double theta = numbers::PI/4.; - const double lambda = 1.; - const double a0 = 1.; - const double s = 1.; - const double arg = p[0] * std::cos(theta) + - std::sin(theta) * - (p[1] * std::cosh(lambda) + - t * std::sinh(lambda)); - return 4.*std::atan(a0*std::exp(s*arg)); - } - - case 3: - { - double theta = numbers::PI/4; - double phi = numbers::PI/4; - double tau = 1.; - double c0 = 1.; - double s = 1.; - double arg = p[0]*std::cos(theta) + - p[1]*std::sin(theta) * std::cos(phi) + - std::sin(theta) * std::sin(phi) * - (p[2]*std::cosh(tau)+t*std::sinh(tau)); - return 4.*std::atan(c0*std::exp(s*arg)); - } - - default: - Assert (false, ExcNotImplemented()); - return -1e8; + case 1: + { + const double m = 0.5; + const double c1 = 0.; + const double c2 = 0.; + return -4.*std::atan (m / + std::sqrt(1.-m*m) * + std::sin(std::sqrt(1.-m*m)*t+c2) / + std::cosh(m*p[0]+c1)); + } + + case 2: + { + const double theta = numbers::PI/4.; + const double lambda = 1.; + const double a0 = 1.; + const double s = 1.; + const double arg = p[0] * std::cos(theta) + + std::sin(theta) * + (p[1] * std::cosh(lambda) + + t * std::sinh(lambda)); + return 4.*std::atan(a0*std::exp(s*arg)); + } + + case 3: + { + double theta = numbers::PI/4; + double phi = numbers::PI/4; + double tau = 1.; + double c0 = 1.; + double s = 1.; + double arg = p[0]*std::cos(theta) + + p[1]*std::sin(theta) * std::cos(phi) + + std::sin(theta) * std::sin(phi) * + (p[2]*std::cosh(tau)+t*std::sinh(tau)); + return 
4.*std::atan(c0*std::exp(s*arg)); + } + + default: + Assert (false, ExcNotImplemented()); + return -1e8; } } - // In the second part of this section, we - // provide the initial conditions. We are lazy - // (and cautious) and don't want to implement - // the same functions as above a second - // time. Rather, if we are queried for - // initial conditions, we create an object - // ExactSolution, set it to the - // correct time, and let it compute whatever - // values the exact solution has at that - // time: + // In the second part of this section, we + // provide the initial conditions. We are lazy + // (and cautious) and don't want to implement + // the same functions as above a second + // time. Rather, if we are queried for + // initial conditions, we create an object + // ExactSolution, set it to the + // correct time, and let it compute whatever + // values the exact solution has at that + // time: template class InitialValues : public Function { public: InitialValues (const unsigned int n_components = 1, - const double time = 0.) - : - Function(n_components, time) - {} + const double time = 0.) + : + Function(n_components, time) + {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; }; template double InitialValues::value (const Point &p, - const unsigned int component) const + const unsigned int component) const { return ExactSolution(1, this->get_time()).value (p, component); } - // @sect3{Implementation of the SineGordonProblem class} - - // Let's move on to the implementation of the - // main class, as it implements the algorithm - // outlined in the introduction. - - // @sect4{SineGordonProblem::SineGordonProblem} - - // This is the constructor of the - // SineGordonProblem class. It - // specifies the desired polynomial degree of - // the finite elements, associates a - // DoFHandler to the - // triangulation object (just as - // in the example programs step-3 and - // step-4), initializes the current or - // initial time, the final time, the time - // step size, and the value of $\theta$ for - // the time stepping scheme. Since the - // solutions we compute here are - // time-periodic, the actual value of the - // start-time doesn't matter, and we choose - // it so that we start at an interesting - // time. - // - // Note that if we were to chose the explicit - // Euler time stepping scheme ($\theta = 0$), - // then we must pick a time step $k \le h$, - // otherwise the scheme is not stable and - // oscillations might arise in the - // solution. The Crank-Nicolson scheme - // ($\theta = \frac{1}{2}$) and the implicit - // Euler scheme ($\theta=1$) do not suffer - // from this deficiency, since they are - // unconditionally stable. However, even then - // the time step should be chosen to be on - // the order of $h$ in order to obtain a good - // solution. Since we know that our mesh - // results from the uniform subdivision of a - // rectangle, we can compute that time step - // easily; if we had a different domain, the - // technique in step-24 using - // GridTools::minimal_cell_diameter would - // work as well. + // @sect3{Implementation of the SineGordonProblem class} + + // Let's move on to the implementation of the + // main class, as it implements the algorithm + // outlined in the introduction. + + // @sect4{SineGordonProblem::SineGordonProblem} + + // This is the constructor of the + // SineGordonProblem class. 
It + // specifies the desired polynomial degree of + // the finite elements, associates a + // DoFHandler to the + // triangulation object (just as + // in the example programs step-3 and + // step-4), initializes the current or + // initial time, the final time, the time + // step size, and the value of $\theta$ for + // the time stepping scheme. Since the + // solutions we compute here are + // time-periodic, the actual value of the + // start-time doesn't matter, and we choose + // it so that we start at an interesting + // time. + // + // Note that if we were to chose the explicit + // Euler time stepping scheme ($\theta = 0$), + // then we must pick a time step $k \le h$, + // otherwise the scheme is not stable and + // oscillations might arise in the + // solution. The Crank-Nicolson scheme + // ($\theta = \frac{1}{2}$) and the implicit + // Euler scheme ($\theta=1$) do not suffer + // from this deficiency, since they are + // unconditionally stable. However, even then + // the time step should be chosen to be on + // the order of $h$ in order to obtain a good + // solution. Since we know that our mesh + // results from the uniform subdivision of a + // rectangle, we can compute that time step + // easily; if we had a different domain, the + // technique in step-24 using + // GridTools::minimal_cell_diameter would + // work as well. template SineGordonProblem::SineGordonProblem () - : - fe (1), - dof_handler (triangulation), - n_global_refinements (6), - time (-5.4414), - final_time (2.7207), - time_step (10*1./std::pow(2.,1.*n_global_refinements)), - theta (0.5), - output_timestep_skip (1) + : + fe (1), + dof_handler (triangulation), + n_global_refinements (6), + time (-5.4414), + final_time (2.7207), + time_step (10*1./std::pow(2.,1.*n_global_refinements)), + theta (0.5), + output_timestep_skip (1) {} - // @sect4{SineGordonProblem::make_grid_and_dofs} - - // This function creates a rectangular grid - // in dim dimensions and refines - // it several times. Also, all matrix and - // vector members of the - // SineGordonProblem class are - // initialized to their appropriate sizes - // once the degrees of freedom have been - // assembled. Like step-24, we use the - // MatrixCreator class to - // generate a mass matrix $M$ and a Laplace - // matrix $A$ and store them in the - // appropriate variables for the remainder of - // the program's life. + // @sect4{SineGordonProblem::make_grid_and_dofs} + + // This function creates a rectangular grid + // in dim dimensions and refines + // it several times. Also, all matrix and + // vector members of the + // SineGordonProblem class are + // initialized to their appropriate sizes + // once the degrees of freedom have been + // assembled. Like step-24, we use the + // MatrixCreator class to + // generate a mass matrix $M$ and a Laplace + // matrix $A$ and store them in the + // appropriate variables for the remainder of + // the program's life. 
template void SineGordonProblem::make_grid_and_dofs () { @@ -357,21 +357,21 @@ namespace Step25 triangulation.refine_global (n_global_refinements); std::cout << " Number of active cells: " - << triangulation.n_active_cells() - << std::endl - << " Total number of cells: " - << triangulation.n_cells() - << std::endl; + << triangulation.n_active_cells() + << std::endl + << " Total number of cells: " + << triangulation.n_cells() + << std::endl; dof_handler.distribute_dofs (fe); std::cout << " Number of degrees of freedom: " - << dof_handler.n_dofs() - << std::endl; + << dof_handler.n_dofs() + << std::endl; sparsity_pattern.reinit (dof_handler.n_dofs(), - dof_handler.n_dofs(), - dof_handler.max_couplings_between_dofs()); + dof_handler.n_dofs(), + dof_handler.max_couplings_between_dofs()); DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); sparsity_pattern.compress (); @@ -380,11 +380,11 @@ namespace Step25 laplace_matrix.reinit (sparsity_pattern); MatrixCreator::create_mass_matrix (dof_handler, - QGauss(3), - mass_matrix); + QGauss(3), + mass_matrix); MatrixCreator::create_laplace_matrix (dof_handler, - QGauss(3), - laplace_matrix); + QGauss(3), + laplace_matrix); solution.reinit (dof_handler.n_dofs()); solution_update.reinit (dof_handler.n_dofs()); @@ -393,35 +393,35 @@ namespace Step25 system_rhs.reinit (dof_handler.n_dofs()); } - // @sect4{SineGordonProblem::assemble_system} - - // This functions assembles the system matrix - // and right-hand side vector for each - // iteration of Newton's method. The reader - // should refer to the Introduction for the - // explicit formulas for the system matrix - // and right-hand side. - // - // Note that during each time step, we have to - // add up the various contributions to the - // matrix and right hand sides. In contrast - // to step-23 and step-24, this requires - // assembling a few more terms, since they - // depend on the solution of the previous - // time step or previous nonlinear step. We - // use the functions - // compute_nl_matrix and - // compute_nl_term to do this, - // while the present function provides the - // top-level logic. + // @sect4{SineGordonProblem::assemble_system} + + // This functions assembles the system matrix + // and right-hand side vector for each + // iteration of Newton's method. The reader + // should refer to the Introduction for the + // explicit formulas for the system matrix + // and right-hand side. + // + // Note that during each time step, we have to + // add up the various contributions to the + // matrix and right hand sides. In contrast + // to step-23 and step-24, this requires + // assembling a few more terms, since they + // depend on the solution of the previous + // time step or previous nonlinear step. We + // use the functions + // compute_nl_matrix and + // compute_nl_term to do this, + // while the present function provides the + // top-level logic. template void SineGordonProblem::assemble_system () { - // First we assemble the Jacobian - // matrix $F'_h(U^{n,l})$, where - // $U^{n,l}$ is stored in the vector - // solution for - // convenience. + // First we assemble the Jacobian + // matrix $F'_h(U^{n,l})$, where + // $U^{n,l}$ is stored in the vector + // solution for + // convenience. 
system_matrix = 0; system_matrix.copy_from (mass_matrix); system_matrix.add (std::pow(time_step*theta,2), laplace_matrix); @@ -430,8 +430,8 @@ namespace Step25 compute_nl_matrix (old_solution, solution, tmp_matrix); system_matrix.add (-std::pow(time_step*theta,2), tmp_matrix); - // Then, we compute the right-hand - // side vector $-F_h(U^{n,l})$. + // Then, we compute the right-hand + // side vector $-F_h(U^{n,l})$. system_rhs = 0; tmp_matrix = 0; @@ -459,54 +459,54 @@ namespace Step25 system_rhs *= -1; } - // @sect4{SineGordonProblem::compute_nl_term} - - // This function computes the vector - // $S(\cdot,\cdot)$, which appears in the - // nonlinear term in the both equations of - // the split formulation. This function not - // only simplifies the repeated computation - // of this term, but it is also a fundamental - // part of the nonlinear iterative solver - // that we use when the time stepping is - // implicit (i.e. $\theta\ne 0$). Moreover, - // we must allow the function to receive as - // input an "old" and a "new" solution. These - // may not be the actual solutions of the - // problem stored in - // old_solution and - // solution, but are simply the - // two functions we linearize about. For the - // purposes of this function, let us call the - // first two arguments $w_{\mathrm{old}}$ and - // $w_{\mathrm{new}}$ in the documentation of - // this class below, respectively. - // - // As a side-note, it is perhaps worth - // investigating what order quadrature - // formula is best suited for this type of - // integration. Since $\sin(\cdot)$ is not a - // polynomial, there are probably no - // quadrature formulas that can integrate - // these terms exactly. It is usually - // sufficient to just make sure that the - // right hand side is integrated up to the - // same order of accuracy as the - // discretization scheme is, but it may be - // possible to improve on the constant in the - // asympotitic statement of convergence by - // choosing a more accurate quadrature - // formula. + // @sect4{SineGordonProblem::compute_nl_term} + + // This function computes the vector + // $S(\cdot,\cdot)$, which appears in the + // nonlinear term in the both equations of + // the split formulation. This function not + // only simplifies the repeated computation + // of this term, but it is also a fundamental + // part of the nonlinear iterative solver + // that we use when the time stepping is + // implicit (i.e. $\theta\ne 0$). Moreover, + // we must allow the function to receive as + // input an "old" and a "new" solution. These + // may not be the actual solutions of the + // problem stored in + // old_solution and + // solution, but are simply the + // two functions we linearize about. For the + // purposes of this function, let us call the + // first two arguments $w_{\mathrm{old}}$ and + // $w_{\mathrm{new}}$ in the documentation of + // this class below, respectively. + // + // As a side-note, it is perhaps worth + // investigating what order quadrature + // formula is best suited for this type of + // integration. Since $\sin(\cdot)$ is not a + // polynomial, there are probably no + // quadrature formulas that can integrate + // these terms exactly. It is usually + // sufficient to just make sure that the + // right hand side is integrated up to the + // same order of accuracy as the + // discretization scheme is, but it may be + // possible to improve on the constant in the + // asympotitic statement of convergence by + // choosing a more accurate quadrature + // formula. 
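  // To make the remark about quadrature orders concrete, the following
  // small check compares Gauss formulas of increasing order on the
  // non-polynomial integrand sin(x) over the unit interval, whose exact
  // integral is 1-cos(1). It is purely illustrative and not called
  // anywhere in the program; the function name is made up for this
  // example, and the standard cmath and iostream facilities are
  // assumed to be available as elsewhere in this file.
  void check_gauss_accuracy_for_sine ()
  {
    const double exact_integral = 1. - std::cos (1.);
    for (unsigned int n_points=1; n_points<=4; ++n_points)
      {
        const QGauss<1> quadrature (n_points);
        double approximate_integral = 0;
        for (unsigned int q=0; q<quadrature.size(); ++q)
          approximate_integral += std::sin (quadrature.point(q)[0]) *
                                  quadrature.weight (q);
        std::cout << "QGauss<1>(" << n_points << ") error: "
                  << std::fabs (approximate_integral - exact_integral)
                  << std::endl;
      }
  }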
template void SineGordonProblem::compute_nl_term (const Vector &old_data, - const Vector &new_data, - Vector &nl_term) const + const Vector &new_data, + Vector &nl_term) const { const QGauss quadrature_formula (3); FEValues fe_values (fe, quadrature_formula, - update_values | - update_JxW_values | - update_quadrature_points); + update_values | + update_JxW_values | + update_quadrature_points); const unsigned int dofs_per_cell = fe.dofs_per_cell; const unsigned int n_q_points = quadrature_formula.size(); @@ -522,64 +522,64 @@ namespace Step25 for (; cell!=endc; ++cell) { - // Once we re-initialize our - // FEValues instantiation - // to the current cell, we make use of - // the get_function_values - // routine to get the values of the - // "old" data (presumably at - // $t=t_{n-1}$) and the "new" data - // (presumably at $t=t_n$) at the nodes - // of the chosen quadrature formula. - fe_values.reinit (cell); - fe_values.get_function_values (old_data, old_data_values); - fe_values.get_function_values (new_data, new_data_values); - - // Now, we can evaluate $\int_K - // \sin\left[\theta w_{\mathrm{new}} + - // (1-\theta) w_{\mathrm{old}}\right] - // \,\varphi_j\,\mathrm{d}x$ using the - // desired quadrature formula. - for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); - - for (unsigned int i=0; iFEValues instantiation + // to the current cell, we make use of + // the get_function_values + // routine to get the values of the + // "old" data (presumably at + // $t=t_{n-1}$) and the "new" data + // (presumably at $t=t_n$) at the nodes + // of the chosen quadrature formula. + fe_values.reinit (cell); + fe_values.get_function_values (old_data, old_data_values); + fe_values.get_function_values (new_data, new_data_values); + + // Now, we can evaluate $\int_K + // \sin\left[\theta w_{\mathrm{new}} + + // (1-\theta) w_{\mathrm{old}}\right] + // \,\varphi_j\,\mathrm{d}x$ using the + // desired quadrature formula. + for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); + + for (unsigned int i=0; icompute_nl_term, we must - // allow this function to receive as input an - // "old" and a "new" solution, which we again - // call $w_{\mathrm{old}}$ and - // $w_{\mathrm{new}}$ below, respectively. + // @sect4{SineGordonProblem::compute_nl_matrix} + + // This is the second function dealing with the + // nonlinear scheme. It computes the matrix + // $N(\cdot,\cdot)$, whicih appears in the + // nonlinear term in the Jacobian of + // $F(\cdot)$. Just as + // compute_nl_term, we must + // allow this function to receive as input an + // "old" and a "new" solution, which we again + // call $w_{\mathrm{old}}$ and + // $w_{\mathrm{new}}$ below, respectively. template void SineGordonProblem::compute_nl_matrix (const Vector &old_data, - const Vector &new_data, - SparseMatrix &nl_matrix) const + const Vector &new_data, + SparseMatrix &nl_matrix) const { QGauss quadrature_formula (3); FEValues fe_values (fe, quadrature_formula, - update_values | update_JxW_values | update_quadrature_points); + update_values | update_JxW_values | update_quadrature_points); const unsigned int dofs_per_cell = fe.dofs_per_cell; const unsigned int n_q_points = quadrature_formula.size(); @@ -595,88 +595,88 @@ namespace Step25 for (; cell!=endc; ++cell) { - // Again, first we - // re-initialize our - // FEValues - // instantiation to the current - // cell. 
- fe_values.reinit (cell); - fe_values.get_function_values (old_data, old_data_values); - fe_values.get_function_values (new_data, new_data_values); - - // Then, we evaluate $\int_K - // \cos\left[\theta - // w_{\mathrm{new}} + - // (1-\theta) - // w_{\mathrm{old}}\right]\, - // \varphi_i\, - // \varphi_j\,\mathrm{d}x$ - // using the desired quadrature - // formula. - for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); - - for (unsigned int i=0; iFEValues + // instantiation to the current + // cell. + fe_values.reinit (cell); + fe_values.get_function_values (old_data, old_data_values); + fe_values.get_function_values (new_data, new_data_values); + + // Then, we evaluate $\int_K + // \cos\left[\theta + // w_{\mathrm{new}} + + // (1-\theta) + // w_{\mathrm{old}}\right]\, + // \varphi_i\, + // \varphi_j\,\mathrm{d}x$ + // using the desired quadrature + // formula. + for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); + + for (unsigned int i=0; isolution_update and used to update - // solution in the - // run function. - // - // Note that we re-set the solution update to - // zero before solving for it. This is not - // necessary: iterative solvers can start - // from any point and converge to the correct - // solution. If one has a good estimate about - // the solution of a linear system, it may be - // worthwhile to start from that vector, but - // as a general observation it is a fact that - // the starting point doesn't matter very - // much: it has to be a very, very good guess - // to reduce the number of iterations by more - // than a few. It turns out that for this problem, - // using the previous nonlinear update as a - // starting point actually hurts convergence and - // increases the number of iterations needed, - // so we simply set it to zero. - // - // The function returns the number of - // iterations it took to converge to a - // solution. This number will later be used - // to generate output on the screen showing - // how many iterations were needed in each - // nonlinear iteration. + // @sect4{SineGordonProblem::solve} + + // As discussed in the Introduction, this + // function uses the CG iterative solver on + // the linear system of equations resulting + // from the finite element spatial + // discretization of each iteration of + // Newton's method for the (nonlinear) first + // equation of the split formulation. The + // solution to the system is, in fact, + // $\delta U^{n,l}$ so it is stored in + // solution_update and used to update + // solution in the + // run function. + // + // Note that we re-set the solution update to + // zero before solving for it. This is not + // necessary: iterative solvers can start + // from any point and converge to the correct + // solution. If one has a good estimate about + // the solution of a linear system, it may be + // worthwhile to start from that vector, but + // as a general observation it is a fact that + // the starting point doesn't matter very + // much: it has to be a very, very good guess + // to reduce the number of iterations by more + // than a few. It turns out that for this problem, + // using the previous nonlinear update as a + // starting point actually hurts convergence and + // increases the number of iterations needed, + // so we simply set it to zero. + // + // The function returns the number of + // iterations it took to converge to a + // solution. 
This number will later be used + // to generate output on the screen showing + // how many iterations were needed in each + // nonlinear iteration. template unsigned int SineGordonProblem::solve () @@ -689,18 +689,18 @@ namespace Step25 solution_update = 0; cg.solve (system_matrix, solution_update, - system_rhs, - preconditioner); + system_rhs, + preconditioner); return solver_control.last_step(); } - // @sect4{SineGordonProblem::output_results} + // @sect4{SineGordonProblem::output_results} - // This function outputs the results to a - // file. It is pretty much identical to the - // respective functions in step-23 and - // step-24: + // This function outputs the results to a + // file. It is pretty much identical to the + // respective functions in step-23 and + // step-24: template void SineGordonProblem::output_results (const unsigned int timestep_number) const @@ -712,191 +712,191 @@ namespace Step25 data_out.build_patches (); const std::string filename = "solution-" + - Utilities::int_to_string (timestep_number, 3) + - ".vtk"; + Utilities::int_to_string (timestep_number, 3) + + ".vtk"; std::ofstream output (filename.c_str()); data_out.write_vtk (output); } - // @sect4{SineGordonProblem::run} + // @sect4{SineGordonProblem::run} - // This function has the top-level - // control over everything: it runs - // the (outer) time-stepping loop, - // the (inner) nonlinear-solver loop, - // and outputs the solution after each - // time step. + // This function has the top-level + // control over everything: it runs + // the (outer) time-stepping loop, + // the (inner) nonlinear-solver loop, + // and outputs the solution after each + // time step. template void SineGordonProblem::run () { make_grid_and_dofs (); - // To aknowledge the initial - // condition, we must use the - // function $u_0(x)$ to compute - // $U^0$. To this end, below we - // will create an object of type - // InitialValues; note - // that when we create this object - // (which is derived from the - // Function class), we - // set its internal time variable - // to $t_0$, to indicate that the - // initial condition is a function - // of space and time evaluated at - // $t=t_0$. - // - // Then we produce $U^0$ by projecting - // $u_0(x)$ onto the grid using - // VectorTools::project. We - // have to use the same construct using - // hanging node constraints as in step-21: - // the VectorTools::project function - // requires a hanging node constraints - // object, but to be used we first need to - // close it: + // To aknowledge the initial + // condition, we must use the + // function $u_0(x)$ to compute + // $U^0$. To this end, below we + // will create an object of type + // InitialValues; note + // that when we create this object + // (which is derived from the + // Function class), we + // set its internal time variable + // to $t_0$, to indicate that the + // initial condition is a function + // of space and time evaluated at + // $t=t_0$. + // + // Then we produce $U^0$ by projecting + // $u_0(x)$ onto the grid using + // VectorTools::project. 
We + // have to use the same construct using + // hanging node constraints as in step-21: + // the VectorTools::project function + // requires a hanging node constraints + // object, but to be used we first need to + // close it: { ConstraintMatrix constraints; constraints.close(); VectorTools::project (dof_handler, - constraints, - QGauss(3), - InitialValues (1, time), - solution); + constraints, + QGauss(3), + InitialValues (1, time), + solution); } - // For completeness, we output the - // zeroth time step to a file just - // like any other other time step. + // For completeness, we output the + // zeroth time step to a file just + // like any other other time step. output_results (0); - // Now we perform the time - // stepping: at every time step we - // solve the matrix equation(s) - // corresponding to the finite - // element discretization of the - // problem, and then advance our - // solution according to the time - // stepping formulas we discussed - // in the Introduction. + // Now we perform the time + // stepping: at every time step we + // solve the matrix equation(s) + // corresponding to the finite + // element discretization of the + // problem, and then advance our + // solution according to the time + // stepping formulas we discussed + // in the Introduction. unsigned int timestep_number = 1; for (time+=time_step; time<=final_time; time+=time_step, ++timestep_number) { - old_solution = solution; - - std::cout << std::endl - << "Time step #" << timestep_number << "; " - << "advancing to t = " << time << "." - << std::endl; - - // At the beginning of each - // time step we must solve the - // nonlinear equation in the - // split formulation via - // Newton's method --- - // i.e. solve for $\delta - // U^{n,l}$ then compute - // $U^{n,l+1}$ and so on. The - // stopping criterion for this - // nonlinear iteration is that - // $\|F_h(U^{n,l})\|_2 \le - // 10^{-6} - // \|F_h(U^{n,0})\|_2$. Consequently, - // we need to record the norm - // of the residual in the first - // iteration. - // - // At the end of each iteration, we - // output to the console how many - // linear solver iterations it took - // us. When the loop below is done, we - // have (an approximation of) $U^n$. - double initial_rhs_norm = 0.; - bool first_iteration = true; - do - { - assemble_system (); - - if (first_iteration == true) - initial_rhs_norm = system_rhs.l2_norm(); - - const unsigned int n_iterations - = solve (); - - solution += solution_update; - - if (first_iteration == true) - std::cout << " " << n_iterations; - else - std::cout << '+' << n_iterations; - first_iteration = false; - } - while (system_rhs.l2_norm() > 1e-6 * initial_rhs_norm); - - std::cout << " CG iterations per nonlinear step." - << std::endl; - - // Upon obtaining the solution to the - // first equation of the problem at - // $t=t_n$, we must update the - // auxiliary velocity variable - // $V^n$. However, we do not compute - // and store $V^n$ since it is not a - // quantity we use directly in the - // problem. 
Hence, for simplicity, we - // update $MV^n$ directly: - Vector tmp_vector (solution.size()); - laplace_matrix.vmult (tmp_vector, solution); - M_x_velocity.add (-time_step*theta, tmp_vector); - - tmp_vector = 0; - laplace_matrix.vmult (tmp_vector, old_solution); - M_x_velocity.add (-time_step*(1-theta), tmp_vector); - - tmp_vector = 0; - compute_nl_term (old_solution, solution, tmp_vector); - M_x_velocity.add (-time_step, tmp_vector); - - // Oftentimes, in particular - // for fine meshes, we must - // pick the time step to be - // quite small in order for the - // scheme to be - // stable. Therefore, there are - // a lot of time steps during - // which "nothing interesting - // happens" in the solution. To - // improve overall efficiency - // -- in particular, speed up - // the program and save disk - // space -- we only output the - // solution every - // output_timestep_skip - // time steps: - if (timestep_number % output_timestep_skip == 0) - output_results (timestep_number); + old_solution = solution; + + std::cout << std::endl + << "Time step #" << timestep_number << "; " + << "advancing to t = " << time << "." + << std::endl; + + // At the beginning of each + // time step we must solve the + // nonlinear equation in the + // split formulation via + // Newton's method --- + // i.e. solve for $\delta + // U^{n,l}$ then compute + // $U^{n,l+1}$ and so on. The + // stopping criterion for this + // nonlinear iteration is that + // $\|F_h(U^{n,l})\|_2 \le + // 10^{-6} + // \|F_h(U^{n,0})\|_2$. Consequently, + // we need to record the norm + // of the residual in the first + // iteration. + // + // At the end of each iteration, we + // output to the console how many + // linear solver iterations it took + // us. When the loop below is done, we + // have (an approximation of) $U^n$. + double initial_rhs_norm = 0.; + bool first_iteration = true; + do + { + assemble_system (); + + if (first_iteration == true) + initial_rhs_norm = system_rhs.l2_norm(); + + const unsigned int n_iterations + = solve (); + + solution += solution_update; + + if (first_iteration == true) + std::cout << " " << n_iterations; + else + std::cout << '+' << n_iterations; + first_iteration = false; + } + while (system_rhs.l2_norm() > 1e-6 * initial_rhs_norm); + + std::cout << " CG iterations per nonlinear step." + << std::endl; + + // Upon obtaining the solution to the + // first equation of the problem at + // $t=t_n$, we must update the + // auxiliary velocity variable + // $V^n$. However, we do not compute + // and store $V^n$ since it is not a + // quantity we use directly in the + // problem. Hence, for simplicity, we + // update $MV^n$ directly: + Vector tmp_vector (solution.size()); + laplace_matrix.vmult (tmp_vector, solution); + M_x_velocity.add (-time_step*theta, tmp_vector); + + tmp_vector = 0; + laplace_matrix.vmult (tmp_vector, old_solution); + M_x_velocity.add (-time_step*(1-theta), tmp_vector); + + tmp_vector = 0; + compute_nl_term (old_solution, solution, tmp_vector); + M_x_velocity.add (-time_step, tmp_vector); + + // Oftentimes, in particular + // for fine meshes, we must + // pick the time step to be + // quite small in order for the + // scheme to be + // stable. Therefore, there are + // a lot of time steps during + // which "nothing interesting + // happens" in the solution. 
To + // improve overall efficiency + // -- in particular, speed up + // the program and save disk + // space -- we only output the + // solution every + // output_timestep_skip + // time steps: + if (timestep_number % output_timestep_skip == 0) + output_results (timestep_number); } } } - // @sect3{The main function} - - // This is the main function of the - // program. It creates an object of - // top-level class and calls its - // principal function. Also, we - // supress some of the library output - // by setting - // deallog.depth_console - // to zero. Furthermore, if - // exceptions are thrown during the - // execution of the run method of the - // SineGordonProblem - // class, we catch and report them - // here. For more information about - // exceptions the reader should - // consult step-6. + // @sect3{The main function} + + // This is the main function of the + // program. It creates an object of + // top-level class and calls its + // principal function. Also, we + // supress some of the library output + // by setting + // deallog.depth_console + // to zero. Furthermore, if + // exceptions are thrown during the + // execution of the run method of the + // SineGordonProblem + // class, we catch and report them + // here. For more information about + // exceptions the reader should + // consult step-6. int main () { try @@ -925,12 +925,12 @@ int main () catch (...) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Unknown exception!" << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } diff --git a/deal.II/examples/step-26/step-26.cc b/deal.II/examples/step-26/step-26.cc index 8195177109..2597a72ae2 100644 --- a/deal.II/examples/step-26/step-26.cc +++ b/deal.II/examples/step-26/step-26.cc @@ -11,11 +11,11 @@ // @sect3{Include files} - // The first few (many?) include - // files have already been used in - // the previous example, so we will - // not explain their meaning here - // again. + // The first few (many?) include + // files have already been used in + // the previous example, so we will + // not explain their meaning here + // again. #include #include #include @@ -40,20 +40,20 @@ #include #include - // This is new, however: in the previous - // example we got some unwanted output from - // the linear solvers. If we want to suppress - // it, we have to include this file and add a - // single line somewhere to the program (see - // the main() function below for that): + // This is new, however: in the previous + // example we got some unwanted output from + // the linear solvers. If we want to suppress + // it, we have to include this file and add a + // single line somewhere to the program (see + // the main() function below for that): #include #include #include - // The last step is as in all - // previous programs: + // The last step is as in all + // previous programs: namespace Step26 { using namespace dealii; @@ -61,84 +61,84 @@ namespace Step26 class PointCloudSurface : public StraightBoundary<3> { public: - /** - * Constructor. - */ + /** + * Constructor. + */ PointCloudSurface (const std::string &filename); - /** - * Let the new point be the - * arithmetic mean of the two - * vertices of the line. 
- * - * Refer to the general - * documentation of this class - * and the documentation of the - * base class for more - * information. - */ + /** + * Let the new point be the + * arithmetic mean of the two + * vertices of the line. + * + * Refer to the general + * documentation of this class + * and the documentation of the + * base class for more + * information. + */ virtual Point<3> get_new_point_on_line (const Triangulation<3>::line_iterator &line) const; - /** - * Let the new point be the - * arithmetic mean of the four - * vertices of this quad and the - * four midpoints of the lines, - * which are already created at - * the time of calling this - * function. - * - * Refer to the general - * documentation of this class - * and the documentation of the - * base class for more - * information. - */ + /** + * Let the new point be the + * arithmetic mean of the four + * vertices of this quad and the + * four midpoints of the lines, + * which are already created at + * the time of calling this + * function. + * + * Refer to the general + * documentation of this class + * and the documentation of the + * base class for more + * information. + */ virtual Point<3> get_new_point_on_quad (const Triangulation<3>::quad_iterator &quad) const; - /** - * Gives n=points.size() - * points that splits the - * StraightBoundary line into - * $n+1$ partitions of equal - * lengths. - * - * Refer to the general - * documentation of this class - * and the documentation of the - * base class. - */ + /** + * Gives n=points.size() + * points that splits the + * StraightBoundary line into + * $n+1$ partitions of equal + * lengths. + * + * Refer to the general + * documentation of this class + * and the documentation of the + * base class. + */ virtual void get_intermediate_points_on_line (const Triangulation<3>::line_iterator &line, - std::vector > &points) const; - - /** - * Gives n=points.size()=m*m - * points that splits the - * p{StraightBoundary} quad into - * (m+1)(m+1) subquads of equal - * size. - * - * Refer to the general - * documentation of this class - * and the documentation of the - * base class. - */ + std::vector > &points) const; + + /** + * Gives n=points.size()=m*m + * points that splits the + * p{StraightBoundary} quad into + * (m+1)(m+1) subquads of equal + * size. + * + * Refer to the general + * documentation of this class + * and the documentation of the + * base class. + */ virtual void get_intermediate_points_on_quad (const Triangulation<3>::quad_iterator &quad, - std::vector > &points) const; - - /** - * A function that, given a point - * p, returns the closest - * point on the surface defined by the - * input file. For the time being, we - * simply return the closest point in the - * point cloud, rather than doing any - * sort of interpolation. - */ + std::vector > &points) const; + + /** + * A function that, given a point + * p, returns the closest + * point on the surface defined by the + * input file. For the time being, we + * simply return the closest point in the + * point cloud, rather than doing any + * sort of interpolation. 
+ */ Point<3> closest_point (const Point<3> &p) const; private: std::vector > point_list; @@ -147,106 +147,106 @@ namespace Step26 PointCloudSurface::PointCloudSurface (const std::string &filename) { - // first read in all the points + // first read in all the points { std::ifstream in (filename.c_str()); AssertThrow (in, ExcIO()); while (in) - { - Point<3> p; - in >> p; - point_list.push_back (p); - } + { + Point<3> p; + in >> p; + point_list.push_back (p); + } AssertThrow (point_list.size() > 1, ExcIO()); } - // next fit a linear model through the data - // cloud to rectify it in a local - // coordinate system - // - // the first step is to move the center of - // mass of the points to the origin + // next fit a linear model through the data + // cloud to rectify it in a local + // coordinate system + // + // the first step is to move the center of + // mass of the points to the origin { const Point<3> c_o_m = std::accumulate (point_list.begin(), - point_list.end(), - Point<3>()) / - point_list.size(); + point_list.end(), + Point<3>()) / + point_list.size(); for (unsigned int i=0; i gradient_direction - = Point<2>(a,b) / std::sqrt(a*a+b*b); + = Point<2>(a,b) / std::sqrt(a*a+b*b); const Point<2> orthogonal_direction - = Point<2>(-b,a) / std::sqrt(a*a+b*b); + = Point<2>(-b,a) / std::sqrt(a*a+b*b); const double stretch_factor = std::sqrt(1.+a*a+b*b); for (unsigned int i=0; i xy (point_list[i][0], - point_list[i][1]); - const double grad_distance = xy * gradient_direction; - const double orth_distance = xy * orthogonal_direction; - - // we then have to stretch the points - // in the gradient direction. the - // stretch factor is defined above - // (zero if the original plane was - // already the xy plane, infinity if - // it was vertical) - const Point<2> new_xy - = (grad_distance * stretch_factor * gradient_direction + - orth_distance * orthogonal_direction); - point_list[i][0] = new_xy[0]; - point_list[i][1] = new_xy[1]; - } + { + // we can do that by, for each point, + // first subtract the points in the + // plane: + point_list[i][2] -= a*point_list[i][0] + b*point_list[i][1]; + + // we made a mistake here, though: + // we've shrunk the plan in the + // direction parallel to the + // gradient. we will have to correct + // for this: + const Point<2> xy (point_list[i][0], + point_list[i][1]); + const double grad_distance = xy * gradient_direction; + const double orth_distance = xy * orthogonal_direction; + + // we then have to stretch the points + // in the gradient direction. 
the + // stretch factor is defined above + // (zero if the original plane was + // already the xy plane, infinity if + // it was vertical) + const Point<2> new_xy + = (grad_distance * stretch_factor * gradient_direction + + orth_distance * orthogonal_direction); + point_list[i][0] = new_xy[0]; + point_list[i][1] = new_xy[1]; + } } } @@ -258,14 +258,14 @@ namespace Step26 Point<3> point = point_list[0]; for (std::vector >::const_iterator i=point_list.begin(); - i != point_list.end(); ++i) + i != point_list.end(); ++i) { - const double d = p.distance (*i); - if (d < distance) - { - distance = d; - point = *i; - } + const double d = p.distance (*i); + if (d < distance) + { + distance = d; + point = *i; + } } return point; @@ -293,10 +293,10 @@ namespace Step26 void PointCloudSurface:: get_intermediate_points_on_line (const Triangulation<3>::line_iterator &line, - std::vector > &points) const + std::vector > &points) const { StraightBoundary<3>::get_intermediate_points_on_line (line, - points); + points); for (unsigned int i=0; i::quad_iterator &quad, - std::vector > &points) const + std::vector > &points) const { StraightBoundary<3>::get_intermediate_points_on_quad (quad, - points); + points); for (unsigned int i=0; iLaplaceProblem class template} + // @sect3{The LaplaceProblem class template} - // This is again the same - // LaplaceProblem class as in the - // previous example. The only - // difference is that we have now - // declared it as a class with a - // template parameter, and the - // template parameter is of course - // the spatial dimension in which we - // would like to solve the Laplace - // equation. Of course, several of - // the member variables depend on - // this dimension as well, in - // particular the Triangulation - // class, which has to represent - // quadrilaterals or hexahedra, - // respectively. Apart from this, - // everything is as before. + // This is again the same + // LaplaceProblem class as in the + // previous example. The only + // difference is that we have now + // declared it as a class with a + // template parameter, and the + // template parameter is of course + // the spatial dimension in which we + // would like to solve the Laplace + // equation. Of course, several of + // the member variables depend on + // this dimension as well, in + // particular the Triangulation + // class, which has to represent + // quadrilaterals or hexahedra, + // respectively. Apart from this, + // everything is as before. template class LaplaceProblem { @@ -369,7 +369,7 @@ namespace Step26 }; - // @sect3{Right hand side and boundary values} + // @sect3{Right hand side and boundary values} @@ -381,109 +381,109 @@ namespace Step26 BoundaryValues () : Function() {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; }; template double BoundaryValues::value (const Point &p, - const unsigned int /*component*/) const + const unsigned int /*component*/) const { return std::max(p[dim-1], -5.); } - // @sect3{Implementation of the LaplaceProblem class} - - // Next for the implementation of the class - // template that makes use of the functions - // above. As before, we will write everything - // as templates that have a formal parameter - // dim that we assume unknown at the time - // we define the template functions. 
Only - // later, the compiler will find a - // declaration of LaplaceProblem@<2@> (in - // the main function, actually) and - // compile the entire class with dim - // replaced by 2, a process referred to as - // `instantiation of a template'. When doing - // so, it will also replace instances of - // RightHandSide@ by - // RightHandSide@<2@> and instantiate the - // latter class from the class template. - // - // In fact, the compiler will also find a - // declaration LaplaceProblem@<3@> in - // main(). This will cause it to again go - // back to the general - // LaplaceProblem@ template, replace - // all occurrences of dim, this time by - // 3, and compile the class a second - // time. Note that the two instantiations - // LaplaceProblem@<2@> and - // LaplaceProblem@<3@> are completely - // independent classes; their only common - // feature is that they are both instantiated - // from the same general template, but they - // are not convertible into each other, for - // example, and share no code (both - // instantiations are compiled completely - // independently). - - - // @sect4{LaplaceProblem::LaplaceProblem} - - // After this introduction, here is the - // constructor of the LaplaceProblem - // class. It specifies the desired polynomial - // degree of the finite elements and - // associates the DoFHandler to the - // triangulation just as in the previous - // example program, step-3: + // @sect3{Implementation of the LaplaceProblem class} + + // Next for the implementation of the class + // template that makes use of the functions + // above. As before, we will write everything + // as templates that have a formal parameter + // dim that we assume unknown at the time + // we define the template functions. Only + // later, the compiler will find a + // declaration of LaplaceProblem@<2@> (in + // the main function, actually) and + // compile the entire class with dim + // replaced by 2, a process referred to as + // `instantiation of a template'. When doing + // so, it will also replace instances of + // RightHandSide@ by + // RightHandSide@<2@> and instantiate the + // latter class from the class template. + // + // In fact, the compiler will also find a + // declaration LaplaceProblem@<3@> in + // main(). This will cause it to again go + // back to the general + // LaplaceProblem@ template, replace + // all occurrences of dim, this time by + // 3, and compile the class a second + // time. Note that the two instantiations + // LaplaceProblem@<2@> and + // LaplaceProblem@<3@> are completely + // independent classes; their only common + // feature is that they are both instantiated + // from the same general template, but they + // are not convertible into each other, for + // example, and share no code (both + // instantiations are compiled completely + // independently). + + + // @sect4{LaplaceProblem::LaplaceProblem} + + // After this introduction, here is the + // constructor of the LaplaceProblem + // class. It specifies the desired polynomial + // degree of the finite elements and + // associates the DoFHandler to the + // triangulation just as in the previous + // example program, step-3: template LaplaceProblem::LaplaceProblem () : - fe (1), - dof_handler (triangulation) + fe (1), + dof_handler (triangulation) {} - // @sect4{LaplaceProblem::make_grid_and_dofs} - - // Grid creation is something - // inherently dimension - // dependent. However, as long as the - // domains are sufficiently similar - // in 2D or 3D, the library can - // abstract for you. 
In our case, we - // would like to again solve on the - // square [-1,1]x[-1,1] in 2D, or on - // the cube [-1,1]x[-1,1]x[-1,1] in - // 3D; both can be termed - // hyper_cube, so we may use the - // same function in whatever - // dimension we are. Of course, the - // functions that create a hypercube - // in two and three dimensions are - // very much different, but that is - // something you need not care - // about. Let the library handle the - // difficult things. - // - // Likewise, associating a degree of freedom - // with each vertex is something which - // certainly looks different in 2D and 3D, - // but that does not need to bother you - // either. This function therefore looks - // exactly like in the previous example, - // although it performs actions that in their - // details are quite different if dim - // happens to be 3. The only significant - // difference from a user's perspective is - // the number of cells resulting, which is - // much higher in three than in two space - // dimensions! + // @sect4{LaplaceProblem::make_grid_and_dofs} + + // Grid creation is something + // inherently dimension + // dependent. However, as long as the + // domains are sufficiently similar + // in 2D or 3D, the library can + // abstract for you. In our case, we + // would like to again solve on the + // square [-1,1]x[-1,1] in 2D, or on + // the cube [-1,1]x[-1,1]x[-1,1] in + // 3D; both can be termed + // hyper_cube, so we may use the + // same function in whatever + // dimension we are. Of course, the + // functions that create a hypercube + // in two and three dimensions are + // very much different, but that is + // something you need not care + // about. Let the library handle the + // difficult things. + // + // Likewise, associating a degree of freedom + // with each vertex is something which + // certainly looks different in 2D and 3D, + // but that does not need to bother you + // either. This function therefore looks + // exactly like in the previous example, + // although it performs actions that in their + // details are quite different if dim + // happens to be 3. The only significant + // difference from a user's perspective is + // the number of cells resulting, which is + // much higher in three than in two space + // dimensions! 
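  // Before looking at the actual function, the dimension independence
  // just described can be condensed into a minimal sketch: the very
  // same lines compile and run for dim=2 and dim=3. The helper's name
  // and the number of refinement steps are arbitrary choices for this
  // illustration, and the sketch is not used by the program, which
  // sets up a more elaborate geometry below.
  template <int dim>
  void make_unit_cube_grid (Triangulation<dim>       &triangulation,
                            DoFHandler<dim>          &dof_handler,
                            const FiniteElement<dim> &fe)
  {
    GridGenerator::hyper_cube (triangulation, -1, 1);
    triangulation.refine_global (4);
    dof_handler.distribute_dofs (fe);
  }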
template void LaplaceProblem::make_grid_and_dofs () { @@ -491,40 +491,40 @@ namespace Step26 for (unsigned int f=0; f::faces_per_cell; ++f) if (triangulation.begin()->face(f)->center()[2] > 15) - { - triangulation.begin()->face(f)->set_boundary_indicator (1); - for (unsigned int i=0; i::lines_per_face; ++i) - triangulation.begin()->face(f)->line(i)->set_boundary_indicator (1); - break; - } + { + triangulation.begin()->face(f)->set_boundary_indicator (1); + for (unsigned int i=0; i::lines_per_face; ++i) + triangulation.begin()->face(f)->line(i)->set_boundary_indicator (1); + break; + } triangulation.set_boundary (1, pds); for (unsigned int v=0; v::vertices_per_cell; ++v) if (triangulation.begin()->vertex(v)[2] > 0) - triangulation.begin()->vertex(v) - = pds.closest_point (Point<3>(triangulation.begin()->vertex(v)[0], - triangulation.begin()->vertex(v)[1], - 0)); + triangulation.begin()->vertex(v) + = pds.closest_point (Point<3>(triangulation.begin()->vertex(v)[0], + triangulation.begin()->vertex(v)[1], + 0)); for (unsigned int i=0; i<4; ++i) { - for (typename Triangulation::active_cell_iterator - cell = triangulation.begin_active(); - cell != triangulation.end(); ++cell) - for (unsigned int f=0; f::faces_per_cell; ++f) - if (cell->face(f)->boundary_indicator() == 1) - cell->set_refine_flag (); - - triangulation.execute_coarsening_and_refinement (); - - std::cout << "Refinement cycle " << i << std::endl - << " Number of active cells: " - << triangulation.n_active_cells() - << std::endl - << " Total number of cells: " - << triangulation.n_cells() - << std::endl; + for (typename Triangulation::active_cell_iterator + cell = triangulation.begin_active(); + cell != triangulation.end(); ++cell) + for (unsigned int f=0; f::faces_per_cell; ++f) + if (cell->face(f)->boundary_indicator() == 1) + cell->set_refine_flag (); + + triangulation.execute_coarsening_and_refinement (); + + std::cout << "Refinement cycle " << i << std::endl + << " Number of active cells: " + << triangulation.n_active_cells() + << std::endl + << " Total number of cells: " + << triangulation.n_cells() + << std::endl; } @@ -532,12 +532,12 @@ namespace Step26 dof_handler.distribute_dofs (fe); std::cout << " Number of degrees of freedom: " - << dof_handler.n_dofs() - << std::endl; + << dof_handler.n_dofs() + << std::endl; sparsity_pattern.reinit (dof_handler.n_dofs(), - dof_handler.n_dofs(), - dof_handler.max_couplings_between_dofs()); + dof_handler.n_dofs(), + dof_handler.max_couplings_between_dofs()); DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); sparsity_pattern.compress(); @@ -548,111 +548,111 @@ namespace Step26 } - // @sect4{LaplaceProblem::assemble_system} - - // Unlike in the previous example, we - // would now like to use a - // non-constant right hand side - // function and non-zero boundary - // values. Both are tasks that are - // readily achieved with a only a few - // new lines of code in the - // assemblage of the matrix and right - // hand side. - // - // More interesting, though, is the - // way we assemble matrix and right - // hand side vector dimension - // independently: there is simply no - // difference to the - // two-dimensional case. Since the - // important objects used in this - // function (quadrature formula, - // FEValues) depend on the dimension - // by way of a template parameter as - // well, they can take care of - // setting up properly everything for - // the dimension for which this - // function is compiled. 
By declaring - // all classes which might depend on - // the dimension using a template - // parameter, the library can make - // nearly all work for you and you - // don't have to care about most - // things. + // @sect4{LaplaceProblem::assemble_system} + + // Unlike in the previous example, we + // would now like to use a + // non-constant right hand side + // function and non-zero boundary + // values. Both are tasks that are + // readily achieved with a only a few + // new lines of code in the + // assemblage of the matrix and right + // hand side. + // + // More interesting, though, is the + // way we assemble matrix and right + // hand side vector dimension + // independently: there is simply no + // difference to the + // two-dimensional case. Since the + // important objects used in this + // function (quadrature formula, + // FEValues) depend on the dimension + // by way of a template parameter as + // well, they can take care of + // setting up properly everything for + // the dimension for which this + // function is compiled. By declaring + // all classes which might depend on + // the dimension using a template + // parameter, the library can make + // nearly all work for you and you + // don't have to care about most + // things. template void LaplaceProblem::assemble_system () { MatrixTools::create_laplace_matrix (dof_handler, - QGauss(2), - system_matrix); + QGauss(2), + system_matrix); system_rhs = 0; std::map boundary_values; VectorTools::interpolate_boundary_values (dof_handler, - 0, - BoundaryValues(), - boundary_values); + 0, + BoundaryValues(), + boundary_values); MatrixTools::apply_boundary_values (boundary_values, - system_matrix, - solution, - system_rhs); + system_matrix, + solution, + system_rhs); } - // @sect4{LaplaceProblem::solve} + // @sect4{LaplaceProblem::solve} - // Solving the linear system of - // equations is something that looks - // almost identical in most - // programs. In particular, it is - // dimension independent, so this - // function is copied verbatim from the - // previous example. + // Solving the linear system of + // equations is something that looks + // almost identical in most + // programs. In particular, it is + // dimension independent, so this + // function is copied verbatim from the + // previous example. template void LaplaceProblem::solve () { - // NEW + // NEW SolverControl solver_control (dof_handler.n_dofs(), - 1e-12*system_rhs.l2_norm()); + 1e-12*system_rhs.l2_norm()); SolverCG<> cg (solver_control); PreconditionSSOR<> preconditioner; preconditioner.initialize(system_matrix, 1.2); cg.solve (system_matrix, solution, system_rhs, - preconditioner); + preconditioner); } - // @sect4{LaplaceProblem::output_results} - - // This function also does what the - // respective one did in step-3. No changes - // here for dimension independence either. - // - // The only difference to the previous - // example is that we want to write output in - // GMV format, rather than for gnuplot (GMV - // is another graphics program that, contrary - // to gnuplot, shows data in nice colors, - // allows rotation of geometries with the - // mouse, and generates reasonable - // representations of 3d data; for ways to - // obtain it see the ReadMe file of - // deal.II). To write data in this format, we - // simply replace the - // data_out.write_gnuplot call by - // data_out.write_gmv. 
- // - // Since the program will run both 2d and 3d - // versions of the laplace solver, we use the - // dimension in the filename to generate - // distinct filenames for each run (in a - // better program, one would check whether - // `dim' can have other values than 2 or 3, - // but we neglect this here for the sake of - // brevity). + // @sect4{LaplaceProblem::output_results} + + // This function also does what the + // respective one did in step-3. No changes + // here for dimension independence either. + // + // The only difference to the previous + // example is that we want to write output in + // GMV format, rather than for gnuplot (GMV + // is another graphics program that, contrary + // to gnuplot, shows data in nice colors, + // allows rotation of geometries with the + // mouse, and generates reasonable + // representations of 3d data; for ways to + // obtain it see the ReadMe file of + // deal.II). To write data in this format, we + // simply replace the + // data_out.write_gnuplot call by + // data_out.write_gmv. + // + // Since the program will run both 2d and 3d + // versions of the laplace solver, we use the + // dimension in the filename to generate + // distinct filenames for each run (in a + // better program, one would check whether + // `dim' can have other values than 2 or 3, + // but we neglect this here for the sake of + // brevity). template void LaplaceProblem::output_results () const { @@ -664,20 +664,20 @@ namespace Step26 data_out.build_patches (); std::ofstream output (dim == 2 ? - "solution-2d.gmv" : - "solution-3d.gmv"); + "solution-2d.gmv" : + "solution-3d.gmv"); data_out.write_gmv (output); } - // @sect4{LaplaceProblem::run} + // @sect4{LaplaceProblem::run} - // This is the function which has the - // top-level control over - // everything. Apart from one line of - // additional output, it is the same - // as for the previous example. + // This is the function which has the + // top-level control over + // everything. Apart from one line of + // additional output, it is the same + // as for the previous example. template void LaplaceProblem::run () { @@ -693,47 +693,47 @@ namespace Step26 // @sect3{The main function} - // And this is the main function. It also - // looks mostly like in step-3, but if you - // look at the code below, note how we first - // create a variable of type - // LaplaceProblem@<2@> (forcing the - // compiler to compile the class template - // with dim replaced by 2) and run a - // 2d simulation, and then we do the whole - // thing over in 3d. - // - // In practice, this is probably not what you - // would do very frequently (you probably - // either want to solve a 2d problem, or one - // in 3d, but not both at the same - // time). However, it demonstrates the - // mechanism by which we can simply change - // which dimension we want in a single place, - // and thereby force the compiler to - // recompile the dimension independent class - // templates for the dimension we - // request. The emphasis here lies on the - // fact that we only need to change a single - // place. This makes it rather trivial to - // debug the program in 2d where computations - // are fast, and then switch a single place - // to a 3 to run the much more computing - // intensive program in 3d for `real' - // computations. - // - // Each of the two blocks is enclosed in - // braces to make sure that the - // laplace_problem_2d variable goes out - // of scope (and releases the memory it - // holds) before we move on to allocate - // memory for the 3d case. 
Without the - // additional braces, the - // laplace_problem_2d variable would only - // be destroyed at the end of the function, - // i.e. after running the 3d problem, and - // would needlessly hog memory while the 3d - // run could actually use it. + // And this is the main function. It also + // looks mostly like in step-3, but if you + // look at the code below, note how we first + // create a variable of type + // LaplaceProblem@<2@> (forcing the + // compiler to compile the class template + // with dim replaced by 2) and run a + // 2d simulation, and then we do the whole + // thing over in 3d. + // + // In practice, this is probably not what you + // would do very frequently (you probably + // either want to solve a 2d problem, or one + // in 3d, but not both at the same + // time). However, it demonstrates the + // mechanism by which we can simply change + // which dimension we want in a single place, + // and thereby force the compiler to + // recompile the dimension independent class + // templates for the dimension we + // request. The emphasis here lies on the + // fact that we only need to change a single + // place. This makes it rather trivial to + // debug the program in 2d where computations + // are fast, and then switch a single place + // to a 3 to run the much more computing + // intensive program in 3d for `real' + // computations. + // + // Each of the two blocks is enclosed in + // braces to make sure that the + // laplace_problem_2d variable goes out + // of scope (and releases the memory it + // holds) before we move on to allocate + // memory for the 3d case. Without the + // additional braces, the + // laplace_problem_2d variable would only + // be destroyed at the end of the function, + // i.e. after running the 3d problem, and + // would needlessly hog memory while the 3d + // run could actually use it. // // Finally, the first line of the function is // used to suppress some output. Remember @@ -780,25 +780,25 @@ int main () catch (std::exception &exc) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Exception on processing: " << std::endl - << exc.what() << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << exc.what() << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } catch (...) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Unknown exception!" << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } diff --git a/deal.II/examples/step-27/step-27.cc b/deal.II/examples/step-27/step-27.cc index e71d4122d5..5d917cf626 100644 --- a/deal.II/examples/step-27/step-27.cc +++ b/deal.II/examples/step-27/step-27.cc @@ -11,10 +11,10 @@ // @sect3{Include files} - // The first few files have already - // been covered in previous examples - // and will thus not be further - // commented on. + // The first few files have already + // been covered in previous examples + // and will thus not be further + // commented on. 
#include #include #include @@ -38,63 +38,63 @@ #include #include - // These are the new files we need. The first - // one provides an alternative to the usual - // SparsityPattern class and the - // CompressedSparsityPattern class already - // discussed in step-11 and step-18. The last - // two provide hp versions of the - // DoFHandler and FEValues classes as - // described in the introduction of this - // program. + // These are the new files we need. The first + // one provides an alternative to the usual + // SparsityPattern class and the + // CompressedSparsityPattern class already + // discussed in step-11 and step-18. The last + // two provide hp versions of the + // DoFHandler and FEValues classes as + // described in the introduction of this + // program. #include #include #include - // The last set of include files are standard - // C++ headers. We need support for complex - // numbers when we compute the Fourier - // transform. + // The last set of include files are standard + // C++ headers. We need support for complex + // numbers when we compute the Fourier + // transform. #include #include #include - // Finally, this is as in previous - // programs: + // Finally, this is as in previous + // programs: namespace Step27 { using namespace dealii; - // @sect3{The main class} - - // The main class of this program looks very - // much like the one already used in the - // first few tutorial programs, for example - // the one in step-6. The main difference is - // that we have merged the refine_grid and - // output_results functions into one since we - // will also want to output some of the - // quantities used in deciding how to refine - // the mesh (in particular the estimated - // smoothness of the solution). There is also - // a function that computes this estimated - // smoothness, as discussed in the - // introduction. - // - // As far as member variables are concerned, - // we use the same structure as already used - // in step-6, but instead of a regular - // DoFHandler we use an object of type - // hp::DoFHandler, and we need collections - // instead of individual finite element, - // quadrature, and face quadrature - // objects. We will fill these collections in - // the constructor of the class. The last - // variable, max_degree, - // indicates the maximal polynomial degree of - // shape functions used. + // @sect3{The main class} + + // The main class of this program looks very + // much like the one already used in the + // first few tutorial programs, for example + // the one in step-6. The main difference is + // that we have merged the refine_grid and + // output_results functions into one since we + // will also want to output some of the + // quantities used in deciding how to refine + // the mesh (in particular the estimated + // smoothness of the solution). There is also + // a function that computes this estimated + // smoothness, as discussed in the + // introduction. + // + // As far as member variables are concerned, + // we use the same structure as already used + // in step-6, but instead of a regular + // DoFHandler we use an object of type + // hp::DoFHandler, and we need collections + // instead of individual finite element, + // quadrature, and face quadrature + // objects. We will fill these collections in + // the constructor of the class. The last + // variable, max_degree, + // indicates the maximal polynomial degree of + // shape functions used. 
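  // To illustrate how the collections just mentioned are used (a
  // sketch only; the helper name is invented for this example and the
  // function is not part of the program): every cell of an
  // hp::DoFHandler merely stores an index into the fe_collection,
  // which can be set before the degrees of freedom are distributed.
  // The real program chooses these indices adaptively; the sketch
  // simply assigns the lowest available degree everywhere.
  template <int dim>
  void use_lowest_degree_everywhere (hp::DoFHandler<dim>         &dof_handler,
                                     const hp::FECollection<dim> &fe_collection)
  {
    for (typename hp::DoFHandler<dim>::active_cell_iterator
           cell = dof_handler.begin_active();
         cell != dof_handler.end(); ++cell)
      cell->set_active_fe_index (0);

    dof_handler.distribute_dofs (fe_collection);
  }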
template class LaplaceProblem { @@ -132,11 +132,11 @@ namespace Step27 - // @sect3{Equation data} - // - // Next, let us define the right hand side - // function for this problem. It is $x+1$ in - // 1d, $(x+1)(y+1)$ in 2d, and so on. + // @sect3{Equation data} + // + // Next, let us define the right hand side + // function for this problem. It is $x+1$ in + // 1d, $(x+1)(y+1)$ in 2d, and so on. template class RightHandSide : public Function { @@ -144,14 +144,14 @@ namespace Step27 RightHandSide () : Function () {} virtual double value (const Point &p, - const unsigned int component) const; + const unsigned int component) const; }; template double RightHandSide::value (const Point &p, - const unsigned int /*component*/) const + const unsigned int /*component*/) const { double product = 1; for (unsigned int d=0; d LaplaceProblem::LaplaceProblem () - : - dof_handler (triangulation), - max_degree (dim <= 2 ? 7 : 5) + : + dof_handler (triangulation), + max_degree (dim <= 2 ? 7 : 5) { for (unsigned int degree=2; degree<=max_degree; ++degree) { - fe_collection.push_back (FE_Q(degree)); - quadrature_collection.push_back (QGauss(degree+1)); - face_quadrature_collection.push_back (QGauss(degree+1)); + fe_collection.push_back (FE_Q(degree)); + quadrature_collection.push_back (QGauss(degree+1)); + face_quadrature_collection.push_back (QGauss(degree+1)); } } - // @sect4{LaplaceProblem::~LaplaceProblem} + // @sect4{LaplaceProblem::~LaplaceProblem} - // The destructor is unchanged from what we - // already did in step-6: + // The destructor is unchanged from what we + // already did in step-6: template LaplaceProblem::~LaplaceProblem () { @@ -209,44 +209,44 @@ namespace Step27 } - // @sect4{LaplaceProblem::setup_system} - // - // This function is again an almost - // verbatim copy of what we already did in - // step-6. The first change is that we - // append the Dirichlet boundary conditions - // to the ConstraintMatrix object, which we - // consequently call just - // constraints instead of - // hanging_node_constraints. The - // second difference is that we don't - // directly build the sparsity pattern, but - // first create an intermediate object that - // we later copy into the usual - // SparsityPattern data structure, since - // this is more efficient for the problem - // with many entries per row (and different - // number of entries in different rows). In - // another slight deviation, we do not - // first build the sparsity pattern and - // then condense away constrained degrees - // of freedom, but pass the constraint - // matrix object directly to the function - // that builds the sparsity pattern. We - // disable the insertion of constrained - // entries with false as fourth - // argument in the - // DoFTools::make_sparsity_pattern - // function. All of these changes are - // explained in the introduction of this - // program. - // - // The last change, maybe hidden in plain - // sight, is that the dof_handler variable - // here is an hp object -- nevertheless all - // the function calls we had before still - // work in exactly the same way as they - // always did. + // @sect4{LaplaceProblem::setup_system} + // + // This function is again an almost + // verbatim copy of what we already did in + // step-6. The first change is that we + // append the Dirichlet boundary conditions + // to the ConstraintMatrix object, which we + // consequently call just + // constraints instead of + // hanging_node_constraints. 
The + // second difference is that we don't + // directly build the sparsity pattern, but + // first create an intermediate object that + // we later copy into the usual + // SparsityPattern data structure, since + // this is more efficient for the problem + // with many entries per row (and different + // number of entries in different rows). In + // another slight deviation, we do not + // first build the sparsity pattern and + // then condense away constrained degrees + // of freedom, but pass the constraint + // matrix object directly to the function + // that builds the sparsity pattern. We + // disable the insertion of constrained + // entries with false as fourth + // argument in the + // DoFTools::make_sparsity_pattern + // function. All of these changes are + // explained in the introduction of this + // program. + // + // The last change, maybe hidden in plain + // sight, is that the dof_handler variable + // here is an hp object -- nevertheless all + // the function calls we had before still + // work in exactly the same way as they + // always did. template void LaplaceProblem::setup_system () { @@ -257,15 +257,15 @@ namespace Step27 constraints.clear (); DoFTools::make_hanging_node_constraints (dof_handler, - constraints); + constraints); VectorTools::interpolate_boundary_values (dof_handler, - 0, - ZeroFunction(), - constraints); + 0, + ZeroFunction(), + constraints); constraints.close (); CompressedSetSparsityPattern csp (dof_handler.n_dofs(), - dof_handler.n_dofs()); + dof_handler.n_dofs()); DoFTools::make_sparsity_pattern (dof_handler, csp, constraints, false); sparsity_pattern.copy_from (csp); @@ -274,58 +274,58 @@ namespace Step27 - // @sect4{LaplaceProblem::assemble_system} - - // This is the function that assembles the - // global matrix and right hand side vector - // from the local contributions of each - // cell. Its main working is as has been - // described in many of the tutorial programs - // before. The significant deviations are the - // ones necessary for hp finite element - // methods. In particular, that we need to - // use a collection of FEValues object - // (implemented through the hp::FEValues - // class), and that we have to eliminate - // constrained degrees of freedom already - // when copying local contributions into - // global objects. Both of these are - // explained in detail in the introduction of - // this program. - // - // One other slight complication is the fact - // that because we use different polynomial - // degrees on different cells, the matrices - // and vectors holding local contributions do - // not have the same size on all cells. At - // the beginning of the loop over all cells, - // we therefore each time have to resize them - // to the correct size (given by - // dofs_per_cell). Because these - // classes are implement in such a way that - // reducing the size of a matrix or vector - // does not release the currently allocated - // memory (unless the new size is zero), the - // process of resizing at the beginning of - // the loop will only require re-allocation - // of memory during the first few - // iterations. Once we have found in a cell - // with the maximal finite element degree, no - // more re-allocations will happen because - // all subsequent reinit calls - // will only set the size to something that - // fits the currently allocated memory. This - // is important since allocating memory is - // expensive, and doing so every time we - // visit a new cell would take significant - // compute time. 
+ // @sect4{LaplaceProblem::assemble_system} + + // This is the function that assembles the + // global matrix and right hand side vector + // from the local contributions of each + // cell. Its main working is as has been + // described in many of the tutorial programs + // before. The significant deviations are the + // ones necessary for hp finite element + // methods. In particular, that we need to + // use a collection of FEValues object + // (implemented through the hp::FEValues + // class), and that we have to eliminate + // constrained degrees of freedom already + // when copying local contributions into + // global objects. Both of these are + // explained in detail in the introduction of + // this program. + // + // One other slight complication is the fact + // that because we use different polynomial + // degrees on different cells, the matrices + // and vectors holding local contributions do + // not have the same size on all cells. At + // the beginning of the loop over all cells, + // we therefore each time have to resize them + // to the correct size (given by + // dofs_per_cell). Because these + // classes are implement in such a way that + // reducing the size of a matrix or vector + // does not release the currently allocated + // memory (unless the new size is zero), the + // process of resizing at the beginning of + // the loop will only require re-allocation + // of memory during the first few + // iterations. Once we have found in a cell + // with the maximal finite element degree, no + // more re-allocations will happen because + // all subsequent reinit calls + // will only set the size to something that + // fits the currently allocated memory. This + // is important since allocating memory is + // expensive, and doing so every time we + // visit a new cell would take significant + // compute time. 
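  // [Editor's note -- illustrative sketch, not part of the patch itself;
  // the complete function follows below.] It may help to isolate the two
  // hp-specific idioms the comment just described: the hp::FEValues object
  // is built once from the collections and re-initialized per cell, and the
  // local objects are re-sized per cell because dofs_per_cell varies from
  // cell to cell:
  hp::FEValues<dim> hp_fe_values (fe_collection,
                                  quadrature_collection,
                                  update_values | update_gradients |
                                  update_quadrature_points | update_JxW_values);

  FullMatrix<double> cell_matrix;
  Vector<double>     cell_rhs;

  typename hp::DoFHandler<dim>::active_cell_iterator
    cell = dof_handler.begin_active(),
    endc = dof_handler.end();
  for (; cell != endc; ++cell)
    {
      const unsigned int dofs_per_cell = cell->get_fe().dofs_per_cell;
      // reinit keeps the allocated memory when shrinking, so re-allocation
      // stops once the cell with the largest element has been encountered:
      cell_matrix.reinit (dofs_per_cell, dofs_per_cell);
      cell_rhs.reinit (dofs_per_cell);

      hp_fe_values.reinit (cell);
      const FEValues<dim> &fe_values = hp_fe_values.get_present_fe_values ();
      // ... quadrature loop over fe_values as in any non-hp program ...
    }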
template void LaplaceProblem::assemble_system () { hp::FEValues hp_fe_values (fe_collection, - quadrature_collection, - update_values | update_gradients | - update_quadrature_points | update_JxW_values); + quadrature_collection, + update_values | update_gradients | + update_quadrature_points | update_JxW_values); const RightHandSide rhs_function; @@ -339,171 +339,171 @@ namespace Step27 endc = dof_handler.end(); for (; cell!=endc; ++cell) { - const unsigned int dofs_per_cell = cell->get_fe().dofs_per_cell; + const unsigned int dofs_per_cell = cell->get_fe().dofs_per_cell; - cell_matrix.reinit (dofs_per_cell, dofs_per_cell); - cell_matrix = 0; + cell_matrix.reinit (dofs_per_cell, dofs_per_cell); + cell_matrix = 0; - cell_rhs.reinit (dofs_per_cell); - cell_rhs = 0; + cell_rhs.reinit (dofs_per_cell); + cell_rhs = 0; - hp_fe_values.reinit (cell); + hp_fe_values.reinit (cell); - const FEValues &fe_values = hp_fe_values.get_present_fe_values (); + const FEValues &fe_values = hp_fe_values.get_present_fe_values (); - std::vector rhs_values (fe_values.n_quadrature_points); - rhs_function.value_list (fe_values.get_quadrature_points(), - rhs_values); + std::vector rhs_values (fe_values.n_quadrature_points); + rhs_function.value_list (fe_values.get_quadrature_points(), + rhs_values); - for (unsigned int q_point=0; - q_pointget_dof_indices (local_dof_indices); + local_dof_indices.resize (dofs_per_cell); + cell->get_dof_indices (local_dof_indices); - constraints.distribute_local_to_global (cell_matrix, cell_rhs, - local_dof_indices, - system_matrix, system_rhs); + constraints.distribute_local_to_global (cell_matrix, cell_rhs, + local_dof_indices, + system_matrix, system_rhs); } - // Now with the loop over all cells - // finished, we are done for this - // function. The steps we still had to do - // at this point in earlier tutorial - // programs, namely condensing hanging - // node constraints and applying - // Dirichlet boundary conditions, have - // been taken care of by the - // ConstraintMatrix object - // constraints on the fly. + // Now with the loop over all cells + // finished, we are done for this + // function. The steps we still had to do + // at this point in earlier tutorial + // programs, namely condensing hanging + // node constraints and applying + // Dirichlet boundary conditions, have + // been taken care of by the + // ConstraintMatrix object + // constraints on the fly. } - // @sect4{LaplaceProblem::solve} + // @sect4{LaplaceProblem::solve} - // The function solving the linear system is - // entirely unchanged from previous - // examples. We simply try to reduce the - // initial residual (which equals the $l_2$ - // norm of the right hand side) by a certain - // factor: + // The function solving the linear system is + // entirely unchanged from previous + // examples. We simply try to reduce the + // initial residual (which equals the $l_2$ + // norm of the right hand side) by a certain + // factor: template void LaplaceProblem::solve () { SolverControl solver_control (system_rhs.size(), - 1e-8*system_rhs.l2_norm()); + 1e-8*system_rhs.l2_norm()); SolverCG<> cg (solver_control); PreconditionSSOR<> preconditioner; preconditioner.initialize(system_matrix, 1.2); cg.solve (system_matrix, solution, system_rhs, - preconditioner); + preconditioner); constraints.distribute (solution); } - // @sect4{LaplaceProblem::postprocess} + // @sect4{LaplaceProblem::postprocess} - // After solving the linear system, we will - // want to postprocess the solution. 
Here, - // all we do is to estimate the error, - // estimate the local smoothness of the - // solution as described in the introduction, - // then write graphical output, and finally - // refine the mesh in both $h$ and $p$ - // according to the indicators computed - // before. We do all this in the same - // function because we want the estimated - // error and smoothness indicators not only - // for refinement, but also include them in - // the graphical output. + // After solving the linear system, we will + // want to postprocess the solution. Here, + // all we do is to estimate the error, + // estimate the local smoothness of the + // solution as described in the introduction, + // then write graphical output, and finally + // refine the mesh in both $h$ and $p$ + // according to the indicators computed + // before. We do all this in the same + // function because we want the estimated + // error and smoothness indicators not only + // for refinement, but also include them in + // the graphical output. template void LaplaceProblem::postprocess (const unsigned int cycle) { - // Let us start with computing estimated - // error and smoothness indicators, which - // each are one number for each active cell - // of our triangulation. For the error - // indicator, we use the - // KellyErrorEstimator class as - // always. Estimating the smoothness is - // done in the respective function of this - // class; that function is discussed - // further down below: + // Let us start with computing estimated + // error and smoothness indicators, which + // each are one number for each active cell + // of our triangulation. For the error + // indicator, we use the + // KellyErrorEstimator class as + // always. Estimating the smoothness is + // done in the respective function of this + // class; that function is discussed + // further down below: Vector estimated_error_per_cell (triangulation.n_active_cells()); KellyErrorEstimator::estimate (dof_handler, - face_quadrature_collection, - typename FunctionMap::type(), - solution, - estimated_error_per_cell); + face_quadrature_collection, + typename FunctionMap::type(), + solution, + estimated_error_per_cell); Vector smoothness_indicators (triangulation.n_active_cells()); estimate_smoothness (smoothness_indicators); - // Next we want to generate graphical - // output. In addition to the two estimated - // quantities derived above, we would also - // like to output the polynomial degree of - // the finite elements used on each of the - // elements on the mesh. - // - // The way to do that requires that we loop - // over all cells and poll the active - // finite element index of them using - // cell-@>active_fe_index(). We - // then use the result of this operation - // and query the finite element collection - // for the finite element with that index, - // and finally determine the polynomial - // degree of that element. The result we - // put into a vector with one element per - // cell. The DataOut class requires this to - // be a vector of float or - // double, even though our - // values are all integers, so that it what - // we use: + // Next we want to generate graphical + // output. In addition to the two estimated + // quantities derived above, we would also + // like to output the polynomial degree of + // the finite elements used on each of the + // elements on the mesh. + // + // The way to do that requires that we loop + // over all cells and poll the active + // finite element index of them using + // cell-@>active_fe_index(). 
We + // then use the result of this operation + // and query the finite element collection + // for the finite element with that index, + // and finally determine the polynomial + // degree of that element. The result we + // put into a vector with one element per + // cell. The DataOut class requires this to + // be a vector of float or + // double, even though our + // values are all integers, so that it what + // we use: { Vector fe_degrees (triangulation.n_active_cells()); { - typename hp::DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(), - endc = dof_handler.end(); - for (unsigned int index=0; cell!=endc; ++cell, ++index) - fe_degrees(index) - = fe_collection[cell->active_fe_index()].degree; + typename hp::DoFHandler::active_cell_iterator + cell = dof_handler.begin_active(), + endc = dof_handler.end(); + for (unsigned int index=0; cell!=endc; ++cell, ++index) + fe_degrees(index) + = fe_collection[cell->active_fe_index()].degree; } - // With now all data vectors available -- - // solution, estimated errors and - // smoothness indicators, and finite - // element degrees --, we create a - // DataOut object for graphical output - // and attach all data. Note that the - // DataOut class has a second template - // argument (which defaults to - // DoFHandler@, which is why we - // have never seen it in previous - // tutorial programs) that indicates the - // type of DoF handler to be used. Here, - // we have to use the hp::DoFHandler - // class: + // With now all data vectors available -- + // solution, estimated errors and + // smoothness indicators, and finite + // element degrees --, we create a + // DataOut object for graphical output + // and attach all data. Note that the + // DataOut class has a second template + // argument (which defaults to + // DoFHandler@, which is why we + // have never seen it in previous + // tutorial programs) that indicates the + // type of DoF handler to be used. Here, + // we have to use the hp::DoFHandler + // class: DataOut > data_out; data_out.attach_dof_handler (dof_handler); @@ -513,129 +513,129 @@ namespace Step27 data_out.add_data_vector (fe_degrees, "fe_degree"); data_out.build_patches (); - // The final step in generating - // output is to determine a file - // name, open the file, and write - // the data into it (here, we use - // VTK format): + // The final step in generating + // output is to determine a file + // name, open the file, and write + // the data into it (here, we use + // VTK format): const std::string filename = "solution-" + - Utilities::int_to_string (cycle, 2) + - ".vtk"; + Utilities::int_to_string (cycle, 2) + + ".vtk"; std::ofstream output (filename.c_str()); data_out.write_vtk (output); } - // After this, we would like to actually - // refine the mesh, in both $h$ and - // $p$. The way we are going to do this is - // as follows: first, we use the estimated - // error to flag those cells for refinement - // that have the largest error. This is - // what we have always done: + // After this, we would like to actually + // refine the mesh, in both $h$ and + // $p$. The way we are going to do this is + // as follows: first, we use the estimated + // error to flag those cells for refinement + // that have the largest error. 
This is + // what we have always done: { GridRefinement::refine_and_coarsen_fixed_number (triangulation, - estimated_error_per_cell, - 0.3, 0.03); - - // Next we would like to figure out which - // of the cells that have been flagged - // for refinement should actually have - // $p$ increased instead of $h$ - // decreased. The strategy we choose here - // is that we look at the smoothness - // indicators of those cells that are - // flagged for refinement, and increase - // $p$ for those with a smoothness larger - // than a certain threshold. For this, we - // first have to determine the maximal - // and minimal values of the smoothness - // indicators of all flagged cells, which - // we do using a loop over all cells and - // comparing current minimal and maximal - // values. (We start with the minimal and - // maximal values of all cells, a - // range within which the minimal and - // maximal values on cells flagged for - // refinement must surely lie.) Absent - // any better strategies, we will then - // set the threshold above which will - // increase $p$ instead of reducing $h$ - // as the mean value between minimal and - // maximal smoothness indicators on cells - // flagged for refinement: + estimated_error_per_cell, + 0.3, 0.03); + + // Next we would like to figure out which + // of the cells that have been flagged + // for refinement should actually have + // $p$ increased instead of $h$ + // decreased. The strategy we choose here + // is that we look at the smoothness + // indicators of those cells that are + // flagged for refinement, and increase + // $p$ for those with a smoothness larger + // than a certain threshold. For this, we + // first have to determine the maximal + // and minimal values of the smoothness + // indicators of all flagged cells, which + // we do using a loop over all cells and + // comparing current minimal and maximal + // values. (We start with the minimal and + // maximal values of all cells, a + // range within which the minimal and + // maximal values on cells flagged for + // refinement must surely lie.) 
Absent + // any better strategies, we will then + // set the threshold above which will + // increase $p$ instead of reducing $h$ + // as the mean value between minimal and + // maximal smoothness indicators on cells + // flagged for refinement: float max_smoothness = *std::min_element (smoothness_indicators.begin(), - smoothness_indicators.end()), - min_smoothness = *std::max_element (smoothness_indicators.begin(), - smoothness_indicators.end()); + smoothness_indicators.end()), + min_smoothness = *std::max_element (smoothness_indicators.begin(), + smoothness_indicators.end()); { - typename hp::DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(), - endc = dof_handler.end(); - for (unsigned int index=0; cell!=endc; ++cell, ++index) - if (cell->refine_flag_set()) - { - max_smoothness = std::max (max_smoothness, - smoothness_indicators(index)); - min_smoothness = std::min (min_smoothness, - smoothness_indicators(index)); - } + typename hp::DoFHandler::active_cell_iterator + cell = dof_handler.begin_active(), + endc = dof_handler.end(); + for (unsigned int index=0; cell!=endc; ++cell, ++index) + if (cell->refine_flag_set()) + { + max_smoothness = std::max (max_smoothness, + smoothness_indicators(index)); + min_smoothness = std::min (min_smoothness, + smoothness_indicators(index)); + } } const float threshold_smoothness = (max_smoothness + min_smoothness) / 2; - // With this, we can go back, loop over - // all cells again, and for those cells - // for which (i) the refinement flag is - // set, (ii) the smoothness indicator is - // larger than the threshold, and (iii) - // we still have a finite element with a - // polynomial degree higher than the - // current one in the finite element - // collection, we then increase the - // polynomial degree and in return remove - // the flag indicating that the cell - // should undergo bisection. For all - // other cells, the refinement flags - // remain untouched: + // With this, we can go back, loop over + // all cells again, and for those cells + // for which (i) the refinement flag is + // set, (ii) the smoothness indicator is + // larger than the threshold, and (iii) + // we still have a finite element with a + // polynomial degree higher than the + // current one in the finite element + // collection, we then increase the + // polynomial degree and in return remove + // the flag indicating that the cell + // should undergo bisection. For all + // other cells, the refinement flags + // remain untouched: { - typename hp::DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(), - endc = dof_handler.end(); - for (unsigned int index=0; cell!=endc; ++cell, ++index) - if (cell->refine_flag_set() - && - (smoothness_indicators(index) > threshold_smoothness) - && - (cell->active_fe_index()+1 < fe_collection.size())) - { - cell->clear_refine_flag(); - cell->set_active_fe_index (cell->active_fe_index() + 1); - } + typename hp::DoFHandler::active_cell_iterator + cell = dof_handler.begin_active(), + endc = dof_handler.end(); + for (unsigned int index=0; cell!=endc; ++cell, ++index) + if (cell->refine_flag_set() + && + (smoothness_indicators(index) > threshold_smoothness) + && + (cell->active_fe_index()+1 < fe_collection.size())) + { + cell->clear_refine_flag(); + cell->set_active_fe_index (cell->active_fe_index() + 1); + } } - // At the end of this procedure, we then - // refine the mesh. 
During this process, - // children of cells undergoing bisection - // inherit their mother cell's finite - // element index: + // At the end of this procedure, we then + // refine the mesh. During this process, + // children of cells undergoing bisection + // inherit their mother cell's finite + // element index: triangulation.execute_coarsening_and_refinement (); } } - // @sect4{LaplaceProblem::create_coarse_grid} - - // The following function is used when - // creating the initial grid. It is a - // specialization for the 2d case, i.e. a - // corresponding function needs to be - // implemented if the program is run in - // anything other then 2d. The function is - // actually stolen from step-14 and generates - // the same mesh used already there, i.e. the - // square domain with the square hole in the - // middle. The meaning of the different parts - // of this function are explained in the - // documentation of step-14: + // @sect4{LaplaceProblem::create_coarse_grid} + + // The following function is used when + // creating the initial grid. It is a + // specialization for the 2d case, i.e. a + // corresponding function needs to be + // implemented if the program is run in + // anything other then 2d. The function is + // actually stolen from step-14 and generates + // the same mesh used already there, i.e. the + // square domain with the square hole in the + // middle. The meaning of the different parts + // of this function are explained in the + // documentation of step-14: template <> void LaplaceProblem<2>::create_coarse_grid () { @@ -643,413 +643,413 @@ namespace Step27 static const Point<2> vertices_1[] = { Point<2> (-1., -1.), - Point<2> (-1./2, -1.), - Point<2> (0., -1.), - Point<2> (+1./2, -1.), - Point<2> (+1, -1.), - - Point<2> (-1., -1./2.), - Point<2> (-1./2, -1./2.), - Point<2> (0., -1./2.), - Point<2> (+1./2, -1./2.), - Point<2> (+1, -1./2.), - - Point<2> (-1., 0.), - Point<2> (-1./2, 0.), - Point<2> (+1./2, 0.), - Point<2> (+1, 0.), - - Point<2> (-1., 1./2.), - Point<2> (-1./2, 1./2.), - Point<2> (0., 1./2.), - Point<2> (+1./2, 1./2.), - Point<2> (+1, 1./2.), - - Point<2> (-1., 1.), - Point<2> (-1./2, 1.), - Point<2> (0., 1.), - Point<2> (+1./2, 1.), - Point<2> (+1, 1.) }; + Point<2> (-1./2, -1.), + Point<2> (0., -1.), + Point<2> (+1./2, -1.), + Point<2> (+1, -1.), + + Point<2> (-1., -1./2.), + Point<2> (-1./2, -1./2.), + Point<2> (0., -1./2.), + Point<2> (+1./2, -1./2.), + Point<2> (+1, -1./2.), + + Point<2> (-1., 0.), + Point<2> (-1./2, 0.), + Point<2> (+1./2, 0.), + Point<2> (+1, 0.), + + Point<2> (-1., 1./2.), + Point<2> (-1./2, 1./2.), + Point<2> (0., 1./2.), + Point<2> (+1./2, 1./2.), + Point<2> (+1, 1./2.), + + Point<2> (-1., 1.), + Point<2> (-1./2, 1.), + Point<2> (0., 1.), + Point<2> (+1./2, 1.), + Point<2> (+1, 1.) 
}; const unsigned int n_vertices = sizeof(vertices_1) / sizeof(vertices_1[0]); const std::vector > vertices (&vertices_1[0], - &vertices_1[n_vertices]); + &vertices_1[n_vertices]); static const int cell_vertices[][GeometryInfo::vertices_per_cell] = {{0, 1, 5, 6}, - {1, 2, 6, 7}, - {2, 3, 7, 8}, - {3, 4, 8, 9}, - {5, 6, 10, 11}, - {8, 9, 12, 13}, - {10, 11, 14, 15}, - {12, 13, 17, 18}, - {14, 15, 19, 20}, - {15, 16, 20, 21}, - {16, 17, 21, 22}, - {17, 18, 22, 23}}; + {1, 2, 6, 7}, + {2, 3, 7, 8}, + {3, 4, 8, 9}, + {5, 6, 10, 11}, + {8, 9, 12, 13}, + {10, 11, 14, 15}, + {12, 13, 17, 18}, + {14, 15, 19, 20}, + {15, 16, 20, 21}, + {16, 17, 21, 22}, + {17, 18, 22, 23}}; const unsigned int n_cells = sizeof(cell_vertices) / sizeof(cell_vertices[0]); std::vector > cells (n_cells, CellData()); for (unsigned int i=0; i::vertices_per_cell; - ++j) - cells[i].vertices[j] = cell_vertices[i][j]; - cells[i].material_id = 0; + for (unsigned int j=0; + j::vertices_per_cell; + ++j) + cells[i].vertices[j] = cell_vertices[i][j]; + cells[i].material_id = 0; } triangulation.create_triangulation (vertices, - cells, - SubCellData()); + cells, + SubCellData()); triangulation.refine_global (3); } - // @sect4{LaplaceProblem::run} + // @sect4{LaplaceProblem::run} - // This function implements the logic of the - // program, as did the respective function in - // most of the previous programs already, see - // for example step-6. - // - // Basically, it contains the adaptive loop: - // in the first iteration create a coarse - // grid, and then set up the linear system, - // assemble it, solve, and postprocess the - // solution including mesh refinement. Then - // start over again. In the meantime, also - // output some information for those staring - // at the screen trying to figure out what - // the program does: + // This function implements the logic of the + // program, as did the respective function in + // most of the previous programs already, see + // for example step-6. + // + // Basically, it contains the adaptive loop: + // in the first iteration create a coarse + // grid, and then set up the linear system, + // assemble it, solve, and postprocess the + // solution including mesh refinement. Then + // start over again. 
In the meantime, also + // output some information for those staring + // at the screen trying to figure out what + // the program does: template void LaplaceProblem::run () { for (unsigned int cycle=0; cycle<6; ++cycle) { - std::cout << "Cycle " << cycle << ':' << std::endl; - - if (cycle == 0) - create_coarse_grid (); - - setup_system (); - - std::cout << " Number of active cells: " - << triangulation.n_active_cells() - << std::endl - << " Number of degrees of freedom: " - << dof_handler.n_dofs() - << std::endl - << " Number of constraints : " - << constraints.n_constraints() - << std::endl; - - assemble_system (); - solve (); - postprocess (cycle); + std::cout << "Cycle " << cycle << ':' << std::endl; + + if (cycle == 0) + create_coarse_grid (); + + setup_system (); + + std::cout << " Number of active cells: " + << triangulation.n_active_cells() + << std::endl + << " Number of degrees of freedom: " + << dof_handler.n_dofs() + << std::endl + << " Number of constraints : " + << constraints.n_constraints() + << std::endl; + + assemble_system (); + solve (); + postprocess (cycle); } } - // @sect4{LaplaceProblem::estimate_smoothness} + // @sect4{LaplaceProblem::estimate_smoothness} - // This last function of significance - // implements the algorithm to estimate the - // smoothness exponent using the algorithms - // explained in detail in the - // introduction. We will therefore only - // comment on those points that are of - // implementational importance. + // This last function of significance + // implements the algorithm to estimate the + // smoothness exponent using the algorithms + // explained in detail in the + // introduction. We will therefore only + // comment on those points that are of + // implementational importance. template void LaplaceProblem:: estimate_smoothness (Vector &smoothness_indicators) const { - // The first thing we need to do is - // to define the Fourier vectors - // ${\bf k}$ for which we want to - // compute Fourier coefficients of - // the solution on each cell. In - // 2d, we pick those vectors ${\bf - // k}=(\pi i, \pi j)^T$ for which - // $\sqrt{i^2+j^2}\le N$, with - // $i,j$ integers and $N$ being the - // maximal polynomial degree we use - // for the finite elements in this - // program. The 3d case is handled - // analogously. 1d and dimensions - // higher than 3 are not - // implemented, and we guard our - // implementation by making sure - // that we receive an exception in - // case someone tries to compile - // the program for any of these - // dimensions. - // - // We exclude ${\bf k}=0$ to avoid problems - // computing $|{\bf k}|^{-mu}$ and $\ln - // |{\bf k}|$. The other vectors are stored - // in the field k_vectors. In - // addition, we store the square of the - // magnitude of each of these vectors (up - // to a factor $\pi^2$) in the - // k_vectors_magnitude array - // -- we will need that when we attempt to - // find out which of those Fourier - // coefficients corresponding to Fourier - // vectors of the same magnitude is the - // largest: + // The first thing we need to do is + // to define the Fourier vectors + // ${\bf k}$ for which we want to + // compute Fourier coefficients of + // the solution on each cell. In + // 2d, we pick those vectors ${\bf + // k}=(\pi i, \pi j)^T$ for which + // $\sqrt{i^2+j^2}\le N$, with + // $i,j$ integers and $N$ being the + // maximal polynomial degree we use + // for the finite elements in this + // program. The 3d case is handled + // analogously. 
1d and dimensions + // higher than 3 are not + // implemented, and we guard our + // implementation by making sure + // that we receive an exception in + // case someone tries to compile + // the program for any of these + // dimensions. + // + // We exclude ${\bf k}=0$ to avoid problems + // computing $|{\bf k}|^{-mu}$ and $\ln + // |{\bf k}|$. The other vectors are stored + // in the field k_vectors. In + // addition, we store the square of the + // magnitude of each of these vectors (up + // to a factor $\pi^2$) in the + // k_vectors_magnitude array + // -- we will need that when we attempt to + // find out which of those Fourier + // coefficients corresponding to Fourier + // vectors of the same magnitude is the + // largest: const unsigned int N = max_degree; std::vector > k_vectors; std::vector k_vectors_magnitude; switch (dim) { - case 2: - { - for (unsigned int i=0; i(numbers::PI * i, - numbers::PI * j)); - k_vectors_magnitude.push_back (i*i+j*j); - } - - break; - } - - case 3: - { - for (unsigned int i=0; i(numbers::PI * i, - numbers::PI * j, - numbers::PI * k)); - k_vectors_magnitude.push_back (i*i+j*j+k*k); - } - - break; - } - - default: - Assert (false, ExcNotImplemented()); + case 2: + { + for (unsigned int i=0; i(numbers::PI * i, + numbers::PI * j)); + k_vectors_magnitude.push_back (i*i+j*j); + } + + break; + } + + case 3: + { + for (unsigned int i=0; i(numbers::PI * i, + numbers::PI * j, + numbers::PI * k)); + k_vectors_magnitude.push_back (i*i+j*j+k*k); + } + + break; + } + + default: + Assert (false, ExcNotImplemented()); } - // After we have set up the Fourier - // vectors, we also store their total - // number for simplicity, and compute the - // logarithm of the magnitude of each of - // these vectors since we will need it many - // times over further down below: + // After we have set up the Fourier + // vectors, we also store their total + // number for simplicity, and compute the + // logarithm of the magnitude of each of + // these vectors since we will need it many + // times over further down below: const unsigned n_fourier_modes = k_vectors.size(); std::vector ln_k (n_fourier_modes); for (unsigned int i=0; i > > fourier_transform_matrices (fe_collection.size()); - // In order to compute them, we of - // course can't perform the Fourier - // transform analytically, but have - // to approximate it using - // quadrature. To this end, we use - // a quadrature formula that is - // obtained by iterating a 2-point - // Gauss formula as many times as - // the maximal exponent we use for - // the term $e^{i{\bf k}\cdot{\bf - // x}}$: + // In order to compute them, we of + // course can't perform the Fourier + // transform analytically, but have + // to approximate it using + // quadrature. To this end, we use + // a quadrature formula that is + // obtained by iterating a 2-point + // Gauss formula as many times as + // the maximal exponent we use for + // the term $e^{i{\bf k}\cdot{\bf + // x}}$: QGauss<1> base_quadrature (2); QIterated quadrature (base_quadrature, N); - // With this, we then loop over all finite - // elements in use, reinitialize the - // respective matrix ${\cal F}$ to the - // right size, and integrate each entry of - // the matrix numerically as ${\cal - // F}_{{\bf k},j}=\sum_q e^{i{\bf k}\cdot - // {\bf x}}\varphi_j({\bf x}_q) - // w_q$, where $x_q$ - // are the quadrature points and $w_q$ are - // the quadrature weights. Note that the - // imaginary unit $i=\sqrt{-1}$ is obtained - // from the standard C++ classes using - // std::complex@(0,1). 
- - // Because we work on the unit cell, we can - // do all this work without a mapping from - // reference to real cell and consequently - // do not need the FEValues class. + // With this, we then loop over all finite + // elements in use, reinitialize the + // respective matrix ${\cal F}$ to the + // right size, and integrate each entry of + // the matrix numerically as ${\cal + // F}_{{\bf k},j}=\sum_q e^{i{\bf k}\cdot + // {\bf x}}\varphi_j({\bf x}_q) + // w_q$, where $x_q$ + // are the quadrature points and $w_q$ are + // the quadrature weights. Note that the + // imaginary unit $i=\sqrt{-1}$ is obtained + // from the standard C++ classes using + // std::complex@(0,1). + + // Because we work on the unit cell, we can + // do all this work without a mapping from + // reference to real cell and consequently + // do not need the FEValues class. for (unsigned int fe=0; fe sum = 0; - for (unsigned int q=0; q x_q = quadrature.point(q); - sum += std::exp(std::complex(0,1) * - (k_vectors[k] * x_q)) * - fe_collection[fe].shape_value(j,x_q) * - quadrature.weight(q); - } - fourier_transform_matrices[fe](k,j) - = sum / std::pow(2*numbers::PI, 1.*dim/2); - } + fourier_transform_matrices[fe].reinit (n_fourier_modes, + fe_collection[fe].dofs_per_cell); + + for (unsigned int k=0; k sum = 0; + for (unsigned int q=0; q x_q = quadrature.point(q); + sum += std::exp(std::complex(0,1) * + (k_vectors[k] * x_q)) * + fe_collection[fe].shape_value(j,x_q) * + quadrature.weight(q); + } + fourier_transform_matrices[fe](k,j) + = sum / std::pow(2*numbers::PI, 1.*dim/2); + } } - // The next thing is to loop over all cells - // and do our work there, i.e. to locally - // do the Fourier transform and estimate - // the decay coefficient. We will use the - // following two arrays as scratch arrays - // in the loop and allocate them here to - // avoid repeated memory allocations: + // The next thing is to loop over all cells + // and do our work there, i.e. to locally + // do the Fourier transform and estimate + // the decay coefficient. We will use the + // following two arrays as scratch arrays + // in the loop and allocate them here to + // avoid repeated memory allocations: std::vector > fourier_coefficients (n_fourier_modes); Vector local_dof_values; - // Then here is the loop: + // Then here is the loop: typename hp::DoFHandler::active_cell_iterator cell = dof_handler.begin_active(), endc = dof_handler.end(); for (unsigned int index=0; cell!=endc; ++cell, ++index) { - // Inside the loop, we first need to - // get the values of the local degrees - // of freedom (which we put into the - // local_dof_values array - // after setting it to the right size) - // and then need to compute the Fourier - // transform by multiplying this vector - // with the matrix ${\cal F}$ - // corresponding to this finite - // element. We need to write out the - // multiplication by hand because the - // objects holding the data do not have - // vmult-like functions - // declared: - local_dof_values.reinit (cell->get_fe().dofs_per_cell); - cell->get_dof_values (solution, local_dof_values); - - for (unsigned int f=0; fget_fe().dofs_per_cell; ++i) - fourier_coefficients[f] += - fourier_transform_matrices[cell->active_fe_index()](f,i) - * - local_dof_values(i); - } - - // The next thing, as explained in the - // introduction, is that we wanted to - // only fit our exponential decay of - // Fourier coefficients to the largest - // coefficients for each possible value - // of $|{\bf k}|$. 
To this end, we - // create a map that for each magnitude - // $|{\bf k}|$ stores the largest $|\hat - // U_{{\bf k}}|$ found so far, i.e. we - // overwrite the existing value (or add - // it to the map) if no value for the - // current $|{\bf k}|$ exists yet, or if - // the current value is larger than the - // previously stored one: - std::map k_to_max_U_map; - for (unsigned int f=0; flocal_dof_values array + // after setting it to the right size) + // and then need to compute the Fourier + // transform by multiplying this vector + // with the matrix ${\cal F}$ + // corresponding to this finite + // element. We need to write out the + // multiplication by hand because the + // objects holding the data do not have + // vmult-like functions + // declared: + local_dof_values.reinit (cell->get_fe().dofs_per_cell); + cell->get_dof_values (solution, local_dof_values); + + for (unsigned int f=0; fget_fe().dofs_per_cell; ++i) + fourier_coefficients[f] += + fourier_transform_matrices[cell->active_fe_index()](f,i) + * + local_dof_values(i); + } + + // The next thing, as explained in the + // introduction, is that we wanted to + // only fit our exponential decay of + // Fourier coefficients to the largest + // coefficients for each possible value + // of $|{\bf k}|$. To this end, we + // create a map that for each magnitude + // $|{\bf k}|$ stores the largest $|\hat + // U_{{\bf k}}|$ found so far, i.e. we + // overwrite the existing value (or add + // it to the map) if no value for the + // current $|{\bf k}|$ exists yet, or if + // the current value is larger than the + // previously stored one: + std::map k_to_max_U_map; + for (unsigned int f=0; ftry block and catch whatever - // exceptions are thrown, thereby producing - // meaningful output if anything should go - // wrong: + // The main function is again verbatim what + // we had before: wrap creating and running + // an object of the main class into a + // try block and catch whatever + // exceptions are thrown, thereby producing + // meaningful output if anything should go + // wrong: int main () { try @@ -1065,25 +1065,25 @@ int main () catch (std::exception &exc) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Exception on processing: " << std::endl - << exc.what() << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << exc.what() << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } catch (...) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Unknown exception!" << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << "Aborting!" 
<< std::endl + << "----------------------------------------------------" + << std::endl; return 1; } diff --git a/deal.II/examples/step-28/step-28.cc b/deal.II/examples/step-28/step-28.cc index 1d27bde4bf..3cf757a6b6 100644 --- a/deal.II/examples/step-28/step-28.cc +++ b/deal.II/examples/step-28/step-28.cc @@ -82,94 +82,94 @@ #include #include - // The last step is as in all - // previous programs: + // The last step is as in all + // previous programs: namespace Step28 { using namespace dealii; - // @sect3{Material data} - - // First up, we need to define a - // class that provides material data - // (including diffusion coefficients, - // removal cross sections, scattering - // cross sections, fission cross - // sections and fission spectra) to - // the main class. - // - // The parameter to the constructor - // determines for how many energy - // groups we set up the relevant - // tables. At present, this program - // only includes data for 2 energy - // groups, but a more sophisticated - // program may be able to initialize - // the data structures for more - // groups as well, depending on how - // many energy groups are selected in - // the parameter file. - // - // For each of the different - // coefficient types, there is one - // function that returns the value of - // this coefficient for a particular - // energy group (or combination of - // energy groups, as for the - // distribution cross section - // $\chi_g\nu\Sigma_{f,g'}$ or - // scattering cross section - // $\Sigma_{s,g'\to g}$). In addition - // to the energy group or groups, - // these coefficients depend on the - // type of fuel or control rod, as - // explained in the introduction. The - // functions therefore take an - // additional parameter, @p - // material_id, that identifies the - // particular kind of rod. Within - // this program, we use - // n_materials=8 - // different kinds of rods. - // - // Except for the scattering cross - // section, each of the coefficients - // therefore can be represented as an - // entry in a two-dimensional array - // of floating point values indexed - // by the energy group number as well - // as the material ID. The Table - // class template is the ideal way to - // store such data. Finally, the - // scattering coefficient depends on - // both two energy group indices and - // therefore needs to be stored in a - // three-dimensional array, for which - // we again use the Table class, - // where this time the first template - // argument (denoting the - // dimensionality of the array) of - // course needs to be three: + // @sect3{Material data} + + // First up, we need to define a + // class that provides material data + // (including diffusion coefficients, + // removal cross sections, scattering + // cross sections, fission cross + // sections and fission spectra) to + // the main class. + // + // The parameter to the constructor + // determines for how many energy + // groups we set up the relevant + // tables. At present, this program + // only includes data for 2 energy + // groups, but a more sophisticated + // program may be able to initialize + // the data structures for more + // groups as well, depending on how + // many energy groups are selected in + // the parameter file. 
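  // [Editor's note -- a hedged usage sketch, not part of the patch itself.]
  // A possible way the class declared below might be used, assuming the
  // two-group data set this program ships with; the material id (here 5) is
  // arbitrary and must be one of the n_materials=8 kinds of rods, and the
  // accessor functions are the ones described in the following paragraphs:
  //
  //   MaterialData material_data (2);   // 2 energy groups
  //   const double D_0  = material_data.get_diffusion_coefficient (0, 5);
  //   const double sr_1 = material_data.get_removal_XS (1, 5);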
+ // + // For each of the different + // coefficient types, there is one + // function that returns the value of + // this coefficient for a particular + // energy group (or combination of + // energy groups, as for the + // distribution cross section + // $\chi_g\nu\Sigma_{f,g'}$ or + // scattering cross section + // $\Sigma_{s,g'\to g}$). In addition + // to the energy group or groups, + // these coefficients depend on the + // type of fuel or control rod, as + // explained in the introduction. The + // functions therefore take an + // additional parameter, @p + // material_id, that identifies the + // particular kind of rod. Within + // this program, we use + // n_materials=8 + // different kinds of rods. + // + // Except for the scattering cross + // section, each of the coefficients + // therefore can be represented as an + // entry in a two-dimensional array + // of floating point values indexed + // by the energy group number as well + // as the material ID. The Table + // class template is the ideal way to + // store such data. Finally, the + // scattering coefficient depends on + // both two energy group indices and + // therefore needs to be stored in a + // three-dimensional array, for which + // we again use the Table class, + // where this time the first template + // argument (denoting the + // dimensionality of the array) of + // course needs to be three: class MaterialData { public: MaterialData (const unsigned int n_groups); double get_diffusion_coefficient (const unsigned int group, - const unsigned int material_id) const; + const unsigned int material_id) const; double get_removal_XS (const unsigned int group, - const unsigned int material_id) const; + const unsigned int material_id) const; double get_fission_XS (const unsigned int group, - const unsigned int material_id) const; + const unsigned int material_id) const; double get_fission_dist_XS (const unsigned int group_1, - const unsigned int group_2, - const unsigned int material_id) const; + const unsigned int group_2, + const unsigned int material_id) const; double get_scattering_XS (const unsigned int group_1, - const unsigned int group_2, - const unsigned int material_id) const; + const unsigned int group_2, + const unsigned int material_id) const; double get_fission_spectrum (const unsigned int group, - const unsigned int material_id) const; + const unsigned int material_id) const; private: const unsigned int n_groups; @@ -182,122 +182,122 @@ namespace Step28 Table<2,double> chi; }; - // The constructor of the class is - // used to initialize all the - // material data arrays. It takes the - // number of energy groups as an - // argument (an throws an error if - // that value is not equal to two, - // since at presently only data for - // two energy groups is implemented; - // however, using this, the function - // remains flexible and extendible - // into the future). In the member - // initialization part at the - // beginning, it also resizes the - // arrays to their correct sizes. - // - // At present, material data is - // stored for 8 different types of - // material. This, as well, may - // easily be extended in the future. + // The constructor of the class is + // used to initialize all the + // material data arrays. 
It takes the + // number of energy groups as an + // argument (an throws an error if + // that value is not equal to two, + // since at presently only data for + // two energy groups is implemented; + // however, using this, the function + // remains flexible and extendible + // into the future). In the member + // initialization part at the + // beginning, it also resizes the + // arrays to their correct sizes. + // + // At present, material data is + // stored for 8 different types of + // material. This, as well, may + // easily be extended in the future. MaterialData::MaterialData (const unsigned int n_groups) - : - n_groups (n_groups), - n_materials (8), - diffusion (n_materials, n_groups), - sigma_r (n_materials, n_groups), - nu_sigma_f (n_materials, n_groups), - sigma_s (n_materials, n_groups, n_groups), - chi (n_materials, n_groups) + : + n_groups (n_groups), + n_materials (8), + diffusion (n_materials, n_groups), + sigma_r (n_materials, n_groups), + nu_sigma_f (n_materials, n_groups), + sigma_s (n_materials, n_groups, n_groups), + chi (n_materials, n_groups) { switch (n_groups) { - case 2: - { - for (unsigned int m=0; mEnergyGroup class} - - // The first interesting class is the - // one that contains everything that - // is specific to a single energy - // group. To group things that belong - // together into individual objects, - // we declare a structure that holds - // the Triangulation and DoFHandler - // objects for the mesh used for a - // single energy group, and a number - // of other objects and member - // functions that we will discuss in - // the following sections. - // - // The main reason for this class is - // as follows: for both the forward - // problem (with a specified right - // hand side) as well as for the - // eigenvalue problem, one typically - // solves a sequence of problems for - // a single energy group each, rather - // than the fully coupled - // problem. This becomes - // understandable once one realizes - // that the system matrix for a - // single energy group is symmetric - // and positive definite (it is - // simply a diffusion operator), - // whereas the matrix for the fully - // coupled problem is generally - // nonsymmetric and not definite. It - // is also very large and quite full - // if more than a few energy groups - // are involved. - // - // Let us first look at the equation - // to solve in the case of an - // external right hand side (for the time - // independent case): - // @f{eqnarray*} - // -\nabla \cdot(D_g(x) \nabla \phi_g(x)) - // + - // \Sigma_{r,g}(x)\phi_g(x) - // = - // \chi_g\sum_{g'=1}^G\nu\Sigma_{f,g'}(x)\phi_{g'}(x) - // + - // \sum_{g'\ne g}\Sigma_{s,g'\to g}(x)\phi_{g'}(x) - // + - // s_{\mathrm{ext},g}(x) - // @f} - // - // We would typically solve this - // equation by moving all the terms - // on the right hand side with $g'=g$ - // to the left hand side, and solving - // for $\phi_g$. Of course, we don't - // know $\phi_{g'}$ yet, since the - // equations for those variables - // include right hand side terms - // involving $\phi_g$. 
What one - // typically does in such situations - // is to iterate: compute - // @f{eqnarray*} - // -\nabla \cdot(D_g(x) \nabla \phi^{(n)}_g(x)) - // &+& - // \Sigma_{r,g}(x)\phi^{(n)}_g(x) - // \\ &=& - // \chi_g\sum_{g'=1}^{g-1}\nu\Sigma_{f,g'}(x)\phi^{(n)}_{g'}(x) - // + - // \chi_g\sum_{g'=g}^G\nu\Sigma_{f,g'}(x)\phi^{(n-1)}_{g'}(x) - // + - // \sum_{g'\ne g, g'g}\Sigma_{s,g'\to g}(x)\phi^{(n-1)}_{g'}(x) - // + - // s_{\mathrm{ext},g}(x) - // @f} - // - // In other words, we solve the - // equation one by one, using values - // for $\phi_{g'}$ from the previous - // iteration $n-1$ if $g'\ge g$ and - // already computed values for - // $\phi_{g'}$ from the present - // iteration if $g'EnergyGroup class} + + // The first interesting class is the + // one that contains everything that + // is specific to a single energy + // group. To group things that belong + // together into individual objects, + // we declare a structure that holds + // the Triangulation and DoFHandler + // objects for the mesh used for a + // single energy group, and a number + // of other objects and member + // functions that we will discuss in + // the following sections. + // + // The main reason for this class is + // as follows: for both the forward + // problem (with a specified right + // hand side) as well as for the + // eigenvalue problem, one typically + // solves a sequence of problems for + // a single energy group each, rather + // than the fully coupled + // problem. This becomes + // understandable once one realizes + // that the system matrix for a + // single energy group is symmetric + // and positive definite (it is + // simply a diffusion operator), + // whereas the matrix for the fully + // coupled problem is generally + // nonsymmetric and not definite. It + // is also very large and quite full + // if more than a few energy groups + // are involved. + // + // Let us first look at the equation + // to solve in the case of an + // external right hand side (for the time + // independent case): + // @f{eqnarray*} + // -\nabla \cdot(D_g(x) \nabla \phi_g(x)) + // + + // \Sigma_{r,g}(x)\phi_g(x) + // = + // \chi_g\sum_{g'=1}^G\nu\Sigma_{f,g'}(x)\phi_{g'}(x) + // + + // \sum_{g'\ne g}\Sigma_{s,g'\to g}(x)\phi_{g'}(x) + // + + // s_{\mathrm{ext},g}(x) + // @f} + // + // We would typically solve this + // equation by moving all the terms + // on the right hand side with $g'=g$ + // to the left hand side, and solving + // for $\phi_g$. Of course, we don't + // know $\phi_{g'}$ yet, since the + // equations for those variables + // include right hand side terms + // involving $\phi_g$. 
What one + // typically does in such situations + // is to iterate: compute + // @f{eqnarray*} + // -\nabla \cdot(D_g(x) \nabla \phi^{(n)}_g(x)) + // &+& + // \Sigma_{r,g}(x)\phi^{(n)}_g(x) + // \\ &=& + // \chi_g\sum_{g'=1}^{g-1}\nu\Sigma_{f,g'}(x)\phi^{(n)}_{g'}(x) + // + + // \chi_g\sum_{g'=g}^G\nu\Sigma_{f,g'}(x)\phi^{(n-1)}_{g'}(x) + // + + // \sum_{g'\ne g, g'g}\Sigma_{s,g'\to g}(x)\phi^{(n-1)}_{g'}(x) + // + + // s_{\mathrm{ext},g}(x) + // @f} + // + // In other words, we solve the + // equation one by one, using values + // for $\phi_{g'}$ from the previous + // iteration $n-1$ if $g'\ge g$ and + // already computed values for + // $\phi_{g'}$ from the present + // iteration if $g' class EnergyGroup { public: - // @sect5{Public member functions} - // - // The class has a good number of - // public member functions, since - // its the way it operates is - // controlled from the outside, - // and therefore all functions - // that do something significant - // need to be called from another - // class. Let's start off with - // book-keeping: the class - // obviously needs to know which - // energy group it represents, - // which material data to use, - // and from what coarse grid to - // start. The constructor takes - // this information and - // initializes the relevant - // member variables with that - // (see below). - // - // Then we also need functions - // that set up the linear system, - // i.e. correctly size the matrix - // and its sparsity pattern, etc, - // given a finite element object - // to use. The - // setup_linear_system - // function does that. Finally, - // for this initial block, there - // are two functions that return - // the number of active cells and - // degrees of freedom used in - // this object -- using this, we - // can make the triangulation and - // DoF handler member variables - // private, and do not have to - // grant external use to it, - // enhancing encapsulation: + // @sect5{Public member functions} + // + // The class has a good number of + // public member functions, since + // its the way it operates is + // controlled from the outside, + // and therefore all functions + // that do something significant + // need to be called from another + // class. Let's start off with + // book-keeping: the class + // obviously needs to know which + // energy group it represents, + // which material data to use, + // and from what coarse grid to + // start. The constructor takes + // this information and + // initializes the relevant + // member variables with that + // (see below). + // + // Then we also need functions + // that set up the linear system, + // i.e. correctly size the matrix + // and its sparsity pattern, etc, + // given a finite element object + // to use. The + // setup_linear_system + // function does that. 
Finally, + // for this initial block, there + // are two functions that return + // the number of active cells and + // degrees of freedom used in + // this object -- using this, we + // can make the triangulation and + // DoF handler member variables + // private, and do not have to + // grant external use to it, + // enhancing encapsulation: EnergyGroup (const unsigned int group, - const MaterialData &material_data, - const Triangulation &coarse_grid, - const FiniteElement &fe); + const MaterialData &material_data, + const Triangulation &coarse_grid, + const FiniteElement &fe); void setup_linear_system (); unsigned int n_active_cells () const; unsigned int n_dofs () const; - // Then there are functions that - // assemble the linear system for - // each iteration and the present - // energy group. Note that the - // matrix is independent of the - // iteration number, so only has - // to be computed once for each - // refinement cycle. The - // situation is a bit more - // involved for the right hand - // side that has to be updated in - // each inverse power iteration, - // and that is further - // complicated by the fact that - // computing it may involve - // several different meshes as - // explained in the - // introduction. To make things - // more flexible with regard to - // solving the forward or the - // eigenvalue problem, we split - // the computation of the right - // hand side into a function that - // assembles the extraneous - // source and in-group - // contributions (which we will - // call with a zero function as - // source terms for the - // eigenvalue problem) and one - // that computes contributions to - // the right hand side from - // another energy group: + // Then there are functions that + // assemble the linear system for + // each iteration and the present + // energy group. Note that the + // matrix is independent of the + // iteration number, so only has + // to be computed once for each + // refinement cycle. The + // situation is a bit more + // involved for the right hand + // side that has to be updated in + // each inverse power iteration, + // and that is further + // complicated by the fact that + // computing it may involve + // several different meshes as + // explained in the + // introduction. To make things + // more flexible with regard to + // solving the forward or the + // eigenvalue problem, we split + // the computation of the right + // hand side into a function that + // assembles the extraneous + // source and in-group + // contributions (which we will + // call with a zero function as + // source terms for the + // eigenvalue problem) and one + // that computes contributions to + // the right hand side from + // another energy group: void assemble_system_matrix (); void assemble_ingroup_rhs (const Function &extraneous_source); void assemble_cross_group_rhs (const EnergyGroup &g_prime); - // Next we need a set of - // functions that actually - // compute the solution of a - // linear system, and do - // something with it (such as - // computing the fission source - // contribution mentioned in the - // introduction, writing - // graphical information to an - // output file, computing error - // indicators, or actually - // refining the grid based on - // these criteria and thresholds - // for refinement and - // coarsening). 
All these - // functions will later be called - // from the driver class - // NeutronDiffusionProblem, - // or any other class you may - // want to implement to solve a - // problem involving the neutron - // flux equations: + // Next we need a set of + // functions that actually + // compute the solution of a + // linear system, and do + // something with it (such as + // computing the fission source + // contribution mentioned in the + // introduction, writing + // graphical information to an + // output file, computing error + // indicators, or actually + // refining the grid based on + // these criteria and thresholds + // for refinement and + // coarsening). All these + // functions will later be called + // from the driver class + // NeutronDiffusionProblem, + // or any other class you may + // want to implement to solve a + // problem involving the neutron + // flux equations: void solve (); double get_fission_source () const; @@ -627,56 +627,56 @@ namespace Step28 void estimate_errors (Vector &error_indicators) const; void refine_grid (const Vector &error_indicators, - const double refine_threshold, - const double coarsen_threshold); - - // @sect5{Public data members} - // - // As is good practice in object - // oriented programming, we hide - // most data members by making - // them private. However, we have - // to grant the class that drives - // the process access to the - // solution vector as well as the - // solution of the previous - // iteration, since in the power - // iteration, the solution vector - // is scaled in every iteration - // by the present guess of the - // eigenvalue we are looking for: + const double refine_threshold, + const double coarsen_threshold); + + // @sect5{Public data members} + // + // As is good practice in object + // oriented programming, we hide + // most data members by making + // them private. However, we have + // to grant the class that drives + // the process access to the + // solution vector as well as the + // solution of the previous + // iteration, since in the power + // iteration, the solution vector + // is scaled in every iteration + // by the present guess of the + // eigenvalue we are looking for: public: Vector solution; Vector solution_old; - // @sect5{Private data members} - // - // The rest of the data members - // are private. Compared to all - // the previous tutorial - // programs, the only new data - // members are an integer storing - // which energy group this object - // represents, and a reference to - // the material data object that - // this object's constructor gets - // passed from the driver - // class. Likewise, the - // constructor gets a reference - // to the finite element object - // we are to use. - // - // Finally, we have to apply - // boundary values to the linear - // system in each iteration, - // i.e. quite frequently. Rather - // than interpolating them every - // time, we interpolate them once - // on each new mesh and then - // store them along with all the - // other data of this class: + // @sect5{Private data members} + // + // The rest of the data members + // are private. Compared to all + // the previous tutorial + // programs, the only new data + // members are an integer storing + // which energy group this object + // represents, and a reference to + // the material data object that + // this object's constructor gets + // passed from the driver + // class. Likewise, the + // constructor gets a reference + // to the finite element object + // we are to use. 
+ // + // Finally, we have to apply + // boundary values to the linear + // system in each iteration, + // i.e. quite frequently. Rather + // than interpolating them every + // time, we interpolate them once + // on each new mesh and then + // store them along with all the + // other data of this class: private: const unsigned int group; @@ -695,64 +695,64 @@ namespace Step28 ConstraintMatrix hanging_node_constraints; - // @sect5{Private member functionss} - // - // There is one private member - // function in this class. It - // recursively walks over cells - // of two meshes to compute the - // cross-group right hand side - // terms. The algorithm for this - // is explained in the - // introduction to this - // program. The arguments to this - // function are a reference to an - // object representing the energy - // group against which we want to - // integrate a right hand side - // term, an iterator to a cell of - // the mesh used for the present - // energy group, an iterator to a - // corresponding cell on the - // other mesh, and the matrix - // that interpolates the degrees - // of freedom from the coarser of - // the two cells to the finer - // one: + // @sect5{Private member functionss} + // + // There is one private member + // function in this class. It + // recursively walks over cells + // of two meshes to compute the + // cross-group right hand side + // terms. The algorithm for this + // is explained in the + // introduction to this + // program. The arguments to this + // function are a reference to an + // object representing the energy + // group against which we want to + // integrate a right hand side + // term, an iterator to a cell of + // the mesh used for the present + // energy group, an iterator to a + // corresponding cell on the + // other mesh, and the matrix + // that interpolates the degrees + // of freedom from the coarser of + // the two cells to the finer + // one: private: void assemble_cross_group_rhs_recursive (const EnergyGroup &g_prime, - const typename DoFHandler::cell_iterator &cell_g, - const typename DoFHandler::cell_iterator &cell_g_prime, - const FullMatrix prolongation_matrix); + const typename DoFHandler::cell_iterator &cell_g, + const typename DoFHandler::cell_iterator &cell_g_prime, + const FullMatrix prolongation_matrix); }; - // @sect4{Implementation of the EnergyGroup class} - - // The first few functions of this - // class are mostly - // self-explanatory. The constructor - // only sets a few data members and - // creates a copy of the given - // triangulation as the base for the - // triangulation used for this energy - // group. The next two functions - // simply return data from private - // data members, thereby enabling us - // to make these data members - // private. + // @sect4{Implementation of the EnergyGroup class} + + // The first few functions of this + // class are mostly + // self-explanatory. The constructor + // only sets a few data members and + // creates a copy of the given + // triangulation as the base for the + // triangulation used for this energy + // group. The next two functions + // simply return data from private + // data members, thereby enabling us + // to make these data members + // private. 
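Before looking at the constructor below, it may help to see how this interface is meant to be used from the outside. The following is only a sketch, not code from this patch: it assumes a space dimension dim, a group count n_groups, and objects material_data, coarse_grid, and fe with the same meaning as in the surrounding program, and shows how a driver might create one EnergyGroup object per energy group and size its linear system:

    std::vector<EnergyGroup<dim>*> energy_groups;
    for (unsigned int group=0; group<n_groups; ++group)
      {
        // one object per energy group, all starting from the same
        // coarse mesh and using the same finite element:
        energy_groups.push_back (new EnergyGroup<dim> (group, material_data,
                                                       coarse_grid, fe));
        energy_groups.back()->setup_linear_system ();
      }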
template EnergyGroup::EnergyGroup (const unsigned int group, - const MaterialData &material_data, - const Triangulation &coarse_grid, - const FiniteElement &fe) - : - group (group), - material_data (material_data), - fe (fe), - dof_handler (triangulation) + const MaterialData &material_data, + const Triangulation &coarse_grid, + const FiniteElement &fe) + : + group (group), + material_data (material_data), + fe (fe), + dof_handler (triangulation) { triangulation.copy_triangulation (coarse_grid); dof_handler.distribute_dofs (fe); @@ -778,27 +778,27 @@ namespace Step28 - // @sect5{EnergyGroup::setup_linear_system} - // - // The first "real" function is the - // one that sets up the mesh, - // matrices, etc, on the new mesh or - // after mesh refinement. We use this - // function to initialize sparse - // system matrices, and the right - // hand side vector. If the solution - // vector has never been set before - // (as indicated by a zero size), we - // also initialize it and set it to a - // default value. We don't do that if - // it already has a non-zero size - // (i.e. this function is called - // after mesh refinement) since in - // that case we want to preserve the - // solution across mesh refinement - // (something we do in the - // EnergyGroup::refine_grid - // function). + // @sect5{EnergyGroup::setup_linear_system} + // + // The first "real" function is the + // one that sets up the mesh, + // matrices, etc, on the new mesh or + // after mesh refinement. We use this + // function to initialize sparse + // system matrices, and the right + // hand side vector. If the solution + // vector has never been set before + // (as indicated by a zero size), we + // also initialize it and set it to a + // default value. We don't do that if + // it already has a non-zero size + // (i.e. this function is called + // after mesh refinement) since in + // that case we want to preserve the + // solution across mesh refinement + // (something we do in the + // EnergyGroup::refine_grid + // function). template void EnergyGroup::setup_linear_system () @@ -807,13 +807,13 @@ namespace Step28 hanging_node_constraints.clear (); DoFTools::make_hanging_node_constraints (dof_handler, - hanging_node_constraints); + hanging_node_constraints); hanging_node_constraints.close (); system_matrix.clear (); sparsity_pattern.reinit (n_dofs, n_dofs, - dof_handler.max_couplings_between_dofs()); + dof_handler.max_couplings_between_dofs()); DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); hanging_node_constraints.condense (sparsity_pattern); sparsity_pattern.compress (); @@ -824,100 +824,100 @@ namespace Step28 if (solution.size() == 0) { - solution.reinit (n_dofs); - solution_old.reinit(n_dofs); - solution_old = 1.0; - solution = solution_old; + solution.reinit (n_dofs); + solution_old.reinit(n_dofs); + solution_old = 1.0; + solution = solution_old; } - // At the end of this function, we - // update the list of boundary - // nodes and their values, by first - // clearing this list and the - // re-interpolating boundary values - // (remember that this function is - // called after first setting up - // the mesh, and each time after - // mesh refinement). - // - // To understand the code, it is - // necessary to realize that we - // create the mesh using the - // GridGenerator::subdivided_hyper_rectangle - // function (in - // NeutronDiffusionProblem::initialize_problem) - // where we set the last parameter - // to true. This means that - // boundaries of the domain are - // "colored", i.e. 
the four (or - // six, in 3d) sides of the domain - // are assigned different boundary - // indicators. As it turns out, the - // bottom boundary gets indicator - // zero, the top one boundary - // indicator one, and left and - // right boundaries get indicators - // two and three, respectively. - // - // In this program, we simulate - // only one, namely the top right, - // quarter of a reactor. That is, - // we want to interpolate boundary - // conditions only on the top and - // right boundaries, while do - // nothing on the bottom and left - // boundaries (i.e. impose natural, - // no-flux Neumann boundary - // conditions). This is most easily - // generalized to arbitrary - // dimension by saying that we want - // to interpolate on those - // boundaries with indicators 1, 3, - // ..., which we do in the - // following loop (note that calls - // to - // VectorTools::interpolate_boundary_values - // are additive, i.e. they do not - // first clear the boundary value - // map): + // At the end of this function, we + // update the list of boundary + // nodes and their values, by first + // clearing this list and the + // re-interpolating boundary values + // (remember that this function is + // called after first setting up + // the mesh, and each time after + // mesh refinement). + // + // To understand the code, it is + // necessary to realize that we + // create the mesh using the + // GridGenerator::subdivided_hyper_rectangle + // function (in + // NeutronDiffusionProblem::initialize_problem) + // where we set the last parameter + // to true. This means that + // boundaries of the domain are + // "colored", i.e. the four (or + // six, in 3d) sides of the domain + // are assigned different boundary + // indicators. As it turns out, the + // bottom boundary gets indicator + // zero, the top one boundary + // indicator one, and left and + // right boundaries get indicators + // two and three, respectively. + // + // In this program, we simulate + // only one, namely the top right, + // quarter of a reactor. That is, + // we want to interpolate boundary + // conditions only on the top and + // right boundaries, while do + // nothing on the bottom and left + // boundaries (i.e. impose natural, + // no-flux Neumann boundary + // conditions). This is most easily + // generalized to arbitrary + // dimension by saying that we want + // to interpolate on those + // boundaries with indicators 1, 3, + // ..., which we do in the + // following loop (note that calls + // to + // VectorTools::interpolate_boundary_values + // are additive, i.e. they do not + // first clear the boundary value + // map): boundary_values.clear(); for (unsigned int i=0; i(), - boundary_values); + 2*i+1, + ZeroFunction(), + boundary_values); } - // @sect5{EnergyGroup::assemble_system_matrix} - // - // Next we need functions assembling - // the system matrix and right hand - // sides. Assembling the matrix is - // straightforward given the - // equations outlined in the - // introduction as well as what we've - // seen in previous example - // programs. Note the use of - // cell->material_id() to get at - // the kind of material from which a - // cell is made up of. Note also how - // we set the order of the quadrature - // formula so that it is always - // appropriate for the finite element - // in use. - // - // Finally, note that since we only - // assemble the system matrix here, - // we can't yet eliminate boundary - // values (we need the right hand - // side vector for this). 
We defer - // this to the EnergyGroup::solve - // function, at which point all the - // information is available. + // @sect5{EnergyGroup::assemble_system_matrix} + // + // Next we need functions assembling + // the system matrix and right hand + // sides. Assembling the matrix is + // straightforward given the + // equations outlined in the + // introduction as well as what we've + // seen in previous example + // programs. Note the use of + // cell->material_id() to get at + // the kind of material from which a + // cell is made up of. Note also how + // we set the order of the quadrature + // formula so that it is always + // appropriate for the finite element + // in use. + // + // Finally, note that since we only + // assemble the system matrix here, + // we can't yet eliminate boundary + // values (we need the right hand + // side vector for this). We defer + // this to the EnergyGroup::solve + // function, at which point all the + // information is available. template void EnergyGroup::assemble_system_matrix () @@ -925,8 +925,8 @@ namespace Step28 const QGauss quadrature_formula(fe.degree + 1); FEValues fe_values (fe, quadrature_formula, - update_values | update_gradients | - update_JxW_values); + update_values | update_gradients | + update_JxW_values); const unsigned int dofs_per_cell = fe.dofs_per_cell; const unsigned int n_q_points = quadrature_formula.size(); @@ -942,35 +942,35 @@ namespace Step28 for (; cell!=endc; ++cell) { - cell_matrix = 0; - - fe_values.reinit (cell); - - const double diffusion_coefficient - = material_data.get_diffusion_coefficient (group, cell->material_id()); - const double removal_XS - = material_data.get_removal_XS (group,cell->material_id()); - - for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); - - for (unsigned int i=0; imaterial_id()); + const double removal_XS + = material_data.get_removal_XS (group,cell->material_id()); + + for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); + + for (unsigned int i=0; iEnergyGroup::assemble_ingroup_rhs} - // - // As explained in the documentation - // of the EnergyGroup class, we - // split assembling the right hand - // side into two parts: the ingroup - // and the cross-group - // couplings. First, we need a - // function to assemble the right - // hand side of one specific group - // here, i.e. including an extraneous - // source (that we will set to zero - // for the eigenvalue problem) as - // well as the ingroup fission - // contributions. (In-group - // scattering has already been - // accounted for with the definition - // of removal cross section.) The - // function's workings are pretty - // standard as far as assembling - // right hand sides go, and therefore - // does not require more comments - // except that we mention that the - // right hand side vector is set to - // zero at the beginning of the - // function -- something we are not - // going to do for the cross-group - // terms that simply add to the right - // hand side vector. + // @sect5{EnergyGroup::assemble_ingroup_rhs} + // + // As explained in the documentation + // of the EnergyGroup class, we + // split assembling the right hand + // side into two parts: the ingroup + // and the cross-group + // couplings. First, we need a + // function to assemble the right + // hand side of one specific group + // here, i.e. including an extraneous + // source (that we will set to zero + // for the eigenvalue problem) as + // well as the ingroup fission + // contributions. 
(In-group + // scattering has already been + // accounted for with the definition + // of removal cross section.) The + // function's workings are pretty + // standard as far as assembling + // right hand sides go, and therefore + // does not require more comments + // except that we mention that the + // right hand side vector is set to + // zero at the beginning of the + // function -- something we are not + // going to do for the cross-group + // terms that simply add to the right + // hand side vector. template void EnergyGroup::assemble_ingroup_rhs (const Function &extraneous_source) { @@ -1018,8 +1018,8 @@ namespace Step28 const unsigned int n_q_points = quadrature_formula.size(); FEValues fe_values (fe, quadrature_formula, - update_values | update_quadrature_points | - update_JxW_values); + update_values | update_quadrature_points | + update_JxW_values); Vector cell_rhs (dofs_per_cell); std::vector extraneous_source_values (n_q_points); @@ -1033,61 +1033,61 @@ namespace Step28 for (; cell!=endc; ++cell) { - cell_rhs = 0; + cell_rhs = 0; - fe_values.reinit (cell); + fe_values.reinit (cell); - const double fission_dist_XS - = material_data.get_fission_dist_XS (group, group, cell->material_id()); + const double fission_dist_XS + = material_data.get_fission_dist_XS (group, group, cell->material_id()); - extraneous_source.value_list (fe_values.get_quadrature_points(), - extraneous_source_values); + extraneous_source.value_list (fe_values.get_quadrature_points(), + extraneous_source_values); - fe_values.get_function_values (solution_old, solution_old_values); + fe_values.get_function_values (solution_old, solution_old_values); - cell->get_dof_indices (local_dof_indices); + cell->get_dof_indices (local_dof_indices); - for (unsigned int q_point=0; q_pointEnergyGroup::assemble_cross_group_rhs} - // - // The more interesting function for - // assembling the right hand side - // vector for the equation of a - // single energy group is the one - // that couples energy group $g$ and - // $g'$. As explained in the - // introduction, we first have to - // find the set of cells common to - // the meshes of the two energy - // groups. First we call - // get_finest_common_cells to - // obtain this list of pairs of - // common cells from both - // meshes. Both cells in a pair may - // not be active but at least one of - // them is. We then hand each of - // these cell pairs off to a function - // tha computes the right hand side - // terms recursively. - // - // Note that ingroup coupling is - // handled already before, so we exit - // the function early if $g=g'$. + // @sect5{EnergyGroup::assemble_cross_group_rhs} + // + // The more interesting function for + // assembling the right hand side + // vector for the equation of a + // single energy group is the one + // that couples energy group $g$ and + // $g'$. As explained in the + // introduction, we first have to + // find the set of cells common to + // the meshes of the two energy + // groups. First we call + // get_finest_common_cells to + // obtain this list of pairs of + // common cells from both + // meshes. Both cells in a pair may + // not be active but at least one of + // them is. We then hand each of + // these cell pairs off to a function + // tha computes the right hand side + // terms recursively. + // + // Note that ingroup coupling is + // handled already before, so we exit + // the function early if $g=g'$. 
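Taken together, the in-group and cross-group assembly functions are meant to be called once per group in every sweep of the (inverse) power iteration. The following sketch -- again not code from this patch, and assuming the energy_groups array and n_groups as introduced above -- shows how a driver could combine them; note that passing every group to assemble_cross_group_rhs is harmless because that function returns early when $g=g'$:

    for (unsigned int group=0; group<n_groups; ++group)
      {
        // for the eigenvalue problem, the extraneous source is zero:
        energy_groups[group]->assemble_ingroup_rhs (ZeroFunction<dim>());

        // add the couplings to all other energy groups ...
        for (unsigned int bgroup=0; bgroup<n_groups; ++bgroup)
          energy_groups[group]->assemble_cross_group_rhs (*energy_groups[bgroup]);

        // ... and then solve for the flux of this group:
        energy_groups[group]->solve ();
      }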
template void EnergyGroup::assemble_cross_group_rhs (const EnergyGroup &g_prime) { @@ -1098,7 +1098,7 @@ namespace Step28 typename DoFHandler::cell_iterator> > cell_list = GridTools::get_finest_common_cells (dof_handler, - g_prime.dof_handler); + g_prime.dof_handler); typename std::list::cell_iterator, typename DoFHandler::cell_iterator> > @@ -1107,236 +1107,236 @@ namespace Step28 for (; cell_iter!=cell_list.end(); ++cell_iter) { - FullMatrix unit_matrix (fe.dofs_per_cell); - for (unsigned int i=0; ifirst, - cell_iter->second, - unit_matrix); + FullMatrix unit_matrix (fe.dofs_per_cell); + for (unsigned int i=0; ifirst, + cell_iter->second, + unit_matrix); } } - // @sect5{EnergyGroup::assemble_cross_group_rhs_recursive} - // - // This is finally the function that - // handles assembling right hand side - // terms on potentially different - // meshes recursively, using the - // algorithm described in the - // introduction. The function takes a - // reference to the object - // representing energy group $g'$, as - // well as iterators to corresponding - // cells in the meshes for energy - // groups $g$ and $g'$. At first, - // i.e. when this function is called - // from the one above, these two - // cells will be matching cells on - // two meshes; however, one of the - // two may be further refined, and we - // will call the function recursively - // with one of the two iterators - // replaced by one of the children of - // the original cell. - // - // The last argument is the matrix - // product matrix $B_{c^{(k)}}^T - // \cdots B_{c'}^T B_c^T$ from the - // introduction that interpolates - // from the coarser of the two cells - // to the finer one. If the two cells - // match, then this is the identity - // matrix -- exactly what we pass to - // this function initially. - // - // The function has to consider two - // cases: that both of the two cells - // are not further refined, i.e. have - // no children, in which case we can - // finally assemble the right hand - // side contributions of this pair of - // cells; and that one of the two - // cells is further refined, in which - // case we have to keep recursing by - // looping over the children of the - // one cell that is not active. These - // two cases will be discussed below: + // @sect5{EnergyGroup::assemble_cross_group_rhs_recursive} + // + // This is finally the function that + // handles assembling right hand side + // terms on potentially different + // meshes recursively, using the + // algorithm described in the + // introduction. The function takes a + // reference to the object + // representing energy group $g'$, as + // well as iterators to corresponding + // cells in the meshes for energy + // groups $g$ and $g'$. At first, + // i.e. when this function is called + // from the one above, these two + // cells will be matching cells on + // two meshes; however, one of the + // two may be further refined, and we + // will call the function recursively + // with one of the two iterators + // replaced by one of the children of + // the original cell. + // + // The last argument is the matrix + // product matrix $B_{c^{(k)}}^T + // \cdots B_{c'}^T B_c^T$ from the + // introduction that interpolates + // from the coarser of the two cells + // to the finer one. If the two cells + // match, then this is the identity + // matrix -- exactly what we pass to + // this function initially. + // + // The function has to consider two + // cases: that both of the two cells + // are not further refined, i.e. 
have + // no children, in which case we can + // finally assemble the right hand + // side contributions of this pair of + // cells; and that one of the two + // cells is further refined, in which + // case we have to keep recursing by + // looping over the children of the + // one cell that is not active. These + // two cases will be discussed below: template void EnergyGroup:: assemble_cross_group_rhs_recursive (const EnergyGroup &g_prime, - const typename DoFHandler::cell_iterator &cell_g, - const typename DoFHandler::cell_iterator &cell_g_prime, - const FullMatrix prolongation_matrix) + const typename DoFHandler::cell_iterator &cell_g, + const typename DoFHandler::cell_iterator &cell_g_prime, + const FullMatrix prolongation_matrix) { - // The first case is that both - // cells are no further refined. In - // that case, we can assemble the - // relevant terms (see the - // introduction). This involves - // assembling the mass matrix on - // the finer of the two cells (in - // fact there are two mass matrices - // with different coefficients, one - // for the fission distribution - // cross section - // $\chi_g\nu\Sigma_{f,g'}$ and one - // for the scattering cross section - // $\Sigma_{s,g'\to g}$). This is - // straight forward, but note how - // we determine which of the two - // cells is ther finer one by - // looking at the refinement level - // of the two cells: + // The first case is that both + // cells are no further refined. In + // that case, we can assemble the + // relevant terms (see the + // introduction). This involves + // assembling the mass matrix on + // the finer of the two cells (in + // fact there are two mass matrices + // with different coefficients, one + // for the fission distribution + // cross section + // $\chi_g\nu\Sigma_{f,g'}$ and one + // for the scattering cross section + // $\Sigma_{s,g'\to g}$). This is + // straight forward, but note how + // we determine which of the two + // cells is ther finer one by + // looking at the refinement level + // of the two cells: if (!cell_g->has_children() && !cell_g_prime->has_children()) { - const QGauss quadrature_formula (fe.degree+1); - const unsigned int n_q_points = quadrature_formula.size(); - - FEValues fe_values (fe, quadrature_formula, - update_values | update_JxW_values); - - if (cell_g->level() > cell_g_prime->level()) - fe_values.reinit (cell_g); - else - fe_values.reinit (cell_g_prime); - - const double fission_dist_XS - = material_data.get_fission_dist_XS (group, g_prime.group, - cell_g_prime->material_id()); - - const double scattering_XS - = material_data.get_scattering_XS (g_prime.group, group, - cell_g_prime->material_id()); - - FullMatrix local_mass_matrix_f (fe.dofs_per_cell, - fe.dofs_per_cell); - FullMatrix local_mass_matrix_g (fe.dofs_per_cell, - fe.dofs_per_cell); - - for (unsigned int q_point=0; q_pointvmult - // function, or the product with the - // transpose matrix using Tvmult. - // After doing so, we transfer the - // result into the global right hand - // side vector of energy group $g$. 
- Vector g_prime_new_values (fe.dofs_per_cell); - Vector g_prime_old_values (fe.dofs_per_cell); - cell_g_prime->get_dof_values (g_prime.solution_old, g_prime_old_values); - cell_g_prime->get_dof_values (g_prime.solution, g_prime_new_values); - - Vector cell_rhs (fe.dofs_per_cell); - Vector tmp (fe.dofs_per_cell); - - if (cell_g->level() > cell_g_prime->level()) - { - prolongation_matrix.vmult (tmp, g_prime_old_values); - local_mass_matrix_f.vmult (cell_rhs, tmp); - - prolongation_matrix.vmult (tmp, g_prime_new_values); - local_mass_matrix_g.vmult_add (cell_rhs, tmp); - } - else - { - local_mass_matrix_f.vmult (tmp, g_prime_old_values); - prolongation_matrix.Tvmult (cell_rhs, tmp); - - local_mass_matrix_g.vmult (tmp, g_prime_new_values); - prolongation_matrix.Tvmult_add (cell_rhs, tmp); - } - - std::vector local_dof_indices (fe.dofs_per_cell); - cell_g->get_dof_indices (local_dof_indices); - - for (unsigned int i=0; i quadrature_formula (fe.degree+1); + const unsigned int n_q_points = quadrature_formula.size(); + + FEValues fe_values (fe, quadrature_formula, + update_values | update_JxW_values); + + if (cell_g->level() > cell_g_prime->level()) + fe_values.reinit (cell_g); + else + fe_values.reinit (cell_g_prime); + + const double fission_dist_XS + = material_data.get_fission_dist_XS (group, g_prime.group, + cell_g_prime->material_id()); + + const double scattering_XS + = material_data.get_scattering_XS (g_prime.group, group, + cell_g_prime->material_id()); + + FullMatrix local_mass_matrix_f (fe.dofs_per_cell, + fe.dofs_per_cell); + FullMatrix local_mass_matrix_g (fe.dofs_per_cell, + fe.dofs_per_cell); + + for (unsigned int q_point=0; q_pointvmult + // function, or the product with the + // transpose matrix using Tvmult. + // After doing so, we transfer the + // result into the global right hand + // side vector of energy group $g$. + Vector g_prime_new_values (fe.dofs_per_cell); + Vector g_prime_old_values (fe.dofs_per_cell); + cell_g_prime->get_dof_values (g_prime.solution_old, g_prime_old_values); + cell_g_prime->get_dof_values (g_prime.solution, g_prime_new_values); + + Vector cell_rhs (fe.dofs_per_cell); + Vector tmp (fe.dofs_per_cell); + + if (cell_g->level() > cell_g_prime->level()) + { + prolongation_matrix.vmult (tmp, g_prime_old_values); + local_mass_matrix_f.vmult (cell_rhs, tmp); + + prolongation_matrix.vmult (tmp, g_prime_new_values); + local_mass_matrix_g.vmult_add (cell_rhs, tmp); + } + else + { + local_mass_matrix_f.vmult (tmp, g_prime_old_values); + prolongation_matrix.Tvmult (cell_rhs, tmp); + + local_mass_matrix_g.vmult (tmp, g_prime_new_values); + prolongation_matrix.Tvmult_add (cell_rhs, tmp); + } + + std::vector local_dof_indices (fe.dofs_per_cell); + cell_g->get_dof_indices (local_dof_indices); + + for (unsigned int i=0; immult), and then hand the - // result off to this very same - // function again, but with the - // cell that has children replaced - // by one of its children: + // The alternative is that one of + // the two cells is further + // refined. 
In that case, we have + // to loop over all the children, + // multiply the existing + // interpolation (prolongation) + // product of matrices from the + // left with the interpolation from + // the present cell to its child + // (using the matrix-matrix + // multiplication function + // mmult), and then hand the + // result off to this very same + // function again, but with the + // cell that has children replaced + // by one of its children: else for (unsigned int child=0; child::max_children_per_cell;++child) - { - FullMatrix new_matrix (fe.dofs_per_cell, fe.dofs_per_cell); - fe.get_prolongation_matrix(child).mmult (new_matrix, - prolongation_matrix); - - if (cell_g->has_children()) - assemble_cross_group_rhs_recursive (g_prime, - cell_g->child(child), cell_g_prime, - new_matrix); - else - assemble_cross_group_rhs_recursive (g_prime, - cell_g, cell_g_prime->child(child), - new_matrix); - } + { + FullMatrix new_matrix (fe.dofs_per_cell, fe.dofs_per_cell); + fe.get_prolongation_matrix(child).mmult (new_matrix, + prolongation_matrix); + + if (cell_g->has_children()) + assemble_cross_group_rhs_recursive (g_prime, + cell_g->child(child), cell_g_prime, + new_matrix); + else + assemble_cross_group_rhs_recursive (g_prime, + cell_g, cell_g_prime->child(child), + new_matrix); + } } - // @sect5{EnergyGroup::get_fission_source} - // - // In the (inverse) power iteration, - // we use the integrated fission - // source to update the - // $k$-eigenvalue. Given its - // definition, the following function - // is essentially self-explanatory: + // @sect5{EnergyGroup::get_fission_source} + // + // In the (inverse) power iteration, + // we use the integrated fission + // source to update the + // $k$-eigenvalue. Given its + // definition, the following function + // is essentially self-explanatory: template double EnergyGroup::get_fission_source () const { @@ -1344,7 +1344,7 @@ namespace Step28 const unsigned int n_q_points = quadrature_formula.size(); FEValues fe_values (fe, quadrature_formula, - update_values | update_JxW_values); + update_values | update_JxW_values); std::vector solution_values (n_q_points); @@ -1355,46 +1355,46 @@ namespace Step28 endc = dof_handler.end(); for (; cell!=endc; ++cell) { - fe_values.reinit (cell); + fe_values.reinit (cell); - const double fission_XS - = material_data.get_fission_XS(group, cell->material_id()); + const double fission_XS + = material_data.get_fission_XS(group, cell->material_id()); - fe_values.get_function_values (solution, solution_values); + fe_values.get_function_values (solution, solution_values); - for (unsigned int q_point=0; q_pointEnergyGroup::solve} - // - // Next a function that solves the - // linear system assembled - // before. Things are pretty much - // standard, except that we delayed - // applying boundary values until we - // get here, since in all the - // previous functions we were still - // adding up contributions the right - // hand side vector. + // @sect5{EnergyGroup::solve} + // + // Next a function that solves the + // linear system assembled + // before. Things are pretty much + // standard, except that we delayed + // applying boundary values until we + // get here, since in all the + // previous functions we were still + // adding up contributions the right + // hand side vector. 
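Before turning to the solve() implementation below, here is a hedged sketch of how a driver would use get_fission_source() after each such sweep to update the eigenvalue estimate and rescale the previous solutions, as anticipated in the discussion of the public data members above (the energy_groups array and n_groups are assumed as before; the update in the actual driver class may differ in details):

    double k_eff = 0;
    for (unsigned int group=0; group<n_groups; ++group)
      k_eff += energy_groups[group]->get_fission_source ();

    // scale the old solution of each group by the present
    // eigenvalue guess, as described for the public data members:
    for (unsigned int group=0; group<n_groups; ++group)
      {
        energy_groups[group]->solution_old = energy_groups[group]->solution;
        energy_groups[group]->solution_old /= k_eff;
      }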
template void EnergyGroup::solve () { hanging_node_constraints.condense (system_rhs); MatrixTools::apply_boundary_values (boundary_values, - system_matrix, - solution, - system_rhs); + system_matrix, + solution, + system_rhs); SolverControl solver_control (system_matrix.m(), - 1e-12*system_rhs.l2_norm()); + 1e-12*system_rhs.l2_norm()); SolverCG<> cg (solver_control); PreconditionSSOR<> preconditioner; @@ -1407,56 +1407,56 @@ namespace Step28 - // @sect5{EnergyGroup::estimate_errors} - // - // Mesh refinement is split into two - // functions. The first estimates the - // error for each cell, normalizes it - // by the magnitude of the solution, - // and returns it in the vector given - // as an argument. The calling - // function collects all error - // indicators from all energy groups, - // and computes thresholds for - // refining and coarsening cells. + // @sect5{EnergyGroup::estimate_errors} + // + // Mesh refinement is split into two + // functions. The first estimates the + // error for each cell, normalizes it + // by the magnitude of the solution, + // and returns it in the vector given + // as an argument. The calling + // function collects all error + // indicators from all energy groups, + // and computes thresholds for + // refining and coarsening cells. template void EnergyGroup::estimate_errors (Vector &error_indicators) const { KellyErrorEstimator::estimate (dof_handler, - QGauss (fe.degree + 1), - typename FunctionMap::type(), - solution, - error_indicators); + QGauss (fe.degree + 1), + typename FunctionMap::type(), + solution, + error_indicators); error_indicators /= solution.linfty_norm(); } - // @sect5{EnergyGroup::refine_grid} - // - // The second part is to refine the - // grid given the error indicators - // compute in the previous function - // and error thresholds above which - // cells shall be refined or below - // which cells shall be - // coarsened. Note that we do not use - // any of the functions in - // GridRefinement here, - // but rather set refinement flags - // ourselves. - // - // After setting these flags, we use - // the SolutionTransfer class to move - // the solution vector from the old - // to the new mesh. The procedure - // used here is described in detail - // in the documentation of that - // class: + // @sect5{EnergyGroup::refine_grid} + // + // The second part is to refine the + // grid given the error indicators + // compute in the previous function + // and error thresholds above which + // cells shall be refined or below + // which cells shall be + // coarsened. Note that we do not use + // any of the functions in + // GridRefinement here, + // but rather set refinement flags + // ourselves. + // + // After setting these flags, we use + // the SolutionTransfer class to move + // the solution vector from the old + // to the new mesh. 
The procedure + // used here is described in detail + // in the documentation of that + // class: template void EnergyGroup::refine_grid (const Vector &error_indicators, - const double refine_threshold, - const double coarsen_threshold) + const double refine_threshold, + const double coarsen_threshold) { typename Triangulation::active_cell_iterator cell = triangulation.begin_active(), @@ -1464,9 +1464,9 @@ namespace Step28 for (unsigned int cell_index=0; cell!=endc; ++cell, ++cell_index) if (error_indicators(cell_index) > refine_threshold) - cell->set_refine_flag (); + cell->set_refine_flag (); else if (error_indicators(cell_index) < coarsen_threshold) - cell->set_coarsen_flag (); + cell->set_coarsen_flag (); SolutionTransfer soltrans(dof_handler); @@ -1484,33 +1484,33 @@ namespace Step28 } - // @sect5{EnergyGroup::output_results} - // - // The last function of this class - // outputs meshes and solutions after - // each mesh iteration. This has been - // shown many times before. The only - // thing worth pointing out is the - // use of the - // Utilities::int_to_string - // function to convert an integer - // into its string - // representation. The second - // argument of that function denotes - // how many digits we shall use -- if - // this value was larger than one, - // then the number would be padded by - // leading zeros. + // @sect5{EnergyGroup::output_results} + // + // The last function of this class + // outputs meshes and solutions after + // each mesh iteration. This has been + // shown many times before. The only + // thing worth pointing out is the + // use of the + // Utilities::int_to_string + // function to convert an integer + // into its string + // representation. The second + // argument of that function denotes + // how many digits we shall use -- if + // this value was larger than one, + // then the number would be padded by + // leading zeros. template void EnergyGroup::output_results (const unsigned int cycle) const { { const std::string filename = std::string("grid-") + - Utilities::int_to_string(group,1) + - "." + - Utilities::int_to_string(cycle,1) + - ".eps"; + Utilities::int_to_string(group,1) + + "." + + Utilities::int_to_string(cycle,1) + + ".eps"; std::ofstream output (filename.c_str()); GridOut grid_out; @@ -1519,10 +1519,10 @@ namespace Step28 { const std::string filename = std::string("solution-") + - Utilities::int_to_string(group,1) + - "." + - Utilities::int_to_string(cycle,1) + - ".gmv"; + Utilities::int_to_string(group,1) + + "." + + Utilities::int_to_string(cycle,1) + + ".gmv"; DataOut data_out; @@ -1537,76 +1537,76 @@ namespace Step28 - // @sect3{The NeutronDiffusionProblem class template} - - // This is the main class of the - // program, not because it implements - // all the functionality (in fact, - // most of it is implemented in the - // EnergyGroup class) - // but because it contains the - // driving algorithm that determines - // what to compute and when. It is - // mostly as shown in many of the - // other tutorial programs in that it - // has a public run - // function and private functions - // doing all the rest. In several - // places, we have to do something - // for all energy groups, in which - // case we will start threads for - // each group to let these things run - // in parallel if deal.II was - // configured for multithreading. - // For strategies of parallelization, - // take a look at the @ref threads module. 
- // - // The biggest difference to previous - // example programs is that we also - // declare a nested class that has - // member variables for all the - // run-time parameters that can be - // passed to the program in an input - // file. Right now, these are the - // number of energy groups, the - // number of refinement cycles, the - // polynomial degree of the finite - // element to be used, and the - // tolerance used to determine when - // convergence of the inverse power - // iteration has occurred. In - // addition, we have a constructor of - // this class that sets all these - // values to their default values, a - // function - // declare_parameters - // that described to the - // ParameterHandler class already - // used in step-19 - // what parameters are accepted in - // the input file, and a function - // get_parameters that - // can extract the values of these - // parameters from a ParameterHandler - // object. + // @sect3{The NeutronDiffusionProblem class template} + + // This is the main class of the + // program, not because it implements + // all the functionality (in fact, + // most of it is implemented in the + // EnergyGroup class) + // but because it contains the + // driving algorithm that determines + // what to compute and when. It is + // mostly as shown in many of the + // other tutorial programs in that it + // has a public run + // function and private functions + // doing all the rest. In several + // places, we have to do something + // for all energy groups, in which + // case we will start threads for + // each group to let these things run + // in parallel if deal.II was + // configured for multithreading. + // For strategies of parallelization, + // take a look at the @ref threads module. + // + // The biggest difference to previous + // example programs is that we also + // declare a nested class that has + // member variables for all the + // run-time parameters that can be + // passed to the program in an input + // file. Right now, these are the + // number of energy groups, the + // number of refinement cycles, the + // polynomial degree of the finite + // element to be used, and the + // tolerance used to determine when + // convergence of the inverse power + // iteration has occurred. In + // addition, we have a constructor of + // this class that sets all these + // values to their default values, a + // function + // declare_parameters + // that described to the + // ParameterHandler class already + // used in step-19 + // what parameters are accepted in + // the input file, and a function + // get_parameters that + // can extract the values of these + // parameters from a ParameterHandler + // object. 
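As a quick preview of how this class and its nested Parameters class are wired together, a main() function would typically use them roughly as follows. This is only a sketch under assumptions: the input file name project.prm is made up, the usual program headers plus <fstream> are assumed to be included, and the exact reading and error handling in the actual main() may differ:

    ParameterHandler parameter_handler;

    NeutronDiffusionProblem<2>::Parameters parameters;
    parameters.declare_parameters (parameter_handler);

    // read the run-time parameters from a (hypothetical) input file
    // and copy their values into the parameters object:
    std::ifstream input_file ("project.prm");
    parameter_handler.read_input (input_file);
    parameters.get_parameters (parameter_handler);

    NeutronDiffusionProblem<2> neutron_diffusion_problem (parameters);
    neutron_diffusion_problem.run ();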
template class NeutronDiffusionProblem { public: class Parameters { - public: - Parameters (); + public: + Parameters (); - static void declare_parameters (ParameterHandler &prm); - void get_parameters (ParameterHandler &prm); + static void declare_parameters (ParameterHandler &prm); + void get_parameters (ParameterHandler &prm); - unsigned int n_groups; - unsigned int n_refinement_cycles; + unsigned int n_groups; + unsigned int n_refinement_cycles; - unsigned int fe_degree; + unsigned int fe_degree; - double convergence_tolerance; + double convergence_tolerance; }; @@ -1617,18 +1617,18 @@ namespace Step28 void run (); private: - // @sect5{Private member functions} - - // There are not that many member - // functions in this class since - // most of the functionality has - // been moved into the - // EnergyGroup class - // and is simply called from the - // run() member - // function of this class. The - // ones that remain have - // self-explanatory names: + // @sect5{Private member functions} + + // There are not that many member + // functions in this class since + // most of the functionality has + // been moved into the + // EnergyGroup class + // and is simply called from the + // run() member + // function of this class. The + // ones that remain have + // self-explanatory names: void initialize_problem(); void refine_grid (); @@ -1636,72 +1636,72 @@ namespace Step28 double get_total_fission_source () const; - // @sect5{Private member variables} - - // Next, we have a few member - // variables. In particular, - // these are (i) a reference to - // the parameter object (owned by - // the main function of this - // program, and passed to the - // constructor of this class), - // (ii) an object describing the - // material parameters for the - // number of energy groups - // requested in the input file, - // and (iii) the finite element - // to be used by all energy - // groups: + // @sect5{Private member variables} + + // Next, we have a few member + // variables. In particular, + // these are (i) a reference to + // the parameter object (owned by + // the main function of this + // program, and passed to the + // constructor of this class), + // (ii) an object describing the + // material parameters for the + // number of energy groups + // requested in the input file, + // and (iii) the finite element + // to be used by all energy + // groups: const Parameters ¶meters; const MaterialData material_data; FE_Q fe; - // Furthermore, we have (iv) the - // value of the computed - // eigenvalue at the present - // iteration. This is, in fact, - // the only part of the solution - // that is shared between all - // energy groups -- all other - // parts of the solution, such as - // neutron fluxes are particular - // to one or the other energy - // group, and are therefore - // stored in objects that - // describe a single energy - // group: + // Furthermore, we have (iv) the + // value of the computed + // eigenvalue at the present + // iteration. This is, in fact, + // the only part of the solution + // that is shared between all + // energy groups -- all other + // parts of the solution, such as + // neutron fluxes are particular + // to one or the other energy + // group, and are therefore + // stored in objects that + // describe a single energy + // group: double k_eff; - // Finally, (v), we have an array - // of pointers to the energy - // group objects. The length of - // this array is, of course, - // equal to the number of energy - // groups specified in the - // parameter file. 
+ // Finally, (v), we have an array + // of pointers to the energy + // group objects. The length of + // this array is, of course, + // equal to the number of energy + // groups specified in the + // parameter file. std::vector*> energy_groups; }; - // @sect4{Implementation of the NeutronDiffusionProblem::Parameters class} + // @sect4{Implementation of the NeutronDiffusionProblem::Parameters class} - // Before going on to the - // implementation of the outer class, - // we have to implement the functions - // of the parameters structure. This - // is pretty straightforward and, in - // fact, looks pretty much the same - // for all such parameters classes - // using the ParameterHandler - // capabilities. We will therefore - // not comment further on this: + // Before going on to the + // implementation of the outer class, + // we have to implement the functions + // of the parameters structure. This + // is pretty straightforward and, in + // fact, looks pretty much the same + // for all such parameters classes + // using the ParameterHandler + // capabilities. We will therefore + // not comment further on this: template NeutronDiffusionProblem::Parameters::Parameters () - : - n_groups (2), - n_refinement_cycles (5), - fe_degree (2), - convergence_tolerance (1e-12) + : + n_groups (2), + n_refinement_cycles (5), + fe_degree (2), + convergence_tolerance (1e-12) {} @@ -1712,18 +1712,18 @@ namespace Step28 declare_parameters (ParameterHandler &prm) { prm.declare_entry ("Number of energy groups", "2", - Patterns::Integer (), - "The number of energy different groups considered"); + Patterns::Integer (), + "The number of energy different groups considered"); prm.declare_entry ("Refinement cycles", "5", - Patterns::Integer (), - "Number of refinement cycles to be performed"); + Patterns::Integer (), + "Number of refinement cycles to be performed"); prm.declare_entry ("Finite element degree", "2", - Patterns::Integer (), - "Polynomial degree of the finite element to be used"); + Patterns::Integer (), + "Polynomial degree of the finite element to be used"); prm.declare_entry ("Power iteration tolerance", "1e-12", - Patterns::Double (), - "Inner power iterations are stopped when the change in k_eff falls " - "below this tolerance"); + Patterns::Double (), + "Inner power iterations are stopped when the change in k_eff falls " + "below this tolerance"); } @@ -1742,20 +1742,20 @@ namespace Step28 - // @sect4{Implementation of the NeutronDiffusionProblem class} + // @sect4{Implementation of the NeutronDiffusionProblem class} - // Now for the - // NeutronDiffusionProblem - // class. The constructor and - // destructor have nothing of much - // interest: + // Now for the + // NeutronDiffusionProblem + // class. The constructor and + // destructor have nothing of much + // interest: template NeutronDiffusionProblem:: NeutronDiffusionProblem (const Parameters ¶meters) - : - parameters (parameters), - material_data (parameters.n_groups), - fe (parameters.fe_degree) + : + parameters (parameters), + material_data (parameters.n_groups), + fe (parameters.fe_degree) {} @@ -1769,54 +1769,54 @@ namespace Step28 energy_groups.resize (0); } - // @sect5{NeutronDiffusionProblem::initialize_problem} - // - // The first function of interest is - // the one that sets up the geometry - // of the reactor core. This is - // described in more detail in the - // introduction. 
- // - // The first part of the function - // defines geometry data, and then - // creates a coarse mesh that has as - // many cells as there are fuel rods - // (or pin cells, for that matter) in - // that part of the reactor core that - // we simulate. As mentioned when - // interpolating boundary values - // above, the last parameter to the - // GridGenerator::subdivided_hyper_rectangle - // function specifies that sides of - // the domain shall have unique - // boundary indicators that will - // later allow us to determine in a - // simple way which of the boundaries - // have Neumann and which have - // Dirichlet conditions attached to - // them. + // @sect5{NeutronDiffusionProblem::initialize_problem} + // + // The first function of interest is + // the one that sets up the geometry + // of the reactor core. This is + // described in more detail in the + // introduction. + // + // The first part of the function + // defines geometry data, and then + // creates a coarse mesh that has as + // many cells as there are fuel rods + // (or pin cells, for that matter) in + // that part of the reactor core that + // we simulate. As mentioned when + // interpolating boundary values + // above, the last parameter to the + // GridGenerator::subdivided_hyper_rectangle + // function specifies that sides of + // the domain shall have unique + // boundary indicators that will + // later allow us to determine in a + // simple way which of the boundaries + // have Neumann and which have + // Dirichlet conditions attached to + // them. template void NeutronDiffusionProblem::initialize_problem() { const unsigned int rods_per_assembly_x = 17, - rods_per_assembly_y = 17; + rods_per_assembly_y = 17; const double pin_pitch_x = 1.26, - pin_pitch_y = 1.26; + pin_pitch_y = 1.26; const double assembly_height = 200; const unsigned int assemblies_x = 2, - assemblies_y = 2, - assemblies_z = 1; + assemblies_y = 2, + assemblies_z = 1; const Point bottom_left = Point(); const Point upper_right = (dim == 2 - ? - Point (assemblies_x*rods_per_assembly_x*pin_pitch_x, - assemblies_y*rods_per_assembly_y*pin_pitch_y) - : - Point (assemblies_x*rods_per_assembly_x*pin_pitch_x, - assemblies_y*rods_per_assembly_y*pin_pitch_y, - assemblies_z*assembly_height)); + ? + Point (assemblies_x*rods_per_assembly_x*pin_pitch_x, + assemblies_y*rods_per_assembly_y*pin_pitch_y) + : + Point (assemblies_x*rods_per_assembly_x*pin_pitch_x, + assemblies_y*rods_per_assembly_y*pin_pitch_y, + assemblies_z*assembly_height)); std::vector n_subdivisions; n_subdivisions.push_back (assemblies_x*rods_per_assembly_x); @@ -1827,244 +1827,244 @@ namespace Step28 Triangulation coarse_grid; GridGenerator::subdivided_hyper_rectangle (coarse_grid, - n_subdivisions, - bottom_left, - upper_right, - true); - - - // The second part of the function - // deals with material numbers of - // pin cells of each type of - // assembly. Here, we define four - // different types of assembly, for - // which we describe the - // arrangement of fuel rods in the - // following tables. - // - // The assemblies described here - // are taken from the benchmark - // mentioned in the introduction - // and are (in this order): - //
    - //
  1. 'UX' Assembly: UO2 fuel assembly - // with 24 guide tubes and a central - // Moveable Fission Chamber - //
  2. 'UA' Assembly: UO2 fuel assembly - // with 24 AIC and a central - // Moveable Fission Chamber - //
  3. 'PX' Assembly: MOX fuel assembly - // with 24 guide tubes and a central - // Moveable Fission Chamber - //
  4. 'R' Assembly: a reflector. - //
- // - // Note that the numbers listed - // here and taken from the - // benchmark description are, in - // good old Fortran fashion, - // one-based. We will later - // subtract one from each number - // when assigning materials to - // individual cells to convert - // things into the C-style - // zero-based indexing. + n_subdivisions, + bottom_left, + upper_right, + true); + + + // The second part of the function + // deals with material numbers of + // pin cells of each type of + // assembly. Here, we define four + // different types of assembly, for + // which we describe the + // arrangement of fuel rods in the + // following tables. + // + // The assemblies described here + // are taken from the benchmark + // mentioned in the introduction + // and are (in this order): + //
+      //   1. 'UX' Assembly: UO2 fuel assembly
+      //      with 24 guide tubes and a central
+      //      Moveable Fission Chamber
+      //   2. 'UA' Assembly: UO2 fuel assembly
+      //      with 24 AIC and a central
+      //      Moveable Fission Chamber
+      //   3. 'PX' Assembly: MOX fuel assembly
+      //      with 24 guide tubes and a central
+      //      Moveable Fission Chamber
+      //   4. 'R' Assembly: a reflector.
+ // + // Note that the numbers listed + // here and taken from the + // benchmark description are, in + // good old Fortran fashion, + // one-based. We will later + // subtract one from each number + // when assigning materials to + // individual cells to convert + // things into the C-style + // zero-based indexing. const unsigned int n_assemblies=4; const unsigned int assembly_materials[n_assemblies][rods_per_assembly_x][rods_per_assembly_y] = { - { - { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, - { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, - { 1, 1, 1, 1, 1, 5, 1, 1, 5, 1, 1, 5, 1, 1, 1, 1, 1 }, - { 1, 1, 1, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 1, 1 }, - { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, - { 1, 1, 5, 1, 1, 5, 1, 1, 5, 1, 1, 5, 1, 1, 5, 1, 1 }, - { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, - { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, - { 1, 1, 5, 1, 1, 5, 1, 1, 7, 1, 1, 5, 1, 1, 5, 1, 1 }, - { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, - { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, - { 1, 1, 5, 1, 1, 5, 1, 1, 5, 1, 1, 5, 1, 1, 5, 1, 1 }, - { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, - { 1, 1, 1, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 1, 1 }, - { 1, 1, 1, 1, 1, 5, 1, 1, 5, 1, 1, 5, 1, 1, 1, 1, 1 }, - { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, - { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 } - }, - { - { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, - { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, - { 1, 1, 1, 1, 1, 8, 1, 1, 8, 1, 1, 8, 1, 1, 1, 1, 1 }, - { 1, 1, 1, 8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 8, 1, 1, 1 }, - { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, - { 1, 1, 8, 1, 1, 8, 1, 1, 8, 1, 1, 8, 1, 1, 8, 1, 1 }, - { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, - { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, - { 1, 1, 8, 1, 1, 8, 1, 1, 7, 1, 1, 8, 1, 1, 8, 1, 1 }, - { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, - { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, - { 1, 1, 8, 1, 1, 8, 1, 1, 8, 1, 1, 8, 1, 1, 8, 1, 1 }, - { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, - { 1, 1, 1, 8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 8, 1, 1, 1 }, - { 1, 1, 1, 1, 1, 8, 1, 1, 8, 1, 1, 8, 1, 1, 1, 1, 1 }, - { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, - { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 } - }, - { - { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 }, - { 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2 }, - { 2, 3, 3, 3, 3, 5, 3, 3, 5, 3, 3, 5, 3, 3, 3, 3, 2 }, - { 2, 3, 3, 5, 3, 4, 4, 4, 4, 4, 4, 4, 3, 5, 3, 3, 2 }, - { 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2 }, - { 2, 3, 5, 4, 4, 5, 4, 4, 5, 4, 4, 5, 4, 4, 5, 3, 2 }, - { 2, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 2 }, - { 2, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 2 }, - { 2, 3, 5, 4, 4, 5, 4, 4, 7, 4, 4, 5, 4, 4, 5, 3, 2 }, - { 2, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 2 }, - { 2, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 2 }, - { 2, 3, 5, 4, 4, 5, 4, 4, 5, 4, 4, 5, 4, 4, 5, 3, 2 }, - { 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2 }, - { 2, 3, 3, 5, 3, 4, 4, 4, 4, 4, 4, 4, 3, 5, 3, 3, 2 }, - { 2, 3, 3, 3, 3, 5, 3, 3, 5, 3, 3, 5, 3, 3, 3, 3, 2 }, - { 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2 }, - { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 } - }, - { - { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, - { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, - { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 
6 }, - { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, - { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, - { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, - { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, - { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, - { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, - { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, - { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, - { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, - { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, - { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, - { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, - { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, - { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 } - } + { + { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, + { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, + { 1, 1, 1, 1, 1, 5, 1, 1, 5, 1, 1, 5, 1, 1, 1, 1, 1 }, + { 1, 1, 1, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 1, 1 }, + { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, + { 1, 1, 5, 1, 1, 5, 1, 1, 5, 1, 1, 5, 1, 1, 5, 1, 1 }, + { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, + { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, + { 1, 1, 5, 1, 1, 5, 1, 1, 7, 1, 1, 5, 1, 1, 5, 1, 1 }, + { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, + { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, + { 1, 1, 5, 1, 1, 5, 1, 1, 5, 1, 1, 5, 1, 1, 5, 1, 1 }, + { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, + { 1, 1, 1, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 1, 1 }, + { 1, 1, 1, 1, 1, 5, 1, 1, 5, 1, 1, 5, 1, 1, 1, 1, 1 }, + { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, + { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 } + }, + { + { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, + { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, + { 1, 1, 1, 1, 1, 8, 1, 1, 8, 1, 1, 8, 1, 1, 1, 1, 1 }, + { 1, 1, 1, 8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 8, 1, 1, 1 }, + { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, + { 1, 1, 8, 1, 1, 8, 1, 1, 8, 1, 1, 8, 1, 1, 8, 1, 1 }, + { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, + { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, + { 1, 1, 8, 1, 1, 8, 1, 1, 7, 1, 1, 8, 1, 1, 8, 1, 1 }, + { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, + { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, + { 1, 1, 8, 1, 1, 8, 1, 1, 8, 1, 1, 8, 1, 1, 8, 1, 1 }, + { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, + { 1, 1, 1, 8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 8, 1, 1, 1 }, + { 1, 1, 1, 1, 1, 8, 1, 1, 8, 1, 1, 8, 1, 1, 1, 1, 1 }, + { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, + { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 } + }, + { + { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 }, + { 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2 }, + { 2, 3, 3, 3, 3, 5, 3, 3, 5, 3, 3, 5, 3, 3, 3, 3, 2 }, + { 2, 3, 3, 5, 3, 4, 4, 4, 4, 4, 4, 4, 3, 5, 3, 3, 2 }, + { 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2 }, + { 2, 3, 5, 4, 4, 5, 4, 4, 5, 4, 4, 5, 4, 4, 5, 3, 2 }, + { 2, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 2 }, + { 2, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 2 }, + { 2, 3, 5, 4, 4, 5, 4, 4, 7, 4, 4, 5, 4, 4, 5, 3, 2 }, + { 2, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 2 }, + { 2, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 2 }, + { 2, 3, 5, 4, 4, 5, 4, 4, 5, 4, 4, 5, 4, 4, 5, 3, 2 }, + { 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2 }, + { 2, 3, 3, 5, 3, 4, 4, 4, 4, 4, 4, 4, 3, 5, 3, 
3, 2 }, + { 2, 3, 3, 3, 3, 5, 3, 3, 5, 3, 3, 5, 3, 3, 3, 3, 2 }, + { 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2 }, + { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 } + }, + { + { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, + { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, + { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, + { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, + { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, + { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, + { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, + { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, + { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, + { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, + { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, + { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, + { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, + { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, + { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, + { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, + { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 } + } }; - // After the description of the - // materials that make up an - // assembly, we have to specify the - // arrangement of assemblies within - // the core. We use a symmetric - // pattern that in fact only uses - // the 'UX' and 'PX' assemblies: + // After the description of the + // materials that make up an + // assembly, we have to specify the + // arrangement of assemblies within + // the core. We use a symmetric + // pattern that in fact only uses + // the 'UX' and 'PX' assemblies: const unsigned int core[assemblies_x][assemblies_y][assemblies_z] = {{{0}, {2}}, {{2}, {0}}}; - // We are now in a position to - // actually set material IDs for - // each cell. To this end, we loop - // over all cells, look at the - // location of the cell's center, - // and determine which assembly and - // fuel rod this would be in. (We - // add a few checks to see that the - // locations we compute are within - // the bounds of the arrays in - // which we have to look up - // materials.) At the end of the - // loop, we set material - // identifiers accordingly: + // We are now in a position to + // actually set material IDs for + // each cell. To this end, we loop + // over all cells, look at the + // location of the cell's center, + // and determine which assembly and + // fuel rod this would be in. (We + // add a few checks to see that the + // locations we compute are within + // the bounds of the arrays in + // which we have to look up + // materials.) 
At the end of the + // loop, we set material + // identifiers accordingly: for (typename Triangulation::active_cell_iterator - cell = coarse_grid.begin_active(); - cell!=coarse_grid.end(); - ++cell) + cell = coarse_grid.begin_active(); + cell!=coarse_grid.end(); + ++cell) { - const Point cell_center = cell->center(); + const Point cell_center = cell->center(); - const unsigned int tmp_x = int(cell_center[0]/pin_pitch_x); - const unsigned int ax = tmp_x/rods_per_assembly_x; - const unsigned int cx = tmp_x - ax * rods_per_assembly_x; + const unsigned int tmp_x = int(cell_center[0]/pin_pitch_x); + const unsigned int ax = tmp_x/rods_per_assembly_x; + const unsigned int cx = tmp_x - ax * rods_per_assembly_x; - const unsigned tmp_y = int(cell_center[1]/pin_pitch_y); - const unsigned int ay = tmp_y/rods_per_assembly_y; - const unsigned int cy = tmp_y - ay * rods_per_assembly_y; + const unsigned tmp_y = int(cell_center[1]/pin_pitch_y); + const unsigned int ay = tmp_y/rods_per_assembly_y; + const unsigned int cy = tmp_y - ay * rods_per_assembly_y; - const unsigned int az = (dim == 2 - ? - 0 - : - int (cell_center[dim-1]/assembly_height)); + const unsigned int az = (dim == 2 + ? + 0 + : + int (cell_center[dim-1]/assembly_height)); - Assert (ax < assemblies_x, ExcInternalError()); - Assert (ay < assemblies_y, ExcInternalError()); - Assert (az < assemblies_z, ExcInternalError()); + Assert (ax < assemblies_x, ExcInternalError()); + Assert (ay < assemblies_y, ExcInternalError()); + Assert (az < assemblies_z, ExcInternalError()); - Assert (core[ax][ay][az] < n_assemblies, ExcInternalError()); + Assert (core[ax][ay][az] < n_assemblies, ExcInternalError()); - Assert (cx < rods_per_assembly_x, ExcInternalError()); - Assert (cy < rods_per_assembly_y, ExcInternalError()); + Assert (cx < rods_per_assembly_x, ExcInternalError()); + Assert (cy < rods_per_assembly_y, ExcInternalError()); - cell->set_material_id(assembly_materials[core[ax][ay][az]][cx][cy] - 1); + cell->set_material_id(assembly_materials[core[ax][ay][az]][cx][cy] - 1); } - // With the coarse mesh so - // initialized, we create the - // appropriate number of energy - // group objects and let them - // initialize their individual - // meshes with the coarse mesh - // generated above: + // With the coarse mesh so + // initialized, we create the + // appropriate number of energy + // group objects and let them + // initialize their individual + // meshes with the coarse mesh + // generated above: energy_groups.resize (parameters.n_groups); for (unsigned int group=0; group (group, material_data, - coarse_grid, fe); + coarse_grid, fe); } - // @sect5{NeutronDiffusionProblem::get_total_fission_source} - // - // In the eigenvalue computation, we - // need to calculate total fission - // neutron source after each power - // iteration. The total power then is - // used to renew k-effective. - // - // Since the total fission source is a sum - // over all the energy groups, and since each - // of these sums can be computed - // independently, we actually do this in - // parallel. One of the problems is that the - // function in the EnergyGroup - // class that computes the fission source - // returns a value. If we now simply spin off - // a new thread, we have to later capture the - // return value of the function run on that - // thread. The way this can be done is to use - // the return value of the - // Threads::new_thread function, which - // returns an object of type - // Threads::Thread@ if the function - // spawned returns a double. 
We can then later - // ask this object for the returned value - // (when doing so, the - // Threads::Thread::return_value - // function first waits for the thread to - // finish if it hasn't done so already). - // - // The way this function then works - // is to first spawn one thread for - // each energy group we work with, - // then one-by-one collecting the - // returned values of each thread and - // return the sum. + // @sect5{NeutronDiffusionProblem::get_total_fission_source} + // + // In the eigenvalue computation, we + // need to calculate total fission + // neutron source after each power + // iteration. The total power then is + // used to renew k-effective. + // + // Since the total fission source is a sum + // over all the energy groups, and since each + // of these sums can be computed + // independently, we actually do this in + // parallel. One of the problems is that the + // function in the EnergyGroup + // class that computes the fission source + // returns a value. If we now simply spin off + // a new thread, we have to later capture the + // return value of the function run on that + // thread. The way this can be done is to use + // the return value of the + // Threads::new_thread function, which + // returns an object of type + // Threads::Thread@ if the function + // spawned returns a double. We can then later + // ask this object for the returned value + // (when doing so, the + // Threads::Thread::return_value + // function first waits for the thread to + // finish if it hasn't done so already). + // + // The way this function then works + // is to first spawn one thread for + // each energy group we work with, + // then one-by-one collecting the + // returned values of each thread and + // return the sum. template double NeutronDiffusionProblem::get_total_fission_source () const { std::vector > threads; for (unsigned int group=0; group::get_fission_source, - *energy_groups[group])); + *energy_groups[group])); double fission_source = 0; for (unsigned int group=0; groupNeutronDiffusionProblem::refine_grid} - // - // The next function lets the - // individual energy group objects - // refine their meshes. Much of this, - // again, is a task that can be done - // independently in parallel: first, - // let all the energy group objects - // calculate their error indicators - // in parallel, then compute the - // maximum error indicator over all - // energy groups and determine - // thresholds for refinement and - // coarsening of cells, and then ask - // all the energy groups to refine - // their meshes accordingly, again in - // parallel. + // @sect5{NeutronDiffusionProblem::refine_grid} + // + // The next function lets the + // individual energy group objects + // refine their meshes. Much of this, + // again, is a task that can be done + // independently in parallel: first, + // let all the energy group objects + // calculate their error indicators + // in parallel, then compute the + // maximum error indicator over all + // energy groups and determine + // thresholds for refinement and + // coarsening of cells, and then ask + // all the energy groups to refine + // their meshes accordingly, again in + // parallel. 
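For readability, here is the fan-out/join idiom that the preceding comment describes, condensed from the refine_grid() loop that follows and shown without the diff markers (an editorial sketch, not part of the patch itself):

    Threads::ThreadGroup<> threads;
    for (unsigned int group=0; group<parameters.n_groups; ++group)
      threads += Threads::new_thread (&EnergyGroup<dim>::estimate_errors,
                                      *energy_groups[group],
                                      group_error_indicators.block(group));
    threads.join_all ();   // block until every group has filled its error block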
template void NeutronDiffusionProblem::refine_grid () { @@ -2105,9 +2105,9 @@ namespace Step28 { Threads::ThreadGroup<> threads; for (unsigned int group=0; group::estimate_errors, - *energy_groups[group], - group_error_indicators.block(group)); + threads += Threads::new_thread (&EnergyGroup::estimate_errors, + *energy_groups[group], + group_error_indicators.block(group)); threads.join_all (); } @@ -2118,28 +2118,28 @@ namespace Step28 { Threads::ThreadGroup<> threads; for (unsigned int group=0; group::refine_grid, - *energy_groups[group], - group_error_indicators.block(group), - refine_threshold, - coarsen_threshold); + threads += Threads::new_thread (&EnergyGroup::refine_grid, + *energy_groups[group], + group_error_indicators.block(group), + refine_threshold, + coarsen_threshold); threads.join_all (); } } - // @sect5{NeutronDiffusionProblem::run} - // - // Finally, this is the function - // where the meat is: iterate on a - // sequence of meshes, and on each of - // them do a power iteration to - // compute the eigenvalue. - // - // Given the description of the - // algorithm in the introduction, - // there is actually not much to - // comment on: + // @sect5{NeutronDiffusionProblem::run} + // + // Finally, this is the function + // where the meat is: iterate on a + // sequence of meshes, and on each of + // them do a power iteration to + // compute the eigenvalue. + // + // Given the description of the + // algorithm in the introduction, + // there is actually not much to + // comment on: template void NeutronDiffusionProblem::run () { @@ -2152,121 +2152,121 @@ namespace Step28 for (unsigned int cycle=0; cyclesolution *= k_eff; - } - - for (unsigned int group=0; groupsetup_linear_system (); - - std::cout << " Numbers of active cells: "; - for (unsigned int group=0; groupn_active_cells() - << ' '; - std::cout << std::endl; - std::cout << " Numbers of degrees of freedom: "; - for (unsigned int group=0; groupn_dofs() - << ' '; - std::cout << std::endl << std::endl; - - - Threads::ThreadGroup<> threads; - for (unsigned int group=0; group::assemble_system_matrix, - *energy_groups[group]); - threads.join_all (); - - double error; - unsigned int iteration = 1; - do - { - for (unsigned int group=0; groupassemble_ingroup_rhs (ZeroFunction()); - - for (unsigned int bgroup=0; bgroupassemble_cross_group_rhs (*energy_groups[bgroup]); - - energy_groups[group]->solve (); - } - - k_eff = get_total_fission_source(); - error = fabs(k_eff-k_eff_old)/fabs(k_eff); - std::cout << " Iteration " << iteration - << ": k_eff=" << k_eff - << std::endl; - k_eff_old=k_eff; - - for (unsigned int group=0; groupsolution_old = energy_groups[group]->solution; - energy_groups[group]->solution_old /= k_eff; - } - - ++iteration; - } - while((error > parameters.convergence_tolerance) - && - (iteration < 500)); - - for (unsigned int group=0; groupoutput_results (cycle); - - std::cout << std::endl; - std::cout << " Cycle=" << cycle - << ", n_dofs=" << energy_groups[0]->n_dofs() + energy_groups[1]->n_dofs() - << ", k_eff=" << k_eff - << ", time=" << timer() - << std::endl; - - - std::cout << std::endl << std::endl; + std::cout << "Cycle " << cycle << ':' << std::endl; + + if (cycle == 0) + initialize_problem(); + else + { + refine_grid (); + for (unsigned int group=0; groupsolution *= k_eff; + } + + for (unsigned int group=0; groupsetup_linear_system (); + + std::cout << " Numbers of active cells: "; + for (unsigned int group=0; groupn_active_cells() + << ' '; + std::cout << std::endl; + std::cout << " Numbers of degrees of 
freedom: "; + for (unsigned int group=0; groupn_dofs() + << ' '; + std::cout << std::endl << std::endl; + + + Threads::ThreadGroup<> threads; + for (unsigned int group=0; group::assemble_system_matrix, + *energy_groups[group]); + threads.join_all (); + + double error; + unsigned int iteration = 1; + do + { + for (unsigned int group=0; groupassemble_ingroup_rhs (ZeroFunction()); + + for (unsigned int bgroup=0; bgroupassemble_cross_group_rhs (*energy_groups[bgroup]); + + energy_groups[group]->solve (); + } + + k_eff = get_total_fission_source(); + error = fabs(k_eff-k_eff_old)/fabs(k_eff); + std::cout << " Iteration " << iteration + << ": k_eff=" << k_eff + << std::endl; + k_eff_old=k_eff; + + for (unsigned int group=0; groupsolution_old = energy_groups[group]->solution; + energy_groups[group]->solution_old /= k_eff; + } + + ++iteration; + } + while((error > parameters.convergence_tolerance) + && + (iteration < 500)); + + for (unsigned int group=0; groupoutput_results (cycle); + + std::cout << std::endl; + std::cout << " Cycle=" << cycle + << ", n_dofs=" << energy_groups[0]->n_dofs() + energy_groups[1]->n_dofs() + << ", k_eff=" << k_eff + << ", time=" << timer() + << std::endl; + + + std::cout << std::endl << std::endl; } } } - // @sect3{The main() function} + // @sect3{The main() function} // - // The last thing in the program in - // the main() - // function. The structure is as in - // most other tutorial programs, with - // the only exception that we here - // handle a parameter file. To this - // end, we first look at the command - // line arguments passed to this - // function: if no input file is - // specified on the command line, - // then use "project.prm", otherwise - // take the filename given as the - // first argument on the command - // line. - // - // With this, we create a - // ParameterHandler object, let the - // NeutronDiffusionProblem::Parameters - // class declare all the parameters - // it wants to see in the input file - // (or, take the default values, if - // nothing is listed in the parameter - // file), then read the input file, - // ask the parameters object to - // extract the values, and finally - // hand everything off to an object - // of type - // NeutronDiffusionProblem - // for computation of the eigenvalue: + // The last thing in the program in + // the main() + // function. The structure is as in + // most other tutorial programs, with + // the only exception that we here + // handle a parameter file. To this + // end, we first look at the command + // line arguments passed to this + // function: if no input file is + // specified on the command line, + // then use "project.prm", otherwise + // take the filename given as the + // first argument on the command + // line. 
+ // + // With this, we create a + // ParameterHandler object, let the + // NeutronDiffusionProblem::Parameters + // class declare all the parameters + // it wants to see in the input file + // (or, take the default values, if + // nothing is listed in the parameter + // file), then read the input file, + // ask the parameters object to + // extract the values, and finally + // hand everything off to an object + // of type + // NeutronDiffusionProblem + // for computation of the eigenvalue: int main (int argc, char ** argv) { try @@ -2278,9 +2278,9 @@ int main (int argc, char ** argv) std::string filename; if (argc < 2) - filename = "project.prm"; + filename = "project.prm"; else - filename = argv[1]; + filename = argv[1]; const unsigned int dim = 2; @@ -2301,25 +2301,25 @@ int main (int argc, char ** argv) catch (std::exception &exc) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Exception on processing: " << std::endl - << exc.what() << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << exc.what() << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } catch (...) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Unknown exception!" << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } diff --git a/deal.II/examples/step-29/step-29.cc b/deal.II/examples/step-29/step-29.cc index 2e91a0c153..cd369dd70b 100644 --- a/deal.II/examples/step-29/step-29.cc +++ b/deal.II/examples/step-29/step-29.cc @@ -10,10 +10,10 @@ - // @sect3{Include files} + // @sect3{Include files} - // The following header files are unchanged - // from step-7 and have been discussed before: + // The following header files are unchanged + // from step-7 and have been discussed before: #include #include @@ -42,79 +42,79 @@ #include - // This header file contains the - // necessary declarations for the - // ParameterHandler class that we - // will use to read our parameters - // from a configuration file: + // This header file contains the + // necessary declarations for the + // ParameterHandler class that we + // will use to read our parameters + // from a configuration file: #include - // For solving the linear system, - // we'll use the sparse - // LU-decomposition provided by - // UMFPACK (see the SparseDirectUMFPACK - // class), for which the following - // header file is needed. Note that - // in order to compile this tutorial - // program, the deal.II-library needs - // to be built with UMFPACK support, - // which can be most easily achieved - // by giving the - // --with-umfpack switch when - // configuring the library: + // For solving the linear system, + // we'll use the sparse + // LU-decomposition provided by + // UMFPACK (see the SparseDirectUMFPACK + // class), for which the following + // header file is needed. 
Note that + // in order to compile this tutorial + // program, the deal.II-library needs + // to be built with UMFPACK support, + // which can be most easily achieved + // by giving the + // --with-umfpack switch when + // configuring the library: #include - // The FESystem class allows us to - // stack several FE-objects to one - // compound, vector-valued finite - // element field. The necessary - // declarations for this class are - // provided in this header file: + // The FESystem class allows us to + // stack several FE-objects to one + // compound, vector-valued finite + // element field. The necessary + // declarations for this class are + // provided in this header file: #include - // Finally, include the header file - // that declares the Timer class that - // we will use to determine how much - // time each of the operations of our - // program takes: + // Finally, include the header file + // that declares the Timer class that + // we will use to determine how much + // time each of the operations of our + // program takes: #include - // As the last step at the beginning of this - // program, we put everything that is in this - // program into its namespace and, within it, - // make everything that is in the deal.II - // namespace globally available, without the - // need to prefix everything with - // dealii::: + // As the last step at the beginning of this + // program, we put everything that is in this + // program into its namespace and, within it, + // make everything that is in the deal.II + // namespace globally available, without the + // need to prefix everything with + // dealii::: namespace Step29 { using namespace dealii; - // @sect3{The DirichletBoundaryValues class} - - // First we define a class for the - // function representing the - // Dirichlet boundary values. This - // has been done many times before - // and therefore does not need much - // explanation. - // - // Since there are two values $v$ and - // $w$ that need to be prescribed at - // the boundary, we have to tell the - // base class that this is a - // vector-valued function with two - // components, and the - // vector_value function - // and its cousin - // vector_value_list must - // return vectors with two entries. In - // our case the function is very - // simple, it just returns 1 for the - // real part $v$ and 0 for the - // imaginary part $w$ regardless of - // the point where it is evaluated. + // @sect3{The DirichletBoundaryValues class} + + // First we define a class for the + // function representing the + // Dirichlet boundary values. This + // has been done many times before + // and therefore does not need much + // explanation. + // + // Since there are two values $v$ and + // $w$ that need to be prescribed at + // the boundary, we have to tell the + // base class that this is a + // vector-valued function with two + // components, and the + // vector_value function + // and its cousin + // vector_value_list must + // return vectors with two entries. In + // our case the function is very + // simple, it just returns 1 for the + // real part $v$ and 0 for the + // imaginary part $w$ regardless of + // the point where it is evaluated. 
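As an editorial aside (not part of the patch): a boundary-value class of the kind declared next is consumed by the library's interpolation routines. Schematically, and assuming the transducer boundary carries indicator 1 as set up later in make_grid(), the assembly stage uses it along these lines:

    std::map<unsigned int,double> boundary_values;
    VectorTools::interpolate_boundary_values (dof_handler,
                                              1,                              // transducer boundary
                                              DirichletBoundaryValues<dim>(),
                                              boundary_values);
    MatrixTools::apply_boundary_values (boundary_values,
                                        system_matrix, solution, system_rhs);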
template class DirichletBoundaryValues : public Function { @@ -122,17 +122,17 @@ namespace Step29 DirichletBoundaryValues() : Function (2) {}; virtual void vector_value (const Point &p, - Vector &values) const; + Vector &values) const; virtual void vector_value_list (const std::vector > &points, - std::vector > &value_list) const; + std::vector > &value_list) const; }; template inline void DirichletBoundaryValues::vector_value (const Point &/*p*/, - Vector &values) const + Vector &values) const { Assert (values.size() == 2, ExcDimensionMismatch (values.size(), 2)); @@ -143,29 +143,29 @@ namespace Step29 template void DirichletBoundaryValues::vector_value_list (const std::vector > &points, - std::vector > &value_list) const + std::vector > &value_list) const { Assert (value_list.size() == points.size(), - ExcDimensionMismatch (value_list.size(), points.size())); + ExcDimensionMismatch (value_list.size(), points.size())); for (unsigned int p=0; p::vector_value (points[p], value_list[p]); } - // @sect3{The ParameterReader class} - - // The next class is responsible for - // preparing the ParameterHandler - // object and reading parameters from - // an input file. It includes a - // function - // declare_parameters - // that declares all the necessary - // parameters and a - // read_parameters - // function that is called from - // outside to initiate the parameter - // reading process. + // @sect3{The ParameterReader class} + + // The next class is responsible for + // preparing the ParameterHandler + // object and reading parameters from + // an input file. It includes a + // function + // declare_parameters + // that declares all the necessary + // parameters and a + // read_parameters + // function that is called from + // outside to initiate the parameter + // reading process. class ParameterReader : public Subscriptor { public: @@ -177,161 +177,161 @@ namespace Step29 ParameterHandler &prm; }; - // The constructor stores a reference to - // the ParameterHandler object that is passed to it: + // The constructor stores a reference to + // the ParameterHandler object that is passed to it: ParameterReader::ParameterReader(ParameterHandler ¶mhandler) - : - prm(paramhandler) + : + prm(paramhandler) {} - // @sect4{ParameterReader::declare_parameters} - - // The declare_parameters - // function declares all the - // parameters that our - // ParameterHandler object will be - // able to read from input files, - // along with their types, range - // conditions and the subsections they - // appear in. We will wrap all the - // entries that go into a section in a - // pair of braces to force the editor - // to indent them by one level, making - // it simpler to read which entries - // together form a section: + // @sect4{ParameterReader::declare_parameters} + + // The declare_parameters + // function declares all the + // parameters that our + // ParameterHandler object will be + // able to read from input files, + // along with their types, range + // conditions and the subsections they + // appear in. We will wrap all the + // entries that go into a section in a + // pair of braces to force the editor + // to indent them by one level, making + // it simpler to read which entries + // together form a section: void ParameterReader::declare_parameters() { - // Parameters for mesh and geometry - // include the number of global - // refinement steps that are applied - // to the initial coarse mesh and the - // focal distance $d$ of the - // transducer lens. 
For the number of - // refinement steps, we allow integer - // values in the range $[0,\infty)$, - // where the omitted second argument - // to the Patterns::Integer object - // denotes the half-open interval. - // For the focal distance any number - // greater than zero is accepted: + // Parameters for mesh and geometry + // include the number of global + // refinement steps that are applied + // to the initial coarse mesh and the + // focal distance $d$ of the + // transducer lens. For the number of + // refinement steps, we allow integer + // values in the range $[0,\infty)$, + // where the omitted second argument + // to the Patterns::Integer object + // denotes the half-open interval. + // For the focal distance any number + // greater than zero is accepted: prm.enter_subsection ("Mesh & geometry parameters"); { prm.declare_entry("Number of refinements", "6", - Patterns::Integer(0), - "Number of global mesh refinement steps " - "applied to initial coarse grid"); + Patterns::Integer(0), + "Number of global mesh refinement steps " + "applied to initial coarse grid"); prm.declare_entry("Focal distance", "0.3", - Patterns::Double(0), - "Distance of the focal point of the lens " - "to the x-axis"); + Patterns::Double(0), + "Distance of the focal point of the lens " + "to the x-axis"); } prm.leave_subsection (); - // The next subsection is devoted to - // the physical parameters appearing - // in the equation, which are the - // frequency $\omega$ and wave speed - // $c$. Again, both need to lie in the - // half-open interval $[0,\infty)$ - // represented by calling the - // Patterns::Double class with only - // the left end-point as argument: + // The next subsection is devoted to + // the physical parameters appearing + // in the equation, which are the + // frequency $\omega$ and wave speed + // $c$. Again, both need to lie in the + // half-open interval $[0,\infty)$ + // represented by calling the + // Patterns::Double class with only + // the left end-point as argument: prm.enter_subsection ("Physical constants"); { prm.declare_entry("c", "1.5e5", - Patterns::Double(0), - "Wave speed"); + Patterns::Double(0), + "Wave speed"); prm.declare_entry("omega", "5.0e7", - Patterns::Double(0), - "Frequency"); + Patterns::Double(0), + "Frequency"); } prm.leave_subsection (); - // Last but not least we would like - // to be able to change some - // properties of the output, like - // filename and format, through - // entries in the configuration - // file, which is the purpose of - // the last subsection: + // Last but not least we would like + // to be able to change some + // properties of the output, like + // filename and format, through + // entries in the configuration + // file, which is the purpose of + // the last subsection: prm.enter_subsection ("Output parameters"); { prm.declare_entry("Output file", "solution", - Patterns::Anything(), - "Name of the output file (without extension)"); - - // Since different output formats - // may require different - // parameters for generating - // output (like for example, - // postscript output needs - // viewpoint angles, line widths, - // colors etc), it would be - // cumbersome if we had to - // declare all these parameters - // by hand for every possible - // output format supported in the - // library. Instead, each output - // format has a - // FormatFlags::declare_parameters - // function, which declares all - // the parameters specific to - // that format in an own - // subsection. 
The following call - // of - // DataOutInterface<1>::declare_parameters - // executes - // declare_parameters - // for all available output - // formats, so that for each - // format an own subsection will - // be created with parameters - // declared for that particular - // output format. (The actual - // value of the template - // parameter in the call, - // @<1@> above, does - // not matter here: the function - // does the same work independent - // of the dimension, but happens - // to be in a - // template-parameter-dependent - // class.) To find out what - // parameters there are for which - // output format, you can either - // consult the documentation of - // the DataOutBase class, or - // simply run this program - // without a parameter file - // present. It will then create a - // file with all declared - // parameters set to their - // default values, which can - // conveniently serve as a - // starting point for setting the - // parameters to the values you - // desire. + Patterns::Anything(), + "Name of the output file (without extension)"); + + // Since different output formats + // may require different + // parameters for generating + // output (like for example, + // postscript output needs + // viewpoint angles, line widths, + // colors etc), it would be + // cumbersome if we had to + // declare all these parameters + // by hand for every possible + // output format supported in the + // library. Instead, each output + // format has a + // FormatFlags::declare_parameters + // function, which declares all + // the parameters specific to + // that format in an own + // subsection. The following call + // of + // DataOutInterface<1>::declare_parameters + // executes + // declare_parameters + // for all available output + // formats, so that for each + // format an own subsection will + // be created with parameters + // declared for that particular + // output format. (The actual + // value of the template + // parameter in the call, + // @<1@> above, does + // not matter here: the function + // does the same work independent + // of the dimension, but happens + // to be in a + // template-parameter-dependent + // class.) To find out what + // parameters there are for which + // output format, you can either + // consult the documentation of + // the DataOutBase class, or + // simply run this program + // without a parameter file + // present. It will then create a + // file with all declared + // parameters set to their + // default values, which can + // conveniently serve as a + // starting point for setting the + // parameters to the values you + // desire. DataOutInterface<1>::declare_parameters (prm); } prm.leave_subsection (); } - // @sect4{ParameterReader::read_parameters} - - // This is the main function in the - // ParameterReader class. It gets - // called from outside, first - // declares all the parameters, and - // then reads them from the input - // file whose filename is provided by - // the caller. After the call to this - // function is complete, the - // prm object can be - // used to retrieve the values of the - // parameters read in from the file: + // @sect4{ParameterReader::read_parameters} + + // This is the main function in the + // ParameterReader class. It gets + // called from outside, first + // declares all the parameters, and + // then reads them from the input + // file whose filename is provided by + // the caller. 
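Seen from the caller's side, the declare/read/get round trip provided by this class looks roughly as follows (editorial sketch, not part of the patch; the file name is only a placeholder):

    ParameterHandler prm;
    ParameterReader  param (prm);
    param.read_parameters ("step-29.prm");      // declares all entries, then reads the file

    prm.enter_subsection ("Physical constants");
    const double c = prm.get_double ("c");      // value from the file, or the default "1.5e5"
    prm.leave_subsection ();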
After the call to this + // function is complete, the + // prm object can be + // used to retrieve the values of the + // parameters read in from the file: void ParameterReader::read_parameters (const std::string parameter_file) { declare_parameters(); @@ -341,76 +341,76 @@ namespace Step29 - // @sect3{The ComputeIntensity class} - - // As mentioned in the introduction, - // the quantity that we are really - // after is the spatial distribution - // of the intensity of the ultrasound - // wave, which corresponds to - // $|u|=\sqrt{v^2+w^2}$. Now we could - // just be content with having $v$ - // and $w$ in our output, and use a - // suitable visualization or - // postprocessing tool to derive - // $|u|$ from the solution we - // computed. However, there is also a - // way to output data derived from - // the solution in deal.II, and we - // are going to make use of this - // mechanism here. - - // So far we have always used the - // DataOut::add_data_vector function - // to add vectors containing output - // data to a DataOut object. There - // is a special version of this - // function that in addition to the - // data vector has an additional - // argument of type - // DataPostprocessor. What happens - // when this function is used for - // output is that at each point where - // output data is to be generated, - // the DataPostprocessor::compute_derived_quantities_scalar or DataPostprocessor::compute_derived_quantities_vector - // function of the specified - // DataPostprocessor object is - // invoked to compute the output - // quantities from the values, the - // gradients and the second - // derivatives of the finite element - // function represented by the data - // vector (in the case of face - // related data, normal vectors are - // available as well). Hence, this - // allows us to output any quantity - // that can locally be derived from - // the values of the solution and its - // derivatives. Of course, the - // ultrasound intensity $|u|$ is such - // a quantity and its computation - // doesn't even involve any - // derivatives of $v$ or $w$. - - // In practice, the - // DataPostprocessor class only - // provides an interface to this - // functionality, and we need to - // derive our own class from it in - // order to implement the functions - // specified by the interface. In - // the most general case one has to - // implement several member - // functions but if the output - // quantity is a single scalar then - // some of this boilerplate code - // can be handled by a more - // specialized class, - // DataPostprocessorScalar and we - // can derive from that one - // instead. This is what the - // ComputeIntensity - // class does: + // @sect3{The ComputeIntensity class} + + // As mentioned in the introduction, + // the quantity that we are really + // after is the spatial distribution + // of the intensity of the ultrasound + // wave, which corresponds to + // $|u|=\sqrt{v^2+w^2}$. Now we could + // just be content with having $v$ + // and $w$ in our output, and use a + // suitable visualization or + // postprocessing tool to derive + // $|u|$ from the solution we + // computed. However, there is also a + // way to output data derived from + // the solution in deal.II, and we + // are going to make use of this + // mechanism here. + + // So far we have always used the + // DataOut::add_data_vector function + // to add vectors containing output + // data to a DataOut object. 
There + // is a special version of this + // function that in addition to the + // data vector has an additional + // argument of type + // DataPostprocessor. What happens + // when this function is used for + // output is that at each point where + // output data is to be generated, + // the DataPostprocessor::compute_derived_quantities_scalar or DataPostprocessor::compute_derived_quantities_vector + // function of the specified + // DataPostprocessor object is + // invoked to compute the output + // quantities from the values, the + // gradients and the second + // derivatives of the finite element + // function represented by the data + // vector (in the case of face + // related data, normal vectors are + // available as well). Hence, this + // allows us to output any quantity + // that can locally be derived from + // the values of the solution and its + // derivatives. Of course, the + // ultrasound intensity $|u|$ is such + // a quantity and its computation + // doesn't even involve any + // derivatives of $v$ or $w$. + + // In practice, the + // DataPostprocessor class only + // provides an interface to this + // functionality, and we need to + // derive our own class from it in + // order to implement the functions + // specified by the interface. In + // the most general case one has to + // implement several member + // functions but if the output + // quantity is a single scalar then + // some of this boilerplate code + // can be handled by a more + // specialized class, + // DataPostprocessorScalar and we + // can derive from that one + // instead. This is what the + // ComputeIntensity + // class does: template class ComputeIntensity : public DataPostprocessorScalar { @@ -420,82 +420,82 @@ namespace Step29 virtual void compute_derived_quantities_vector (const std::vector< Vector< double > > &uh, - const std::vector< std::vector< Tensor< 1, dim > > > &duh, - const std::vector< std::vector< Tensor< 2, dim > > > &dduh, - const std::vector< Point< dim > > &normals, - const std::vector > &evaluation_points, - std::vector< Vector< double > > &computed_quantities) const; + const std::vector< std::vector< Tensor< 1, dim > > > &duh, + const std::vector< std::vector< Tensor< 2, dim > > > &dduh, + const std::vector< Point< dim > > &normals, + const std::vector > &evaluation_points, + std::vector< Vector< double > > &computed_quantities) const; }; - // In the constructor, we need to - // call the constructor of the base - // class with two arguments. The - // first denotes the name by which - // the single scalar quantity - // computed by this class should be - // represented in output files. In - // our case, the postprocessor has - // $|u|$ as output, so we use - // "Intensity". - // - // The second argument is a set of - // flags that indicate which data is - // needed by the postprocessor in - // order to compute the output - // quantities. This can be any - // subset of update_values, - // update_gradients and - // update_hessians (and, in the case - // of face data, also - // update_normal_vectors), which are - // documented in UpdateFlags. Of - // course, computation of the - // derivatives requires additional - // resources, so only the flags for - // data that is really needed should - // be given here, just as we do when - // we use FEValues objects. In our - // case, only the function values of - // $v$ and $w$ are needed to compute - // $|u|$, so we're good with the - // update_values flag. 
+ // In the constructor, we need to + // call the constructor of the base + // class with two arguments. The + // first denotes the name by which + // the single scalar quantity + // computed by this class should be + // represented in output files. In + // our case, the postprocessor has + // $|u|$ as output, so we use + // "Intensity". + // + // The second argument is a set of + // flags that indicate which data is + // needed by the postprocessor in + // order to compute the output + // quantities. This can be any + // subset of update_values, + // update_gradients and + // update_hessians (and, in the case + // of face data, also + // update_normal_vectors), which are + // documented in UpdateFlags. Of + // course, computation of the + // derivatives requires additional + // resources, so only the flags for + // data that is really needed should + // be given here, just as we do when + // we use FEValues objects. In our + // case, only the function values of + // $v$ and $w$ are needed to compute + // $|u|$, so we're good with the + // update_values flag. template ComputeIntensity::ComputeIntensity () - : - DataPostprocessorScalar ("Intensity", - update_values) + : + DataPostprocessorScalar ("Intensity", + update_values) {} - // The actual prostprocessing happens - // in the following function. Its - // inputs are a vector representing - // values of the function (which is - // here vector-valued) representing - // the data vector given to - // DataOut::add_data_vector, - // evaluated at all quadrature points - // where we generate output, and some - // tensor objects representing - // derivatives (that we don't use - // here since $|u|$ is computed from - // just $v$ and $w$, and for which we - // assign no name to the - // corresponding function argument). - // The derived quantities are - // returned in the - // computed_quantities - // vector. Remember that this - // function may only use data for - // which the respective update flag - // is specified by - // get_needed_update_flags. For - // example, we may not use the - // derivatives here, since our - // implementation of - // get_needed_update_flags - // requests that only function values - // are provided. + // The actual prostprocessing happens + // in the following function. Its + // inputs are a vector representing + // values of the function (which is + // here vector-valued) representing + // the data vector given to + // DataOut::add_data_vector, + // evaluated at all quadrature points + // where we generate output, and some + // tensor objects representing + // derivatives (that we don't use + // here since $|u|$ is computed from + // just $v$ and $w$, and for which we + // assign no name to the + // corresponding function argument). + // The derived quantities are + // returned in the + // computed_quantities + // vector. Remember that this + // function may only use data for + // which the respective update flag + // is specified by + // get_needed_update_flags. For + // example, we may not use the + // derivatives here, since our + // implementation of + // get_needed_update_flags + // requests that only function values + // are provided. 
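Before the implementation of this function, it may help to see where such a postprocessor plugs in: the output routine later in the program hands it to DataOut together with the solution vector, roughly like this (editorial sketch, not part of the patch; the output format is hard-coded here for brevity):

    ComputeIntensity<dim> intensities;
    DataOut<dim>          data_out;

    data_out.attach_dof_handler (dof_handler);
    data_out.add_data_vector (solution, intensities);   // overload taking a DataPostprocessor
    data_out.build_patches ();

    std::ofstream output ("solution.vtk");
    data_out.write_vtk (output);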
template void ComputeIntensity::compute_derived_quantities_vector ( @@ -508,42 +508,42 @@ namespace Step29 ) const { Assert(computed_quantities.size() == uh.size(), - ExcDimensionMismatch (computed_quantities.size(), uh.size())); - - // The computation itself is - // straightforward: We iterate over - // each entry in the output vector - // and compute $|u|$ from the - // corresponding values of $v$ and - // $w$: + ExcDimensionMismatch (computed_quantities.size(), uh.size())); + + // The computation itself is + // straightforward: We iterate over + // each entry in the output vector + // and compute $|u|$ from the + // corresponding values of $v$ and + // $w$: for (unsigned int i=0; iUltrasoundProblem class} - - // Finally here is the main class of - // this program. It's member - // functions are very similar to the - // previous examples, in particular - // step-4, and the list of member - // variables does not contain any - // major surprises either. The - // ParameterHandler object that is - // passed to the constructor is - // stored as a reference to allow - // easy access to the parameters from - // all functions of the class. Since - // we are working with vector valued - // finite elements, the FE object we - // are using is of type FESystem. + // @sect3{The UltrasoundProblem class} + + // Finally here is the main class of + // this program. It's member + // functions are very similar to the + // previous examples, in particular + // step-4, and the list of member + // variables does not contain any + // major surprises either. The + // ParameterHandler object that is + // passed to the constructor is + // stored as a reference to allow + // easy access to the parameters from + // all functions of the class. Since + // we are working with vector valued + // finite elements, the FE object we + // are using is of type FESystem. template class UltrasoundProblem { @@ -572,20 +572,20 @@ namespace Step29 - // The constructor takes the - // ParameterHandler object and stores - // it in a reference. It also - // initializes the DoF-Handler and - // the finite element system, which - // consists of two copies of the - // scalar Q1 field, one for $v$ and - // one for $w$: + // The constructor takes the + // ParameterHandler object and stores + // it in a reference. It also + // initializes the DoF-Handler and + // the finite element system, which + // consists of two copies of the + // scalar Q1 field, one for $v$ and + // one for $w$: template UltrasoundProblem::UltrasoundProblem (ParameterHandler& param) - : - prm(param), - dof_handler(triangulation), - fe(FE_Q(1), 2) + : + prm(param), + dof_handler(triangulation), + fe(FE_Q(1), 2) {} @@ -595,87 +595,87 @@ namespace Step29 dof_handler.clear(); } - // @sect4{UltrasoundProblem::make_grid} + // @sect4{UltrasoundProblem::make_grid} - // Here we setup the grid for our - // domain. As mentioned in the - // exposition, the geometry is just a - // unit square (in 2d) with the part - // of the boundary that represents - // the transducer lens replaced by a - // sector of a circle. + // Here we setup the grid for our + // domain. As mentioned in the + // exposition, the geometry is just a + // unit square (in 2d) with the part + // of the boundary that represents + // the transducer lens replaced by a + // sector of a circle. 
template void UltrasoundProblem::make_grid () { - // First we generate some logging - // output and start a timer so we - // can compute execution time when - // this function is done: + // First we generate some logging + // output and start a timer so we + // can compute execution time when + // this function is done: deallog << "Generating grid... "; Timer timer; timer.start (); - // Then we query the values for the - // focal distance of the transducer - // lens and the number of mesh - // refinement steps from our - // ParameterHandler object: + // Then we query the values for the + // focal distance of the transducer + // lens and the number of mesh + // refinement steps from our + // ParameterHandler object: prm.enter_subsection ("Mesh & geometry parameters"); - const double focal_distance = prm.get_double("Focal distance"); - const unsigned int n_refinements = prm.get_integer("Number of refinements"); + const double focal_distance = prm.get_double("Focal distance"); + const unsigned int n_refinements = prm.get_integer("Number of refinements"); prm.leave_subsection (); - // Next, two points are defined for - // position and focal point of the - // transducer lens, which is the - // center of the circle whose - // segment will form the transducer - // part of the boundary. We compute - // the radius of this circle in - // such a way that the segment fits - // in the interval [0.4,0.6] on the - // x-axis. Notice that this is the - // only point in the program where - // things are slightly different in - // 2D and 3D. Even though this - // tutorial only deals with the 2D - // case, the necessary additions to - // make this program functional in - // 3D are so minimal that we opt - // for including them: - const Point transducer = (dim == 2) ? - Point (0.5, 0.0) : - Point (0.5, 0.5, 0.0), - focal_point = (dim == 2) ? - Point (0.5, focal_distance) : - Point (0.5, 0.5, focal_distance); + // Next, two points are defined for + // position and focal point of the + // transducer lens, which is the + // center of the circle whose + // segment will form the transducer + // part of the boundary. We compute + // the radius of this circle in + // such a way that the segment fits + // in the interval [0.4,0.6] on the + // x-axis. Notice that this is the + // only point in the program where + // things are slightly different in + // 2D and 3D. Even though this + // tutorial only deals with the 2D + // case, the necessary additions to + // make this program functional in + // 3D are so minimal that we opt + // for including them: + const Point transducer = (dim == 2) ? + Point (0.5, 0.0) : + Point (0.5, 0.5, 0.0), + focal_point = (dim == 2) ? + Point (0.5, focal_distance) : + Point (0.5, 0.5, focal_distance); const double radius = std::sqrt( (focal_point.distance(transducer) * - focal_point.distance(transducer)) + - ((dim==2) ? 0.01 : 0.02)); - - - // As initial coarse grid we take a - // simple unit square with 5 - // subdivisions in each - // direction. The number of - // subdivisions is chosen so that - // the line segment $[0.4,0.6]$ - // that we want to designate as the - // transducer boundary is spanned - // by a single face. Then we step - // through all cells to find the - // faces where the transducer is to - // be located, which in fact is - // just the single edge from 0.4 to - // 0.6 on the x-axis. This is where - // we want the refinements to be - // made according to a circle - // shaped boundary, so we mark this - // edge with a different boundary - // indicator. 
+ focal_point.distance(transducer)) + + ((dim==2) ? 0.01 : 0.02)); + + + // As initial coarse grid we take a + // simple unit square with 5 + // subdivisions in each + // direction. The number of + // subdivisions is chosen so that + // the line segment $[0.4,0.6]$ + // that we want to designate as the + // transducer boundary is spanned + // by a single face. Then we step + // through all cells to find the + // faces where the transducer is to + // be located, which in fact is + // just the single edge from 0.4 to + // 0.6 on the x-axis. This is where + // we want the refinements to be + // made according to a circle + // shaped boundary, so we mark this + // edge with a different boundary + // indicator. GridGenerator::subdivided_hyper_cube (triangulation, 5, 0, 1); typename Triangulation::cell_iterator @@ -684,63 +684,63 @@ namespace Step29 for (; cell!=endc; ++cell) for (unsigned int face=0; face::faces_per_cell; ++face) - if ( cell->face(face)->at_boundary() && - ((cell->face(face)->center() - transducer).square() < 0.01) ) - - cell->face(face)->set_boundary_indicator (1); - - // For the circle part of the - // transducer lens, a hyper-ball - // object is used (which, of course, - // in 2D just represents a circle), - // with radius and center as computed - // above. By marking this object as - // static, we ensure that - // it lives until the end of the - // program and thereby longer than the - // triangulation object we will - // associated with it. We then assign - // this boundary-object to the part of - // the boundary with boundary - // indicator 1: + if ( cell->face(face)->at_boundary() && + ((cell->face(face)->center() - transducer).square() < 0.01) ) + + cell->face(face)->set_boundary_indicator (1); + + // For the circle part of the + // transducer lens, a hyper-ball + // object is used (which, of course, + // in 2D just represents a circle), + // with radius and center as computed + // above. By marking this object as + // static, we ensure that + // it lives until the end of the + // program and thereby longer than the + // triangulation object we will + // associated with it. We then assign + // this boundary-object to the part of + // the boundary with boundary + // indicator 1: static const HyperBallBoundary boundary(focal_point, radius); triangulation.set_boundary(1, boundary); - // Now global refinement is - // executed. Cells near the - // transducer location will be - // automatically refined according - // to the circle shaped boundary of - // the transducer lens: + // Now global refinement is + // executed. Cells near the + // transducer location will be + // automatically refined according + // to the circle shaped boundary of + // the transducer lens: triangulation.refine_global (n_refinements); - // Lastly, we generate some more - // logging output. We stop the - // timer and query the number of - // CPU seconds elapsed since the - // beginning of the function: + // Lastly, we generate some more + // logging output. 
We stop the + // timer and query the number of + // CPU seconds elapsed since the + // beginning of the function: timer.stop (); deallog << "done (" - << timer() - << "s)" - << std::endl; + << timer() + << "s)" + << std::endl; deallog << " Number of active cells: " - << triangulation.n_active_cells() - << std::endl; + << triangulation.n_active_cells() + << std::endl; } - // @sect4{UltrasoundProblem::setup_system} - // - // Initialization of the system - // matrix, sparsity patterns and - // vectors are the same as in - // previous examples and therefore do - // not need further comment. As in - // the previous function, we also - // output the run time of what we do - // here: + // @sect4{UltrasoundProblem::setup_system} + // + // Initialization of the system + // matrix, sparsity patterns and + // vectors are the same as in + // previous examples and therefore do + // not need further comment. As in + // the previous function, we also + // output the run time of what we do + // here: template void UltrasoundProblem::setup_system () { @@ -751,8 +751,8 @@ namespace Step29 dof_handler.distribute_dofs (fe); sparsity_pattern.reinit (dof_handler.n_dofs(), - dof_handler.n_dofs(), - dof_handler.max_couplings_between_dofs()); + dof_handler.n_dofs(), + dof_handler.max_couplings_between_dofs()); DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); sparsity_pattern.compress(); @@ -763,20 +763,20 @@ namespace Step29 timer.stop (); deallog << "done (" - << timer() - << "s)" - << std::endl; + << timer() + << "s)" + << std::endl; deallog << " Number of degrees of freedom: " - << dof_handler.n_dofs() - << std::endl; + << dof_handler.n_dofs() + << std::endl; } - // @sect4{UltrasoundProblem::assemble_system} - // As before, this function takes - // care of assembling the system - // matrix and right hand side vector: + // @sect4{UltrasoundProblem::assemble_system} + // As before, this function takes + // care of assembling the system + // matrix and right hand side vector: template void UltrasoundProblem::assemble_system () { @@ -784,64 +784,64 @@ namespace Step29 Timer timer; timer.start (); - // First we query wavespeed and - // frequency from the - // ParameterHandler object and - // store them in local variables, - // as they will be used frequently - // throughout this function. + // First we query wavespeed and + // frequency from the + // ParameterHandler object and + // store them in local variables, + // as they will be used frequently + // throughout this function. prm.enter_subsection ("Physical constants"); const double omega = prm.get_double("omega"), - c = prm.get_double("c"); + c = prm.get_double("c"); prm.leave_subsection (); - // As usual, for computing - // integrals ordinary Gauss - // quadrature rule is used. Since - // our bilinear form involves - // boundary integrals on - // $\Gamma_2$, we also need a - // quadrature rule for surface - // integration on the faces, which - // are $dim-1$ dimensional: + // As usual, for computing + // integrals ordinary Gauss + // quadrature rule is used. Since + // our bilinear form involves + // boundary integrals on + // $\Gamma_2$, we also need a + // quadrature rule for surface + // integration on the faces, which + // are $dim-1$ dimensional: QGauss quadrature_formula(2); QGauss face_quadrature_formula(2); - const unsigned int n_q_points = quadrature_formula.size(), - n_face_q_points = face_quadrature_formula.size(), - dofs_per_cell = fe.dofs_per_cell; - - // The FEValues objects will - // evaluate the shape functions for - // us. 
For the part of the - // bilinear form that involves - // integration on $\Omega$, we'll - // need the values and gradients of - // the shape functions, and of - // course the quadrature weights. - // For the terms involving the - // boundary integrals, only shape - // function values and the - // quadrature weights are - // necessary. + const unsigned int n_q_points = quadrature_formula.size(), + n_face_q_points = face_quadrature_formula.size(), + dofs_per_cell = fe.dofs_per_cell; + + // The FEValues objects will + // evaluate the shape functions for + // us. For the part of the + // bilinear form that involves + // integration on $\Omega$, we'll + // need the values and gradients of + // the shape functions, and of + // course the quadrature weights. + // For the terms involving the + // boundary integrals, only shape + // function values and the + // quadrature weights are + // necessary. FEValues fe_values (fe, quadrature_formula, - update_values | update_gradients | - update_JxW_values); + update_values | update_gradients | + update_JxW_values); FEFaceValues fe_face_values (fe, face_quadrature_formula, - update_values | update_JxW_values); - - // As usual, the system matrix is - // assembled cell by cell, and we - // need a matrix for storing the - // local cell contributions as well - // as an index vector to transfer - // the cell contributions to the - // appropriate location in the - // global system matrix after. + update_values | update_JxW_values); + + // As usual, the system matrix is + // assembled cell by cell, and we + // need a matrix for storing the + // local cell contributions as well + // as an index vector to transfer + // the cell contributions to the + // appropriate location in the + // global system matrix after. FullMatrix cell_matrix (dofs_per_cell, dofs_per_cell); std::vector local_dof_indices (dofs_per_cell); @@ -852,411 +852,411 @@ namespace Step29 for (; cell!=endc; ++cell) { - // On each cell, we first need - // to reset the local - // contribution matrix and - // request the FEValues object - // to compute the shape - // functions for the current - // cell: - cell_matrix = 0; - fe_values.reinit (cell); - - for (unsigned int i=0; i::faces_per_cell; ++face) - if (cell->face(face)->at_boundary() && - (cell->face(face)->boundary_indicator() == 0) ) - { - - - // These faces will - // certainly contribute - // to the off-diagonal - // blocks of the system - // matrix, so we ask the - // FEFaceValues object to - // provide us with the - // shape function values - // on this face: - fe_face_values.reinit (cell, face); - - - // Next, we loop through - // all DoFs of the - // current cell to find - // pairs that belong to - // different components - // and both have support - // on the current face: - for (unsigned int i=0; iget_dof_indices (local_dof_indices); - - - // ...and then add the entries to - // the system matrix one by - // one: - for (unsigned int i=0; i::faces_per_cell; ++face) + if (cell->face(face)->at_boundary() && + (cell->face(face)->boundary_indicator() == 0) ) + { + + + // These faces will + // certainly contribute + // to the off-diagonal + // blocks of the system + // matrix, so we ask the + // FEFaceValues object to + // provide us with the + // shape function values + // on this face: + fe_face_values.reinit (cell, face); + + + // Next, we loop through + // all DoFs of the + // current cell to find + // pairs that belong to + // different components + // and both have support + // on the current face: + for (unsigned int i=0; iget_dof_indices 
(local_dof_indices); + + + // ...and then add the entries to + // the system matrix one by + // one: + for (unsigned int i=0; iDirichletBoundaryValues - // class we defined above: + // The only thing left are the + // Dirichlet boundary values on + // $\Gamma_1$, which is + // characterized by the boundary + // indicator 1. The Dirichlet + // values are provided by the + // DirichletBoundaryValues + // class we defined above: std::map boundary_values; VectorTools::interpolate_boundary_values (dof_handler, - 1, - DirichletBoundaryValues(), - boundary_values); + 1, + DirichletBoundaryValues(), + boundary_values); MatrixTools::apply_boundary_values (boundary_values, - system_matrix, - solution, - system_rhs); + system_matrix, + solution, + system_rhs); timer.stop (); deallog << "done (" - << timer() - << "s)" - << std::endl; + << timer() + << "s)" + << std::endl; } - // @sect4{UltrasoundProblem::solve} - - // As already mentioned in the - // introduction, the system matrix is - // neither symmetric nor definite, - // and so it is not quite obvious how - // to come up with an iterative - // solver and a preconditioner that - // do a good job on this matrix. We - // chose instead to go a different - // way and solve the linear system - // with the sparse LU decomposition - // provided by UMFPACK. This is often - // a good first choice for 2D - // problems and works reasonably well - // even for a large number of DoFs. - // The deal.II interface to UMFPACK - // is given by the - // SparseDirectUMFPACK class, which - // is very easy to use and allows us - // to solve our linear system with - // just 3 lines of code. - - // Note again that for compiling this - // example program, you need to have - // the deal.II library built with - // UMFPACK support, which can be - // achieved by providing the - // --with-umfpack switch to - // the configure script prior to - // compilation of the library. + // @sect4{UltrasoundProblem::solve} + + // As already mentioned in the + // introduction, the system matrix is + // neither symmetric nor definite, + // and so it is not quite obvious how + // to come up with an iterative + // solver and a preconditioner that + // do a good job on this matrix. We + // chose instead to go a different + // way and solve the linear system + // with the sparse LU decomposition + // provided by UMFPACK. This is often + // a good first choice for 2D + // problems and works reasonably well + // even for a large number of DoFs. + // The deal.II interface to UMFPACK + // is given by the + // SparseDirectUMFPACK class, which + // is very easy to use and allows us + // to solve our linear system with + // just 3 lines of code. + + // Note again that for compiling this + // example program, you need to have + // the deal.II library built with + // UMFPACK support, which can be + // achieved by providing the + // --with-umfpack switch to + // the configure script prior to + // compilation of the library. template void UltrasoundProblem::solve () { @@ -1264,55 +1264,55 @@ namespace Step29 Timer timer; timer.start (); - // The code to solve the linear - // system is short: First, we - // allocate an object of the right - // type. The following - // initialize call - // provides the matrix that we - // would like to invert to the - // SparseDirectUMFPACK object, and - // at the same time kicks off the - // LU-decomposition. Hence, this is - // also the point where most of the - // computational work in this - // program happens. 
+ // The code to solve the linear + // system is short: First, we + // allocate an object of the right + // type. The following + // initialize call + // provides the matrix that we + // would like to invert to the + // SparseDirectUMFPACK object, and + // at the same time kicks off the + // LU-decomposition. Hence, this is + // also the point where most of the + // computational work in this + // program happens. SparseDirectUMFPACK A_direct; A_direct.initialize(system_matrix); - // After the decomposition, we can - // use A_direct like a - // matrix representing the inverse - // of our system matrix, so to - // compute the solution we just - // have to multiply with the right - // hand side vector: + // After the decomposition, we can + // use A_direct like a + // matrix representing the inverse + // of our system matrix, so to + // compute the solution we just + // have to multiply with the right + // hand side vector: A_direct.vmult (solution, system_rhs); timer.stop (); deallog << "done (" - << timer () - << "s)" - << std::endl; + << timer () + << "s)" + << std::endl; } - // @sect4{UltrasoundProblem::output_results} + // @sect4{UltrasoundProblem::output_results} - // Here we output our solution $v$ - // and $w$ as well as the derived - // quantity $|u|$ in the format - // specified in the parameter - // file. Most of the work for - // deriving $|u|$ from $v$ and $w$ - // was already done in the - // implementation of the - // ComputeIntensity - // class, so that the output routine - // is rather straightforward and very - // similar to what is done in the - // previous tutorials. + // Here we output our solution $v$ + // and $w$ as well as the derived + // quantity $|u|$ in the format + // specified in the parameter + // file. Most of the work for + // deriving $|u|$ from $v$ and $w$ + // was already done in the + // implementation of the + // ComputeIntensity + // class, so that the output routine + // is rather straightforward and very + // similar to what is done in the + // previous tutorials. template void UltrasoundProblem::output_results () const { @@ -1320,27 +1320,27 @@ namespace Step29 Timer timer; timer.start (); - // Define objects of our - // ComputeIntensity - // class and a DataOut object: + // Define objects of our + // ComputeIntensity + // class and a DataOut object: ComputeIntensity intensities; DataOut data_out; data_out.attach_dof_handler (dof_handler); - // Next we query the output-related - // parameters from the - // ParameterHandler. The - // DataOut::parse_parameters call - // acts as a counterpart to the - // DataOutInterface<1>::declare_parameters - // call in - // ParameterReader::declare_parameters. It - // collects all the output format - // related parameters from the - // ParameterHandler and sets the - // corresponding properties of the - // DataOut object accordingly. + // Next we query the output-related + // parameters from the + // ParameterHandler. The + // DataOut::parse_parameters call + // acts as a counterpart to the + // DataOutInterface<1>::declare_parameters + // call in + // ParameterReader::declare_parameters. It + // collects all the output format + // related parameters from the + // ParameterHandler and sets the + // corresponding properties of the + // DataOut object accordingly. 
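  // As a rough, hypothetical sketch of this declare/parse pairing on its
  // own (the entry names inside the subsection are whatever
  // declare_parameters registers, among them the output format), one
  // would first declare the output-related entries, then fill the
  // ParameterHandler from a .prm file, and finally hand the values to a
  // DataOut object:
  //
  //   ParameterHandler prm;
  //   prm.enter_subsection ("Output parameters");
  //   DataOutInterface<1>::declare_parameters (prm);
  //   prm.leave_subsection ();
  //
  //   // ... fill prm from a parameter file ...
  //
  //   DataOut<2> data_out;
  //   prm.enter_subsection ("Output parameters");
  //   data_out.parse_parameters (prm);
  //   prm.leave_subsection ();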
prm.enter_subsection("Output parameters"); const std::string output_file = prm.get("Output file"); @@ -1348,57 +1348,57 @@ namespace Step29 prm.leave_subsection (); - // Now we put together the filename from - // the base name provided by the - // ParameterHandler and the suffix which is - // provided by the DataOut class (the - // default suffix is set to the right type - // that matches the one set in the .prm - // file through parse_parameters()): + // Now we put together the filename from + // the base name provided by the + // ParameterHandler and the suffix which is + // provided by the DataOut class (the + // default suffix is set to the right type + // that matches the one set in the .prm + // file through parse_parameters()): const std::string filename = output_file + - data_out.default_suffix(); + data_out.default_suffix(); std::ofstream output (filename.c_str()); - // The solution vectors $v$ and $w$ - // are added to the DataOut object - // in the usual way: + // The solution vectors $v$ and $w$ + // are added to the DataOut object + // in the usual way: std::vector solution_names; solution_names.push_back ("Re_u"); solution_names.push_back ("Im_u"); data_out.add_data_vector (solution, solution_names); - // For the intensity, we just call - // add_data_vector - // again, but this with our - // ComputeIntensity - // object as the second argument, - // which effectively adds $|u|$ to - // the output data: + // For the intensity, we just call + // add_data_vector + // again, but this with our + // ComputeIntensity + // object as the second argument, + // which effectively adds $|u|$ to + // the output data: data_out.add_data_vector (solution, intensities); - // The last steps are as before. Note - // that the actual output format is - // now determined by what is stated in - // the input file, i.e. one can change - // the output format without having to - // re-compile this program: + // The last steps are as before. Note + // that the actual output format is + // now determined by what is stated in + // the input file, i.e. one can change + // the output format without having to + // re-compile this program: data_out.build_patches (); data_out.write (output); timer.stop (); deallog << "done (" - << timer() - << "s)" - << std::endl; + << timer() + << "s)" + << std::endl; } - // @sect4{UltrasoundProblem::run} - // Here we simply execute our - // functions one after the other: + // @sect4{UltrasoundProblem::run} + // Here we simply execute our + // functions one after the other: template void UltrasoundProblem::run () { @@ -1411,22 +1411,22 @@ namespace Step29 } - // @sect4{The main function} - - // Finally the main - // function of the program. It has the - // same structure as in almost all of - // the other tutorial programs. The - // only exception is that we define - // ParameterHandler and - // ParameterReader - // objects, and let the latter read in - // the parameter values from a - // textfile called - // step-29.prm. The - // values so read are then handed over - // to an instance of the - // UltrasoundProblem class: + // @sect4{The main function} + + // Finally the main + // function of the program. It has the + // same structure as in almost all of + // the other tutorial programs. The + // only exception is that we define + // ParameterHandler and + // ParameterReader + // objects, and let the latter read in + // the parameter values from a + // textfile called + // step-29.prm. 
The + // values so read are then handed over + // to an instance of the + // UltrasoundProblem class: int main () { try @@ -1444,24 +1444,24 @@ int main () catch (std::exception &exc) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Exception on processing: " << std::endl - << exc.what() << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << exc.what() << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } catch (...) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Unknown exception!" << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } return 0; diff --git a/deal.II/examples/step-3/step-3.cc b/deal.II/examples/step-3/step-3.cc index 6a13779f7b..cfbfba6806 100644 --- a/deal.II/examples/step-3/step-3.cc +++ b/deal.II/examples/step-3/step-3.cc @@ -12,70 +12,70 @@ // @sect3{Many new include files} - // These include files are already - // known to you. They declare the - // classes which handle - // triangulations and enumeration of - // degrees of freedom: + // These include files are already + // known to you. They declare the + // classes which handle + // triangulations and enumeration of + // degrees of freedom: #include #include - // And this is the file in which the - // functions are declared that - // create grids: + // And this is the file in which the + // functions are declared that + // create grids: #include - // The next three files contain classes which - // are needed for loops over all cells and to - // get the information from the cell - // objects. The first two have been used - // before to get geometric information from - // cells; the last one is new and provides - // information about the degrees of freedom - // local to a cell: + // The next three files contain classes which + // are needed for loops over all cells and to + // get the information from the cell + // objects. The first two have been used + // before to get geometric information from + // cells; the last one is new and provides + // information about the degrees of freedom + // local to a cell: #include #include #include - // In this file contains the description of - // the Lagrange interpolation finite element: + // In this file contains the description of + // the Lagrange interpolation finite element: #include - // And this file is needed for the - // creation of sparsity patterns of - // sparse matrices, as shown in - // previous examples: + // And this file is needed for the + // creation of sparsity patterns of + // sparse matrices, as shown in + // previous examples: #include - // The next two file are needed for - // assembling the matrix using - // quadrature on each cell. The - // classes declared in them will be - // explained below: + // The next two file are needed for + // assembling the matrix using + // quadrature on each cell. 
The + // classes declared in them will be + // explained below: #include #include - // The following three include files - // we need for the treatment of - // boundary values: + // The following three include files + // we need for the treatment of + // boundary values: #include #include #include - // We're now almost to the end. The second to - // last group of include files is for the - // linear algebra which we employ to solve - // the system of equations arising from the - // finite element discretization of the - // Laplace equation. We will use vectors and - // full matrices for assembling the system of - // equations locally on each cell, and - // transfer the results into a sparse - // matrix. We will then use a Conjugate - // Gradient solver to solve the problem, for - // which we need a preconditioner (in this - // program, we use the identity - // preconditioner which does nothing, but we - // need to include the file anyway): + // We're now almost to the end. The second to + // last group of include files is for the + // linear algebra which we employ to solve + // the system of equations arising from the + // finite element discretization of the + // Laplace equation. We will use vectors and + // full matrices for assembling the system of + // equations locally on each cell, and + // transfer the results into a sparse + // matrix. We will then use a Conjugate + // Gradient solver to solve the problem, for + // which we need a preconditioner (in this + // program, we use the identity + // preconditioner which does nothing, but we + // need to include the file anyway): #include #include #include @@ -83,28 +83,28 @@ #include #include - // Finally, this is for output to a - // file and to the console: + // Finally, this is for output to a + // file and to the console: #include #include #include - // ...and this is to import the - // deal.II namespace into the global - // scope: + // ...and this is to import the + // deal.II namespace into the global + // scope: using namespace dealii; // @sect3{The Step3 class} - // Instead of the procedural programming of - // previous examples, we encapsulate - // everything into a class for this - // program. The class consists of functions - // which each perform certain aspects of a - // finite element program, a `main' function - // which controls what is done first and what - // is done next, and a list of member - // variables. + // Instead of the procedural programming of + // previous examples, we encapsulate + // everything into a class for this + // program. The class consists of functions + // which each perform certain aspects of a + // finite element program, a `main' function + // which controls what is done first and what + // is done next, and a list of member + // variables. // The public part of the class is rather // short: it has a constructor and a function @@ -123,13 +123,13 @@ class Step3 void run (); - // Then there are the member functions - // that mostly do what their names - // suggest and whose have been discussed - // in the introduction already. Since - // they do not need to be called from - // outside, they are made private to this - // class. + // Then there are the member functions + // that mostly do what their names + // suggest and whose have been discussed + // in the introduction already. Since + // they do not need to be called from + // outside, they are made private to this + // class. 
private: void make_grid (); @@ -138,170 +138,170 @@ class Step3 void solve (); void output_results () const; - // And finally we have some member - // variables. There are variables - // describing the triangulation - // and the global numbering of the - // degrees of freedom (we will - // specify the exact polynomial - // degree of the finite element - // in the constructor of this - // class)... + // And finally we have some member + // variables. There are variables + // describing the triangulation + // and the global numbering of the + // degrees of freedom (we will + // specify the exact polynomial + // degree of the finite element + // in the constructor of this + // class)... Triangulation<2> triangulation; FE_Q<2> fe; DoFHandler<2> dof_handler; - // ...variables for the sparsity - // pattern and values of the - // system matrix resulting from - // the discretization of the - // Laplace equation... + // ...variables for the sparsity + // pattern and values of the + // system matrix resulting from + // the discretization of the + // Laplace equation... SparsityPattern sparsity_pattern; SparseMatrix system_matrix; - // ...and variables which will - // hold the right hand side and - // solution vectors. + // ...and variables which will + // hold the right hand side and + // solution vectors. Vector solution; Vector system_rhs; }; // @sect4{Step3::Step3} - // Here comes the constructor. It does not - // much more than first to specify that we - // want bi-linear elements (denoted by the - // parameter to the finite element object, - // which indicates the polynomial degree), - // and to associate the dof_handler variable - // to the triangulation we use. (Note that - // the triangulation isn't set up with a mesh - // at all at the present time, but the - // DoFHandler doesn't care: it only wants to - // know which triangulation it will be - // associated with, and it only starts to - // care about an actual mesh once you try to - // distribute degree of freedom on the mesh - // using the distribute_dofs() function.) All - // the other member variables of the - // Step3 class have a default - // constructor which does all we want. + // Here comes the constructor. It does not + // much more than first to specify that we + // want bi-linear elements (denoted by the + // parameter to the finite element object, + // which indicates the polynomial degree), + // and to associate the dof_handler variable + // to the triangulation we use. (Note that + // the triangulation isn't set up with a mesh + // at all at the present time, but the + // DoFHandler doesn't care: it only wants to + // know which triangulation it will be + // associated with, and it only starts to + // care about an actual mesh once you try to + // distribute degree of freedom on the mesh + // using the distribute_dofs() function.) All + // the other member variables of the + // Step3 class have a default + // constructor which does all we want. Step3::Step3 () - : + : fe (1), - dof_handler (triangulation) + dof_handler (triangulation) {} // @sect4{Step3::make_grid} // Now, the first thing we've got to - // do is to generate the - // triangulation on which we would - // like to do our computation and - // number each vertex with a degree - // of freedom. We have seen this in - // the previous examples before. + // do is to generate the + // triangulation on which we would + // like to do our computation and + // number each vertex with a degree + // of freedom. We have seen this in + // the previous examples before. 
void Step3::make_grid () { - // First create the grid and refine - // all cells five times. Since the - // initial grid (which is the - // square [-1,1]x[-1,1]) consists - // of only one cell, the final grid - // has 32 times 32 cells, for a - // total of 1024. + // First create the grid and refine + // all cells five times. Since the + // initial grid (which is the + // square [-1,1]x[-1,1]) consists + // of only one cell, the final grid + // has 32 times 32 cells, for a + // total of 1024. GridGenerator::hyper_cube (triangulation, -1, 1); triangulation.refine_global (5); - // Unsure that 1024 is the correct number? - // Let's see: n_active_cells returns the - // number of active cells: + // Unsure that 1024 is the correct number? + // Let's see: n_active_cells returns the + // number of active cells: std::cout << "Number of active cells: " - << triangulation.n_active_cells() - << std::endl; + << triangulation.n_active_cells() + << std::endl; // Here, by active we mean the cells that aren't - // refined any further. We stress the - // adjective `active', since there are more - // cells, namely the parent cells of the - // finest cells, their parents, etc, up to - // the one cell which made up the initial - // grid. Of course, on the next coarser - // level, the number of cells is one - // quarter that of the cells on the finest - // level, i.e. 256, then 64, 16, 4, and - // 1. We can get the total number of cells - // like this: + // refined any further. We stress the + // adjective `active', since there are more + // cells, namely the parent cells of the + // finest cells, their parents, etc, up to + // the one cell which made up the initial + // grid. Of course, on the next coarser + // level, the number of cells is one + // quarter that of the cells on the finest + // level, i.e. 256, then 64, 16, 4, and + // 1. We can get the total number of cells + // like this: std::cout << "Total number of cells: " - << triangulation.n_cells() - << std::endl; - // Note the distinction between - // n_active_cells() and n_cells(). + << triangulation.n_cells() + << std::endl; + // Note the distinction between + // n_active_cells() and n_cells(). } // @sect4{Step3::setup_system} - // Next we enumerate all the degrees of - // freedom and set up matrix and vector - // objects to hold the system - // data. Enumerating is done by using - // DoFHandler::distribute_dofs(), as we have - // seen in the step-2 example. Since we use - // the FE_Q class and have set the polynomial - // degree to 1 in the constructor, - // i.e. bilinear elements, this associates - // one degree of freedom with each - // vertex. While we're at generating output, - // let us also take a look at how many - // degrees of freedom are generated: + // Next we enumerate all the degrees of + // freedom and set up matrix and vector + // objects to hold the system + // data. Enumerating is done by using + // DoFHandler::distribute_dofs(), as we have + // seen in the step-2 example. Since we use + // the FE_Q class and have set the polynomial + // degree to 1 in the constructor, + // i.e. bilinear elements, this associates + // one degree of freedom with each + // vertex. While we're at generating output, + // let us also take a look at how many + // degrees of freedom are generated: void Step3::setup_system () { dof_handler.distribute_dofs (fe); std::cout << "Number of degrees of freedom: " - << dof_handler.n_dofs() - << std::endl; - // There should be one DoF for each - // vertex. 
Since we have a 32 times - // 32 grid, the number of DoFs - // should be 33 times 33, or 1089. - - // As we have seen in the previous example, - // we set up a sparsity pattern by first - // creating a temporary structure, tagging - // those entries that might be nonzero, and - // then copying the data over to the - // SparsityPattern object that can then be - // used by the system matrix. + << dof_handler.n_dofs() + << std::endl; + // There should be one DoF for each + // vertex. Since we have a 32 times + // 32 grid, the number of DoFs + // should be 33 times 33, or 1089. + + // As we have seen in the previous example, + // we set up a sparsity pattern by first + // creating a temporary structure, tagging + // those entries that might be nonzero, and + // then copying the data over to the + // SparsityPattern object that can then be + // used by the system matrix. CompressedSparsityPattern c_sparsity(dof_handler.n_dofs()); DoFTools::make_sparsity_pattern (dof_handler, c_sparsity); sparsity_pattern.copy_from(c_sparsity); - // Note that the - // SparsityPattern object does - // not hold the values of the - // matrix, it only stores the - // places where entries are. The - // entries themselves are stored in - // objects of type SparseMatrix, of - // which our variable system_matrix - // is one. - // - // The distinction between sparsity pattern - // and matrix was made to allow several - // matrices to use the same sparsity - // pattern. This may not seem relevant - // here, but when you consider the size - // which matrices can have, and that it may - // take some time to build the sparsity - // pattern, this becomes important in - // large-scale problems if you have to - // store several matrices in your program. + // Note that the + // SparsityPattern object does + // not hold the values of the + // matrix, it only stores the + // places where entries are. The + // entries themselves are stored in + // objects of type SparseMatrix, of + // which our variable system_matrix + // is one. + // + // The distinction between sparsity pattern + // and matrix was made to allow several + // matrices to use the same sparsity + // pattern. This may not seem relevant + // here, but when you consider the size + // which matrices can have, and that it may + // take some time to build the sparsity + // pattern, this becomes important in + // large-scale problems if you have to + // store several matrices in your program. system_matrix.reinit (sparsity_pattern); - // The last thing to do in this - // function is to set the sizes of - // the right hand side vector and - // the solution vector to the right - // values: + // The last thing to do in this + // function is to set the sizes of + // the right hand side vector and + // the solution vector to the right + // values: solution.reinit (dof_handler.n_dofs()); system_rhs.reinit (dof_handler.n_dofs()); } @@ -309,545 +309,545 @@ void Step3::setup_system () // @sect4{Step3::assemble_system} - // The next step is to compute the entries of - // the matrix and right hand side that form - // the linear system from which we compute - // the solution. This is the central function - // of each finite element program and we have - // discussed the primary steps in the - // introduction already. - // - // The general approach to assemble matrices - // and vectors is to loop over all cells, and - // on each cell compute the contribution of - // that cell to the global matrix and right - // hand side by quadrature. 
The point to - // realize now is that we need the values of - // the shape functions at the locations of - // quadrature points on the real - // cell. However, both the finite element - // shape functions as well as the quadrature - // points are only defined on the reference - // cell. They are therefore of little help to - // us, and we will in fact hardly ever query - // information about finite element shape - // functions or quadrature points from these - // objects directly. - // - // Rather, what is required is a way to map - // this data from the reference cell to the - // real cell. Classes that can do that are - // derived from the Mapping class, though one - // again often does not have to deal with - // them directly: many functions in the - // library can take a mapping object as - // argument, but when it is omitted they - // simply resort to the standard bilinear Q1 - // mapping. We will go this route, and not - // bother with it for the moment (we come - // back to this in step-10, step-11, and - // step-12). - // - // So what we now have is a collection of - // three classes to deal with: finite - // element, quadrature, and mapping - // objects. That's too much, so there is one - // type of class that orchestrates - // information exchange between these three: - // the FEValues class. If given one instance - // of each three of these objects (or two, - // and an implicit linear mapping), it will - // be able to provide you with information - // about values and gradients of shape - // functions at quadrature points on a real - // cell. + // The next step is to compute the entries of + // the matrix and right hand side that form + // the linear system from which we compute + // the solution. This is the central function + // of each finite element program and we have + // discussed the primary steps in the + // introduction already. + // + // The general approach to assemble matrices + // and vectors is to loop over all cells, and + // on each cell compute the contribution of + // that cell to the global matrix and right + // hand side by quadrature. The point to + // realize now is that we need the values of + // the shape functions at the locations of + // quadrature points on the real + // cell. However, both the finite element + // shape functions as well as the quadrature + // points are only defined on the reference + // cell. They are therefore of little help to + // us, and we will in fact hardly ever query + // information about finite element shape + // functions or quadrature points from these + // objects directly. + // + // Rather, what is required is a way to map + // this data from the reference cell to the + // real cell. Classes that can do that are + // derived from the Mapping class, though one + // again often does not have to deal with + // them directly: many functions in the + // library can take a mapping object as + // argument, but when it is omitted they + // simply resort to the standard bilinear Q1 + // mapping. We will go this route, and not + // bother with it for the moment (we come + // back to this in step-10, step-11, and + // step-12). + // + // So what we now have is a collection of + // three classes to deal with: finite + // element, quadrature, and mapping + // objects. That's too much, so there is one + // type of class that orchestrates + // information exchange between these three: + // the FEValues class. 
If given one instance + // of each three of these objects (or two, + // and an implicit linear mapping), it will + // be able to provide you with information + // about values and gradients of shape + // functions at quadrature points on a real + // cell. // // Using all this, we will assemble the // linear system for this problem in the // following function: void Step3::assemble_system () { - // Ok, let's start: we need a quadrature - // formula for the evaluation of the - // integrals on each cell. Let's take a - // Gauss formula with two quadrature points - // in each direction, i.e. a total of four - // points since we are in 2D. This - // quadrature formula integrates - // polynomials of degrees up to three - // exactly (in 1D). It is easy to check - // that this is sufficient for the present - // problem: + // Ok, let's start: we need a quadrature + // formula for the evaluation of the + // integrals on each cell. Let's take a + // Gauss formula with two quadrature points + // in each direction, i.e. a total of four + // points since we are in 2D. This + // quadrature formula integrates + // polynomials of degrees up to three + // exactly (in 1D). It is easy to check + // that this is sufficient for the present + // problem: QGauss<2> quadrature_formula(2); - // And we initialize the object which we - // have briefly talked about above. It - // needs to be told which finite element we - // want to use, and the quadrature points - // and their weights (jointly described by - // a Quadrature object). As mentioned, we - // use the implied Q1 mapping, rather than - // specifying one ourselves - // explicitly. Finally, we have to tell it - // what we want it to compute on each cell: - // we need the values of the shape - // functions at the quadrature points (for - // the right hand side $(\varphi,f)$), their - // gradients (for the matrix entries $(\nabla - // \varphi_i, \nabla \varphi_j)$), and also the - // weights of the quadrature points and the - // determinants of the Jacobian - // transformations from the reference cell - // to the real cells. - // - // This list of what kind of information we - // actually need is given as a - // collection of flags as the third - // argument to the constructor of - // FEValues. Since these values have to - // be recomputed, or updated, every time we - // go to a new cell, all of these flags - // start with the prefix update_ and - // then indicate what it actually is that - // we want updated. The flag to give if we - // want the values of the shape functions - // computed is #update_values; for the - // gradients it is - // #update_gradients. The determinants - // of the Jacobians and the quadrature - // weights are always used together, so - // only the products (Jacobians times - // weights, or short JxW) are computed; - // since we need them, we have to list - // #update_JxW_values as well: + // And we initialize the object which we + // have briefly talked about above. It + // needs to be told which finite element we + // want to use, and the quadrature points + // and their weights (jointly described by + // a Quadrature object). As mentioned, we + // use the implied Q1 mapping, rather than + // specifying one ourselves + // explicitly. 
Finally, we have to tell it + // what we want it to compute on each cell: + // we need the values of the shape + // functions at the quadrature points (for + // the right hand side $(\varphi,f)$), their + // gradients (for the matrix entries $(\nabla + // \varphi_i, \nabla \varphi_j)$), and also the + // weights of the quadrature points and the + // determinants of the Jacobian + // transformations from the reference cell + // to the real cells. + // + // This list of what kind of information we + // actually need is given as a + // collection of flags as the third + // argument to the constructor of + // FEValues. Since these values have to + // be recomputed, or updated, every time we + // go to a new cell, all of these flags + // start with the prefix update_ and + // then indicate what it actually is that + // we want updated. The flag to give if we + // want the values of the shape functions + // computed is #update_values; for the + // gradients it is + // #update_gradients. The determinants + // of the Jacobians and the quadrature + // weights are always used together, so + // only the products (Jacobians times + // weights, or short JxW) are computed; + // since we need them, we have to list + // #update_JxW_values as well: FEValues<2> fe_values (fe, quadrature_formula, - update_values | update_gradients | update_JxW_values); + update_values | update_gradients | update_JxW_values); // The advantage of this approach is that - // we can specify what kind of information - // we actually need on each cell. It is - // easily understandable that this approach - // can significant speed up finite element - // computations, compared to approaches - // where everything, including second - // derivatives, normal vectors to cells, - // etc are computed on each cell, - // regardless whether they are needed or - // not. - - // For use further down below, we define - // two short cuts for values that will be - // used very frequently. First, an - // abbreviation for the number of degrees - // of freedom on each cell (since we are in - // 2D and degrees of freedom are associated - // with vertices only, this number is four, - // but we rather want to write the - // definition of this variable in a way - // that does not preclude us from later - // choosing a different finite element that - // has a different number of degrees of - // freedom per cell, or work in a different - // space dimension). - // - // Secondly, we also define an abbreviation - // for the number of quadrature points - // (here that should be four). In general, - // it is a good idea to use their symbolic - // names instead of hard-coding these - // number even if you know them, since you - // may want to change the quadrature - // formula and/or finite element at some - // time; the program will just work with - // these changes, without the need to - // change anything in this function. - // - // The shortcuts, finally, are only defined - // to make the following loops a bit more - // readable. You will see them in many - // places in larger programs, and - // `dofs_per_cell' and `n_q_points' are - // more or less by convention the standard - // names for these purposes: + // we can specify what kind of information + // we actually need on each cell. 
It is + // easily understandable that this approach + // can significant speed up finite element + // computations, compared to approaches + // where everything, including second + // derivatives, normal vectors to cells, + // etc are computed on each cell, + // regardless whether they are needed or + // not. + + // For use further down below, we define + // two short cuts for values that will be + // used very frequently. First, an + // abbreviation for the number of degrees + // of freedom on each cell (since we are in + // 2D and degrees of freedom are associated + // with vertices only, this number is four, + // but we rather want to write the + // definition of this variable in a way + // that does not preclude us from later + // choosing a different finite element that + // has a different number of degrees of + // freedom per cell, or work in a different + // space dimension). + // + // Secondly, we also define an abbreviation + // for the number of quadrature points + // (here that should be four). In general, + // it is a good idea to use their symbolic + // names instead of hard-coding these + // number even if you know them, since you + // may want to change the quadrature + // formula and/or finite element at some + // time; the program will just work with + // these changes, without the need to + // change anything in this function. + // + // The shortcuts, finally, are only defined + // to make the following loops a bit more + // readable. You will see them in many + // places in larger programs, and + // `dofs_per_cell' and `n_q_points' are + // more or less by convention the standard + // names for these purposes: const unsigned int dofs_per_cell = fe.dofs_per_cell; const unsigned int n_q_points = quadrature_formula.size(); - // Now, we said that we wanted to assemble - // the global matrix and vector - // cell-by-cell. We could write the results - // directly into the global matrix, but - // this is not very efficient since access - // to the elements of a sparse matrix is - // slow. Rather, we first compute the - // contribution of each cell in a small - // matrix with the degrees of freedom on - // the present cell, and only transfer them - // to the global matrix when the - // computations are finished for this - // cell. We do the same for the right hand - // side vector. So let's first allocate - // these objects (these being local - // objects, all degrees of freedom are - // coupling with all others, and we should - // use a full matrix object rather than a - // sparse one for the local operations; - // everything will be transferred to a - // global sparse matrix later on): + // Now, we said that we wanted to assemble + // the global matrix and vector + // cell-by-cell. We could write the results + // directly into the global matrix, but + // this is not very efficient since access + // to the elements of a sparse matrix is + // slow. Rather, we first compute the + // contribution of each cell in a small + // matrix with the degrees of freedom on + // the present cell, and only transfer them + // to the global matrix when the + // computations are finished for this + // cell. We do the same for the right hand + // side vector. 
So let's first allocate + // these objects (these being local + // objects, all degrees of freedom are + // coupling with all others, and we should + // use a full matrix object rather than a + // sparse one for the local operations; + // everything will be transferred to a + // global sparse matrix later on): FullMatrix cell_matrix (dofs_per_cell, dofs_per_cell); Vector cell_rhs (dofs_per_cell); - // When assembling the - // contributions of each cell, we - // do this with the local numbering - // of the degrees of freedom - // (i.e. the number running from - // zero through - // dofs_per_cell-1). However, when - // we transfer the result into the - // global matrix, we have to know - // the global numbers of the - // degrees of freedom. When we query - // them, we need a scratch - // (temporary) array for these - // numbers: + // When assembling the + // contributions of each cell, we + // do this with the local numbering + // of the degrees of freedom + // (i.e. the number running from + // zero through + // dofs_per_cell-1). However, when + // we transfer the result into the + // global matrix, we have to know + // the global numbers of the + // degrees of freedom. When we query + // them, we need a scratch + // (temporary) array for these + // numbers: std::vector local_dof_indices (dofs_per_cell); - // Now for the loop over all cells. We have - // seen before how this works, so this - // should be familiar including the - // conventional names for these variables: + // Now for the loop over all cells. We have + // seen before how this works, so this + // should be familiar including the + // conventional names for these variables: DoFHandler<2>::active_cell_iterator cell = dof_handler.begin_active(), endc = dof_handler.end(); for (; cell!=endc; ++cell) { - // We are now sitting on one cell, and - // we would like the values and - // gradients of the shape functions be - // computed, as well as the - // determinants of the Jacobian - // matrices of the mapping between - // reference cell and true cell, at the - // quadrature points. Since all these - // values depend on the geometry of the - // cell, we have to have the FEValues - // object re-compute them on each cell: + // We are now sitting on one cell, and + // we would like the values and + // gradients of the shape functions be + // computed, as well as the + // determinants of the Jacobian + // matrices of the mapping between + // reference cell and true cell, at the + // quadrature points. Since all these + // values depend on the geometry of the + // cell, we have to have the FEValues + // object re-compute them on each cell: fe_values.reinit (cell); - // Next, reset the local cell's - // contributions contributions to - // global matrix and global right hand - // side to zero, before we fill them: + // Next, reset the local cell's + // contributions contributions to + // global matrix and global right hand + // side to zero, before we fill them: cell_matrix = 0; cell_rhs = 0; - // Then finally assemble the matrix: - // For the Laplace problem, the matrix - // on each cell is the integral over - // the gradients of shape function i - // and j. Since we do not integrate, - // but rather use quadrature, this is - // the sum over all quadrature points - // of the integrands times the - // determinant of the Jacobian matrix - // at the quadrature point times the - // weight of this quadrature point. 
You - // can get the gradient of shape - // function $i$ at quadrature point - // q_point by using - // fe_values.shape_grad(i,q_point); - // this gradient is a 2-dimensional - // vector (in fact it is of type - // Tensor@<1,dim@>, with here dim=2) and - // the product of two such vectors is - // the scalar product, i.e. the product - // of the two shape_grad function calls - // is the dot product. This is in turn - // multiplied by the Jacobian - // determinant and the quadrature point - // weight (that one gets together by - // the call to - // FEValues::JxW() ). Finally, this is - // repeated for all shape functions - // $i$ and $j$: + // Then finally assemble the matrix: + // For the Laplace problem, the matrix + // on each cell is the integral over + // the gradients of shape function i + // and j. Since we do not integrate, + // but rather use quadrature, this is + // the sum over all quadrature points + // of the integrands times the + // determinant of the Jacobian matrix + // at the quadrature point times the + // weight of this quadrature point. You + // can get the gradient of shape + // function $i$ at quadrature point + // q_point by using + // fe_values.shape_grad(i,q_point); + // this gradient is a 2-dimensional + // vector (in fact it is of type + // Tensor@<1,dim@>, with here dim=2) and + // the product of two such vectors is + // the scalar product, i.e. the product + // of the two shape_grad function calls + // is the dot product. This is in turn + // multiplied by the Jacobian + // determinant and the quadrature point + // weight (that one gets together by + // the call to + // FEValues::JxW() ). Finally, this is + // repeated for all shape functions + // $i$ and $j$: for (unsigned int i=0; iget_dof_indices (local_dof_indices); - // Then again loop over all - // shape functions i and j and - // transfer the local elements - // to the global matrix. The - // global numbers can be - // obtained using - // local_dof_indices[i]: + // Then again loop over all + // shape functions i and j and + // transfer the local elements + // to the global matrix. The + // global numbers can be + // obtained using + // local_dof_indices[i]: for (unsigned int i=0; istd::map class. + // Now almost everything is set up for the + // solution of the discrete + // system. However, we have not yet taken + // care of boundary values (in fact, + // Laplace's equation without Dirichlet + // boundary values is not even uniquely + // solvable, since you can add an arbitrary + // constant to the discrete solution). We + // therefore have to do something about the + // situation. + // + // For this, we first obtain a list of the + // degrees of freedom on the boundary and + // the value the shape function shall have + // there. For simplicity, we only + // interpolate the boundary value function, + // rather than projecting it onto the + // boundary. There is a function in the + // library which does exactly this: + // VectorTools::interpolate_boundary_values(). Its + // parameters are (omitting parameters for + // which default values exist and that we + // don't care about): the DoFHandler object + // to get the global numbers of the degrees + // of freedom on the boundary; the + // component of the boundary where the + // boundary values shall be interpolated; + // the boundary value function itself; and + // the output object. + // + // The component of the boundary is meant + // as follows: in many cases, you may want + // to impose certain boundary values only + // on parts of the boundary. 
For example, + // you may have inflow and outflow + // boundaries in fluid dynamics, or clamped + // and free parts of bodies in deformation + // computations of bodies. Then you will + // want to denote these different parts of + // the boundary by different numbers and + // tell the interpolate_boundary_values + // function to only compute the boundary + // values on a certain part of the boundary + // (e.g. the clamped part, or the inflow + // boundary). By default, all boundaries + // have the number `0', and since we have + // not changed that, this is still so; + // therefore, if we give `0' as the desired + // portion of the boundary, this means we + // get the whole boundary. If you have + // boundaries with kinds of boundaries, you + // have to number them differently. The + // function call below will then only + // determine boundary values for parts of + // the boundary. + // + // The function describing the boundary + // values is an object of type Function + // or of a derived class. One of the + // derived classes is ZeroFunction, + // which describes (not unexpectedly) a + // function which is zero everywhere. We + // create such an object in-place and pass + // it to the VectorTools::interpolate_boundary_values() + // function. + // + // Finally, the output object is a + // list of pairs of global degree + // of freedom numbers (i.e. the + // number of the degrees of freedom + // on the boundary) and their + // boundary values (which are zero + // here for all entries). This + // mapping of DoF numbers to + // boundary values is done by the + // std::map class. std::map boundary_values; VectorTools::interpolate_boundary_values (dof_handler, - 0, - ZeroFunction<2>(), - boundary_values); - // Now that we got the list of - // boundary DoFs and their - // respective boundary values, - // let's use them to modify the - // system of equations - // accordingly. This is done by the - // following function call: + 0, + ZeroFunction<2>(), + boundary_values); + // Now that we got the list of + // boundary DoFs and their + // respective boundary values, + // let's use them to modify the + // system of equations + // accordingly. This is done by the + // following function call: MatrixTools::apply_boundary_values (boundary_values, - system_matrix, - solution, - system_rhs); + system_matrix, + solution, + system_rhs); } // @sect4{Step3::solve} // The following function simply - // solves the discretized - // equation. As the system is quite a - // large one for direct solvers such - // as Gauss elimination or LU - // decomposition, we use a Conjugate - // Gradient algorithm. You should - // remember that the number of - // variables here (only 1089) is a - // very small number for finite - // element computations, where - // 100.000 is a more usual number. - // For this number of variables, - // direct methods are no longer - // usable and you are forced to use - // methods like CG. + // solves the discretized + // equation. As the system is quite a + // large one for direct solvers such + // as Gauss elimination or LU + // decomposition, we use a Conjugate + // Gradient algorithm. You should + // remember that the number of + // variables here (only 1089) is a + // very small number for finite + // element computations, where + // 100.000 is a more usual number. + // For this number of variables, + // direct methods are no longer + // usable and you are forced to use + // methods like CG. 
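    // Editorial sketch, not part of the tab-to-space patch: the comment
    // above notes that far fewer than 1000 CG iterations are actually
    // needed. One way to see the actual count (assuming the system_matrix,
    // solution and system_rhs members of the Step3 class used in the
    // function below) is to query the SolverControl object after the solve:
    SolverControl solver_control (1000, 1e-12);
    SolverCG<>    solver (solver_control);
    solver.solve (system_matrix, solution, system_rhs,
                  PreconditionIdentity());
    std::cout << "CG converged in " << solver_control.last_step()
              << " iterations, final residual "
              << solver_control.last_value() << std::endl;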
void Step3::solve () { - // First, we need to have an object that - // knows how to tell the CG algorithm when - // to stop. This is done by using a - // SolverControl object, and as stopping - // criterion we say: stop after a maximum - // of 1000 iterations (which is far more - // than is needed for 1089 variables; see - // the results section to find out how many - // were really used), and stop if the norm - // of the residual is below $10^{-12}$. In - // practice, the latter criterion will be - // the one which stops the iteration: + // First, we need to have an object that + // knows how to tell the CG algorithm when + // to stop. This is done by using a + // SolverControl object, and as stopping + // criterion we say: stop after a maximum + // of 1000 iterations (which is far more + // than is needed for 1089 variables; see + // the results section to find out how many + // were really used), and stop if the norm + // of the residual is below $10^{-12}$. In + // practice, the latter criterion will be + // the one which stops the iteration: SolverControl solver_control (1000, 1e-12); - // Then we need the solver itself. The - // template parameters to the SolverCG - // class are the matrix type and the type - // of the vectors, but the empty angle - // brackets indicate that we simply take - // the default arguments (which are - // SparseMatrix@ and - // Vector@): + // Then we need the solver itself. The + // template parameters to the SolverCG + // class are the matrix type and the type + // of the vectors, but the empty angle + // brackets indicate that we simply take + // the default arguments (which are + // SparseMatrix@ and + // Vector@): SolverCG<> solver (solver_control); - // Now solve the system of equations. The - // CG solver takes a preconditioner as its - // fourth argument. We don't feel ready to - // delve into this yet, so we tell it to - // use the identity operation as - // preconditioner: + // Now solve the system of equations. The + // CG solver takes a preconditioner as its + // fourth argument. We don't feel ready to + // delve into this yet, so we tell it to + // use the identity operation as + // preconditioner: solver.solve (system_matrix, solution, system_rhs, - PreconditionIdentity()); - // Now that the solver has done its - // job, the solution variable - // contains the nodal values of the - // solution function. + PreconditionIdentity()); + // Now that the solver has done its + // job, the solution variable + // contains the nodal values of the + // solution function. } // @sect4{Step3::output_results} - // The last part of a typical finite - // element program is to output the - // results and maybe do some - // postprocessing (for example - // compute the maximal stress values - // at the boundary, or the average - // flux across the outflow, etc). We - // have no such postprocessing here, - // but we would like to write the - // solution to a file. + // The last part of a typical finite + // element program is to output the + // results and maybe do some + // postprocessing (for example + // compute the maximal stress values + // at the boundary, or the average + // flux across the outflow, etc). We + // have no such postprocessing here, + // but we would like to write the + // solution to a file. void Step3::output_results () const { - // To write the output to a file, - // we need an object which knows - // about output formats and the - // like. 
This is the DataOut class, - // and we need an object of that - // type: + // To write the output to a file, + // we need an object which knows + // about output formats and the + // like. This is the DataOut class, + // and we need an object of that + // type: DataOut<2> data_out; - // Now we have to tell it where to take the - // values from which it shall write. We - // tell it which DoFHandler object to - // use, and the solution vector (and - // the name by which the solution variable - // shall appear in the output file). If - // we had more than one vector which we - // would like to look at in the output (for - // example right hand sides, errors per - // cell, etc) we would add them as well: + // Now we have to tell it where to take the + // values from which it shall write. We + // tell it which DoFHandler object to + // use, and the solution vector (and + // the name by which the solution variable + // shall appear in the output file). If + // we had more than one vector which we + // would like to look at in the output (for + // example right hand sides, errors per + // cell, etc) we would add them as well: data_out.attach_dof_handler (dof_handler); data_out.add_data_vector (solution, "solution"); - // After the DataOut object knows - // which data it is to work on, we - // have to tell it to process them - // into something the back ends can - // handle. The reason is that we - // have separated the frontend - // (which knows about how to treat - // DoFHandler objects and data - // vectors) from the back end (which - // knows many different output formats) - // and use an intermediate data - // format to transfer data from the - // front- to the backend. The data - // is transformed into this - // intermediate format by the - // following function: + // After the DataOut object knows + // which data it is to work on, we + // have to tell it to process them + // into something the back ends can + // handle. The reason is that we + // have separated the frontend + // (which knows about how to treat + // DoFHandler objects and data + // vectors) from the back end (which + // knows many different output formats) + // and use an intermediate data + // format to transfer data from the + // front- to the backend. The data + // is transformed into this + // intermediate format by the + // following function: data_out.build_patches (); - // Now we have everything in place - // for the actual output. Just open - // a file and write the data into - // it, using GNUPLOT format (there - // are other functions which write - // their data in postscript, AVS, - // GMV, or some other format): + // Now we have everything in place + // for the actual output. Just open + // a file and write the data into + // it, using GNUPLOT format (there + // are other functions which write + // their data in postscript, AVS, + // GMV, or some other format): std::ofstream output ("solution.gpl"); data_out.write_gnuplot (output); } @@ -855,14 +855,14 @@ void Step3::output_results () const // @sect4{Step3::run} - // Finally, the last function of this class - // is the main function which calls all the - // other functions of the Step3 - // class. The order in which this is done - // resembles the order in which most finite - // element programs work. Since the names are - // mostly self-explanatory, there is not much - // to comment about: + // Finally, the last function of this class + // is the main function which calls all the + // other functions of the Step3 + // class. 
The order in which this is done + // resembles the order in which most finite + // element programs work. Since the names are + // mostly self-explanatory, there is not much + // to comment about: void Step3::run () { make_grid (); @@ -875,15 +875,15 @@ void Step3::run () // @sect3{The main function} - // This is the main function of the - // program. Since the concept of a - // main function is mostly a remnant - // from the pre-object era in C/C++ - // programming, it often does not - // much more than creating an object - // of the top-level class and calling - // its principle function. This is - // what is done here as well: + // This is the main function of the + // program. Since the concept of a + // main function is mostly a remnant + // from the pre-object era in C/C++ + // programming, it often does not + // much more than creating an object + // of the top-level class and calling + // its principle function. This is + // what is done here as well: int main () { Step3 laplace_problem; diff --git a/deal.II/examples/step-30/step-30.cc b/deal.II/examples/step-30/step-30.cc index 954bf76fae..6805f049f0 100644 --- a/deal.II/examples/step-30/step-30.cc +++ b/deal.II/examples/step-30/step-30.cc @@ -10,10 +10,10 @@ /* to the file deal.II/doc/license.html for the text and */ /* further information on this license. */ - // The deal.II include files have already - // been covered in previous examples - // and will thus not be further - // commented on. + // The deal.II include files have already + // been covered in previous examples + // and will thus not be further + // commented on. #include #include #include @@ -36,29 +36,29 @@ #include #include - // And this again is C++: + // And this again is C++: #include #include - // The last step is as in all - // previous programs: + // The last step is as in all + // previous programs: namespace Step30 { using namespace dealii; - // @sect3{Equation data} - // - // The classes describing equation data and the - // actual assembly of individual terms are - // almost entirely copied from step-12. We will - // comment on differences. + // @sect3{Equation data} + // + // The classes describing equation data and the + // actual assembly of individual terms are + // almost entirely copied from step-12. We will + // comment on differences. 
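    // Editorial sketch, not part of the tab-to-space patch: the value_list()
    // functions declared below are typically evaluated at the quadrature
    // points of a cell during assembly, roughly as follows (a hypothetical
    // snippet inside a function template over dim, with fe_v an FEValues<dim>
    // object initialized with update_quadrature_points and rhs_function an
    // RHS<dim> object):
    const std::vector<Point<dim> > &q_points = fe_v.get_quadrature_points ();
    std::vector<double> rhs_values (q_points.size());
    rhs_function.value_list (q_points, rhs_values);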
template class RHS: public Function { public: virtual void value_list (const std::vector > &points, - std::vector &values, - const unsigned int component=0) const; + std::vector &values, + const unsigned int component=0) const; }; @@ -67,8 +67,8 @@ namespace Step30 { public: virtual void value_list (const std::vector > &points, - std::vector &values, - const unsigned int component=0) const; + std::vector &values, + const unsigned int component=0) const; }; @@ -78,93 +78,93 @@ namespace Step30 public: Beta () {} void value_list (const std::vector > &points, - std::vector > &values) const; + std::vector > &values) const; }; template void RHS::value_list(const std::vector > &points, - std::vector &values, - const unsigned int) const + std::vector &values, + const unsigned int) const { Assert(values.size()==points.size(), - ExcDimensionMismatch(values.size(),points.size())); + ExcDimensionMismatch(values.size(),points.size())); for (unsigned int i=0; i void Beta::value_list(const std::vector > &points, - std::vector > &values) const + std::vector > &values) const { Assert(values.size()==points.size(), - ExcDimensionMismatch(values.size(),points.size())); + ExcDimensionMismatch(values.size(),points.size())); for (unsigned int i=0; i 0) - { - values[i](0) = -points[i](1); - values[i](1) = points[i](0); - } - else - { - values[i] = Point(); - values[i](0) = -points[i](1); - } + if (points[i](0) > 0) + { + values[i](0) = -points[i](1); + values[i](1) = points[i](0); + } + else + { + values[i] = Point(); + values[i](0) = -points[i](1); + } } } template void BoundaryValues::value_list(const std::vector > &points, - std::vector &values, - const unsigned int) const + std::vector &values, + const unsigned int) const { Assert(values.size()==points.size(), - ExcDimensionMismatch(values.size(),points.size())); + ExcDimensionMismatch(values.size(),points.size())); for (unsigned int i=0; i class DGTransportEquation { @@ -172,19 +172,19 @@ namespace Step30 DGTransportEquation(); void assemble_cell_term(const FEValues& fe_v, - FullMatrix &ui_vi_matrix, - Vector &cell_vector) const; + FullMatrix &ui_vi_matrix, + Vector &cell_vector) const; void assemble_boundary_term(const FEFaceValues& fe_v, - FullMatrix &ui_vi_matrix, - Vector &cell_vector) const; + FullMatrix &ui_vi_matrix, + Vector &cell_vector) const; void assemble_face_term2(const FEFaceValuesBase& fe_v, - const FEFaceValuesBase& fe_v_neighbor, - FullMatrix &ui_vi_matrix, - FullMatrix &ue_vi_matrix, - FullMatrix &ui_ve_matrix, - FullMatrix &ue_ve_matrix) const; + const FEFaceValuesBase& fe_v_neighbor, + FullMatrix &ui_vi_matrix, + FullMatrix &ue_vi_matrix, + FullMatrix &ui_ve_matrix, + FullMatrix &ue_ve_matrix) const; private: const Beta beta_function; const RHS rhs_function; @@ -192,32 +192,32 @@ namespace Step30 }; - // Likewise, the constructor of the - // class as well as the functions - // assembling the terms corresponding - // to cell interiors and boundary - // faces are unchanged from - // before. The function that - // assembles face terms between cells - // also did not change because all it - // does is operate on two objects of - // type FEFaceValuesBase (which is - // the base class of both - // FEFaceValues and - // FESubfaceValues). Where these - // objects come from, i.e. how they - // are initialized, is of no concern - // to this function: it simply - // assumes that the quadrature points - // on faces or subfaces represented - // by the two objects correspond to - // the same points in physical space. 
+ // Likewise, the constructor of the + // class as well as the functions + // assembling the terms corresponding + // to cell interiors and boundary + // faces are unchanged from + // before. The function that + // assembles face terms between cells + // also did not change because all it + // does is operate on two objects of + // type FEFaceValuesBase (which is + // the base class of both + // FEFaceValues and + // FESubfaceValues). Where these + // objects come from, i.e. how they + // are initialized, is of no concern + // to this function: it simply + // assumes that the quadrature points + // on faces or subfaces represented + // by the two objects correspond to + // the same points in physical space. template DGTransportEquation::DGTransportEquation () - : - beta_function (), - rhs_function (), - boundary_function () + : + beta_function (), + rhs_function (), + boundary_function () {} @@ -237,14 +237,14 @@ namespace Step30 for (unsigned int point=0; point0) - for (unsigned int i=0; i0) + for (unsigned int i=0; i0) - { - for (unsigned int i=0; i0) + { + for (unsigned int i=0; i class DGMethod { @@ -369,29 +369,29 @@ namespace Step30 Triangulation triangulation; const MappingQ1 mapping; - // Again we want to use DG elements of - // degree 1 (but this is only specified in - // the constructor). If you want to use a - // DG method of a different degree replace - // 1 in the constructor by the new degree. + // Again we want to use DG elements of + // degree 1 (but this is only specified in + // the constructor). If you want to use a + // DG method of a different degree replace + // 1 in the constructor by the new degree. const unsigned int degree; FE_DGQ fe; DoFHandler dof_handler; SparsityPattern sparsity_pattern; SparseMatrix system_matrix; - // This is new, the threshold value used in - // the evaluation of the anisotropic jump - // indicator explained in the - // introduction. Its value is set to 3.0 in - // the constructor, but it can easily be - // changed to a different value greater - // than 1. + // This is new, the threshold value used in + // the evaluation of the anisotropic jump + // indicator explained in the + // introduction. Its value is set to 3.0 in + // the constructor, but it can easily be + // changed to a different value greater + // than 1. const double anisotropic_threshold_ratio; - // This is a bool flag indicating whether - // anisotropic refinement shall be used or - // not. It is set by the constructor, which - // takes an argument of the same name. + // This is a bool flag indicating whether + // anisotropic refinement shall be used or + // not. It is set by the constructor, which + // takes an argument of the same name. const bool anisotropic; const QGauss quadrature; @@ -406,41 +406,41 @@ namespace Step30 template DGMethod::DGMethod (const bool anisotropic) - : - mapping (), - // Change here for DG - // methods of - // different degrees. - degree(1), - fe (degree), - dof_handler (triangulation), - anisotropic_threshold_ratio(3.), - anisotropic(anisotropic), - // As beta is a - // linear function, - // we can choose the - // degree of the - // quadrature for - // which the - // resulting - // integration is - // correct. Thus, we - // choose to use - // degree+1 - // gauss points, - // which enables us - // to integrate - // exactly - // polynomials of - // degree - // 2*degree+1, - // enough for all the - // integrals we will - // perform in this - // program. 
- quadrature (degree+1), - face_quadrature (degree+1), - dg () + : + mapping (), + // Change here for DG + // methods of + // different degrees. + degree(1), + fe (degree), + dof_handler (triangulation), + anisotropic_threshold_ratio(3.), + anisotropic(anisotropic), + // As beta is a + // linear function, + // we can choose the + // degree of the + // quadrature for + // which the + // resulting + // integration is + // correct. Thus, we + // choose to use + // degree+1 + // gauss points, + // which enables us + // to integrate + // exactly + // polynomials of + // degree + // 2*degree+1, + // enough for all the + // integrals we will + // perform in this + // program. + quadrature (degree+1), + face_quadrature (degree+1), + dg () {} @@ -456,9 +456,9 @@ namespace Step30 { dof_handler.distribute_dofs (fe); sparsity_pattern.reinit (dof_handler.n_dofs(), - dof_handler.n_dofs(), - (GeometryInfo::faces_per_cell - *GeometryInfo::max_children_per_face+1)*fe.dofs_per_cell); + dof_handler.n_dofs(), + (GeometryInfo::faces_per_cell + *GeometryInfo::max_children_per_face+1)*fe.dofs_per_cell); DoFTools::make_flux_sparsity_pattern (dof_handler, sparsity_pattern); @@ -471,22 +471,22 @@ namespace Step30 } - // @sect4{Function: assemble_system2} - // - // We proceed with the - // assemble_system2 function that - // implements the DG discretization in its - // second version. This function is very - // similar to the assemble_system2 - // function from step-12, even the four cases - // considered for the neighbor-relations of a - // cell are the same, namely a) cell is at the - // boundary, b) there are finer neighboring - // cells, c) the neighbor is neither coarser - // nor finer and d) the neighbor is coarser. - // However, the way in which we decide upon - // which case we have are modified in the way - // described in the introduction. + // @sect4{Function: assemble_system2} + // + // We proceed with the + // assemble_system2 function that + // implements the DG discretization in its + // second version. This function is very + // similar to the assemble_system2 + // function from step-12, even the four cases + // considered for the neighbor-relations of a + // cell are the same, namely a) cell is at the + // boundary, b) there are finer neighboring + // cells, c) the neighbor is neither coarser + // nor finer and d) the neighbor is coarser. + // However, the way in which we decide upon + // which case we have are modified in the way + // described in the introduction. 
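    // Editorial sketch, not part of the tab-to-space patch: the four
    // neighbor-relation cases a)-d) described above reduce to the following
    // skeleton of tests, with cell and face_no as in the loops of the
    // function below:
    if (cell->face(face_no)->at_boundary())
      { /* case a): the face lies on the boundary */ }
    else if (cell->face(face_no)->has_children())
      { /* case b): the neighbor is refined; loop over its subfaces */ }
    else if (!cell->neighbor_is_coarser(face_no))
      { /* case c): the neighbor is exactly as refined as this cell */ }
    else
      { /* case d): the neighbor is coarser; handled from the finer side */ }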
template void DGMethod::assemble_system2 () { @@ -495,14 +495,14 @@ namespace Step30 std::vector dofs_neighbor (dofs_per_cell); const UpdateFlags update_flags = update_values - | update_gradients - | update_quadrature_points - | update_JxW_values; + | update_gradients + | update_quadrature_points + | update_JxW_values; const UpdateFlags face_update_flags = update_values - | update_quadrature_points - | update_JxW_values - | update_normal_vectors; + | update_quadrature_points + | update_JxW_values + | update_normal_vectors; const UpdateFlags neighbor_face_update_flags = update_values; @@ -529,202 +529,202 @@ namespace Step30 endc = dof_handler.end(); for (;cell!=endc; ++cell) { - ui_vi_matrix = 0; - cell_vector = 0; - - fe_v.reinit (cell); - - dg.assemble_cell_term(fe_v, - ui_vi_matrix, - cell_vector); - - cell->get_dof_indices (dofs); - - for (unsigned int face_no=0; face_no::faces_per_cell; ++face_no) - { - typename DoFHandler::face_iterator face= - cell->face(face_no); - - // Case a) - if (face->at_boundary()) - { - fe_v_face.reinit (cell, face_no); - - dg.assemble_boundary_term(fe_v_face, - ui_vi_matrix, - cell_vector); - } - else - { - Assert (cell->neighbor(face_no).state() == IteratorState::valid, - ExcInternalError()); - typename DoFHandler::cell_iterator neighbor= - cell->neighbor(face_no); - // Case b), we decide that there - // are finer cells as neighbors - // by asking the face, whether it - // has children. if so, then - // there must also be finer cells - // which are children or farther - // offsprings of our neighbor. - if (face->has_children()) - { - // We need to know, which of - // the neighbors faces points - // in the direction of our - // cell. Using the @p - // neighbor_face_no function - // we get this information - // for both coarser and - // non-coarser neighbors. - const unsigned int neighbor2= - cell->neighbor_face_no(face_no); - - // Now we loop over all - // subfaces, i.e. the - // children and possibly - // grandchildren of the - // current face. - for (unsigned int subface_no=0; - subface_nonumber_of_children(); ++subface_no) - { - // To get the cell behind - // the current subface we - // can use the @p - // neighbor_child_on_subface - // function. it takes - // care of all the - // complicated situations - // of anisotropic - // refinement and - // non-standard faces. - typename DoFHandler::cell_iterator neighbor_child - = cell->neighbor_child_on_subface (face_no, subface_no); - Assert (!neighbor_child->has_children(), ExcInternalError()); - - // The remaining part of - // this case is - // unchanged. - ue_vi_matrix = 0; - ui_ve_matrix = 0; - ue_ve_matrix = 0; - - fe_v_subface.reinit (cell, face_no, subface_no); - fe_v_face_neighbor.reinit (neighbor_child, neighbor2); - - dg.assemble_face_term2(fe_v_subface, - fe_v_face_neighbor, - ui_vi_matrix, - ue_vi_matrix, - ui_ve_matrix, - ue_ve_matrix); - - neighbor_child->get_dof_indices (dofs_neighbor); - - for (unsigned int i=0; ineighbor_is_coarser(face_no) && - (neighbor->index() > cell->index() || - (neighbor->level() < cell->level() && - neighbor->index() == cell->index()))) - { - // Here we know, that the - // neigbor is not coarser - // so we can use the - // usual @p - // neighbor_of_neighbor - // function. However, we - // could also use the - // more general @p - // neighbor_face_no - // function. 
- const unsigned int neighbor2=cell->neighbor_of_neighbor(face_no); - - ue_vi_matrix = 0; - ui_ve_matrix = 0; - ue_ve_matrix = 0; - - fe_v_face.reinit (cell, face_no); - fe_v_face_neighbor.reinit (neighbor, neighbor2); - - dg.assemble_face_term2(fe_v_face, - fe_v_face_neighbor, - ui_vi_matrix, - ue_vi_matrix, - ui_ve_matrix, - ue_ve_matrix); - - neighbor->get_dof_indices (dofs_neighbor); - - for (unsigned int i=0; iget_dof_indices (dofs); + + for (unsigned int face_no=0; face_no::faces_per_cell; ++face_no) + { + typename DoFHandler::face_iterator face= + cell->face(face_no); + + // Case a) + if (face->at_boundary()) + { + fe_v_face.reinit (cell, face_no); + + dg.assemble_boundary_term(fe_v_face, + ui_vi_matrix, + cell_vector); + } + else + { + Assert (cell->neighbor(face_no).state() == IteratorState::valid, + ExcInternalError()); + typename DoFHandler::cell_iterator neighbor= + cell->neighbor(face_no); + // Case b), we decide that there + // are finer cells as neighbors + // by asking the face, whether it + // has children. if so, then + // there must also be finer cells + // which are children or farther + // offsprings of our neighbor. + if (face->has_children()) + { + // We need to know, which of + // the neighbors faces points + // in the direction of our + // cell. Using the @p + // neighbor_face_no function + // we get this information + // for both coarser and + // non-coarser neighbors. + const unsigned int neighbor2= + cell->neighbor_face_no(face_no); + + // Now we loop over all + // subfaces, i.e. the + // children and possibly + // grandchildren of the + // current face. + for (unsigned int subface_no=0; + subface_nonumber_of_children(); ++subface_no) + { + // To get the cell behind + // the current subface we + // can use the @p + // neighbor_child_on_subface + // function. it takes + // care of all the + // complicated situations + // of anisotropic + // refinement and + // non-standard faces. + typename DoFHandler::cell_iterator neighbor_child + = cell->neighbor_child_on_subface (face_no, subface_no); + Assert (!neighbor_child->has_children(), ExcInternalError()); + + // The remaining part of + // this case is + // unchanged. + ue_vi_matrix = 0; + ui_ve_matrix = 0; + ue_ve_matrix = 0; + + fe_v_subface.reinit (cell, face_no, subface_no); + fe_v_face_neighbor.reinit (neighbor_child, neighbor2); + + dg.assemble_face_term2(fe_v_subface, + fe_v_face_neighbor, + ui_vi_matrix, + ue_vi_matrix, + ui_ve_matrix, + ue_ve_matrix); + + neighbor_child->get_dof_indices (dofs_neighbor); + + for (unsigned int i=0; ineighbor_is_coarser(face_no) && + (neighbor->index() > cell->index() || + (neighbor->level() < cell->level() && + neighbor->index() == cell->index()))) + { + // Here we know, that the + // neigbor is not coarser + // so we can use the + // usual @p + // neighbor_of_neighbor + // function. However, we + // could also use the + // more general @p + // neighbor_face_no + // function. 
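    // Editorial note, not part of the tab-to-space patch: for a neighbor
    // that is not coarser, the two functions mentioned in the comment above
    // return the same face number, so on such faces the following assertion
    // (a sketch only) would hold:
    Assert (cell->neighbor_of_neighbor(face_no) ==
            cell->neighbor_face_no(face_no),
            ExcInternalError());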
+ const unsigned int neighbor2=cell->neighbor_of_neighbor(face_no); + + ue_vi_matrix = 0; + ui_ve_matrix = 0; + ue_ve_matrix = 0; + + fe_v_face.reinit (cell, face_no); + fe_v_face_neighbor.reinit (neighbor, neighbor2); + + dg.assemble_face_term2(fe_v_face, + fe_v_face_neighbor, + ui_vi_matrix, + ue_vi_matrix, + ui_ve_matrix, + ue_ve_matrix); + + neighbor->get_dof_indices (dofs_neighbor); + + for (unsigned int i=0; i void DGMethod::solve (Vector &solution) { @@ -736,72 +736,72 @@ namespace Step30 preconditioner.initialize(system_matrix, fe.dofs_per_cell); solver.solve (system_matrix, solution, right_hand_side, - preconditioner); + preconditioner); } - // @sect3{Refinement} - // - // We refine the grid according to the same - // simple refinement criterion used in step-12, - // namely an approximation to the - // gradient of the solution. + // @sect3{Refinement} + // + // We refine the grid according to the same + // simple refinement criterion used in step-12, + // namely an approximation to the + // gradient of the solution. template void DGMethod::refine_grid () { Vector gradient_indicator (triangulation.n_active_cells()); - // We approximate the gradient, + // We approximate the gradient, DerivativeApproximation::approximate_gradient (mapping, - dof_handler, - solution2, - gradient_indicator); + dof_handler, + solution2, + gradient_indicator); - // and scale it to obtain an error indicator. + // and scale it to obtain an error indicator. typename DoFHandler::active_cell_iterator cell = dof_handler.begin_active(), endc = dof_handler.end(); for (unsigned int cell_no=0; cell!=endc; ++cell, ++cell_no) gradient_indicator(cell_no)*=std::pow(cell->diameter(), 1+1.0*dim/2); - // Then we use this indicator to flag the 30 - // percent of the cells with highest error - // indicator to be refined. + // Then we use this indicator to flag the 30 + // percent of the cells with highest error + // indicator to be refined. GridRefinement::refine_and_coarsen_fixed_number (triangulation, - gradient_indicator, - 0.3, 0.1); - // Now the refinement flags are set for those - // cells with a large error indicator. If - // nothing is done to change this, those - // cells will be refined isotropically. If - // the @p anisotropic flag given to this - // function is set, we now call the - // set_anisotropic_flags() function, which - // uses the jump indicator to reset some of - // the refinement flags to anisotropic - // refinement. + gradient_indicator, + 0.3, 0.1); + // Now the refinement flags are set for those + // cells with a large error indicator. If + // nothing is done to change this, those + // cells will be refined isotropically. If + // the @p anisotropic flag given to this + // function is set, we now call the + // set_anisotropic_flags() function, which + // uses the jump indicator to reset some of + // the refinement flags to anisotropic + // refinement. if (anisotropic) set_anisotropic_flags(); - // Now execute the refinement considering - // anisotropic as well as isotropic - // refinement flags. + // Now execute the refinement considering + // anisotropic as well as isotropic + // refinement flags. triangulation.execute_coarsening_and_refinement (); } - // Once an error indicator has been evaluated - // and the cells with largerst error are - // flagged for refinement we want to loop over - // the flagged cells again to decide whether - // they need isotropic refinemnt or whether - // anisotropic refinement is more - // appropriate. 
This is the anisotropic jump - // indicator explained in the introduction. + // Once an error indicator has been evaluated + // and the cells with largerst error are + // flagged for refinement we want to loop over + // the flagged cells again to decide whether + // they need isotropic refinemnt or whether + // anisotropic refinement is more + // appropriate. This is the anisotropic jump + // indicator explained in the introduction. template void DGMethod::set_anisotropic_flags () { - // We want to evaluate the jump over faces of - // the flagged cells, so we need some objects - // to evaluate values of the solution on - // faces. + // We want to evaluate the jump over faces of + // the flagged cells, so we need some objects + // to evaluate values of the solution on + // faces. UpdateFlags face_update_flags = UpdateFlags(update_values | update_JxW_values); @@ -809,234 +809,234 @@ namespace Step30 FESubfaceValues fe_v_subface (mapping, fe, face_quadrature, face_update_flags); FEFaceValues fe_v_face_neighbor (mapping, fe, face_quadrature, update_values); - // Now we need to loop over all active cells. + // Now we need to loop over all active cells. typename DoFHandler::active_cell_iterator cell=dof_handler.begin_active(), - endc=dof_handler.end(); + endc=dof_handler.end(); for (; cell!=endc; ++cell) - // We only need to consider cells which are - // flaged for refinement. + // We only need to consider cells which are + // flaged for refinement. if (cell->refine_flag_set()) - { - Point jump; - Point area; - - for (unsigned int face_no=0; face_no::faces_per_cell; ++face_no) - { - typename DoFHandler::face_iterator face = cell->face(face_no); - - if (!face->at_boundary()) - { - Assert (cell->neighbor(face_no).state() == IteratorState::valid, ExcInternalError()); - typename DoFHandler::cell_iterator neighbor = cell->neighbor(face_no); - - std::vector u (fe_v_face.n_quadrature_points); - std::vector u_neighbor (fe_v_face.n_quadrature_points); - - // The four cases of different - // neighbor relations senn in - // the assembly routines are - // repeated much in the same - // way here. - if (face->has_children()) - { - // The neighbor is refined. - // First we store the - // information, which of - // the neighbor's faces - // points in the direction - // of our current - // cell. This property is - // inherited to the - // children. - unsigned int neighbor2=cell->neighbor_face_no(face_no); - // Now we loop over all subfaces, - for (unsigned int subface_no=0; subface_nonumber_of_children(); ++subface_no) - { - // get an iterator - // pointing to the cell - // behind the present - // subface... - typename DoFHandler::cell_iterator neighbor_child = cell->neighbor_child_on_subface(face_no,subface_no); - Assert (!neighbor_child->has_children(), ExcInternalError()); - // ... and reinit the - // respective - // FEFaceValues und - // FESubFaceValues - // objects. - fe_v_subface.reinit (cell, face_no, subface_no); - fe_v_face_neighbor.reinit (neighbor_child, neighbor2); - // We obtain the function values - fe_v_subface.get_function_values(solution2, u); - fe_v_face_neighbor.get_function_values(solution2, u_neighbor); - // as well as the - // quadrature weights, - // multiplied by the - // jacobian determinant. - const std::vector &JxW = fe_v_subface.get_JxW_values (); - // Now we loop over all - // quadrature points - for (unsigned int x=0; xdim - // components. 
- jump[face_no/2]+=std::fabs(u[x]-u_neighbor[x])*JxW[x]; - // We also sum up - // the scaled - // weights to - // obtain the - // measure of the - // face. - area[face_no/2]+=JxW[x]; - } - } - } - else - { - if (!cell->neighbor_is_coarser(face_no)) - { - // Our current cell and - // the neighbor have - // the same refinement - // along the face under - // consideration. Apart - // from that, we do - // much the same as - // with one of the - // subcells in the - // above case. - unsigned int neighbor2=cell->neighbor_of_neighbor(face_no); - - fe_v_face.reinit (cell, face_no); - fe_v_face_neighbor.reinit (neighbor, neighbor2); - - fe_v_face.get_function_values(solution2, u); - fe_v_face_neighbor.get_function_values(solution2, u_neighbor); - - const std::vector &JxW = fe_v_face.get_JxW_values (); - - for (unsigned int x=0; x neighbor_face_subface - = cell->neighbor_of_coarser_neighbor(face_no); - Assert (neighbor_face_subface.first::faces_per_cell, ExcInternalError()); - Assert (neighbor_face_subface.secondface(neighbor_face_subface.first)->number_of_children(), - ExcInternalError()); - Assert (neighbor->neighbor_child_on_subface(neighbor_face_subface.first, neighbor_face_subface.second) - == cell, ExcInternalError()); - - fe_v_face.reinit (cell, face_no); - fe_v_subface.reinit (neighbor, neighbor_face_subface.first, - neighbor_face_subface.second); - - fe_v_face.get_function_values(solution2, u); - fe_v_subface.get_function_values(solution2, u_neighbor); - - const std::vector &JxW = fe_v_face.get_JxW_values (); - - for (unsigned int x=0; xdim - // coordinate directions of the unit - // cell and compare the average jump - // over the faces orthogional to that - // direction with the average jumnps - // over faces orthogonal to the - // remining direction(s). If the first - // is larger than the latter by a given - // factor, we refine only along hat - // axis. Otherwise we leave the - // refinement flag unchanged, resulting - // in isotropic refinement. - for (unsigned int i=0; i anisotropic_threshold_ratio*(sum_of_average_jumps-average_jumps[i])) - cell->set_refine_flag(RefinementCase::cut_axis(i)); - } + { + Point jump; + Point area; + + for (unsigned int face_no=0; face_no::faces_per_cell; ++face_no) + { + typename DoFHandler::face_iterator face = cell->face(face_no); + + if (!face->at_boundary()) + { + Assert (cell->neighbor(face_no).state() == IteratorState::valid, ExcInternalError()); + typename DoFHandler::cell_iterator neighbor = cell->neighbor(face_no); + + std::vector u (fe_v_face.n_quadrature_points); + std::vector u_neighbor (fe_v_face.n_quadrature_points); + + // The four cases of different + // neighbor relations senn in + // the assembly routines are + // repeated much in the same + // way here. + if (face->has_children()) + { + // The neighbor is refined. + // First we store the + // information, which of + // the neighbor's faces + // points in the direction + // of our current + // cell. This property is + // inherited to the + // children. + unsigned int neighbor2=cell->neighbor_face_no(face_no); + // Now we loop over all subfaces, + for (unsigned int subface_no=0; subface_nonumber_of_children(); ++subface_no) + { + // get an iterator + // pointing to the cell + // behind the present + // subface... + typename DoFHandler::cell_iterator neighbor_child = cell->neighbor_child_on_subface(face_no,subface_no); + Assert (!neighbor_child->has_children(), ExcInternalError()); + // ... and reinit the + // respective + // FEFaceValues und + // FESubFaceValues + // objects. 
+ fe_v_subface.reinit (cell, face_no, subface_no); + fe_v_face_neighbor.reinit (neighbor_child, neighbor2); + // We obtain the function values + fe_v_subface.get_function_values(solution2, u); + fe_v_face_neighbor.get_function_values(solution2, u_neighbor); + // as well as the + // quadrature weights, + // multiplied by the + // jacobian determinant. + const std::vector &JxW = fe_v_subface.get_JxW_values (); + // Now we loop over all + // quadrature points + for (unsigned int x=0; xdim + // components. + jump[face_no/2]+=std::fabs(u[x]-u_neighbor[x])*JxW[x]; + // We also sum up + // the scaled + // weights to + // obtain the + // measure of the + // face. + area[face_no/2]+=JxW[x]; + } + } + } + else + { + if (!cell->neighbor_is_coarser(face_no)) + { + // Our current cell and + // the neighbor have + // the same refinement + // along the face under + // consideration. Apart + // from that, we do + // much the same as + // with one of the + // subcells in the + // above case. + unsigned int neighbor2=cell->neighbor_of_neighbor(face_no); + + fe_v_face.reinit (cell, face_no); + fe_v_face_neighbor.reinit (neighbor, neighbor2); + + fe_v_face.get_function_values(solution2, u); + fe_v_face_neighbor.get_function_values(solution2, u_neighbor); + + const std::vector &JxW = fe_v_face.get_JxW_values (); + + for (unsigned int x=0; x neighbor_face_subface + = cell->neighbor_of_coarser_neighbor(face_no); + Assert (neighbor_face_subface.first::faces_per_cell, ExcInternalError()); + Assert (neighbor_face_subface.secondface(neighbor_face_subface.first)->number_of_children(), + ExcInternalError()); + Assert (neighbor->neighbor_child_on_subface(neighbor_face_subface.first, neighbor_face_subface.second) + == cell, ExcInternalError()); + + fe_v_face.reinit (cell, face_no); + fe_v_subface.reinit (neighbor, neighbor_face_subface.first, + neighbor_face_subface.second); + + fe_v_face.get_function_values(solution2, u); + fe_v_subface.get_function_values(solution2, u_neighbor); + + const std::vector &JxW = fe_v_face.get_JxW_values (); + + for (unsigned int x=0; xdim + // coordinate directions of the unit + // cell and compare the average jump + // over the faces orthogional to that + // direction with the average jumnps + // over faces orthogonal to the + // remining direction(s). If the first + // is larger than the latter by a given + // factor, we refine only along hat + // axis. Otherwise we leave the + // refinement flag unchanged, resulting + // in isotropic refinement. + for (unsigned int i=0; i anisotropic_threshold_ratio*(sum_of_average_jumps-average_jumps[i])) + cell->set_refine_flag(RefinementCase::cut_axis(i)); + } } - // @sect3{The Rest} - // - // The remaining part of the program is again - // unmodified. Only the creation of the - // original triangulation is changed in order - // to reproduce the new domain. + // @sect3{The Rest} + // + // The remaining part of the program is again + // unmodified. Only the creation of the + // original triangulation is changed in order + // to reproduce the new domain. template void DGMethod::output_results (const unsigned int cycle) const { @@ -1073,7 +1073,7 @@ namespace Step30 filename += refine_type + ".gnuplot"; std::cout << "Writing solution to <" << filename << ">..." 
- << std::endl; + << std::endl; std::ofstream gnuplot_output (filename.c_str()); DataOut data_out; @@ -1091,51 +1091,51 @@ namespace Step30 { for (unsigned int cycle=0; cycle<6; ++cycle) { - std::cout << "Cycle " << cycle << ':' << std::endl; - - if (cycle == 0) - { - // Create the rectangular domain. - Point p1,p2; - p1(0)=0; - p1(0)=-1; - for (unsigned int i=0; i repetitions(dim,1); - repetitions[0]=2; - GridGenerator::subdivided_hyper_rectangle (triangulation, - repetitions, - p1, - p2); - - triangulation.refine_global (5-dim); - } - else - refine_grid (); - - - std::cout << " Number of active cells: " - << triangulation.n_active_cells() - << std::endl; - - setup_system (); - - std::cout << " Number of degrees of freedom: " - << dof_handler.n_dofs() - << std::endl; - - Timer assemble_timer; - assemble_system2 (); - std::cout << "Time of assemble_system2: " - << assemble_timer() - << std::endl; - solve (solution2); - - output_results (cycle); + std::cout << "Cycle " << cycle << ':' << std::endl; + + if (cycle == 0) + { + // Create the rectangular domain. + Point p1,p2; + p1(0)=0; + p1(0)=-1; + for (unsigned int i=0; i repetitions(dim,1); + repetitions[0]=2; + GridGenerator::subdivided_hyper_rectangle (triangulation, + repetitions, + p1, + p2); + + triangulation.refine_global (5-dim); + } + else + refine_grid (); + + + std::cout << " Number of active cells: " + << triangulation.n_active_cells() + << std::endl; + + setup_system (); + + std::cout << " Number of degrees of freedom: " + << dof_handler.n_dofs() + << std::endl; + + Timer assemble_timer; + assemble_system2 (); + std::cout << "Time of assemble_system2: " + << assemble_timer() + << std::endl; + solve (solution2); + + output_results (cycle); } } } @@ -1149,51 +1149,51 @@ int main () using namespace dealii; using namespace Step30; - // If you want to run the program in 3D, - // simply change the following line to - // const unsigned int dim = 3;. + // If you want to run the program in 3D, + // simply change the following line to + // const unsigned int dim = 3;. const unsigned int dim = 2; { - // First, we perform a run with - // isotropic refinement. - std::cout << "Performing a " << dim << "D run with isotropic refinement..." << std::endl - << "------------------------------------------------" << std::endl; - DGMethod dgmethod_iso(false); - dgmethod_iso.run (); + // First, we perform a run with + // isotropic refinement. + std::cout << "Performing a " << dim << "D run with isotropic refinement..." << std::endl + << "------------------------------------------------" << std::endl; + DGMethod dgmethod_iso(false); + dgmethod_iso.run (); } { - // Now we do a second run, this time - // with anisotropic refinement. - std::cout << std::endl - << "Performing a " << dim << "D run with anisotropic refinement..." << std::endl - << "--------------------------------------------------" << std::endl; - DGMethod dgmethod_aniso(true); - dgmethod_aniso.run (); + // Now we do a second run, this time + // with anisotropic refinement. + std::cout << std::endl + << "Performing a " << dim << "D run with anisotropic refinement..." 
<< std::endl + << "--------------------------------------------------" << std::endl; + DGMethod dgmethod_aniso(true); + dgmethod_aniso.run (); } } catch (std::exception &exc) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Exception on processing: " << std::endl - << exc.what() << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << exc.what() << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } catch (...) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Unknown exception!" << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; }; diff --git a/deal.II/examples/step-31/step-31.cc b/deal.II/examples/step-31/step-31.cc index de61b654cc..179522b742 100644 --- a/deal.II/examples/step-31/step-31.cc +++ b/deal.II/examples/step-31/step-31.cc @@ -1052,26 +1052,26 @@ namespace Step31 // compared to the velocity portion, so the // additional work does not pay off. // - // We then proceed with the generation of the - // hanging node constraints that arise from - // adaptive grid refinement for both - // DoFHandler objects. For the velocity, we - // impose no-flux boundary conditions - // $\mathbf{u}\cdot \mathbf{n}=0$ by adding - // constraints to the object that already - // stores the hanging node constraints - // matrix. The second parameter in the - // function describes the first of the - // velocity components in the total dof - // vector, which is zero here. The variable - // no_normal_flux_boundaries - // denotes the boundary indicators for which - // to set the no flux boundary conditions; - // here, this is boundary indicator zero. - // - // After having done so, we count the number - // of degrees of freedom in the various - // blocks: + // We then proceed with the generation of the + // hanging node constraints that arise from + // adaptive grid refinement for both + // DoFHandler objects. For the velocity, we + // impose no-flux boundary conditions + // $\mathbf{u}\cdot \mathbf{n}=0$ by adding + // constraints to the object that already + // stores the hanging node constraints + // matrix. The second parameter in the + // function describes the first of the + // velocity components in the total dof + // vector, which is zero here. The variable + // no_normal_flux_boundaries + // denotes the boundary indicators for which + // to set the no flux boundary conditions; + // here, this is boundary indicator zero. 
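    // Editorial sketch, not part of the tab-to-space patch: if the no-flux
    // condition were needed on several parts of the boundary, one would
    // simply insert each corresponding indicator into the set used in the
    // function below; the second indicator here is purely hypothetical:
    no_normal_flux_boundaries.insert (0);
    no_normal_flux_boundaries.insert (2);  // hypothetical additional boundary id
    VectorTools::compute_no_normal_flux_constraints (stokes_dof_handler, 0,
                                                     no_normal_flux_boundaries,
                                                     stokes_constraints);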
+ // + // After having done so, we count the number + // of degrees of freedom in the various + // blocks: template void BoussinesqFlowProblem::setup_dofs () { @@ -1084,12 +1084,12 @@ namespace Step31 stokes_constraints.clear (); DoFTools::make_hanging_node_constraints (stokes_dof_handler, - stokes_constraints); + stokes_constraints); std::set no_normal_flux_boundaries; no_normal_flux_boundaries.insert (0); VectorTools::compute_no_normal_flux_constraints (stokes_dof_handler, 0, - no_normal_flux_boundaries, - stokes_constraints); + no_normal_flux_boundaries, + stokes_constraints); stokes_constraints.close (); } { @@ -1097,102 +1097,102 @@ namespace Step31 temperature_constraints.clear (); DoFTools::make_hanging_node_constraints (temperature_dof_handler, - temperature_constraints); + temperature_constraints); temperature_constraints.close (); } std::vector stokes_dofs_per_block (2); DoFTools::count_dofs_per_block (stokes_dof_handler, stokes_dofs_per_block, - stokes_sub_blocks); + stokes_sub_blocks); const unsigned int n_u = stokes_dofs_per_block[0], - n_p = stokes_dofs_per_block[1], - n_T = temperature_dof_handler.n_dofs(); + n_p = stokes_dofs_per_block[1], + n_T = temperature_dof_handler.n_dofs(); std::cout << "Number of active cells: " - << triangulation.n_active_cells() - << " (on " - << triangulation.n_levels() - << " levels)" - << std::endl - << "Number of degrees of freedom: " - << n_u + n_p + n_T - << " (" << n_u << '+' << n_p << '+'<< n_T <<')' - << std::endl - << std::endl; - - // The next step is to create the sparsity - // pattern for the Stokes and temperature - // system matrices as well as the - // preconditioner matrix from which we - // build the Stokes preconditioner. As in - // step-22, we choose to create the pattern - // not as in the first few tutorial - // programs, but by using the blocked - // version of CompressedSimpleSparsityPattern. - // The reason for doing this is mainly - // memory, that is, the SparsityPattern - // class would consume too much memory when - // used in three spatial dimensions as we - // intend to do for this program. - // - // So, we first release the memory stored - // in the matrices, then set up an object - // of type - // BlockCompressedSimpleSparsityPattern - // consisting of $2\times 2$ blocks (for - // the Stokes system matrix and - // preconditioner) or - // CompressedSimpleSparsityPattern (for - // the temperature part). We then fill - // these objects with the nonzero - // pattern, taking into account that for - // the Stokes system matrix, there are no - // entries in the pressure-pressure block - // (but all velocity vector components - // couple with each other and with the - // pressure). Similarly, in the Stokes - // preconditioner matrix, only the - // diagonal blocks are nonzero, since we - // use the vector Laplacian as discussed - // in the introduction. This operator - // only couples each vector component of - // the Laplacian with itself, but not - // with the other vector - // components. (Application of the - // constraints resulting from the no-flux - // boundary conditions will couple vector - // components at the boundary again, - // however.) - // - // When generating the sparsity pattern, - // we directly apply the constraints from - // hanging nodes and no-flux boundary - // conditions. 
This approach was already - // used in step-27, but is different from - // the one in early tutorial programs - // where we first built the original - // sparsity pattern and only then added - // the entries resulting from - // constraints. The reason for doing so - // is that later during assembly we are - // going to distribute the constraints - // immediately when transferring local to - // global dofs. Consequently, there will - // be no data written at positions of - // constrained degrees of freedom, so we - // can let the - // DoFTools::make_sparsity_pattern - // function omit these entries by setting - // the last boolean flag to - // false. Once the sparsity - // pattern is ready, we can use it to - // initialize the Trilinos - // matrices. Since the Trilinos matrices - // store the sparsity pattern internally, - // there is no need to keep the sparsity - // pattern around after the - // initialization of the matrix. + << triangulation.n_active_cells() + << " (on " + << triangulation.n_levels() + << " levels)" + << std::endl + << "Number of degrees of freedom: " + << n_u + n_p + n_T + << " (" << n_u << '+' << n_p << '+'<< n_T <<')' + << std::endl + << std::endl; + + // The next step is to create the sparsity + // pattern for the Stokes and temperature + // system matrices as well as the + // preconditioner matrix from which we + // build the Stokes preconditioner. As in + // step-22, we choose to create the pattern + // not as in the first few tutorial + // programs, but by using the blocked + // version of CompressedSimpleSparsityPattern. + // The reason for doing this is mainly + // memory, that is, the SparsityPattern + // class would consume too much memory when + // used in three spatial dimensions as we + // intend to do for this program. + // + // So, we first release the memory stored + // in the matrices, then set up an object + // of type + // BlockCompressedSimpleSparsityPattern + // consisting of $2\times 2$ blocks (for + // the Stokes system matrix and + // preconditioner) or + // CompressedSimpleSparsityPattern (for + // the temperature part). We then fill + // these objects with the nonzero + // pattern, taking into account that for + // the Stokes system matrix, there are no + // entries in the pressure-pressure block + // (but all velocity vector components + // couple with each other and with the + // pressure). Similarly, in the Stokes + // preconditioner matrix, only the + // diagonal blocks are nonzero, since we + // use the vector Laplacian as discussed + // in the introduction. This operator + // only couples each vector component of + // the Laplacian with itself, but not + // with the other vector + // components. (Application of the + // constraints resulting from the no-flux + // boundary conditions will couple vector + // components at the boundary again, + // however.) + // + // When generating the sparsity pattern, + // we directly apply the constraints from + // hanging nodes and no-flux boundary + // conditions. This approach was already + // used in step-27, but is different from + // the one in early tutorial programs + // where we first built the original + // sparsity pattern and only then added + // the entries resulting from + // constraints. The reason for doing so + // is that later during assembly we are + // going to distribute the constraints + // immediately when transferring local to + // global dofs. 
Consequently, there will + // be no data written at positions of + // constrained degrees of freedom, so we + // can let the + // DoFTools::make_sparsity_pattern + // function omit these entries by setting + // the last boolean flag to + // false. Once the sparsity + // pattern is ready, we can use it to + // initialize the Trilinos + // matrices. Since the Trilinos matrices + // store the sparsity pattern internally, + // there is no need to keep the sparsity + // pattern around after the + // initialization of the matrix. stokes_block_sizes.resize (2); stokes_block_sizes[0] = n_u; stokes_block_sizes[1] = n_p; @@ -1211,14 +1211,14 @@ namespace Step31 Table<2,DoFTools::Coupling> coupling (dim+1, dim+1); for (unsigned int c=0; c coupling (dim+1, dim+1); for (unsigned int c=0; cphi_grad_u and - // phi_p are going to hold the - // values of the basis functions in order to - // faster build up the local matrices, as was - // already done in step-22. Before we start - // the loop over all active cells, we have to - // specify which components are pressure and - // which are velocity. + // @sect4{BoussinesqFlowProblem::assemble_stokes_preconditioner} + // + // This function assembles the matrix we use + // for preconditioning the Stokes + // system. What we need are a vector Laplace + // matrix on the velocity components and a + // mass matrix weighted by $\eta^{-1}$ on the + // pressure component. We start by generating + // a quadrature object of appropriate order, + // the FEValues object that can give values + // and gradients at the quadrature points + // (together with quadrature weights). Next + // we create data structures for the cell + // matrix and the relation between local and + // global DoFs. The vectors + // phi_grad_u and + // phi_p are going to hold the + // values of the basis functions in order to + // faster build up the local matrices, as was + // already done in step-22. Before we start + // the loop over all active cells, we have to + // specify which components are pressure and + // which are velocity. template void BoussinesqFlowProblem::assemble_stokes_preconditioner () @@ -1339,9 +1339,9 @@ namespace Step31 const QGauss quadrature_formula(stokes_degree+2); FEValues stokes_fe_values (stokes_fe, quadrature_formula, - update_JxW_values | - update_values | - update_gradients); + update_JxW_values | + update_values | + update_gradients); const unsigned int dofs_per_cell = stokes_fe.dofs_per_cell; const unsigned int n_q_points = quadrature_formula.size(); @@ -1360,100 +1360,100 @@ namespace Step31 endc = stokes_dof_handler.end(); for (; cell!=endc; ++cell) { - stokes_fe_values.reinit (cell); - local_matrix = 0; - - // The creation of the local matrix is - // rather simple. There are only a - // Laplace term (on the velocity) and a - // mass matrix weighted by $\eta^{-1}$ - // to be generated, so the creation of - // the local matrix is done in two - // lines. Once the local matrix is - // ready (loop over rows and columns in - // the local matrix on each quadrature - // point), we get the local DoF indices - // and write the local information into - // the global matrix. We do this as in - // step-27, i.e. we directly apply the - // constraints from hanging nodes - // locally. By doing so, we don't have - // to do that afterwards, and we don't - // also write into entries of the - // matrix that will actually be set to - // zero again later when eliminating - // constraints. 
- for (unsigned int q=0; qget_dof_indices (local_dof_indices); - stokes_constraints.distribute_local_to_global (local_matrix, - local_dof_indices, - stokes_preconditioner_matrix); + stokes_fe_values.reinit (cell); + local_matrix = 0; + + // The creation of the local matrix is + // rather simple. There are only a + // Laplace term (on the velocity) and a + // mass matrix weighted by $\eta^{-1}$ + // to be generated, so the creation of + // the local matrix is done in two + // lines. Once the local matrix is + // ready (loop over rows and columns in + // the local matrix on each quadrature + // point), we get the local DoF indices + // and write the local information into + // the global matrix. We do this as in + // step-27, i.e. we directly apply the + // constraints from hanging nodes + // locally. By doing so, we don't have + // to do that afterwards, and we don't + // also write into entries of the + // matrix that will actually be set to + // zero again later when eliminating + // constraints. + for (unsigned int q=0; qget_dof_indices (local_dof_indices); + stokes_constraints.distribute_local_to_global (local_matrix, + local_dof_indices, + stokes_preconditioner_matrix); } } - // @sect4{BoussinesqFlowProblem::build_stokes_preconditioner} - // - // This function generates the inner - // preconditioners that are going to be used - // for the Schur complement block - // preconditioner. Since the preconditioners - // need only to be regenerated when the - // matrices change, this function does not - // have to do anything in case the matrices - // have not changed (i.e., the flag - // rebuild_stokes_preconditioner - // has the value - // false). Otherwise its first - // task is to call - // assemble_stokes_preconditioner - // to generate the preconditioner matrices. - // - // Next, we set up the preconditioner for - // the velocity-velocity matrix - // A. As explained in the - // introduction, we are going to use an - // AMG preconditioner based on a vector - // Laplace matrix $\hat{A}$ (which is - // spectrally close to the Stokes matrix - // A). Usually, the - // TrilinosWrappers::PreconditionAMG - // class can be seen as a good black-box - // preconditioner which does not need any - // special knowledge. In this case, - // however, we have to be careful: since - // we build an AMG for a vector problem, - // we have to tell the preconditioner - // setup which dofs belong to which - // vector component. We do this using the - // function - // DoFTools::extract_constant_modes, a - // function that generates a set of - // dim vectors, where each one - // has ones in the respective component - // of the vector problem and zeros - // elsewhere. Hence, these are the - // constant modes on each component, - // which explains the name of the - // variable. + // @sect4{BoussinesqFlowProblem::build_stokes_preconditioner} + // + // This function generates the inner + // preconditioners that are going to be used + // for the Schur complement block + // preconditioner. Since the preconditioners + // need only to be regenerated when the + // matrices change, this function does not + // have to do anything in case the matrices + // have not changed (i.e., the flag + // rebuild_stokes_preconditioner + // has the value + // false). Otherwise its first + // task is to call + // assemble_stokes_preconditioner + // to generate the preconditioner matrices. + // + // Next, we set up the preconditioner for + // the velocity-velocity matrix + // A. 
As explained in the + // introduction, we are going to use an + // AMG preconditioner based on a vector + // Laplace matrix $\hat{A}$ (which is + // spectrally close to the Stokes matrix + // A). Usually, the + // TrilinosWrappers::PreconditionAMG + // class can be seen as a good black-box + // preconditioner which does not need any + // special knowledge. In this case, + // however, we have to be careful: since + // we build an AMG for a vector problem, + // we have to tell the preconditioner + // setup which dofs belong to which + // vector component. We do this using the + // function + // DoFTools::extract_constant_modes, a + // function that generates a set of + // dim vectors, where each one + // has ones in the respective component + // of the vector problem and zeros + // elsewhere. Hence, these are the + // constant modes on each component, + // which explains the name of the + // variable. template void BoussinesqFlowProblem::build_stokes_preconditioner () @@ -1466,79 +1466,79 @@ namespace Step31 assemble_stokes_preconditioner (); Amg_preconditioner = std_cxx1x::shared_ptr - (new TrilinosWrappers::PreconditionAMG()); + (new TrilinosWrappers::PreconditionAMG()); std::vector > constant_modes; std::vector velocity_components (dim+1,true); velocity_components[dim] = false; DoFTools::extract_constant_modes (stokes_dof_handler, velocity_components, - constant_modes); + constant_modes); TrilinosWrappers::PreconditionAMG::AdditionalData amg_data; amg_data.constant_modes = constant_modes; - // Next, we set some more options of the - // AMG preconditioner. In particular, we - // need to tell the AMG setup that we use - // quadratic basis functions for the - // velocity matrix (this implies more - // nonzero elements in the matrix, so - // that a more rubust algorithm needs to - // be chosen internally). Moreover, we - // want to be able to control how the - // coarsening structure is build up. The - // way the Trilinos smoothed aggregation - // AMG does this is to look which matrix - // entries are of similar size as the - // diagonal entry in order to - // algebraically build a coarse-grid - // structure. By setting the parameter - // aggregation_threshold to - // 0.02, we specify that all entries that - // are more than two precent of size of - // some diagonal pivots in that row - // should form one coarse grid - // point. This parameter is rather - // ad-hoc, and some fine-tuning of it can - // influence the performance of the - // preconditioner. As a rule of thumb, - // larger values of - // aggregation_threshold - // will decrease the number of - // iterations, but increase the costs per - // iteration. A look at the Trilinos - // documentation will provide more - // information on these parameters. With - // this data set, we then initialize the - // preconditioner with the matrix we want - // it to apply to. - // - // Finally, we also initialize the - // preconditioner for the inversion of - // the pressure mass matrix. This matrix - // is symmetric and well-behaved, so we - // can chose a simple preconditioner. We - // stick with an incomple Cholesky (IC) - // factorization preconditioner, which is - // designed for symmetric matrices. We - // could have also chosen an SSOR - // preconditioner with relaxation factor - // around 1.2, but IC is cheaper for our - // example. 
We wrap the preconditioners - // into a std_cxx1x::shared_ptr - // pointer, which makes it easier to - // recreate the preconditioner next time - // around since we do not have to care - // about destroying the previously used - // object. + // Next, we set some more options of the + // AMG preconditioner. In particular, we + // need to tell the AMG setup that we use + // quadratic basis functions for the + // velocity matrix (this implies more + // nonzero elements in the matrix, so + // that a more rubust algorithm needs to + // be chosen internally). Moreover, we + // want to be able to control how the + // coarsening structure is build up. The + // way the Trilinos smoothed aggregation + // AMG does this is to look which matrix + // entries are of similar size as the + // diagonal entry in order to + // algebraically build a coarse-grid + // structure. By setting the parameter + // aggregation_threshold to + // 0.02, we specify that all entries that + // are more than two precent of size of + // some diagonal pivots in that row + // should form one coarse grid + // point. This parameter is rather + // ad-hoc, and some fine-tuning of it can + // influence the performance of the + // preconditioner. As a rule of thumb, + // larger values of + // aggregation_threshold + // will decrease the number of + // iterations, but increase the costs per + // iteration. A look at the Trilinos + // documentation will provide more + // information on these parameters. With + // this data set, we then initialize the + // preconditioner with the matrix we want + // it to apply to. + // + // Finally, we also initialize the + // preconditioner for the inversion of + // the pressure mass matrix. This matrix + // is symmetric and well-behaved, so we + // can chose a simple preconditioner. We + // stick with an incomple Cholesky (IC) + // factorization preconditioner, which is + // designed for symmetric matrices. We + // could have also chosen an SSOR + // preconditioner with relaxation factor + // around 1.2, but IC is cheaper for our + // example. We wrap the preconditioners + // into a std_cxx1x::shared_ptr + // pointer, which makes it easier to + // recreate the preconditioner next time + // around since we do not have to care + // about destroying the previously used + // object. amg_data.elliptic = true; amg_data.higher_order_elements = true; amg_data.smoother_sweeps = 2; amg_data.aggregation_threshold = 0.02; Amg_preconditioner->initialize(stokes_preconditioner_matrix.block(0,0), - amg_data); + amg_data); Mp_preconditioner = std_cxx1x::shared_ptr - (new TrilinosWrappers::PreconditionIC()); + (new TrilinosWrappers::PreconditionIC()); Mp_preconditioner->initialize(stokes_preconditioner_matrix.block(1,1)); std::cout << std::endl; @@ -1548,67 +1548,67 @@ namespace Step31 - // @sect4{BoussinesqFlowProblem::assemble_stokes_system} - // - // The time lag scheme we use for advancing - // the coupled Stokes-temperature system - // forces us to split up the assembly (and - // the solution of linear systems) into two - // step. The first one is to create the - // Stokes system matrix and right hand - // side, and the second is to create matrix - // and right hand sides for the temperature - // dofs, which depends on the result of the - // linear system for the velocity. - // - // This function is called at the beginning - // of each time step. 
In the first time step - // or if the mesh has changed, indicated by - // the rebuild_stokes_matrix, we - // need to assemble the Stokes matrix; on the - // other hand, if the mesh hasn't changed and - // the matrix is already available, this is - // not necessary and all we need to do is - // assemble the right hand side vector which - // changes in each time step. - // - // Regarding the technical details of - // implementation, not much has changed from - // step-22. We reset matrix and vector, - // create a quadrature formula on the cells, - // and then create the respective FEValues - // object. For the update flags, we require - // basis function derivatives only in case of - // a full assembly, since they are not needed - // for the right hand side; as always, - // choosing the minimal set of flags - // depending on what is currently needed - // makes the call to FEValues::reinit further - // down in the program more efficient. - // - // There is one thing that needs to be - // commented – since we have a separate - // finite element and DoFHandler for the - // temperature, we need to generate a second - // FEValues object for the proper evaluation - // of the temperature solution. This isn't - // too complicated to realize here: just use - // the temperature structures and set an - // update flag for the basis function values - // which we need for evaluation of the - // temperature solution. The only important - // part to remember here is that the same - // quadrature formula is used for both - // FEValues objects to ensure that we get - // matching information when we loop over the - // quadrature points of the two objects. - // - // The declarations proceed with some - // shortcuts for array sizes, the creation - // of the local matrix and right hand side - // as well as the vector for the indices of - // the local dofs compared to the global - // system. + // @sect4{BoussinesqFlowProblem::assemble_stokes_system} + // + // The time lag scheme we use for advancing + // the coupled Stokes-temperature system + // forces us to split up the assembly (and + // the solution of linear systems) into two + // step. The first one is to create the + // Stokes system matrix and right hand + // side, and the second is to create matrix + // and right hand sides for the temperature + // dofs, which depends on the result of the + // linear system for the velocity. + // + // This function is called at the beginning + // of each time step. In the first time step + // or if the mesh has changed, indicated by + // the rebuild_stokes_matrix, we + // need to assemble the Stokes matrix; on the + // other hand, if the mesh hasn't changed and + // the matrix is already available, this is + // not necessary and all we need to do is + // assemble the right hand side vector which + // changes in each time step. + // + // Regarding the technical details of + // implementation, not much has changed from + // step-22. We reset matrix and vector, + // create a quadrature formula on the cells, + // and then create the respective FEValues + // object. For the update flags, we require + // basis function derivatives only in case of + // a full assembly, since they are not needed + // for the right hand side; as always, + // choosing the minimal set of flags + // depending on what is currently needed + // makes the call to FEValues::reinit further + // down in the program more efficient. 
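As a small illustration of the update-flag point, the gradient flag can be requested conditionally, so a right-hand-side-only assembly skips the gradient computation entirely. This is a sketch; rebuild_matrix stands for the rebuild_stokes_matrix flag of this program:

    const UpdateFlags flags = update_values |
                              update_quadrature_points |
                              update_JxW_values |
                              (rebuild_matrix ? update_gradients : UpdateFlags(0));
    FEValues<dim> fe_values (fe, quadrature_formula, flags);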
+ // + // There is one thing that needs to be + // commented – since we have a separate + // finite element and DoFHandler for the + // temperature, we need to generate a second + // FEValues object for the proper evaluation + // of the temperature solution. This isn't + // too complicated to realize here: just use + // the temperature structures and set an + // update flag for the basis function values + // which we need for evaluation of the + // temperature solution. The only important + // part to remember here is that the same + // quadrature formula is used for both + // FEValues objects to ensure that we get + // matching information when we loop over the + // quadrature points of the two objects. + // + // The declarations proceed with some + // shortcuts for array sizes, the creation + // of the local matrix and right hand side + // as well as the vector for the indices of + // the local dofs compared to the global + // system. template void BoussinesqFlowProblem::assemble_stokes_system () { @@ -1621,17 +1621,17 @@ namespace Step31 const QGauss quadrature_formula (stokes_degree+2); FEValues stokes_fe_values (stokes_fe, quadrature_formula, - update_values | - update_quadrature_points | - update_JxW_values | - (rebuild_stokes_matrix == true - ? - update_gradients - : - UpdateFlags(0))); + update_values | + update_quadrature_points | + update_JxW_values | + (rebuild_stokes_matrix == true + ? + update_gradients + : + UpdateFlags(0))); FEValues temperature_fe_values (temperature_fe, quadrature_formula, - update_values); + update_values); const unsigned int dofs_per_cell = stokes_fe.dofs_per_cell; const unsigned int n_q_points = quadrature_formula.size(); @@ -1641,29 +1641,29 @@ namespace Step31 std::vector local_dof_indices (dofs_per_cell); - // Next we need a vector that will contain - // the values of the temperature solution - // at the previous time level at the - // quadrature points to assemble the source - // term in the right hand side of the - // momentum equation. Let's call this vector - // old_solution_values. - // - // The set of vectors we create next hold - // the evaluations of the basis functions - // as well as their gradients and - // symmetrized gradients that will be used - // for creating the matrices. Putting these - // into their own arrays rather than asking - // the FEValues object for this information - // each time it is needed is an - // optimization to accelerate the assembly - // process, see step-22 for details. - // - // The last two declarations are used to - // extract the individual blocks - // (velocity, pressure, temperature) from - // the total FE system. + // Next we need a vector that will contain + // the values of the temperature solution + // at the previous time level at the + // quadrature points to assemble the source + // term in the right hand side of the + // momentum equation. Let's call this vector + // old_solution_values. + // + // The set of vectors we create next hold + // the evaluations of the basis functions + // as well as their gradients and + // symmetrized gradients that will be used + // for creating the matrices. Putting these + // into their own arrays rather than asking + // the FEValues object for this information + // each time it is needed is an + // optimization to accelerate the assembly + // process, see step-22 for details. + // + // The last two declarations are used to + // extract the individual blocks + // (velocity, pressure, temperature) from + // the total FE system. 
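The extractor objects declared just below are later used to pull the velocity and pressure parts out of the combined shape functions. In condensed form, and using the names of this program, this looks as follows (a sketch of the calls appearing in the quadrature loop further down):

    const FEValuesExtractors::Vector velocities (0);
    const FEValuesExtractors::Scalar pressure (dim);

    // inside the quadrature-point loop:
    phi_u[k]       = stokes_fe_values[velocities].value (k, q);
    grads_phi_u[k] = stokes_fe_values[velocities].symmetric_gradient (k, q);
    div_phi_u[k]   = stokes_fe_values[velocities].divergence (k, q);
    phi_p[k]       = stokes_fe_values[pressure].value (k, q);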
std::vector old_temperature_values(n_q_points); std::vector > phi_u (dofs_per_cell); @@ -1674,25 +1674,25 @@ namespace Step31 const FEValuesExtractors::Vector velocities (0); const FEValuesExtractors::Scalar pressure (dim); - // Now start the loop over all cells in - // the problem. We are working on two - // different DoFHandlers for this - // assembly routine, so we must have two - // different cell iterators for the two - // objects in use. This might seem a bit - // peculiar, since both the Stokes system - // and the temperature system use the - // same grid, but that's the only way to - // keep degrees of freedom in sync. The - // first statements within the loop are - // again all very familiar, doing the - // update of the finite element data as - // specified by the update flags, zeroing - // out the local arrays and getting the - // values of the old solution at the - // quadrature points. Then we are ready to - // loop over the quadrature points on the - // cell. + // Now start the loop over all cells in + // the problem. We are working on two + // different DoFHandlers for this + // assembly routine, so we must have two + // different cell iterators for the two + // objects in use. This might seem a bit + // peculiar, since both the Stokes system + // and the temperature system use the + // same grid, but that's the only way to + // keep degrees of freedom in sync. The + // first statements within the loop are + // again all very familiar, doing the + // update of the finite element data as + // specified by the update flags, zeroing + // out the local arrays and getting the + // values of the old solution at the + // quadrature points. Then we are ready to + // loop over the quadrature points on the + // cell. typename DoFHandler::active_cell_iterator cell = stokes_dof_handler.begin_active(), endc = stokes_dof_handler.end(); @@ -1701,94 +1701,94 @@ namespace Step31 for (; cell!=endc; ++cell, ++temperature_cell) { - stokes_fe_values.reinit (cell); - temperature_fe_values.reinit (temperature_cell); - - local_matrix = 0; - local_rhs = 0; - - temperature_fe_values.get_function_values (old_temperature_solution, - old_temperature_values); - - for (unsigned int q=0; qrebuild_matrices - // flag. - for (unsigned int k=0; k gravity = -( (dim == 2) ? (Point (0,1)) : - (Point (0,0,1)) ); - for (unsigned int i=0; ilocal_dof_indices. - // Again, we let the ConstraintMatrix - // class do the insertion of the cell - // matrix elements to the global - // matrix, which already condenses the - // hanging node constraints. - cell->get_dof_indices (local_dof_indices); - - if (rebuild_stokes_matrix == true) - stokes_constraints.distribute_local_to_global (local_matrix, - local_rhs, - local_dof_indices, - stokes_matrix, - stokes_rhs); - else - stokes_constraints.distribute_local_to_global (local_rhs, - local_dof_indices, - stokes_rhs); + stokes_fe_values.reinit (cell); + temperature_fe_values.reinit (temperature_cell); + + local_matrix = 0; + local_rhs = 0; + + temperature_fe_values.get_function_values (old_temperature_solution, + old_temperature_values); + + for (unsigned int q=0; qrebuild_matrices + // flag. + for (unsigned int k=0; k gravity = -( (dim == 2) ? (Point (0,1)) : + (Point (0,0,1)) ); + for (unsigned int i=0; ilocal_dof_indices. + // Again, we let the ConstraintMatrix + // class do the insertion of the cell + // matrix elements to the global + // matrix, which already condenses the + // hanging node constraints. 
+ cell->get_dof_indices (local_dof_indices); + + if (rebuild_stokes_matrix == true) + stokes_constraints.distribute_local_to_global (local_matrix, + local_rhs, + local_dof_indices, + stokes_matrix, + stokes_rhs); + else + stokes_constraints.distribute_local_to_global (local_rhs, + local_dof_indices, + stokes_rhs); } rebuild_stokes_matrix = false; @@ -1799,40 +1799,40 @@ namespace Step31 - // @sect4{BoussinesqFlowProblem::assemble_temperature_matrix} - // - // This function assembles the matrix in - // the temperature equation. The - // temperature matrix consists of two - // parts, a mass matrix and the time step - // size times a stiffness matrix given by - // a Laplace term times the amount of - // diffusion. Since the matrix depends on - // the time step size (which varies from - // one step to another), the temperature - // matrix needs to be updated every time - // step. We could simply regenerate the - // matrices in every time step, but this - // is not really efficient since mass and - // Laplace matrix do only change when we - // change the mesh. Hence, we do this - // more efficiently by generating two - // separate matrices in this function, - // one for the mass matrix and one for - // the stiffness (diffusion) matrix. We - // will then sum up the matrix plus the - // stiffness matrix times the time step - // size once we know the actual time step. - // - // So the details for this first step are - // very simple. In case we need to - // rebuild the matrix (i.e., the mesh has - // changed), we zero the data structures, - // get a quadrature formula and a - // FEValues object, and create local - // matrices, local dof indices and - // evaluation structures for the basis - // functions. + // @sect4{BoussinesqFlowProblem::assemble_temperature_matrix} + // + // This function assembles the matrix in + // the temperature equation. The + // temperature matrix consists of two + // parts, a mass matrix and the time step + // size times a stiffness matrix given by + // a Laplace term times the amount of + // diffusion. Since the matrix depends on + // the time step size (which varies from + // one step to another), the temperature + // matrix needs to be updated every time + // step. We could simply regenerate the + // matrices in every time step, but this + // is not really efficient since mass and + // Laplace matrix do only change when we + // change the mesh. Hence, we do this + // more efficiently by generating two + // separate matrices in this function, + // one for the mass matrix and one for + // the stiffness (diffusion) matrix. We + // will then sum up the matrix plus the + // stiffness matrix times the time step + // size once we know the actual time step. + // + // So the details for this first step are + // very simple. In case we need to + // rebuild the matrix (i.e., the mesh has + // changed), we zero the data structures, + // get a quadrature formula and a + // FEValues object, and create local + // matrices, local dof indices and + // evaluation structures for the basis + // functions. 
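To make the "sum up once we know the time step" remark concrete: the two stored matrices are later combined as sketched below. This mirrors, in condensed form, the code in assemble_temperature_system further down and uses the matrix names of this program:

    // backward Euler (first time step):  T = M + k_n K
    temperature_matrix.copy_from (temperature_mass_matrix);
    temperature_matrix.add (time_step, temperature_stiffness_matrix);

    // BDF-2 (later time steps):  T = (2k_n+k_{n-1})/(k_n+k_{n-1}) M + k_n K
    temperature_matrix.copy_from (temperature_mass_matrix);
    temperature_matrix *= (2*time_step + old_time_step) /
                          (time_step + old_time_step);
    temperature_matrix.add (time_step, temperature_stiffness_matrix);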
template void BoussinesqFlowProblem::assemble_temperature_matrix () { @@ -1844,8 +1844,8 @@ namespace Step31 QGauss quadrature_formula (temperature_degree+2); FEValues temperature_fe_values (temperature_fe, quadrature_formula, - update_values | update_gradients | - update_JxW_values); + update_values | update_gradients | + update_JxW_values); const unsigned int dofs_per_cell = temperature_fe.dofs_per_cell; const unsigned int n_q_points = quadrature_formula.size(); @@ -1858,60 +1858,60 @@ namespace Step31 std::vector phi_T (dofs_per_cell); std::vector > grad_phi_T (dofs_per_cell); - // Now, let's start the loop over all cells - // in the triangulation. We need to zero - // out the local matrices, update the - // finite element evaluations, and then - // loop over the rows and columns of the - // matrices on each quadrature point, where - // we then create the mass matrix and the - // stiffness matrix (Laplace terms times - // the diffusion - // EquationData::kappa. Finally, - // we let the constraints object insert - // these values into the global matrix, and - // directly condense the constraints into - // the matrix. + // Now, let's start the loop over all cells + // in the triangulation. We need to zero + // out the local matrices, update the + // finite element evaluations, and then + // loop over the rows and columns of the + // matrices on each quadrature point, where + // we then create the mass matrix and the + // stiffness matrix (Laplace terms times + // the diffusion + // EquationData::kappa. Finally, + // we let the constraints object insert + // these values into the global matrix, and + // directly condense the constraints into + // the matrix. typename DoFHandler::active_cell_iterator cell = temperature_dof_handler.begin_active(), endc = temperature_dof_handler.end(); for (; cell!=endc; ++cell) { - local_mass_matrix = 0; - local_stiffness_matrix = 0; - - temperature_fe_values.reinit (cell); - - for (unsigned int q=0; qget_dof_indices (local_dof_indices); - - temperature_constraints.distribute_local_to_global (local_mass_matrix, - local_dof_indices, - temperature_mass_matrix); - temperature_constraints.distribute_local_to_global (local_stiffness_matrix, - local_dof_indices, - temperature_stiffness_matrix); + local_mass_matrix = 0; + local_stiffness_matrix = 0; + + temperature_fe_values.reinit (cell); + + for (unsigned int q=0; qget_dof_indices (local_dof_indices); + + temperature_constraints.distribute_local_to_global (local_mass_matrix, + local_dof_indices, + temperature_mass_matrix); + temperature_constraints.distribute_local_to_global (local_stiffness_matrix, + local_dof_indices, + temperature_stiffness_matrix); } rebuild_temperature_matrices = false; @@ -1919,34 +1919,34 @@ namespace Step31 - // @sect4{BoussinesqFlowProblem::assemble_temperature_system} - // - // This function does the second part of - // the assembly work on the temperature - // matrix, the actual addition of - // pressure mass and stiffness matrix - // (where the time step size comes into - // play), as well as the creation of the - // velocity-dependent right hand - // side. The declarations for the right - // hand side assembly in this function - // are pretty much the same as the ones - // used in the other assembly routines, - // except that we restrict ourselves to - // vectors this time. We are going to - // calculate residuals on the temperature - // system, which means that we have to - // evaluate second derivatives, specified - // by the update flag - // update_hessians. 
- // - // The temperature equation is coupled to the - // Stokes system by means of the fluid - // velocity. These two parts of the solution - // are associated with different DoFHandlers, - // so we again need to create a second - // FEValues object for the evaluation of the - // velocity at the quadrature points. + // @sect4{BoussinesqFlowProblem::assemble_temperature_system} + // + // This function does the second part of + // the assembly work on the temperature + // matrix, the actual addition of + // pressure mass and stiffness matrix + // (where the time step size comes into + // play), as well as the creation of the + // velocity-dependent right hand + // side. The declarations for the right + // hand side assembly in this function + // are pretty much the same as the ones + // used in the other assembly routines, + // except that we restrict ourselves to + // vectors this time. We are going to + // calculate residuals on the temperature + // system, which means that we have to + // evaluate second derivatives, specified + // by the update flag + // update_hessians. + // + // The temperature equation is coupled to the + // Stokes system by means of the fluid + // velocity. These two parts of the solution + // are associated with different DoFHandlers, + // so we again need to create a second + // FEValues object for the evaluation of the + // velocity at the quadrature points. template void BoussinesqFlowProblem:: assemble_temperature_system (const double maximal_velocity) @@ -1955,28 +1955,28 @@ namespace Step31 if (use_bdf2_scheme == true) { - temperature_matrix.copy_from (temperature_mass_matrix); - temperature_matrix *= (2*time_step + old_time_step) / - (time_step + old_time_step); - temperature_matrix.add (time_step, temperature_stiffness_matrix); + temperature_matrix.copy_from (temperature_mass_matrix); + temperature_matrix *= (2*time_step + old_time_step) / + (time_step + old_time_step); + temperature_matrix.add (time_step, temperature_stiffness_matrix); } else { - temperature_matrix.copy_from (temperature_mass_matrix); - temperature_matrix.add (time_step, temperature_stiffness_matrix); + temperature_matrix.copy_from (temperature_mass_matrix); + temperature_matrix.add (time_step, temperature_stiffness_matrix); } temperature_rhs = 0; const QGauss quadrature_formula(temperature_degree+2); FEValues temperature_fe_values (temperature_fe, quadrature_formula, - update_values | - update_gradients | - update_hessians | - update_quadrature_points | - update_JxW_values); + update_values | + update_gradients | + update_hessians | + update_quadrature_points | + update_JxW_values); FEValues stokes_fe_values (stokes_fe, quadrature_formula, - update_values); + update_values); const unsigned int dofs_per_cell = temperature_fe.dofs_per_cell; const unsigned int n_q_points = quadrature_formula.size(); @@ -1985,24 +1985,24 @@ namespace Step31 std::vector local_dof_indices (dofs_per_cell); - // Next comes the declaration of vectors - // to hold the old and older solution - // values (as a notation for time levels - // n-1 and n-2, - // respectively) and gradients at - // quadrature points of the current - // cell. We also declarate an object to - // hold the temperature right hande side - // values (gamma_values), - // and we again use shortcuts for the - // temperature basis - // functions. 
Eventually, we need to find - // the temperature extrema and the - // diameter of the computational domain - // which will be used for the definition - // of the stabilization parameter (we got - // the maximal velocity as an input to - // this function). + // Next comes the declaration of vectors + // to hold the old and older solution + // values (as a notation for time levels + // n-1 and n-2, + // respectively) and gradients at + // quadrature points of the current + // cell. We also declarate an object to + // hold the temperature right hande side + // values (gamma_values), + // and we again use shortcuts for the + // temperature basis + // functions. Eventually, we need to find + // the temperature extrema and the + // diameter of the computational domain + // which will be used for the definition + // of the stabilization parameter (we got + // the maximal velocity as an input to + // this function). std::vector > old_velocity_values (n_q_points); std::vector > old_old_velocity_values (n_q_points); std::vector old_temperature_values (n_q_points); @@ -2023,29 +2023,29 @@ namespace Step31 const FEValuesExtractors::Vector velocities (0); - // Now, let's start the loop over all cells - // in the triangulation. Again, we need two - // cell iterators that walk in parallel - // through the cells of the two involved - // DoFHandler objects for the Stokes and - // temperature part. Within the loop, we - // first set the local rhs to zero, and - // then get the values and derivatives of - // the old solution functions at the - // quadrature points, since they are going - // to be needed for the definition of the - // stabilization parameters and as - // coefficients in the equation, - // respectively. Note that since the - // temperature has its own DoFHandler and - // FEValues object we get the entire - // solution at the quadrature point (which - // is the scalar temperature field only - // anyway) whereas for the Stokes part we - // restrict ourselves to extracting the - // velocity part (and ignoring the pressure - // part) by using - // stokes_fe_values[velocities].get_function_values. + // Now, let's start the loop over all cells + // in the triangulation. Again, we need two + // cell iterators that walk in parallel + // through the cells of the two involved + // DoFHandler objects for the Stokes and + // temperature part. Within the loop, we + // first set the local rhs to zero, and + // then get the values and derivatives of + // the old solution functions at the + // quadrature points, since they are going + // to be needed for the definition of the + // stabilization parameters and as + // coefficients in the equation, + // respectively. Note that since the + // temperature has its own DoFHandler and + // FEValues object we get the entire + // solution at the quadrature point (which + // is the scalar temperature field only + // anyway) whereas for the Stokes part we + // restrict ourselves to extracting the + // velocity part (and ignoring the pressure + // part) by using + // stokes_fe_values[velocities].get_function_values. 
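In condensed form, the velocity-only extraction mentioned at the end of the previous paragraph reads as follows (a sketch using the names of this program):

    stokes_fe_values[velocities].get_function_values (stokes_solution,
                                                      old_velocity_values);
    stokes_fe_values[velocities].get_function_values (old_stokes_solution,
                                                      old_old_velocity_values);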
typename DoFHandler::active_cell_iterator cell = temperature_dof_handler.begin_active(), endc = temperature_dof_handler.end(); @@ -2054,186 +2054,186 @@ namespace Step31 for (; cell!=endc; ++cell, ++stokes_cell) { - local_rhs = 0; - - temperature_fe_values.reinit (cell); - stokes_fe_values.reinit (stokes_cell); - - temperature_fe_values.get_function_values (old_temperature_solution, - old_temperature_values); - temperature_fe_values.get_function_values (old_old_temperature_solution, - old_old_temperature_values); - - temperature_fe_values.get_function_gradients (old_temperature_solution, - old_temperature_grads); - temperature_fe_values.get_function_gradients (old_old_temperature_solution, - old_old_temperature_grads); - - temperature_fe_values.get_function_laplacians (old_temperature_solution, - old_temperature_laplacians); - temperature_fe_values.get_function_laplacians (old_old_temperature_solution, - old_old_temperature_laplacians); - - temperature_right_hand_side.value_list (temperature_fe_values.get_quadrature_points(), - gamma_values); - - stokes_fe_values[velocities].get_function_values (stokes_solution, - old_velocity_values); - stokes_fe_values[velocities].get_function_values (old_stokes_solution, - old_old_velocity_values); - - // Next, we calculate the artificial - // viscosity for stabilization - // according to the discussion in the - // introduction using the dedicated - // function. With that at hand, we - // can get into the loop over - // quadrature points and local rhs - // vector components. The terms here - // are quite lenghty, but their - // definition follows the - // time-discrete system developed in - // the introduction of this - // program. The BDF-2 scheme needs - // one more term from the old time - // step (and involves more - // complicated factors) than the - // backward Euler scheme that is used - // for the first time step. When all - // this is done, we distribute the - // local vector into the global one - // (including hanging node - // constraints). - const double nu - = compute_viscosity (old_temperature_values, - old_old_temperature_values, - old_temperature_grads, - old_old_temperature_grads, - old_temperature_laplacians, - old_old_temperature_laplacians, - old_velocity_values, - old_old_velocity_values, - gamma_values, - maximal_velocity, - global_T_range.second - global_T_range.first, - cell->diameter()); - - for (unsigned int q=0; q ext_grad_T - = (use_bdf2_scheme ? - (old_temperature_grads[q] * - (1 + time_step/old_time_step) - - - old_old_temperature_grads[q] * - time_step/old_time_step) - : - old_temperature_grads[q]); - - const Tensor<1,dim> extrapolated_u - = (use_bdf2_scheme ? 
- (old_velocity_values[q] * - (1 + time_step/old_time_step) - - - old_old_velocity_values[q] * - time_step/old_time_step) - : - old_velocity_values[q]); - - for (unsigned int i=0; iget_dof_indices (local_dof_indices); - temperature_constraints.distribute_local_to_global (local_rhs, - local_dof_indices, - temperature_rhs); + local_rhs = 0; + + temperature_fe_values.reinit (cell); + stokes_fe_values.reinit (stokes_cell); + + temperature_fe_values.get_function_values (old_temperature_solution, + old_temperature_values); + temperature_fe_values.get_function_values (old_old_temperature_solution, + old_old_temperature_values); + + temperature_fe_values.get_function_gradients (old_temperature_solution, + old_temperature_grads); + temperature_fe_values.get_function_gradients (old_old_temperature_solution, + old_old_temperature_grads); + + temperature_fe_values.get_function_laplacians (old_temperature_solution, + old_temperature_laplacians); + temperature_fe_values.get_function_laplacians (old_old_temperature_solution, + old_old_temperature_laplacians); + + temperature_right_hand_side.value_list (temperature_fe_values.get_quadrature_points(), + gamma_values); + + stokes_fe_values[velocities].get_function_values (stokes_solution, + old_velocity_values); + stokes_fe_values[velocities].get_function_values (old_stokes_solution, + old_old_velocity_values); + + // Next, we calculate the artificial + // viscosity for stabilization + // according to the discussion in the + // introduction using the dedicated + // function. With that at hand, we + // can get into the loop over + // quadrature points and local rhs + // vector components. The terms here + // are quite lenghty, but their + // definition follows the + // time-discrete system developed in + // the introduction of this + // program. The BDF-2 scheme needs + // one more term from the old time + // step (and involves more + // complicated factors) than the + // backward Euler scheme that is used + // for the first time step. When all + // this is done, we distribute the + // local vector into the global one + // (including hanging node + // constraints). + const double nu + = compute_viscosity (old_temperature_values, + old_old_temperature_values, + old_temperature_grads, + old_old_temperature_grads, + old_temperature_laplacians, + old_old_temperature_laplacians, + old_velocity_values, + old_old_velocity_values, + gamma_values, + maximal_velocity, + global_T_range.second - global_T_range.first, + cell->diameter()); + + for (unsigned int q=0; q ext_grad_T + = (use_bdf2_scheme ? + (old_temperature_grads[q] * + (1 + time_step/old_time_step) + - + old_old_temperature_grads[q] * + time_step/old_time_step) + : + old_temperature_grads[q]); + + const Tensor<1,dim> extrapolated_u + = (use_bdf2_scheme ? + (old_velocity_values[q] * + (1 + time_step/old_time_step) + - + old_old_velocity_values[q] * + time_step/old_time_step) + : + old_velocity_values[q]); + + for (unsigned int i=0; iget_dof_indices (local_dof_indices); + temperature_constraints.distribute_local_to_global (local_rhs, + local_dof_indices, + temperature_rhs); } } - // @sect4{BoussinesqFlowProblem::solve} - // - // This function solves the linear systems - // of equations. Following the - // introduction, we start with the Stokes - // system, where we need to generate our - // block Schur preconditioner. Since all - // the relevant actions are implemented in - // the class - // BlockSchurPreconditioner, - // all we have to do is to initialize the - // class appropriately. 
What we need to - // pass down is an - // InverseMatrix object for - // the pressure mass matrix, which we set - // up using the respective class together - // with the IC preconditioner we already - // generated, and the AMG preconditioner - // for the velocity-velocity matrix. Note - // that both Mp_preconditioner - // and Amg_preconditioner are - // only pointers, so we use * - // to pass down the actual preconditioner - // objects. - // - // Once the preconditioner is ready, we - // create a GMRES solver for the block - // system. Since we are working with - // Trilinos data structures, we have to set - // the respective template argument in the - // solver. GMRES needs to internally store - // temporary vectors for each iteration - // (see the discussion in the results - // section of step-22) – the more - // vectors it can use, the better it will - // generally perform. To keep memory - // demands in check, we set the number of - // vectors to 100. This means that up to - // 100 solver iterations, every temporary - // vector can be stored. If the solver - // needs to iterate more often to get the - // specified tolerance, it will work on a - // reduced set of vectors by restarting at - // every 100 iterations. - // - // With this all set up, we solve the system - // and distribute the constraints in the - // Stokes system, i.e. hanging nodes and - // no-flux boundary condition, in order to - // have the appropriate solution values even - // at constrained dofs. Finally, we write the - // number of iterations to the screen. + // @sect4{BoussinesqFlowProblem::solve} + // + // This function solves the linear systems + // of equations. Following the + // introduction, we start with the Stokes + // system, where we need to generate our + // block Schur preconditioner. Since all + // the relevant actions are implemented in + // the class + // BlockSchurPreconditioner, + // all we have to do is to initialize the + // class appropriately. What we need to + // pass down is an + // InverseMatrix object for + // the pressure mass matrix, which we set + // up using the respective class together + // with the IC preconditioner we already + // generated, and the AMG preconditioner + // for the velocity-velocity matrix. Note + // that both Mp_preconditioner + // and Amg_preconditioner are + // only pointers, so we use * + // to pass down the actual preconditioner + // objects. + // + // Once the preconditioner is ready, we + // create a GMRES solver for the block + // system. Since we are working with + // Trilinos data structures, we have to set + // the respective template argument in the + // solver. GMRES needs to internally store + // temporary vectors for each iteration + // (see the discussion in the results + // section of step-22) – the more + // vectors it can use, the better it will + // generally perform. To keep memory + // demands in check, we set the number of + // vectors to 100. This means that up to + // 100 solver iterations, every temporary + // vector can be stored. If the solver + // needs to iterate more often to get the + // specified tolerance, it will work on a + // reduced set of vectors by restarting at + // every 100 iterations. + // + // With this all set up, we solve the system + // and distribute the constraints in the + // Stokes system, i.e. hanging nodes and + // no-flux boundary condition, in order to + // have the appropriate solution values even + // at constrained dofs. Finally, we write the + // number of iterations to the screen. 
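In condensed form, the solver setup described above consists of a SolverControl with a relative tolerance and a GMRES object with room for 100 temporary vectors. This is a sketch of the calls used below; the template argument is the Trilinos block vector type this program works with:

    SolverControl solver_control (stokes_matrix.m(),
                                  1e-6 * stokes_rhs.l2_norm());
    SolverGMRES<TrilinosWrappers::BlockVector>
      gmres (solver_control,
             SolverGMRES<TrilinosWrappers::BlockVector>::AdditionalData(100));

    gmres.solve (stokes_matrix, stokes_solution, stokes_rhs, preconditioner);
    stokes_constraints.distribute (stokes_solution);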
template void BoussinesqFlowProblem::solve () { @@ -2241,201 +2241,201 @@ namespace Step31 { const LinearSolvers::InverseMatrix - mp_inverse (stokes_preconditioner_matrix.block(1,1), *Mp_preconditioner); + TrilinosWrappers::PreconditionIC> + mp_inverse (stokes_preconditioner_matrix.block(1,1), *Mp_preconditioner); const LinearSolvers::BlockSchurPreconditioner - preconditioner (stokes_matrix, mp_inverse, *Amg_preconditioner); + TrilinosWrappers::PreconditionIC> + preconditioner (stokes_matrix, mp_inverse, *Amg_preconditioner); SolverControl solver_control (stokes_matrix.m(), - 1e-6*stokes_rhs.l2_norm()); + 1e-6*stokes_rhs.l2_norm()); SolverGMRES - gmres (solver_control, - SolverGMRES::AdditionalData(100)); + gmres (solver_control, + SolverGMRES::AdditionalData(100)); for (unsigned int i=0; i= 0.01) time_step = 1./(1.6*dim*std::sqrt(1.*dim)) / - temperature_degree * - GridTools::minimal_cell_diameter(triangulation) / - maximal_velocity; + temperature_degree * + GridTools::minimal_cell_diameter(triangulation) / + maximal_velocity; else time_step = 1./(1.6*dim*std::sqrt(1.*dim)) / - temperature_degree * - GridTools::minimal_cell_diameter(triangulation) / - .01; + temperature_degree * + GridTools::minimal_cell_diameter(triangulation) / + .01; std::cout << " " << "Time step: " << time_step - << std::endl; + << std::endl; temperature_solution = old_temperature_solution; - // Next we set up the temperature system - // and the right hand side using the - // function - // assemble_temperature_system(). - // Knowing the matrix and right hand side - // of the temperature equation, we set up - // a preconditioner and a solver. The - // temperature matrix is a mass matrix - // (with eigenvalues around one) plus a - // Laplace matrix (with eigenvalues - // between zero and $ch^{-2}$) times a - // small number proportional to the time - // step $k_n$. Hence, the resulting - // symmetric and positive definite matrix - // has eigenvalues in the range - // $[1,1+k_nh^{-2}]$ (up to - // constants). This matrix is only - // moderately ill conditioned even for - // small mesh sizes and we get a - // reasonably good preconditioner by - // simple means, for example with an - // incomplete Cholesky decomposition - // preconditioner (IC) as we also use for - // preconditioning the pressure mass - // matrix solver. As a solver, we choose - // the conjugate gradient method CG. As - // before, we tell the solver to use - // Trilinos vectors via the template - // argument - // TrilinosWrappers::Vector. - // Finally, we solve, distribute the - // hanging node constraints and write out - // the number of iterations. + // Next we set up the temperature system + // and the right hand side using the + // function + // assemble_temperature_system(). + // Knowing the matrix and right hand side + // of the temperature equation, we set up + // a preconditioner and a solver. The + // temperature matrix is a mass matrix + // (with eigenvalues around one) plus a + // Laplace matrix (with eigenvalues + // between zero and $ch^{-2}$) times a + // small number proportional to the time + // step $k_n$. Hence, the resulting + // symmetric and positive definite matrix + // has eigenvalues in the range + // $[1,1+k_nh^{-2}]$ (up to + // constants). 
This matrix is only + // moderately ill conditioned even for + // small mesh sizes and we get a + // reasonably good preconditioner by + // simple means, for example with an + // incomplete Cholesky decomposition + // preconditioner (IC) as we also use for + // preconditioning the pressure mass + // matrix solver. As a solver, we choose + // the conjugate gradient method CG. As + // before, we tell the solver to use + // Trilinos vectors via the template + // argument + // TrilinosWrappers::Vector. + // Finally, we solve, distribute the + // hanging node constraints and write out + // the number of iterations. assemble_temperature_system (maximal_velocity); { SolverControl solver_control (temperature_matrix.m(), - 1e-8*temperature_rhs.l2_norm()); + 1e-8*temperature_rhs.l2_norm()); SolverCG cg (solver_control); TrilinosWrappers::PreconditionIC preconditioner; preconditioner.initialize (temperature_matrix); cg.solve (temperature_matrix, temperature_solution, - temperature_rhs, preconditioner); + temperature_rhs, preconditioner); temperature_constraints.distribute (temperature_solution); std::cout << " " - << solver_control.last_step() - << " CG iterations for temperature." - << std::endl; - - // At the end of this function, we step - // through the vector and read out the - // maximum and minimum temperature value, - // which we also want to output. This - // will come in handy when determining - // the correct constant in the choice of - // time step as discuss in the results - // section of this program. + << solver_control.last_step() + << " CG iterations for temperature." + << std::endl; + + // At the end of this function, we step + // through the vector and read out the + // maximum and minimum temperature value, + // which we also want to output. This + // will come in handy when determining + // the correct constant in the choice of + // time step as discuss in the results + // section of this program. double min_temperature = temperature_solution(0), - max_temperature = temperature_solution(0); + max_temperature = temperature_solution(0); for (unsigned int i=0; i (min_temperature, - temperature_solution(i)); - max_temperature = std::max (max_temperature, - temperature_solution(i)); - } + { + min_temperature = std::min (min_temperature, + temperature_solution(i)); + max_temperature = std::max (max_temperature, + temperature_solution(i)); + } std::cout << " Temperature range: " - << min_temperature << ' ' << max_temperature - << std::endl; + << min_temperature << ' ' << max_temperature + << std::endl; } } - // @sect4{BoussinesqFlowProblem::output_results} - // - // This function writes the solution to a VTK - // output file for visualization, which is - // done every tenth time step. This is - // usually quite a simple task, since the - // deal.II library provides functions that do - // almost all the job for us. In this case, - // the situation is a bit more complicated, - // since we want to visualize both the Stokes - // solution and the temperature as one data - // set, but we have done all the calculations - // based on two different DoFHandler objects, - // a situation the DataOut class usually used - // for output is not prepared to deal - // with. The way we're going to achieve this - // recombination is to create a joint - // DoFHandler that collects both components, - // the Stokes solution and the temperature - // solution. 
This can be nicely done by - // combining the finite elements from the two - // systems to form one FESystem, and let this - // collective system define a new DoFHandler - // object. To be sure that everything was - // done correctly, we perform a sanity check - // that ensures that we got all the dofs from - // both Stokes and temperature even in the - // combined system. - // - // Next, we create a vector that will collect - // the actual solution values. Since this - // vector is only going to be used for - // output, we create it as a deal.II vector - // that nicely cooperate with the data output - // classes. Remember that we used Trilinos - // vectors for assembly and solving. + // @sect4{BoussinesqFlowProblem::output_results} + // + // This function writes the solution to a VTK + // output file for visualization, which is + // done every tenth time step. This is + // usually quite a simple task, since the + // deal.II library provides functions that do + // almost all the job for us. In this case, + // the situation is a bit more complicated, + // since we want to visualize both the Stokes + // solution and the temperature as one data + // set, but we have done all the calculations + // based on two different DoFHandler objects, + // a situation the DataOut class usually used + // for output is not prepared to deal + // with. The way we're going to achieve this + // recombination is to create a joint + // DoFHandler that collects both components, + // the Stokes solution and the temperature + // solution. This can be nicely done by + // combining the finite elements from the two + // systems to form one FESystem, and let this + // collective system define a new DoFHandler + // object. To be sure that everything was + // done correctly, we perform a sanity check + // that ensures that we got all the dofs from + // both Stokes and temperature even in the + // combined system. + // + // Next, we create a vector that will collect + // the actual solution values. Since this + // vector is only going to be used for + // output, we create it as a deal.II vector + // that nicely cooperate with the data output + // classes. Remember that we used Trilinos + // vectors for assembly and solving. template void BoussinesqFlowProblem::output_results () const { @@ -2443,123 +2443,123 @@ namespace Step31 return; const FESystem joint_fe (stokes_fe, 1, - temperature_fe, 1); + temperature_fe, 1); DoFHandler joint_dof_handler (triangulation); joint_dof_handler.distribute_dofs (joint_fe); Assert (joint_dof_handler.n_dofs() == - stokes_dof_handler.n_dofs() + temperature_dof_handler.n_dofs(), - ExcInternalError()); + stokes_dof_handler.n_dofs() + temperature_dof_handler.n_dofs(), + ExcInternalError()); Vector joint_solution (joint_dof_handler.n_dofs()); - // Unfortunately, there is no - // straight-forward relation that tells - // us how to sort Stokes and temperature - // vector into the joint vector. The way - // we can get around this trouble is to - // rely on the information collected in - // the FESystem. For each dof in a cell, - // the joint finite element knows to - // which equation component (velocity - // component, pressure, or temperature) - // it belongs – that's the - // information we need! 
So we step - // through all cells (with iterators into - // all three DoFHandlers moving in - // synch), and for each joint cell dof, - // we read out that component using the - // FiniteElement::system_to_base_index - // function (see there for a description - // of what the various parts of its - // return value contain). We also need to - // keep track whether we're on a Stokes - // dof or a temperature dof, which is - // contained in - // joint_fe.system_to_base_index(i).first.first. - // Eventually, the dof_indices data - // structures on either of the three - // systems tell us how the relation - // between global vector and local dofs - // looks like on the present cell, which - // concludes this tedious work. - // - // There's one thing worth remembering - // when looking at the output: In our - // algorithm, we first solve for the - // Stokes system at time level n-1 - // in each time step and then for the - // temperature at time level n - // using the previously computed - // velocity. These are the two components - // we join for output, so these two parts - // of the output file are actually - // misaligned by one time step. Since we - // consider graphical output as only a - // qualititative means to understand a - // solution, we ignore this - // $\mathcal{O}(h)$ error. + // Unfortunately, there is no + // straight-forward relation that tells + // us how to sort Stokes and temperature + // vector into the joint vector. The way + // we can get around this trouble is to + // rely on the information collected in + // the FESystem. For each dof in a cell, + // the joint finite element knows to + // which equation component (velocity + // component, pressure, or temperature) + // it belongs – that's the + // information we need! So we step + // through all cells (with iterators into + // all three DoFHandlers moving in + // synch), and for each joint cell dof, + // we read out that component using the + // FiniteElement::system_to_base_index + // function (see there for a description + // of what the various parts of its + // return value contain). We also need to + // keep track whether we're on a Stokes + // dof or a temperature dof, which is + // contained in + // joint_fe.system_to_base_index(i).first.first. + // Eventually, the dof_indices data + // structures on either of the three + // systems tell us how the relation + // between global vector and local dofs + // looks like on the present cell, which + // concludes this tedious work. + // + // There's one thing worth remembering + // when looking at the output: In our + // algorithm, we first solve for the + // Stokes system at time level n-1 + // in each time step and then for the + // temperature at time level n + // using the previously computed + // velocity. These are the two components + // we join for output, so these two parts + // of the output file are actually + // misaligned by one time step. Since we + // consider graphical output as only a + // qualititative means to understand a + // solution, we ignore this + // $\mathcal{O}(h)$ error. 
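A brief sketch of how the return value of FiniteElement::system_to_base_index is interpreted in the loop that follows; the names are those of this program, and the sanity checks of the actual code are omitted here:

    // first.first : which base element the dof belongs to
    //               (0 = Stokes part, 1 = temperature part)
    // second      : the dof's index within that base element's cell dofs
    if (joint_fe.system_to_base_index(i).first.first == 0)
      joint_solution (local_joint_dof_indices[i])
        = stokes_solution (local_stokes_dof_indices
                           [joint_fe.system_to_base_index(i).second]);
    else
      joint_solution (local_joint_dof_indices[i])
        = temperature_solution (local_temperature_dof_indices
                                [joint_fe.system_to_base_index(i).second]);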
{ std::vector local_joint_dof_indices (joint_fe.dofs_per_cell); std::vector local_stokes_dof_indices (stokes_fe.dofs_per_cell); std::vector local_temperature_dof_indices (temperature_fe.dofs_per_cell); typename DoFHandler::active_cell_iterator - joint_cell = joint_dof_handler.begin_active(), - joint_endc = joint_dof_handler.end(), - stokes_cell = stokes_dof_handler.begin_active(), - temperature_cell = temperature_dof_handler.begin_active(); + joint_cell = joint_dof_handler.begin_active(), + joint_endc = joint_dof_handler.end(), + stokes_cell = stokes_dof_handler.begin_active(), + temperature_cell = temperature_dof_handler.begin_active(); for (; joint_cell!=joint_endc; ++joint_cell, ++stokes_cell, ++temperature_cell) - { - joint_cell->get_dof_indices (local_joint_dof_indices); - stokes_cell->get_dof_indices (local_stokes_dof_indices); - temperature_cell->get_dof_indices (local_temperature_dof_indices); - - for (unsigned int i=0; iget_dof_indices (local_joint_dof_indices); + stokes_cell->get_dof_indices (local_stokes_dof_indices); + temperature_cell->get_dof_indices (local_temperature_dof_indices); + + for (unsigned int i=0; idim - // components are the vector velocity, - // and then we have pressure and - // temperature. This information is read - // out using the - // DataComponentInterpretation helper - // class. Next, we attach the solution - // values together with the names of its - // components to the output object, and - // build patches according to the degree - // of freedom, which are (sub-) elements - // that describe the data for - // visualization programs. Finally, we - // set a file name (that includes the - // time step number) and write the vtk - // file. + // Next, we proceed as we've done in + // step-22. We create solution names + // (that are going to appear in the + // visualization program for the + // individual components), and attach the + // joint dof handler to a DataOut + // object. The first dim + // components are the vector velocity, + // and then we have pressure and + // temperature. This information is read + // out using the + // DataComponentInterpretation helper + // class. Next, we attach the solution + // values together with the names of its + // components to the output object, and + // build patches according to the degree + // of freedom, which are (sub-) elements + // that describe the data for + // visualization programs. Finally, we + // set a file name (that includes the + // time step number) and write the vtk + // file. std::vector joint_solution_names (dim, "velocity"); joint_solution_names.push_back ("p"); joint_solution_names.push_back ("T"); @@ -2573,11 +2573,11 @@ namespace Step31 (dim+2, DataComponentInterpretation::component_is_scalar); for (unsigned int i=0; i::type_dof_data, - data_component_interpretation); + DataOut::type_dof_data, + data_component_interpretation); data_out.build_patches (std::min(stokes_degree, temperature_degree)); std::ostringstream filename; @@ -2589,111 +2589,111 @@ namespace Step31 - // @sect4{BoussinesqFlowProblem::refine_mesh} - // - // This function takes care of the adaptive - // mesh refinement. The three tasks this - // function performs is to first find out - // which cells to refine/coarsen, then to - // actually do the refinement and eventually - // transfer the solution vectors between the - // two different grids. 
The first task is - // simply achieved by using the - // well-established Kelly error estimator on - // the temperature (it is the temperature - // we're mainly interested in for this - // program, and we need to be accurate in - // regions of high temperature gradients, - // also to not have too much numerical - // diffusion). The second task is to actually - // do the remeshing. That involves only basic - // functions as well, such as the - // refine_and_coarsen_fixed_fraction - // that refines those cells with the largest - // estimated error that together make up 80 - // per cent of the error, and coarsens those - // cells with the smallest error that make up - // for a combined 10 per cent of the - // error. - // - // If implemented like this, we would get a - // program that will not make much progress: - // Remember that we expect temperature fields - // that are nearly discontinuous (the - // diffusivity $\kappa$ is very small after - // all) and consequently we can expect that a - // freely adapted mesh will refine further - // and further into the areas of large - // gradients. This decrease in mesh size will - // then be accompanied by a decrease in time - // step, requiring an exceedingly large - // number of time steps to solve to a given - // final time. It will also lead to meshes - // that are much better at resolving - // discontinuities after several mesh - // refinement cycles than in the beginning. - // - // In particular to prevent the decrease in - // time step size and the correspondingly - // large number of time steps, we limit the - // maximal refinement depth of the mesh. To - // this end, after the refinement indicator - // has been applied to the cells, we simply - // loop over all cells on the finest level - // and unselect them from refinement if they - // would result in too high a mesh level. + // @sect4{BoussinesqFlowProblem::refine_mesh} + // + // This function takes care of the adaptive + // mesh refinement. The three tasks this + // function performs is to first find out + // which cells to refine/coarsen, then to + // actually do the refinement and eventually + // transfer the solution vectors between the + // two different grids. The first task is + // simply achieved by using the + // well-established Kelly error estimator on + // the temperature (it is the temperature + // we're mainly interested in for this + // program, and we need to be accurate in + // regions of high temperature gradients, + // also to not have too much numerical + // diffusion). The second task is to actually + // do the remeshing. That involves only basic + // functions as well, such as the + // refine_and_coarsen_fixed_fraction + // that refines those cells with the largest + // estimated error that together make up 80 + // per cent of the error, and coarsens those + // cells with the smallest error that make up + // for a combined 10 per cent of the + // error. + // + // If implemented like this, we would get a + // program that will not make much progress: + // Remember that we expect temperature fields + // that are nearly discontinuous (the + // diffusivity $\kappa$ is very small after + // all) and consequently we can expect that a + // freely adapted mesh will refine further + // and further into the areas of large + // gradients. This decrease in mesh size will + // then be accompanied by a decrease in time + // step, requiring an exceedingly large + // number of time steps to solve to a given + // final time. 
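In code, the strategy described in this comment amounts to three calls plus the level cap; this is the content of the hunk that follows, written out with the template arguments restored that the diff context does not show (the error vector is a Vector<float>, as the estimator expects):

    Vector<float> estimated_error_per_cell (triangulation.n_active_cells());

    KellyErrorEstimator<dim>::estimate (temperature_dof_handler,
                                        QGauss<dim-1>(temperature_degree+1),
                                        typename FunctionMap<dim>::type(),
                                        temperature_solution,
                                        estimated_error_per_cell);

    GridRefinement::refine_and_coarsen_fixed_fraction (triangulation,
                                                       estimated_error_per_cell,
                                                       0.8, 0.1);

    // Never refine beyond max_grid_level: unflag cells on the finest
    // admissible level that were marked for refinement.
    if (triangulation.n_levels() > max_grid_level)
      for (typename Triangulation<dim>::active_cell_iterator
             cell = triangulation.begin_active (max_grid_level);
           cell != triangulation.end(); ++cell)
        cell->clear_refine_flag ();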
It will also lead to meshes + // that are much better at resolving + // discontinuities after several mesh + // refinement cycles than in the beginning. + // + // In particular to prevent the decrease in + // time step size and the correspondingly + // large number of time steps, we limit the + // maximal refinement depth of the mesh. To + // this end, after the refinement indicator + // has been applied to the cells, we simply + // loop over all cells on the finest level + // and unselect them from refinement if they + // would result in too high a mesh level. template void BoussinesqFlowProblem::refine_mesh (const unsigned int max_grid_level) { Vector estimated_error_per_cell (triangulation.n_active_cells()); KellyErrorEstimator::estimate (temperature_dof_handler, - QGauss(temperature_degree+1), - typename FunctionMap::type(), - temperature_solution, - estimated_error_per_cell); + QGauss(temperature_degree+1), + typename FunctionMap::type(), + temperature_solution, + estimated_error_per_cell); GridRefinement::refine_and_coarsen_fixed_fraction (triangulation, - estimated_error_per_cell, - 0.8, 0.1); + estimated_error_per_cell, + 0.8, 0.1); if (triangulation.n_levels() > max_grid_level) for (typename Triangulation::active_cell_iterator - cell = triangulation.begin_active(max_grid_level); - cell != triangulation.end(); ++cell) - cell->clear_refine_flag (); - - // As part of mesh refinement we need to - // transfer the solution vectors from the - // old mesh to the new one. To this end - // we use the SolutionTransfer class and - // we have to prepare the solution - // vectors that should be transfered to - // the new grid (we will lose the old - // grid once we have done the refinement - // so the transfer has to happen - // concurrently with refinement). What we - // definetely need are the current and - // the old temperature (BDF-2 time - // stepping requires two old - // solutions). Since the SolutionTransfer - // objects only support to transfer one - // object per dof handler, we need to - // collect the two temperature solutions - // in one data structure. Moreover, we - // choose to transfer the Stokes - // solution, too, since we need the - // velocity at two previous time steps, - // of which only one is calculated on the - // fly. - // - // Consequently, we initialize two - // SolutionTransfer objects for the - // Stokes and temperature DoFHandler - // objects, by attaching them to the old - // dof handlers. With this at place, we - // can prepare the triangulation and the - // data vectors for refinement (in this - // order). + cell = triangulation.begin_active(max_grid_level); + cell != triangulation.end(); ++cell) + cell->clear_refine_flag (); + + // As part of mesh refinement we need to + // transfer the solution vectors from the + // old mesh to the new one. To this end + // we use the SolutionTransfer class and + // we have to prepare the solution + // vectors that should be transfered to + // the new grid (we will lose the old + // grid once we have done the refinement + // so the transfer has to happen + // concurrently with refinement). What we + // definetely need are the current and + // the old temperature (BDF-2 time + // stepping requires two old + // solutions). Since the SolutionTransfer + // objects only support to transfer one + // object per dof handler, we need to + // collect the two temperature solutions + // in one data structure. 
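The complete transfer cycle that this and the following comments describe looks roughly as follows. The vector types and the reinit calls for the temporary vectors follow the step-31 tutorial but are not all visible in the hunks shown here, so treat this as a sketch of the pattern rather than a verbatim excerpt:

    std::vector<TrilinosWrappers::Vector> x_temperature (2);
    x_temperature[0] = temperature_solution;
    x_temperature[1] = old_temperature_solution;
    TrilinosWrappers::BlockVector x_stokes = stokes_solution;

    SolutionTransfer<dim,TrilinosWrappers::Vector>      temperature_trans (temperature_dof_handler);
    SolutionTransfer<dim,TrilinosWrappers::BlockVector> stokes_trans (stokes_dof_handler);

    // 1. Save the data attached to the old mesh ...
    temperature_trans.prepare_for_coarsening_and_refinement (x_temperature);
    stokes_trans.prepare_for_coarsening_and_refinement (x_stokes);

    // 2. ... change the mesh and rebuild all dof-dependent structures ...
    triangulation.execute_coarsening_and_refinement ();
    setup_dofs ();

    // 3. ... and interpolate the saved vectors onto the new mesh.
    std::vector<TrilinosWrappers::Vector> tmp (2);
    tmp[0].reinit (temperature_solution);
    tmp[1].reinit (temperature_solution);
    temperature_trans.interpolate (x_temperature, tmp);
    temperature_solution     = tmp[0];
    old_temperature_solution = tmp[1];

    stokes_trans.interpolate (x_stokes, stokes_solution);

    // Finally, flag matrices and preconditioners for rebuilding; the member
    // flags used for this are not shown in the visible hunks.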
Moreover, we + // choose to transfer the Stokes + // solution, too, since we need the + // velocity at two previous time steps, + // of which only one is calculated on the + // fly. + // + // Consequently, we initialize two + // SolutionTransfer objects for the + // Stokes and temperature DoFHandler + // objects, by attaching them to the old + // dof handlers. With this at place, we + // can prepare the triangulation and the + // data vectors for refinement (in this + // order). std::vector x_temperature (2); x_temperature[0] = temperature_solution; x_temperature[1] = old_temperature_solution; @@ -2708,30 +2708,30 @@ namespace Step31 temperature_trans.prepare_for_coarsening_and_refinement(x_temperature); stokes_trans.prepare_for_coarsening_and_refinement(x_stokes); - // Now everything is ready, so do the - // refinement and recreate the dof - // structure on the new grid, and - // initialize the matrix structures and - // the new vectors in the - // setup_dofs - // function. Next, we actually perform - // the interpolation of the solutions - // between the grids. We create another - // copy of temporary vectors for - // temperature (now corresponding to the - // new grid), and let the interpolate - // function do the job. Then, the - // resulting array of vectors is written - // into the respective vector member - // variables. For the Stokes vector, - // everything is just the same – - // except that we do not need another - // temporary vector since we just - // interpolate a single vector. In the - // end, we have to tell the program that - // the matrices and preconditioners need - // to be regenerated, since the mesh has - // changed. + // Now everything is ready, so do the + // refinement and recreate the dof + // structure on the new grid, and + // initialize the matrix structures and + // the new vectors in the + // setup_dofs + // function. Next, we actually perform + // the interpolation of the solutions + // between the grids. We create another + // copy of temporary vectors for + // temperature (now corresponding to the + // new grid), and let the interpolate + // function do the job. Then, the + // resulting array of vectors is written + // into the respective vector member + // variables. For the Stokes vector, + // everything is just the same – + // except that we do not need another + // temporary vector since we just + // interpolate a single vector. In the + // end, we have to tell the program that + // the matrices and preconditioners need + // to be regenerated, since the mesh has + // changed. triangulation.execute_coarsening_and_refinement (); setup_dofs (); @@ -2752,41 +2752,41 @@ namespace Step31 - // @sect4{BoussinesqFlowProblem::run} - // - // This function performs all the - // essential steps in the Boussinesq - // program. It starts by setting up a - // grid (depending on the spatial - // dimension, we choose some - // different level of initial - // refinement and additional adaptive - // refinement steps, and then create - // a cube in dim - // dimensions and set up the dofs for - // the first time. Since we want to - // start the time stepping already - // with an adaptively refined grid, - // we perform some pre-refinement - // steps, consisting of all assembly, - // solution and refinement, but - // without actually advancing in - // time. Rather, we use the vilified - // goto statement to - // jump out of the time loop right - // after mesh refinement to start all - // over again on the new mesh - // beginning at the - // start_time_iteration - // label. 
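Condensed to its control flow, the pre-refinement logic explained above (and spelled out in the hunks that follow) has this shape; everything unrelated to the restart has been elided:

    unsigned int pre_refinement_step = 0;

    start_time_iteration:

    VectorTools::project (temperature_dof_handler,
                          temperature_constraints,
                          QGauss<dim>(temperature_degree+2),
                          EquationData::TemperatureInitialValues<dim>(),
                          old_temperature_solution);

    timestep_number = 0;
    time_step = old_time_step = 0;
    double time = 0;

    do
      {
        // ... assemble, solve and write output on the current mesh ...

        if ((timestep_number == 0) &&
            (pre_refinement_step < n_pre_refinement_steps))
          {
            refine_mesh (initial_refinement + n_pre_refinement_steps);
            ++pre_refinement_step;
            goto start_time_iteration;   // redo the first step on the refined mesh
          }
        else if ((timestep_number > 0) && (timestep_number % 5 == 0))
          refine_mesh (initial_refinement + n_pre_refinement_steps);

        time += time_step;
        ++timestep_number;
        // ... rotate the solution vectors to the "older" time levels ...
      }
    while (time <= 100);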
- // - // Before we start, we project the - // initial values to the grid and - // obtain the first data for the - // old_temperature_solution - // vector. Then, we initialize time - // step number and time step and - // start the time loop. + // @sect4{BoussinesqFlowProblem::run} + // + // This function performs all the + // essential steps in the Boussinesq + // program. It starts by setting up a + // grid (depending on the spatial + // dimension, we choose some + // different level of initial + // refinement and additional adaptive + // refinement steps, and then create + // a cube in dim + // dimensions and set up the dofs for + // the first time. Since we want to + // start the time stepping already + // with an adaptively refined grid, + // we perform some pre-refinement + // steps, consisting of all assembly, + // solution and refinement, but + // without actually advancing in + // time. Rather, we use the vilified + // goto statement to + // jump out of the time loop right + // after mesh refinement to start all + // over again on the new mesh + // beginning at the + // start_time_iteration + // label. + // + // Before we start, we project the + // initial values to the grid and + // obtain the first data for the + // old_temperature_solution + // vector. Then, we initialize time + // step number and time step and + // start the time loop. template void BoussinesqFlowProblem::run () { @@ -2806,10 +2806,10 @@ namespace Step31 start_time_iteration: VectorTools::project (temperature_dof_handler, - temperature_constraints, - QGauss(temperature_degree+2), - EquationData::TemperatureInitialValues(), - old_temperature_solution); + temperature_constraints, + QGauss(temperature_degree+2), + EquationData::TemperatureInitialValues(), + old_temperature_solution); timestep_number = 0; time_step = old_time_step = 0; @@ -2818,88 +2818,88 @@ namespace Step31 do { - std::cout << "Timestep " << timestep_number - << ": t=" << time - << std::endl; - - // The first steps in the time loop - // are all obvious – we - // assemble the Stokes system, the - // preconditioner, the temperature - // matrix (matrices and - // preconditioner do actually only - // change in case we've remeshed - // before), and then do the - // solve. Before going on - // with the next time step, we have - // to check whether we should first - // finish the pre-refinement steps or - // if we should remesh (every fifth - // time step), refining up to a level - // that is consistent with initial - // refinement and pre-refinement - // steps. Last in the loop is to - // advance the solutions, i.e. to - // copy the solutions to the next - // "older" time level. 
- assemble_stokes_system (); - build_stokes_preconditioner (); - assemble_temperature_matrix (); - - solve (); - - output_results (); - - std::cout << std::endl; - - if ((timestep_number == 0) && - (pre_refinement_step < n_pre_refinement_steps)) - { - refine_mesh (initial_refinement + n_pre_refinement_steps); - ++pre_refinement_step; - goto start_time_iteration; - } - else - if ((timestep_number > 0) && (timestep_number % 5 == 0)) - refine_mesh (initial_refinement + n_pre_refinement_steps); - - time += time_step; - ++timestep_number; - - old_stokes_solution = stokes_solution; - old_old_temperature_solution = old_temperature_solution; - old_temperature_solution = temperature_solution; + std::cout << "Timestep " << timestep_number + << ": t=" << time + << std::endl; + + // The first steps in the time loop + // are all obvious – we + // assemble the Stokes system, the + // preconditioner, the temperature + // matrix (matrices and + // preconditioner do actually only + // change in case we've remeshed + // before), and then do the + // solve. Before going on + // with the next time step, we have + // to check whether we should first + // finish the pre-refinement steps or + // if we should remesh (every fifth + // time step), refining up to a level + // that is consistent with initial + // refinement and pre-refinement + // steps. Last in the loop is to + // advance the solutions, i.e. to + // copy the solutions to the next + // "older" time level. + assemble_stokes_system (); + build_stokes_preconditioner (); + assemble_temperature_matrix (); + + solve (); + + output_results (); + + std::cout << std::endl; + + if ((timestep_number == 0) && + (pre_refinement_step < n_pre_refinement_steps)) + { + refine_mesh (initial_refinement + n_pre_refinement_steps); + ++pre_refinement_step; + goto start_time_iteration; + } + else + if ((timestep_number > 0) && (timestep_number % 5 == 0)) + refine_mesh (initial_refinement + n_pre_refinement_steps); + + time += time_step; + ++timestep_number; + + old_stokes_solution = stokes_solution; + old_old_temperature_solution = old_temperature_solution; + old_temperature_solution = temperature_solution; } - // Do all the above until we arrive at - // time 100. + // Do all the above until we arrive at + // time 100. while (time <= 100); } } - // @sect3{The main function} - // - // The main function looks almost the same - // as in all other programs. - // - // There is one difference we have to be - // careful about. This program uses Trilinos - // and, typically, Trilinos is configured so - // that it can run in %parallel using - // MPI. This doesn't mean that it has - // to run in %parallel, and in fact this - // program (unlike step-32) makes no attempt - // at all to do anything in %parallel using - // MPI. Nevertheless, Trilinos wants the MPI - // system to be initialized. We do that be - // creating an object of type - // Utilities::MPI::MPI_InitFinalize that - // initializes MPI (if available) using the - // arguments given to main() (i.e., - // argc and argv) - // and de-initializes it again when the - // object goes out of scope. + // @sect3{The main function} + // + // The main function looks almost the same + // as in all other programs. + // + // There is one difference we have to be + // careful about. This program uses Trilinos + // and, typically, Trilinos is configured so + // that it can run in %parallel using + // MPI. 
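As the remainder of this comment (continued in the next hunk) explains, the only MPI-related code needed in main() is a single initialization object whose constructor and destructor bracket everything else; schematically:

    int main (int argc, char *argv[])
    {
      // Initializes MPI (if deal.II and Trilinos were configured with it) and
      // finalizes it automatically when the object goes out of scope at the
      // end of main().
      Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv);

      // ... set up and run the BoussinesqFlowProblem as usual ...
    }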
This doesn't mean that it has + // to run in %parallel, and in fact this + // program (unlike step-32) makes no attempt + // at all to do anything in %parallel using + // MPI. Nevertheless, Trilinos wants the MPI + // system to be initialized. We do that be + // creating an object of type + // Utilities::MPI::MPI_InitFinalize that + // initializes MPI (if available) using the + // arguments given to main() (i.e., + // argc and argv) + // and de-initializes it again when the + // object goes out of scope. int main (int argc, char *argv[]) { try diff --git a/deal.II/examples/step-32/step-32.cc b/deal.II/examples/step-32/step-32.cc index cb1be80a12..7f3fcc77c0 100644 --- a/deal.II/examples/step-32/step-32.cc +++ b/deal.II/examples/step-32/step-32.cc @@ -2557,9 +2557,9 @@ namespace Step32 std::set no_normal_flux_boundaries; no_normal_flux_boundaries.insert (1); VectorTools::compute_no_normal_flux_constraints (stokes_dof_handler, 0, - no_normal_flux_boundaries, - stokes_constraints, - mapping); + no_normal_flux_boundaries, + stokes_constraints, + mapping); stokes_constraints.close (); } { @@ -2567,25 +2567,25 @@ namespace Step32 temperature_constraints.reinit (temperature_relevant_partitioning); DoFTools::make_hanging_node_constraints (temperature_dof_handler, - temperature_constraints); + temperature_constraints); VectorTools::interpolate_boundary_values (temperature_dof_handler, - 0, - EquationData::TemperatureInitialValues(), - temperature_constraints); + 0, + EquationData::TemperatureInitialValues(), + temperature_constraints); VectorTools::interpolate_boundary_values (temperature_dof_handler, - 1, - EquationData::TemperatureInitialValues(), - temperature_constraints); + 1, + EquationData::TemperatureInitialValues(), + temperature_constraints); temperature_constraints.close (); } - // All this done, we can then initialize - // the various matrix and vector objects - // to their proper sizes. At the end, we - // also record that all matrices and - // preconditioners have to be re-computed - // at the beginning of the next time - // step. + // All this done, we can then initialize + // the various matrix and vector objects + // to their proper sizes. At the end, we + // also record that all matrices and + // preconditioners have to be re-computed + // at the beginning of the next time + // step. setup_stokes_matrix (stokes_partitioning); setup_stokes_preconditioner (stokes_partitioning); setup_temperature_matrices (temperature_partitioning); @@ -2609,62 +2609,62 @@ namespace Step32 - // @sect4{The BoussinesqFlowProblem assembly functions} - // - // Following the discussion in the - // introduction and in the @ref threads - // module, we split the assembly functions - // into different parts: - // - //
  • The local calculations of - // matrices and right hand sides, given a - // certain cell as input (these functions - // are named local_assemble_* - // below). The resulting function is, in - // other words, essentially the body of the - // loop over all cells in step-31. Note, - // however, that these functions store the - // result from the local calculations in - // variables of classes from the CopyData - // namespace. - // - //
  • These objects are then given to the - // second step which writes the local data - // into the global data structures (these - // functions are named - // copy_local_to_global_* - // below). These functions are pretty - // trivial. - // - //
  • These two subfunctions are then used - // in the respective assembly routine - // (called assemble_* below), - // where a WorkStream object is set up and - // runs over all the cells that belong to - // the processor's subdomain.
- - // @sect5{Stokes preconditioner assembly} - // - // Let us start with the functions that - // builds the Stokes preconditioner. The - // first two of these are pretty trivial, - // given the discussion above. Note in - // particular that the main point in using - // the scratch data object is that we want - // to avoid allocating any objects on the - // free space each time we visit a new - // cell. As a consequence, the assembly - // function below only has automatic local - // variables, and everything else is - // accessed through the scratch data - // object, which is allocated only once - // before we start the loop over all cells: + // @sect4{The BoussinesqFlowProblem assembly functions} + // + // Following the discussion in the + // introduction and in the @ref threads + // module, we split the assembly functions + // into different parts: + // + //
  • The local calculations of + // matrices and right hand sides, given a + // certain cell as input (these functions + // are named local_assemble_* + // below). The resulting function is, in + // other words, essentially the body of the + // loop over all cells in step-31. Note, + // however, that these functions store the + // result from the local calculations in + // variables of classes from the CopyData + // namespace. + // + //
  • These objects are then given to the + // second step which writes the local data + // into the global data structures (these + // functions are named + // copy_local_to_global_* + // below). These functions are pretty + // trivial. + // + //
  • These two subfunctions are then used + // in the respective assembly routine + // (called assemble_* below), + // where a WorkStream object is set up and + // runs over all the cells that belong to + // the processor's subdomain.
+ + // @sect5{Stokes preconditioner assembly} + // + // Let us start with the functions that + // builds the Stokes preconditioner. The + // first two of these are pretty trivial, + // given the discussion above. Note in + // particular that the main point in using + // the scratch data object is that we want + // to avoid allocating any objects on the + // free space each time we visit a new + // cell. As a consequence, the assembly + // function below only has automatic local + // variables, and everything else is + // accessed through the scratch data + // object, which is allocated only once + // before we start the loop over all cells: template void BoussinesqFlowProblem:: local_assemble_stokes_preconditioner (const typename DoFHandler::active_cell_iterator &cell, - Assembly::Scratch::StokesPreconditioner &scratch, - Assembly::CopyData::StokesPreconditioner &data) + Assembly::Scratch::StokesPreconditioner &scratch, + Assembly::CopyData::StokesPreconditioner &data) { const unsigned int dofs_per_cell = stokes_fe.dofs_per_cell; const unsigned int n_q_points = scratch.stokes_fe_values.n_quadrature_points; @@ -2679,23 +2679,23 @@ namespace Step32 for (unsigned int q=0; q &data) { stokes_constraints.distribute_local_to_global (data.local_matrix, - data.local_dof_indices, - stokes_preconditioner_matrix); + data.local_dof_indices, + stokes_preconditioner_matrix); } - // Now for the function that actually puts - // things together, using the WorkStream - // functions. WorkStream::run needs a - // start and end iterator to enumerate the - // cells it is supposed to work - // on. Typically, one would use - // DoFHandler::begin_active() and - // DoFHandler::end() for that but here we - // actually only want the subset of cells - // that in fact are owned by the current - // processor. This is where the - // FilteredIterator class comes into play: - // you give it a range of cells and it - // provides an iterator that only iterates - // over that subset of cells that satisfy a - // certain predicate (a predicate is a - // function of one argument that either - // returns true or false). The predicate we - // use here is - // IteratorFilters::LocallyOwnedCell, i.e., - // it returns true exactly if the cell is - // owned by the current processor. The - // resulting iterator range is then exactly - // what we need. - // - // With this obstacle out of the way, we - // call the WorkStream::run function with - // this set of cells, scratch and copy - // objects, and with pointers to two - // functions: the local assembly and - // copy-local-to-global function. These - // functions need to have very specific - // signatures: three arguments in the first - // and one argument in the latter case (see - // the documentation of the WorkStream::run - // function for the meaning of these - // arguments). Note how we use the - // construct std_cxx1x::bind - // to create a function object that - // satisfies this requirement. It uses - // placeholders _1, std_cxx1x::_2, - // _3 for the local assembly - // function that specify cell, scratch - // data, and copy data, as well as the - // placeholder _1 for the copy - // function that expects the data to be - // written into the global matrix. On the - // other hand, the implicit zeroth argument - // of member functions (namely the - // this pointer of the object - // on which that member function is to - // operate on) is bound to the - // this pointer of the current - // function. 
The WorkStream::run function, - // as a consequence, does not need to know - // anything about the object these - // functions work on. - // - // When the WorkStream is executed, it will - // create several local assembly routines - // of the first kind for several cells and - // let some available processors work on - // them. The function that needs to be - // synchronized, i.e., the write operation - // into the global matrix, however, is - // executed by only one thread at a time in - // the prescribed order. Of course, this - // only holds for the parallelization on a - // single MPI process. Different MPI - // processes will have their own WorkStream - // objects and do that work completely - // independently (and in different memory - // spaces). In a distributed calculation, - // some data will accumulate at degrees of - // freedom that are not owned by the - // respective processor. It would be - // inefficient to send data around every - // time we encounter such a dof. What - // happens instead is that the Trilinos - // sparse matrix will keep that data and - // send it to the owner at the end of - // assembly, by calling the - // compress() command. + // Now for the function that actually puts + // things together, using the WorkStream + // functions. WorkStream::run needs a + // start and end iterator to enumerate the + // cells it is supposed to work + // on. Typically, one would use + // DoFHandler::begin_active() and + // DoFHandler::end() for that but here we + // actually only want the subset of cells + // that in fact are owned by the current + // processor. This is where the + // FilteredIterator class comes into play: + // you give it a range of cells and it + // provides an iterator that only iterates + // over that subset of cells that satisfy a + // certain predicate (a predicate is a + // function of one argument that either + // returns true or false). The predicate we + // use here is + // IteratorFilters::LocallyOwnedCell, i.e., + // it returns true exactly if the cell is + // owned by the current processor. The + // resulting iterator range is then exactly + // what we need. + // + // With this obstacle out of the way, we + // call the WorkStream::run function with + // this set of cells, scratch and copy + // objects, and with pointers to two + // functions: the local assembly and + // copy-local-to-global function. These + // functions need to have very specific + // signatures: three arguments in the first + // and one argument in the latter case (see + // the documentation of the WorkStream::run + // function for the meaning of these + // arguments). Note how we use the + // construct std_cxx1x::bind + // to create a function object that + // satisfies this requirement. It uses + // placeholders _1, std_cxx1x::_2, + // _3 for the local assembly + // function that specify cell, scratch + // data, and copy data, as well as the + // placeholder _1 for the copy + // function that expects the data to be + // written into the global matrix. On the + // other hand, the implicit zeroth argument + // of member functions (namely the + // this pointer of the object + // on which that member function is to + // operate on) is bound to the + // this pointer of the current + // function. The WorkStream::run function, + // as a consequence, does not need to know + // anything about the object these + // functions work on. 
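Stripped of the diff noise and with the template arguments restored, the call that the following hunk re-indents has the structure below; the comments mark which bind placeholder corresponds to which WorkStream argument. The CellFilter typedef is the one the surrounding program uses for FilteredIterator.

    typedef FilteredIterator<typename DoFHandler<dim>::active_cell_iterator>
      CellFilter;

    WorkStream::
      run (CellFilter (IteratorFilters::LocallyOwnedCell(),
                       stokes_dof_handler.begin_active()),
           CellFilter (IteratorFilters::LocallyOwnedCell(),
                       stokes_dof_handler.end()),
           std_cxx1x::bind (&BoussinesqFlowProblem<dim>::
                            local_assemble_stokes_preconditioner,
                            this,
                            std_cxx1x::_1,    // the cell to work on
                            std_cxx1x::_2,    // the scratch data object
                            std_cxx1x::_3),   // the copy data object
           std_cxx1x::bind (&BoussinesqFlowProblem<dim>::
                            copy_local_to_global_stokes_preconditioner,
                            this,
                            std_cxx1x::_1),   // the copy data to be written out
           Assembly::Scratch::
           StokesPreconditioner<dim> (stokes_fe, quadrature_formula,
                                      mapping,
                                      update_JxW_values |
                                      update_values |
                                      update_gradients),
           Assembly::CopyData::
           StokesPreconditioner<dim> (stokes_fe));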
+ // + // When the WorkStream is executed, it will + // create several local assembly routines + // of the first kind for several cells and + // let some available processors work on + // them. The function that needs to be + // synchronized, i.e., the write operation + // into the global matrix, however, is + // executed by only one thread at a time in + // the prescribed order. Of course, this + // only holds for the parallelization on a + // single MPI process. Different MPI + // processes will have their own WorkStream + // objects and do that work completely + // independently (and in different memory + // spaces). In a distributed calculation, + // some data will accumulate at degrees of + // freedom that are not owned by the + // respective processor. It would be + // inefficient to send data around every + // time we encounter such a dof. What + // happens instead is that the Trilinos + // sparse matrix will keep that data and + // send it to the owner at the end of + // assembly, by calling the + // compress() command. template void BoussinesqFlowProblem::assemble_stokes_preconditioner () @@ -2809,42 +2809,42 @@ namespace Step32 WorkStream:: run (CellFilter (IteratorFilters::LocallyOwnedCell(), - stokes_dof_handler.begin_active()), - CellFilter (IteratorFilters::LocallyOwnedCell(), - stokes_dof_handler.end()), - std_cxx1x::bind (&BoussinesqFlowProblem:: - local_assemble_stokes_preconditioner, - this, - std_cxx1x::_1, - std_cxx1x::_2, - std_cxx1x::_3), - std_cxx1x::bind (&BoussinesqFlowProblem:: - copy_local_to_global_stokes_preconditioner, - this, - std_cxx1x::_1), - Assembly::Scratch:: - StokesPreconditioner (stokes_fe, quadrature_formula, - mapping, - update_JxW_values | - update_values | - update_gradients), - Assembly::CopyData:: - StokesPreconditioner (stokes_fe)); + stokes_dof_handler.begin_active()), + CellFilter (IteratorFilters::LocallyOwnedCell(), + stokes_dof_handler.end()), + std_cxx1x::bind (&BoussinesqFlowProblem:: + local_assemble_stokes_preconditioner, + this, + std_cxx1x::_1, + std_cxx1x::_2, + std_cxx1x::_3), + std_cxx1x::bind (&BoussinesqFlowProblem:: + copy_local_to_global_stokes_preconditioner, + this, + std_cxx1x::_1), + Assembly::Scratch:: + StokesPreconditioner (stokes_fe, quadrature_formula, + mapping, + update_JxW_values | + update_values | + update_gradients), + Assembly::CopyData:: + StokesPreconditioner (stokes_fe)); stokes_preconditioner_matrix.compress(); } - // The final function in this block - // initiates assembly of the Stokes - // preconditioner matrix and then in fact - // builds the Stokes preconditioner. It is - // mostly the same as in the serial - // case. The only difference to step-31 is - // that we use a Jacobi preconditioner for - // the pressure mass matrix instead of IC, - // as discussed in the introduction. + // The final function in this block + // initiates assembly of the Stokes + // preconditioner matrix and then in fact + // builds the Stokes preconditioner. It is + // mostly the same as in the serial + // case. The only difference to step-31 is + // that we use a Jacobi preconditioner for + // the pressure mass matrix instead of IC, + // as discussed in the introduction. 
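For reference, the preconditioner setup performed by the function below, with the container types written out that the diff context hides; the AdditionalData settings shown here are illustrative, since the precise values used by the tutorial lie outside the visible hunks.

    std::vector<std::vector<bool> > constant_modes;
    std::vector<bool> velocity_components (dim+1, true);
    velocity_components[dim] = false;
    DoFTools::extract_constant_modes (stokes_dof_handler, velocity_components,
                                      constant_modes);

    Mp_preconditioner.reset (new TrilinosWrappers::PreconditionJacobi());
    Amg_preconditioner.reset (new TrilinosWrappers::PreconditionAMG());

    TrilinosWrappers::PreconditionAMG::AdditionalData Amg_data;
    Amg_data.constant_modes = constant_modes;   // near null space of the velocity block
    Amg_data.elliptic = true;                   // illustrative settings
    Amg_data.higher_order_elements = true;
    Amg_data.smoother_sweeps = 2;

    Mp_preconditioner->initialize (stokes_preconditioner_matrix.block(1,1));
    Amg_preconditioner->initialize (stokes_preconditioner_matrix.block(0,0),
                                    Amg_data);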
template void BoussinesqFlowProblem::build_stokes_preconditioner () @@ -2861,7 +2861,7 @@ namespace Step32 std::vector velocity_components (dim+1,true); velocity_components[dim] = false; DoFTools::extract_constant_modes (stokes_dof_handler, velocity_components, - constant_modes); + constant_modes); Mp_preconditioner.reset (new TrilinosWrappers::PreconditionJacobi()); Amg_preconditioner.reset (new TrilinosWrappers::PreconditionAMG()); @@ -2875,7 +2875,7 @@ namespace Step32 Mp_preconditioner->initialize (stokes_preconditioner_matrix.block(1,1)); Amg_preconditioner->initialize (stokes_preconditioner_matrix.block(0,0), - Amg_data); + Amg_data); rebuild_stokes_preconditioner = false; @@ -2884,33 +2884,33 @@ namespace Step32 } - // @sect5{Stokes system assembly} - - // The next three functions implement the - // assembly of the Stokes system, again - // split up into a part performing local - // calculations, one for writing the local - // data into the global matrix and vector, - // and one for actually running the loop - // over all cells with the help of the - // WorkStream class. Note that the assembly - // of the Stokes matrix needs only to be - // done in case we have changed the - // mesh. Otherwise, just the - // (temperature-dependent) right hand side - // needs to be calculated here. Since we - // are working with distributed matrices - // and vectors, we have to call the - // respective compress() - // functions in the end of the assembly in - // order to send non-local data to the - // owner process. + // @sect5{Stokes system assembly} + + // The next three functions implement the + // assembly of the Stokes system, again + // split up into a part performing local + // calculations, one for writing the local + // data into the global matrix and vector, + // and one for actually running the loop + // over all cells with the help of the + // WorkStream class. Note that the assembly + // of the Stokes matrix needs only to be + // done in case we have changed the + // mesh. Otherwise, just the + // (temperature-dependent) right hand side + // needs to be calculated here. Since we + // are working with distributed matrices + // and vectors, we have to call the + // respective compress() + // functions in the end of the assembly in + // order to send non-local data to the + // owner process. 
template void BoussinesqFlowProblem:: local_assemble_stokes_system (const typename DoFHandler::active_cell_iterator &cell, - Assembly::Scratch::StokesSystem &scratch, - Assembly::CopyData::StokesSystem &data) + Assembly::Scratch::StokesSystem &scratch, + Assembly::CopyData::StokesSystem &data) { const unsigned int dofs_per_cell = scratch.stokes_fe_values.get_fe().dofs_per_cell; const unsigned int n_q_points = scratch.stokes_fe_values.n_quadrature_points; @@ -2922,9 +2922,9 @@ namespace Step32 typename DoFHandler::active_cell_iterator temperature_cell (&triangulation, - cell->level(), - cell->index(), - &temperature_dof_handler); + cell->level(), + cell->index(), + &temperature_dof_handler); scratch.temperature_fe_values.reinit (temperature_cell); if (rebuild_stokes_matrix) @@ -2932,43 +2932,43 @@ namespace Step32 data.local_rhs = 0; scratch.temperature_fe_values.get_function_values (old_temperature_solution, - scratch.old_temperature_values); + scratch.old_temperature_values); for (unsigned int q=0; q - gravity = EquationData::gravity_vector (scratch.stokes_fe_values - .quadrature_point(q)); - - for (unsigned int i=0; i + gravity = EquationData::gravity_vector (scratch.stokes_fe_values + .quadrature_point(q)); + + for (unsigned int i=0; iget_dof_indices (data.local_dof_indices); @@ -2983,14 +2983,14 @@ namespace Step32 { if (rebuild_stokes_matrix == true) stokes_constraints.distribute_local_to_global (data.local_matrix, - data.local_rhs, - data.local_dof_indices, - stokes_matrix, - stokes_rhs); + data.local_rhs, + data.local_dof_indices, + stokes_matrix, + stokes_rhs); else stokes_constraints.distribute_local_to_global (data.local_rhs, - data.local_dof_indices, - stokes_rhs); + data.local_dof_indices, + stokes_rhs); } @@ -3013,33 +3013,33 @@ namespace Step32 WorkStream:: run (CellFilter (IteratorFilters::LocallyOwnedCell(), - stokes_dof_handler.begin_active()), - CellFilter (IteratorFilters::LocallyOwnedCell(), - stokes_dof_handler.end()), - std_cxx1x::bind (&BoussinesqFlowProblem:: - local_assemble_stokes_system, - this, - std_cxx1x::_1, - std_cxx1x::_2, - std_cxx1x::_3), - std_cxx1x::bind (&BoussinesqFlowProblem:: - copy_local_to_global_stokes_system, - this, - std_cxx1x::_1), - Assembly::Scratch:: - StokesSystem (stokes_fe, mapping, quadrature_formula, - (update_values | - update_quadrature_points | - update_JxW_values | - (rebuild_stokes_matrix == true - ? - update_gradients - : - UpdateFlags(0))), - temperature_fe, - update_values), - Assembly::CopyData:: - StokesSystem (stokes_fe)); + stokes_dof_handler.begin_active()), + CellFilter (IteratorFilters::LocallyOwnedCell(), + stokes_dof_handler.end()), + std_cxx1x::bind (&BoussinesqFlowProblem:: + local_assemble_stokes_system, + this, + std_cxx1x::_1, + std_cxx1x::_2, + std_cxx1x::_3), + std_cxx1x::bind (&BoussinesqFlowProblem:: + copy_local_to_global_stokes_system, + this, + std_cxx1x::_1), + Assembly::Scratch:: + StokesSystem (stokes_fe, mapping, quadrature_formula, + (update_values | + update_quadrature_points | + update_JxW_values | + (rebuild_stokes_matrix == true + ? + update_gradients + : + UpdateFlags(0))), + temperature_fe, + update_values), + Assembly::CopyData:: + StokesSystem (stokes_fe)); stokes_matrix.compress(); stokes_rhs.compress(Add); @@ -3051,27 +3051,27 @@ namespace Step32 } - // @sect5{Temperature matrix assembly} - - // The task to be performed by the next - // three functions is to calculate a mass - // matrix and a Laplace matrix on the - // temperature system. 
These will be - // combined in order to yield the - // semi-implicit time stepping matrix that - // consists of the mass matrix plus a time - // step-dependent weight factor times the - // Laplace matrix. This function is again - // essentially the body of the loop over - // all cells from step-31. - // - // The two following functions perform - // similar services as the ones above. + // @sect5{Temperature matrix assembly} + + // The task to be performed by the next + // three functions is to calculate a mass + // matrix and a Laplace matrix on the + // temperature system. These will be + // combined in order to yield the + // semi-implicit time stepping matrix that + // consists of the mass matrix plus a time + // step-dependent weight factor times the + // Laplace matrix. This function is again + // essentially the body of the loop over + // all cells from step-31. + // + // The two following functions perform + // similar services as the ones above. template void BoussinesqFlowProblem:: local_assemble_temperature_matrix (const typename DoFHandler::active_cell_iterator &cell, - Assembly::Scratch::TemperatureMatrix &scratch, - Assembly::CopyData::TemperatureMatrix &data) + Assembly::Scratch::TemperatureMatrix &scratch, + Assembly::CopyData::TemperatureMatrix &data) { const unsigned int dofs_per_cell = scratch.temperature_fe_values.get_fe().dofs_per_cell; const unsigned int n_q_points = scratch.temperature_fe_values.n_quadrature_points; @@ -3084,24 +3084,24 @@ namespace Step32 for (unsigned int q=0; q &data) { temperature_constraints.distribute_local_to_global (data.local_mass_matrix, - data.local_dof_indices, - temperature_mass_matrix); + data.local_dof_indices, + temperature_mass_matrix); temperature_constraints.distribute_local_to_global (data.local_stiffness_matrix, - data.local_dof_indices, - temperature_stiffness_matrix); + data.local_dof_indices, + temperature_stiffness_matrix); } @@ -3139,23 +3139,23 @@ namespace Step32 WorkStream:: run (CellFilter (IteratorFilters::LocallyOwnedCell(), - temperature_dof_handler.begin_active()), - CellFilter (IteratorFilters::LocallyOwnedCell(), - temperature_dof_handler.end()), - std_cxx1x::bind (&BoussinesqFlowProblem:: - local_assemble_temperature_matrix, - this, - std_cxx1x::_1, - std_cxx1x::_2, - std_cxx1x::_3), - std_cxx1x::bind (&BoussinesqFlowProblem:: - copy_local_to_global_temperature_matrix, - this, - std_cxx1x::_1), - Assembly::Scratch:: - TemperatureMatrix (temperature_fe, mapping, quadrature_formula), - Assembly::CopyData:: - TemperatureMatrix (temperature_fe)); + temperature_dof_handler.begin_active()), + CellFilter (IteratorFilters::LocallyOwnedCell(), + temperature_dof_handler.end()), + std_cxx1x::bind (&BoussinesqFlowProblem:: + local_assemble_temperature_matrix, + this, + std_cxx1x::_1, + std_cxx1x::_2, + std_cxx1x::_3), + std_cxx1x::bind (&BoussinesqFlowProblem:: + copy_local_to_global_temperature_matrix, + this, + std_cxx1x::_1), + Assembly::Scratch:: + TemperatureMatrix (temperature_fe, mapping, quadrature_formula), + Assembly::CopyData:: + TemperatureMatrix (temperature_fe)); temperature_mass_matrix.compress(); temperature_stiffness_matrix.compress(); @@ -3167,39 +3167,39 @@ namespace Step32 } - // @sect5{Temperature right hand side assembly} - - // This is the last assembly function. It - // calculates the right hand side of the - // temperature system, which includes the - // convection and the stabilization - // terms. 
It includes a lot of evaluations - // of old solutions at the quadrature - // points (which are necessary for - // calculating the artificial viscosity of - // stabilization), but is otherwise similar - // to the other assembly functions. Notice, - // once again, how we resolve the dilemma - // of having inhomogeneous boundary - // conditions, by just making a right hand - // side at this point (compare the comments - // for the project() function - // above): We create some matrix columns - // with exactly the values that would be - // entered for the temperature stiffness - // matrix, in case we have inhomogeneously - // constrained dofs. That will account for - // the correct balance of the right hand - // side vector with the matrix system of - // temperature. + // @sect5{Temperature right hand side assembly} + + // This is the last assembly function. It + // calculates the right hand side of the + // temperature system, which includes the + // convection and the stabilization + // terms. It includes a lot of evaluations + // of old solutions at the quadrature + // points (which are necessary for + // calculating the artificial viscosity of + // stabilization), but is otherwise similar + // to the other assembly functions. Notice, + // once again, how we resolve the dilemma + // of having inhomogeneous boundary + // conditions, by just making a right hand + // side at this point (compare the comments + // for the project() function + // above): We create some matrix columns + // with exactly the values that would be + // entered for the temperature stiffness + // matrix, in case we have inhomogeneously + // constrained dofs. That will account for + // the correct balance of the right hand + // side vector with the matrix system of + // temperature. 
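A note on the extrapolation used throughout the function that follows: every "old" quantity $w$ entering the right hand side (temperature gradient, velocity, strain rate) is extrapolated to the new time level as $w^* = (1 + k_n/k_{n-1})\,w^{n-1} - (k_n/k_{n-1})\,w^{n-2}$ for the BDF-2 steps and simply $w^* = w^{n-1}$ in the first (backward Euler) step, with $k_n$ = time_step and $k_{n-1}$ = old_time_step. For the velocity, for example, this reads:

    const Tensor<1,dim> extrapolated_u
      = (use_bdf2_scheme ?
         (scratch.old_velocity_values[q] * (1 + time_step/old_time_step)
          -
          scratch.old_old_velocity_values[q] * time_step/old_time_step)
         :
         scratch.old_velocity_values[q]);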
template void BoussinesqFlowProblem:: local_assemble_temperature_rhs (const std::pair global_T_range, - const double global_max_velocity, - const double global_entropy_variation, - const typename DoFHandler::active_cell_iterator &cell, - Assembly::Scratch::TemperatureRHS &scratch, - Assembly::CopyData::TemperatureRHS &data) + const double global_max_velocity, + const double global_entropy_variation, + const typename DoFHandler::active_cell_iterator &cell, + Assembly::Scratch::TemperatureRHS &scratch, + Assembly::CopyData::TemperatureRHS &data) { const bool use_bdf2_scheme = (timestep_number != 0); @@ -3216,149 +3216,149 @@ namespace Step32 typename DoFHandler::active_cell_iterator stokes_cell (&triangulation, - cell->level(), - cell->index(), - &stokes_dof_handler); + cell->level(), + cell->index(), + &stokes_dof_handler); scratch.stokes_fe_values.reinit (stokes_cell); scratch.temperature_fe_values.get_function_values (old_temperature_solution, - scratch.old_temperature_values); + scratch.old_temperature_values); scratch.temperature_fe_values.get_function_values (old_old_temperature_solution, - scratch.old_old_temperature_values); + scratch.old_old_temperature_values); scratch.temperature_fe_values.get_function_gradients (old_temperature_solution, - scratch.old_temperature_grads); + scratch.old_temperature_grads); scratch.temperature_fe_values.get_function_gradients (old_old_temperature_solution, - scratch.old_old_temperature_grads); + scratch.old_old_temperature_grads); scratch.temperature_fe_values.get_function_laplacians (old_temperature_solution, - scratch.old_temperature_laplacians); + scratch.old_temperature_laplacians); scratch.temperature_fe_values.get_function_laplacians (old_old_temperature_solution, - scratch.old_old_temperature_laplacians); + scratch.old_old_temperature_laplacians); scratch.stokes_fe_values[velocities].get_function_values (stokes_solution, - scratch.old_velocity_values); + scratch.old_velocity_values); scratch.stokes_fe_values[velocities].get_function_values (old_stokes_solution, - scratch.old_old_velocity_values); + scratch.old_old_velocity_values); scratch.stokes_fe_values[velocities].get_function_symmetric_gradients (stokes_solution, - scratch.old_strain_rates); + scratch.old_strain_rates); scratch.stokes_fe_values[velocities].get_function_symmetric_gradients (old_stokes_solution, - scratch.old_old_strain_rates); + scratch.old_old_strain_rates); const double nu = compute_viscosity (scratch.old_temperature_values, - scratch.old_old_temperature_values, - scratch.old_temperature_grads, - scratch.old_old_temperature_grads, - scratch.old_temperature_laplacians, - scratch.old_old_temperature_laplacians, - scratch.old_velocity_values, - scratch.old_old_velocity_values, - scratch.old_strain_rates, - scratch.old_old_strain_rates, - global_max_velocity, - global_T_range.second - global_T_range.first, - 0.5 * (global_T_range.second + global_T_range.first), - global_entropy_variation, - cell->diameter()); + scratch.old_old_temperature_values, + scratch.old_temperature_grads, + scratch.old_old_temperature_grads, + scratch.old_temperature_laplacians, + scratch.old_old_temperature_laplacians, + scratch.old_velocity_values, + scratch.old_old_velocity_values, + scratch.old_strain_rates, + scratch.old_old_strain_rates, + global_max_velocity, + global_T_range.second - global_T_range.first, + 0.5 * (global_T_range.second + global_T_range.first), + global_entropy_variation, + cell->diameter()); for (unsigned int q=0; q ext_grad_T - = (use_bdf2_scheme ? 
- (scratch.old_temperature_grads[q] * - (1 + time_step/old_time_step) - - - scratch.old_old_temperature_grads[q] * - time_step/old_time_step) - : - scratch.old_temperature_grads[q]); - - const Tensor<1,dim> extrapolated_u - = (use_bdf2_scheme ? - (scratch.old_velocity_values[q] * - (1 + time_step/old_time_step) - - - scratch.old_old_velocity_values[q] * - time_step/old_time_step) - : - scratch.old_velocity_values[q]); - - const SymmetricTensor<2,dim> extrapolated_strain_rate - = (use_bdf2_scheme ? - (scratch.old_strain_rates[q] * - (1 + time_step/old_time_step) - - - scratch.old_old_strain_rates[q] * - time_step/old_time_step) - : - scratch.old_strain_rates[q]); - - const double gamma - = ((EquationData::radiogenic_heating * EquationData::density(ext_T) - + - 2 * EquationData::eta * extrapolated_strain_rate * extrapolated_strain_rate) / - (EquationData::density(ext_T) * EquationData::specific_heat)); - - for (unsigned int i=0; i ext_grad_T + = (use_bdf2_scheme ? + (scratch.old_temperature_grads[q] * + (1 + time_step/old_time_step) + - + scratch.old_old_temperature_grads[q] * + time_step/old_time_step) + : + scratch.old_temperature_grads[q]); + + const Tensor<1,dim> extrapolated_u + = (use_bdf2_scheme ? + (scratch.old_velocity_values[q] * + (1 + time_step/old_time_step) + - + scratch.old_old_velocity_values[q] * + time_step/old_time_step) + : + scratch.old_velocity_values[q]); + + const SymmetricTensor<2,dim> extrapolated_strain_rate + = (use_bdf2_scheme ? + (scratch.old_strain_rates[q] * + (1 + time_step/old_time_step) + - + scratch.old_old_strain_rates[q] * + time_step/old_time_step) + : + scratch.old_strain_rates[q]); + + const double gamma + = ((EquationData::radiogenic_heating * EquationData::density(ext_T) + + + 2 * EquationData::eta * extrapolated_strain_rate * extrapolated_strain_rate) / + (EquationData::density(ext_T) * EquationData::specific_heat)); + + for (unsigned int i=0; i &data) { temperature_constraints.distribute_local_to_global (data.local_rhs, - data.local_dof_indices, - temperature_rhs, - data.matrix_for_bc); + data.local_dof_indices, + temperature_rhs, + data.matrix_for_bc); } - // In the function that runs the WorkStream - // for actually calculating the right hand - // side, we also generate the final - // matrix. As mentioned above, it is a sum - // of the mass matrix and the Laplace - // matrix, times some time step-dependent - // weight. This weight is specified by the - // BDF-2 time integration scheme, see the - // introduction in step-31. What is new in - // this tutorial program (in addition to - // the use of MPI parallelization and the - // WorkStream class), is that we now - // precompute the temperature - // preconditioner as well. The reason is - // that the setup of the Jacobi - // preconditioner takes a noticable time - // compared to the solver because we - // usually only need between 10 and 20 - // iterations for solving the temperature - // system (this might sound strange, as - // Jacobi really only consists of a - // diagonal, but in Trilinos it is derived - // from more general framework for point - // relaxation preconditioners which is a - // bit inefficient). Hence, it is more - // efficient to precompute the - // preconditioner, even though the matrix - // entries may slightly change because the - // time step might change. This is not too - // big a problem because we remesh every - // few time steps (and regenerate the - // preconditioner then). 
+ // In the function that runs the WorkStream + // for actually calculating the right hand + // side, we also generate the final + // matrix. As mentioned above, it is a sum + // of the mass matrix and the Laplace + // matrix, times some time step-dependent + // weight. This weight is specified by the + // BDF-2 time integration scheme, see the + // introduction in step-31. What is new in + // this tutorial program (in addition to + // the use of MPI parallelization and the + // WorkStream class), is that we now + // precompute the temperature + // preconditioner as well. The reason is + // that the setup of the Jacobi + // preconditioner takes a noticable time + // compared to the solver because we + // usually only need between 10 and 20 + // iterations for solving the temperature + // system (this might sound strange, as + // Jacobi really only consists of a + // diagonal, but in Trilinos it is derived + // from more general framework for point + // relaxation preconditioners which is a + // bit inefficient). Hence, it is more + // efficient to precompute the + // preconditioner, even though the matrix + // entries may slightly change because the + // time step might change. This is not too + // big a problem because we remesh every + // few time steps (and regenerate the + // preconditioner then). template void BoussinesqFlowProblem::assemble_temperature_system (const double maximal_velocity) { @@ -3415,44 +3415,44 @@ namespace Step32 if (use_bdf2_scheme == true) { - temperature_matrix.copy_from (temperature_mass_matrix); - temperature_matrix *= (2*time_step + old_time_step) / - (time_step + old_time_step); - temperature_matrix.add (time_step, temperature_stiffness_matrix); + temperature_matrix.copy_from (temperature_mass_matrix); + temperature_matrix *= (2*time_step + old_time_step) / + (time_step + old_time_step); + temperature_matrix.add (time_step, temperature_stiffness_matrix); } else { - temperature_matrix.copy_from (temperature_mass_matrix); - temperature_matrix.add (time_step, temperature_stiffness_matrix); + temperature_matrix.copy_from (temperature_mass_matrix); + temperature_matrix.add (time_step, temperature_stiffness_matrix); } temperature_matrix.compress(); if (rebuild_temperature_preconditioner == true) { - T_preconditioner.reset (new TrilinosWrappers::PreconditionJacobi()); - T_preconditioner->initialize (temperature_matrix); - rebuild_temperature_preconditioner = false; + T_preconditioner.reset (new TrilinosWrappers::PreconditionJacobi()); + T_preconditioner->initialize (temperature_matrix); + rebuild_temperature_preconditioner = false; } - // The next part is computing the right - // hand side vectors. To do so, we first - // compute the average temperature $T_m$ - // that we use for evaluating the - // artificial viscosity stabilization - // through the residual $E(T) = - // (T-T_m)^2$. We do this by defining the - // midpoint between maximum and minimum - // temperature as average temperature in - // the definition of the entropy - // viscosity. An alternative would be to - // use the integral average, but the - // results are not very sensitive to this - // choice. The rest then only requires - // calling WorkStream::run again, binding - // the arguments to the - // local_assemble_temperature_rhs - // function that are the same in every - // call to the correct values: + // The next part is computing the right + // hand side vectors. 
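Before moving on to the right hand side: the time-stepping matrix assembled in the hunk above is $T = \frac{2k_n + k_{n-1}}{k_n + k_{n-1}} M + k_n A$ for the BDF-2 steps and $T = M + k_n A$ in the first (backward Euler) step, where $M$ and $A$ are the temperature mass and stiffness matrices. An equivalent, slightly more compact way of writing the same code, including the Jacobi preconditioner that is precomputed here:

    temperature_matrix.copy_from (temperature_mass_matrix);
    if (use_bdf2_scheme)
      temperature_matrix *= (2*time_step + old_time_step) /
                            (time_step + old_time_step);
    temperature_matrix.add (time_step, temperature_stiffness_matrix);
    temperature_matrix.compress ();

    if (rebuild_temperature_preconditioner)
      {
        T_preconditioner.reset (new TrilinosWrappers::PreconditionJacobi());
        T_preconditioner->initialize (temperature_matrix);
        rebuild_temperature_preconditioner = false;
      }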
To do so, we first + // compute the average temperature $T_m$ + // that we use for evaluating the + // artificial viscosity stabilization + // through the residual $E(T) = + // (T-T_m)^2$. We do this by defining the + // midpoint between maximum and minimum + // temperature as average temperature in + // the definition of the entropy + // viscosity. An alternative would be to + // use the integral average, but the + // results are not very sensitive to this + // choice. The rest then only requires + // calling WorkStream::run again, binding + // the arguments to the + // local_assemble_temperature_rhs + // function that are the same in every + // call to the correct values: temperature_rhs = 0; const QGauss quadrature_formula(parameters.temperature_degree+2); @@ -3460,7 +3460,7 @@ namespace Step32 global_T_range = get_extrapolated_temperature_range(); const double average_temperature = 0.5 * (global_T_range.first + - global_T_range.second); + global_T_range.second); const double global_entropy_variation = get_entropy_variation (average_temperature); @@ -3470,27 +3470,27 @@ namespace Step32 WorkStream:: run (CellFilter (IteratorFilters::LocallyOwnedCell(), - temperature_dof_handler.begin_active()), - CellFilter (IteratorFilters::LocallyOwnedCell(), - temperature_dof_handler.end()), - std_cxx1x::bind (&BoussinesqFlowProblem:: - local_assemble_temperature_rhs, - this, - global_T_range, - maximal_velocity, - global_entropy_variation, - std_cxx1x::_1, - std_cxx1x::_2, - std_cxx1x::_3), - std_cxx1x::bind (&BoussinesqFlowProblem:: - copy_local_to_global_temperature_rhs, - this, - std_cxx1x::_1), - Assembly::Scratch:: - TemperatureRHS (temperature_fe, stokes_fe, mapping, - quadrature_formula), - Assembly::CopyData:: - TemperatureRHS (temperature_fe)); + temperature_dof_handler.begin_active()), + CellFilter (IteratorFilters::LocallyOwnedCell(), + temperature_dof_handler.end()), + std_cxx1x::bind (&BoussinesqFlowProblem:: + local_assemble_temperature_rhs, + this, + global_T_range, + maximal_velocity, + global_entropy_variation, + std_cxx1x::_1, + std_cxx1x::_2, + std_cxx1x::_3), + std_cxx1x::bind (&BoussinesqFlowProblem:: + copy_local_to_global_temperature_rhs, + this, + std_cxx1x::_1), + Assembly::Scratch:: + TemperatureRHS (temperature_fe, stokes_fe, mapping, + quadrature_formula), + Assembly::CopyData:: + TemperatureRHS (temperature_fe)); temperature_rhs.compress(Add); } @@ -3498,89 +3498,89 @@ namespace Step32 - // @sect4{BoussinesqFlowProblem::solve} - - // This function solves the linear systems - // in each time step of the Boussinesq - // problem. First, we - // work on the Stokes system and then on - // the temperature system. In essence, it - // does the same things as the respective - // function in step-31. However, there are a few - // changes here. - // - // The first change is related to the way - // we store our solution: we keep the - // vectors with locally owned degrees of - // freedom plus ghost nodes on each MPI - // node. When we enter a solver which is - // supposed to perform matrix-vector - // products with a distributed matrix, this - // is not the appropriate form, - // though. There, we will want to have the - // solution vector to be distributed in the - // same way as the matrix, i.e. without any - // ghosts. So what we do first is to - // generate a distributed vector called - // distributed_stokes_solution - // and put only the locally owned dofs into - // that, which is neatly done by the - // operator= of the Trilinos - // vector. 
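The back-and-forth between ghosted and fully distributed vectors described in this paragraph follows a simple pattern. The sketch below is not a verbatim excerpt of the solve() function (in particular, the constructor used to size the distributed vector is an assumption); it only illustrates the idea:

    // A vector partitioned like the matrix, i.e. without ghost entries.
    TrilinosWrappers::MPI::BlockVector distributed_stokes_solution (stokes_rhs);

    // Copy in the locally owned entries of the ghosted solution vector.
    distributed_stokes_solution = stokes_solution;

    // ... run the Stokes solver on distributed_stokes_solution ...

    // Apply the constraints and copy back, which also refreshes the ghost
    // entries on every processor.
    stokes_constraints.distribute (distributed_stokes_solution);
    stokes_solution = distributed_stokes_solution;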
- // - // Next, we scale the pressure solution (or - // rather, the initial guess) for the - // solver so that it matches with the - // length scales in the matrices, as - // discussed in the introduction. We also - // immediately scale the pressure solution - // back to the correct units after the - // solution is completed. We also need to - // set the pressure values at hanging nodes - // to zero. This we also did in step-31 in - // order not to disturb the Schur - // complement by some vector entries that - // actually are irrelevant during the solve - // stage. As a difference to step-31, here - // we do it only for the locally owned - // pressure dofs. After solving for the - // Stokes solution, each processor copies - // the distributed solution back into the - // solution vector that also includes ghost - // elements. - // - // The third and most obvious change is - // that we have two variants for the Stokes - // solver: A fast solver that sometimes - // breaks down, and a robust solver that is - // slower. This is what we already - // discussed in the introduction. Here is - // how we realize it: First, we perform 30 - // iterations with the fast solver based on - // the simple preconditioner based on the - // AMG V-cycle instead of an approximate - // solve (this is indicated by the - // false argument to the - // LinearSolvers::BlockSchurPreconditioner - // object). If we converge, everything is - // fine. If we do not converge, the solver - // control object will throw an exception - // SolverControl::NoConvergence. Usually, - // this would abort the program because we - // don't catch them in our usual - // solve() functions. This is - // certainly not what we want to happen - // here. Rather, we want to switch to the - // strong solver and continue the solution - // process with whatever vector we got so - // far. Hence, we catch the exception with - // the C++ try/catch mechanism. We then - // simply go through the same solver - // sequence again in the catch - // clause, this time passing the @p true - // flag to the preconditioner for the - // strong solver, signaling an approximate - // CG solve. + // @sect4{BoussinesqFlowProblem::solve} + + // This function solves the linear systems + // in each time step of the Boussinesq + // problem. First, we + // work on the Stokes system and then on + // the temperature system. In essence, it + // does the same things as the respective + // function in step-31. However, there are a few + // changes here. + // + // The first change is related to the way + // we store our solution: we keep the + // vectors with locally owned degrees of + // freedom plus ghost nodes on each MPI + // node. When we enter a solver which is + // supposed to perform matrix-vector + // products with a distributed matrix, this + // is not the appropriate form, + // though. There, we will want to have the + // solution vector to be distributed in the + // same way as the matrix, i.e. without any + // ghosts. So what we do first is to + // generate a distributed vector called + // distributed_stokes_solution + // and put only the locally owned dofs into + // that, which is neatly done by the + // operator= of the Trilinos + // vector. + // + // Next, we scale the pressure solution (or + // rather, the initial guess) for the + // solver so that it matches with the + // length scales in the matrices, as + // discussed in the introduction. We also + // immediately scale the pressure solution + // back to the correct units after the + // solution is completed. 
We also need to + // set the pressure values at hanging nodes + // to zero. This we also did in step-31 in + // order not to disturb the Schur + // complement by some vector entries that + // actually are irrelevant during the solve + // stage. As a difference to step-31, here + // we do it only for the locally owned + // pressure dofs. After solving for the + // Stokes solution, each processor copies + // the distributed solution back into the + // solution vector that also includes ghost + // elements. + // + // The third and most obvious change is + // that we have two variants for the Stokes + // solver: A fast solver that sometimes + // breaks down, and a robust solver that is + // slower. This is what we already + // discussed in the introduction. Here is + // how we realize it: First, we perform 30 + // iterations with the fast solver based on + // the simple preconditioner based on the + // AMG V-cycle instead of an approximate + // solve (this is indicated by the + // false argument to the + // LinearSolvers::BlockSchurPreconditioner + // object). If we converge, everything is + // fine. If we do not converge, the solver + // control object will throw an exception + // SolverControl::NoConvergence. Usually, + // this would abort the program because we + // don't catch them in our usual + // solve() functions. This is + // certainly not what we want to happen + // here. Rather, we want to switch to the + // strong solver and continue the solution + // process with whatever vector we got so + // far. Hence, we catch the exception with + // the C++ try/catch mechanism. We then + // simply go through the same solver + // sequence again in the catch + // clause, this time passing the @p true + // flag to the preconditioner for the + // strong solver, signaling an approximate + // CG solve. template void BoussinesqFlowProblem::solve () { @@ -3590,19 +3590,19 @@ namespace Step32 pcout << " Solving Stokes system... 
" << std::flush; TrilinosWrappers::MPI::BlockVector - distributed_stokes_solution (stokes_rhs); + distributed_stokes_solution (stokes_rhs); distributed_stokes_solution = stokes_solution; distributed_stokes_solution.block(1) /= EquationData::pressure_scaling; const unsigned int - start = (distributed_stokes_solution.block(0).size() + - distributed_stokes_solution.block(1).local_range().first), - end = (distributed_stokes_solution.block(0).size() + - distributed_stokes_solution.block(1).local_range().second); + start = (distributed_stokes_solution.block(0).size() + + distributed_stokes_solution.block(1).local_range().first), + end = (distributed_stokes_solution.block(0).size() + + distributed_stokes_solution.block(1).local_range().second); for (unsigned int i=start; i mem; @@ -3612,42 +3612,42 @@ namespace Step32 SolverControl solver_control (30, solver_tolerance); try - { - const LinearSolvers::BlockSchurPreconditioner - preconditioner (stokes_matrix, stokes_preconditioner_matrix, - *Mp_preconditioner, *Amg_preconditioner, - false); - - SolverFGMRES - solver(solver_control, mem, - SolverFGMRES:: - AdditionalData(30, true)); - solver.solve(stokes_matrix, distributed_stokes_solution, stokes_rhs, - preconditioner); - - n_iterations = solver_control.last_step(); - } + { + const LinearSolvers::BlockSchurPreconditioner + preconditioner (stokes_matrix, stokes_preconditioner_matrix, + *Mp_preconditioner, *Amg_preconditioner, + false); + + SolverFGMRES + solver(solver_control, mem, + SolverFGMRES:: + AdditionalData(30, true)); + solver.solve(stokes_matrix, distributed_stokes_solution, stokes_rhs, + preconditioner); + + n_iterations = solver_control.last_step(); + } catch (SolverControl::NoConvergence) - { - const LinearSolvers::BlockSchurPreconditioner - preconditioner (stokes_matrix, stokes_preconditioner_matrix, - *Mp_preconditioner, *Amg_preconditioner, - true); - - SolverControl solver_control_refined (stokes_matrix.m(), solver_tolerance); - SolverFGMRES - solver(solver_control_refined, mem, - SolverFGMRES:: - AdditionalData(50, true)); - solver.solve(stokes_matrix, distributed_stokes_solution, stokes_rhs, - preconditioner); - - n_iterations = (solver_control.last_step() + - solver_control_refined.last_step()); - } + { + const LinearSolvers::BlockSchurPreconditioner + preconditioner (stokes_matrix, stokes_preconditioner_matrix, + *Mp_preconditioner, *Amg_preconditioner, + true); + + SolverControl solver_control_refined (stokes_matrix.m(), solver_tolerance); + SolverFGMRES + solver(solver_control_refined, mem, + SolverFGMRES:: + AdditionalData(50, true)); + solver.solve(stokes_matrix, distributed_stokes_solution, stokes_rhs, + preconditioner); + + n_iterations = (solver_control.last_step() + + solver_control_refined.last_step()); + } stokes_constraints.distribute (distributed_stokes_solution); @@ -3656,62 +3656,62 @@ namespace Step32 stokes_solution = distributed_stokes_solution; pcout << n_iterations << " iterations." - << std::endl; + << std::endl; } computing_timer.exit_section(); - // Now let's turn to the temperature - // part: First, we compute the time step - // size. We found that we need smaller - // time steps for 3D than for 2D for the - // shell geometry. This is because the - // cells are more distorted in that case - // (it is the smallest edge length that - // determines the CFL number). 
Instead of - // computing the time step from maximum - // velocity and minimal mesh size as in - // step-31, we compute local CFL numbers, - // i.e., on each cell we compute the - // maximum velocity times the mesh size, - // and compute the maximum of - // them. Hence, we need to choose the - // factor in front of the time step - // slightly smaller. - // - // After temperature right hand side - // assembly, we solve the linear system - // for temperature (with fully - // distributed vectors without any - // ghosts), apply constraints and copy - // the vector back to one with ghosts. - // - // In the end, we extract the temperature - // range similarly to step-31 to produce - // some output (for example in order to - // help us choose the stabilization - // constants, as discussed in the - // introduction). The only difference is - // that we need to exchange maxima over - // all processors. + // Now let's turn to the temperature + // part: First, we compute the time step + // size. We found that we need smaller + // time steps for 3D than for 2D for the + // shell geometry. This is because the + // cells are more distorted in that case + // (it is the smallest edge length that + // determines the CFL number). Instead of + // computing the time step from maximum + // velocity and minimal mesh size as in + // step-31, we compute local CFL numbers, + // i.e., on each cell we compute the + // maximum velocity times the mesh size, + // and compute the maximum of + // them. Hence, we need to choose the + // factor in front of the time step + // slightly smaller. + // + // After temperature right hand side + // assembly, we solve the linear system + // for temperature (with fully + // distributed vectors without any + // ghosts), apply constraints and copy + // the vector back to one with ghosts. + // + // In the end, we extract the temperature + // range similarly to step-31 to produce + // some output (for example in order to + // help us choose the stabilization + // constants, as discussed in the + // introduction). The only difference is + // that we need to exchange maxima over + // all processors. computing_timer.enter_section (" Assemble temperature rhs"); { old_time_step = time_step; const double scaling = (dim==3 ? 
0.25 : 1.0); time_step = (scaling/(2.1*dim*std::sqrt(1.*dim)) / - (parameters.temperature_degree * - get_cfl_number())); + (parameters.temperature_degree * + get_cfl_number())); const double maximal_velocity = get_maximal_velocity(); pcout << " Maximal velocity: " - << maximal_velocity *EquationData::year_in_seconds * 100 - << " cm/year" - << std::endl; + << maximal_velocity *EquationData::year_in_seconds * 100 + << " cm/year" + << std::endl; pcout << " " << "Time step: " - << time_step/EquationData::year_in_seconds - << " years" - << std::endl; + << time_step/EquationData::year_in_seconds + << " years" + << std::endl; temperature_solution = old_temperature_solution; assemble_temperature_system (maximal_velocity); @@ -3721,85 +3721,85 @@ namespace Step32 computing_timer.enter_section (" Solve temperature system"); { SolverControl solver_control (temperature_matrix.m(), - 1e-12*temperature_rhs.l2_norm()); + 1e-12*temperature_rhs.l2_norm()); SolverCG cg (solver_control); TrilinosWrappers::MPI::Vector - distributed_temperature_solution (temperature_rhs); + distributed_temperature_solution (temperature_rhs); distributed_temperature_solution = temperature_solution; cg.solve (temperature_matrix, distributed_temperature_solution, - temperature_rhs, *T_preconditioner); + temperature_rhs, *T_preconditioner); temperature_constraints.distribute (distributed_temperature_solution); temperature_solution = distributed_temperature_solution; pcout << " " - << solver_control.last_step() - << " CG iterations for temperature" << std::endl; + << solver_control.last_step() + << " CG iterations for temperature" << std::endl; computing_timer.exit_section(); double temperature[2] = { std::numeric_limits::max(), - -std::numeric_limits::max() }; + -std::numeric_limits::max() }; double global_temperature[2]; for (unsigned int i=0; i (temperature[0], - distributed_temperature_solution.trilinos_vector()[0][i]); - temperature[1] = std::max (temperature[1], - distributed_temperature_solution.trilinos_vector()[0][i]); - } + { + temperature[0] = std::min (temperature[0], + distributed_temperature_solution.trilinos_vector()[0][i]); + temperature[1] = std::max (temperature[1], + distributed_temperature_solution.trilinos_vector()[0][i]); + } temperature[0] *= -1.0; Utilities::MPI::max (temperature, MPI_COMM_WORLD, global_temperature); global_temperature[0] *= -1.0; pcout << " Temperature range: " - << global_temperature[0] << ' ' << global_temperature[1] - << std::endl; + << global_temperature[0] << ' ' << global_temperature[1] + << std::endl; } } - // @sect4{BoussinesqFlowProblem::output_results} - - // Next comes the function that generates - // the output. The quantities to output - // could be introduced manually like we did - // in step-31. An alternative is to hand - // this task over to a class PostProcessor - // that inherits from the class - // DataPostprocessor, which can be attached - // to DataOut. This allows us to output - // derived quantities from the solution, - // like the friction heating included in - // this example. It overloads the virtual - // function - // DataPostprocessor::compute_derived_quantities_vector, - // which is then internally called from - // DataOut::build_patches. We have to give - // it values of the numerical solution, its - // derivatives, normals to the cell, the - // actual evaluation points and any - // additional quantities. This follows the - // same procedure as discussed in step-29 - // and other programs. 
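
The sign trick used above to obtain both the global minimum and maximum temperature from a single max-reduction can be reproduced with raw MPI. The sketch below uses MPI_Allreduce directly instead of deal.II's Utilities::MPI::max wrapper, with made-up per-rank values:

#include <mpi.h>
#include <algorithm>
#include <limits>
#include <iostream>

int main (int argc, char **argv)
{
  MPI_Init (&argc, &argv);
  int rank;
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);

  // Made-up local data: each rank owns three "temperature" values.
  const double local_values[3] = {rank + 0.5, rank + 1.0, rank + 2.0};

  double local[2] = { std::numeric_limits<double>::max(),
                      -std::numeric_limits<double>::max() };
  for (unsigned int i=0; i<3; ++i)
    {
      local[0] = std::min (local[0], local_values[i]);
      local[1] = std::max (local[1], local_values[i]);
    }

  // Negate the minimum so that one MPI_MAX reduction yields both extrema:
  local[0] *= -1.0;
  double global[2];
  MPI_Allreduce (local, global, 2, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
  global[0] *= -1.0;

  if (rank == 0)
    std::cout << "Temperature range: " << global[0] << ' ' << global[1]
              << std::endl;

  MPI_Finalize ();
}
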
+ // @sect4{BoussinesqFlowProblem::output_results} + + // Next comes the function that generates + // the output. The quantities to output + // could be introduced manually like we did + // in step-31. An alternative is to hand + // this task over to a class PostProcessor + // that inherits from the class + // DataPostprocessor, which can be attached + // to DataOut. This allows us to output + // derived quantities from the solution, + // like the friction heating included in + // this example. It overloads the virtual + // function + // DataPostprocessor::compute_derived_quantities_vector, + // which is then internally called from + // DataOut::build_patches. We have to give + // it values of the numerical solution, its + // derivatives, normals to the cell, the + // actual evaluation points and any + // additional quantities. This follows the + // same procedure as discussed in step-29 + // and other programs. template class BoussinesqFlowProblem::Postprocessor : public DataPostprocessor { public: Postprocessor (const unsigned int partition, - const double minimal_pressure); + const double minimal_pressure); virtual void compute_derived_quantities_vector (const std::vector > &uh, - const std::vector > > &duh, - const std::vector > > &dduh, - const std::vector > &normals, - const std::vector > &evaluation_points, - std::vector > &computed_quantities) const; + const std::vector > > &duh, + const std::vector > > &dduh, + const std::vector > &normals, + const std::vector > &evaluation_points, + std::vector > &computed_quantities) const; virtual std::vector get_names () const; @@ -3818,24 +3818,24 @@ namespace Step32 template BoussinesqFlowProblem::Postprocessor:: Postprocessor (const unsigned int partition, - const double minimal_pressure) - : - partition (partition), - minimal_pressure (minimal_pressure) + const double minimal_pressure) + : + partition (partition), + minimal_pressure (minimal_pressure) {} - // Here we define the names for the - // variables we want to output. These are - // the actual solution values for velocity, - // pressure, and temperature, as well as - // the friction heating and to each cell - // the number of the processor that owns - // it. This allows us to visualize the - // partitioning of the domain among the - // processors. Except for the velocity, - // which is vector-valued, all other - // quantities are scalar. + // Here we define the names for the + // variables we want to output. These are + // the actual solution values for velocity, + // pressure, and temperature, as well as + // the friction heating and to each cell + // the number of the processor that owns + // it. This allows us to visualize the + // partitioning of the domain among the + // processors. Except for the velocity, + // which is vector-valued, all other + // quantities are scalar. template std::vector BoussinesqFlowProblem::Postprocessor::get_names() const @@ -3857,7 +3857,7 @@ namespace Step32 { std::vector interpretation (dim, - DataComponentInterpretation::component_is_part_of_vector); + DataComponentInterpretation::component_is_part_of_vector); interpretation.push_back (DataComponentInterpretation::component_is_scalar); interpretation.push_back (DataComponentInterpretation::component_is_scalar); @@ -3876,35 +3876,35 @@ namespace Step32 } - // Now we implement the function that - // computes the derived quantities. As we - // also did for the output, we rescale the - // velocity from its SI units to something - // more readable, namely cm/year. 
Next, the - // pressure is scaled to be between 0 and - // the maximum pressure. This makes it more - // easily comparable -- in essence making - // all pressure variables positive or - // zero. Temperature is taken as is, and - // the friction heating is computed as $2 - // \eta \varepsilon(\mathbf{u}) \cdot - // \varepsilon(\mathbf{u})$. - // - // The quantities we output here are more - // for illustration, rather than for actual - // scientific value. We come back to this - // briefly in the results section of this - // program and explain what one may in fact - // be interested in. + // Now we implement the function that + // computes the derived quantities. As we + // also did for the output, we rescale the + // velocity from its SI units to something + // more readable, namely cm/year. Next, the + // pressure is scaled to be between 0 and + // the maximum pressure. This makes it more + // easily comparable -- in essence making + // all pressure variables positive or + // zero. Temperature is taken as is, and + // the friction heating is computed as $2 + // \eta \varepsilon(\mathbf{u}) \cdot + // \varepsilon(\mathbf{u})$. + // + // The quantities we output here are more + // for illustration, rather than for actual + // scientific value. We come back to this + // briefly in the results section of this + // program and explain what one may in fact + // be interested in. template void BoussinesqFlowProblem::Postprocessor:: compute_derived_quantities_vector (const std::vector > &uh, - const std::vector > > &duh, - const std::vector > > &/*dduh*/, - const std::vector > &/*normals*/, - const std::vector > &/*evaluation_points*/, - std::vector > &computed_quantities) const + const std::vector > > &duh, + const std::vector > > &/*dduh*/, + const std::vector > &/*normals*/, + const std::vector > &/*evaluation_points*/, + std::vector > &computed_quantities) const { const unsigned int n_quadrature_points = uh.size(); Assert (duh.size() == n_quadrature_points, ExcInternalError()); @@ -3913,75 +3913,75 @@ namespace Step32 for (unsigned int q=0; q grad_u; - for (unsigned int d=0; d strain_rate = symmetrize (grad_u); - computed_quantities[q](dim+2) = 2 * EquationData::eta * - strain_rate * strain_rate; + Tensor<2,dim> grad_u; + for (unsigned int d=0; d strain_rate = symmetrize (grad_u); + computed_quantities[q](dim+2) = 2 * EquationData::eta * + strain_rate * strain_rate; - computed_quantities[q](dim+3) = partition; + computed_quantities[q](dim+3) = partition; } } - // The output_results() - // function does mostly what the - // corresponding one did in to step-31, in - // particular the merging data from the two - // DoFHandler objects (for the Stokes and - // the temperature parts of the problem) - // into one. There is one minor change: we - // make sure that each processor only works - // on the subdomain it owns locally (and - // not on ghost or artificial cells) when - // building the joint solution vector. The - // same will then have to be done in - // DataOut::build_patches(), but that - // function does so automatically. - // - // What we end up with is a set of patches - // that we can write using the functions in - // DataOutBase in a variety of output - // formats. Here, we then have to pay - // attention that what each processor - // writes is really only its own part of - // the domain, i.e. we will want to write - // each processor's contribution into a - // separate file. This we do by adding an - // additional number to the filename when - // we write the solution. 
This is not - // really new, we did it similarly in - // step-40. Note that we write in the - // compressed format @p .vtu instead of - // plain vtk files, which saves quite some - // storage. - // - // All the rest of the work is done in the - // PostProcessor class. + // The output_results() + // function does mostly what the + // corresponding one did in to step-31, in + // particular the merging data from the two + // DoFHandler objects (for the Stokes and + // the temperature parts of the problem) + // into one. There is one minor change: we + // make sure that each processor only works + // on the subdomain it owns locally (and + // not on ghost or artificial cells) when + // building the joint solution vector. The + // same will then have to be done in + // DataOut::build_patches(), but that + // function does so automatically. + // + // What we end up with is a set of patches + // that we can write using the functions in + // DataOutBase in a variety of output + // formats. Here, we then have to pay + // attention that what each processor + // writes is really only its own part of + // the domain, i.e. we will want to write + // each processor's contribution into a + // separate file. This we do by adding an + // additional number to the filename when + // we write the solution. This is not + // really new, we did it similarly in + // step-40. Note that we write in the + // compressed format @p .vtu instead of + // plain vtk files, which saves quite some + // storage. + // + // All the rest of the work is done in the + // PostProcessor class. template void BoussinesqFlowProblem::output_results () { computing_timer.enter_section ("Postprocessing"); const FESystem joint_fe (stokes_fe, 1, - temperature_fe, 1); + temperature_fe, 1); DoFHandler joint_dof_handler (triangulation); joint_dof_handler.distribute_dofs (joint_fe); Assert (joint_dof_handler.n_dofs() == - stokes_dof_handler.n_dofs() + temperature_dof_handler.n_dofs(), - ExcInternalError()); + stokes_dof_handler.n_dofs() + temperature_dof_handler.n_dofs(), + ExcInternalError()); TrilinosWrappers::MPI::Vector joint_solution; joint_solution.reinit (joint_dof_handler.locally_owned_dofs(), MPI_COMM_WORLD); @@ -3992,43 +3992,43 @@ namespace Step32 std::vector local_temperature_dof_indices (temperature_fe.dofs_per_cell); typename DoFHandler::active_cell_iterator - joint_cell = joint_dof_handler.begin_active(), - joint_endc = joint_dof_handler.end(), - stokes_cell = stokes_dof_handler.begin_active(), - temperature_cell = temperature_dof_handler.begin_active(); + joint_cell = joint_dof_handler.begin_active(), + joint_endc = joint_dof_handler.end(), + stokes_cell = stokes_dof_handler.begin_active(), + temperature_cell = temperature_dof_handler.begin_active(); for (; joint_cell!=joint_endc; - ++joint_cell, ++stokes_cell, ++temperature_cell) - if (joint_cell->is_locally_owned()) - { - joint_cell->get_dof_indices (local_joint_dof_indices); - stokes_cell->get_dof_indices (local_stokes_dof_indices); - temperature_cell->get_dof_indices (local_temperature_dof_indices); - - for (unsigned int i=0; iis_locally_owned()) + { + joint_cell->get_dof_indices (local_joint_dof_indices); + stokes_cell->get_dof_indices (local_stokes_dof_indices); + temperature_cell->get_dof_indices (local_temperature_dof_indices); + + for (unsigned int i=0; i data_out; data_out.attach_dof_handler (joint_dof_handler); @@ -4048,50 +4048,50 @@ namespace Step32 static int out_index=0; const std::string filename = ("solution-" + - Utilities::int_to_string (out_index, 5) + - "." 
+ - Utilities::int_to_string - (triangulation.locally_owned_subdomain(), 4) + - ".vtu"); + Utilities::int_to_string (out_index, 5) + + "." + + Utilities::int_to_string + (triangulation.locally_owned_subdomain(), 4) + + ".vtu"); std::ofstream output (filename.c_str()); data_out.write_vtu (output); - // At this point, all processors have - // written their own files to disk. We - // could visualize them individually in - // Visit or Paraview, but in reality we - // of course want to visualize the whole - // set of files at once. To this end, we - // create a master file in each of the - // formats understood by Visit - // (.visit) and Paraview - // (.pvtu) on the zeroth - // processor that describes how the - // individual files are defining the - // global data set. + // At this point, all processors have + // written their own files to disk. We + // could visualize them individually in + // Visit or Paraview, but in reality we + // of course want to visualize the whole + // set of files at once. To this end, we + // create a master file in each of the + // formats understood by Visit + // (.visit) and Paraview + // (.pvtu) on the zeroth + // processor that describes how the + // individual files are defining the + // global data set. if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0) { - std::vector filenames; - for (unsigned int i=0; i filenames; + for (unsigned int i=0; isetup_dofs function that we - // call in the middle has its own timer - // section, we split timing this function - // into two sections. It will also allow us - // to easily identify which of the two is - // more expensive. - // - // One thing of note, however, is that we - // only want to compute error indicators on - // the locally owned subdomain. In order to - // achieve this, we pass one additional - // argument to the - // KellyErrorEstimator::estimate - // function. Note that the vector for error - // estimates is resized to the number of - // active cells present on the current - // process, which is less than the total - // number of active cells on all processors - // (but more than the number of locally - // owned active cells); each processor only - // has a few coarse cells around the - // locally owned ones, as also explained in - // step-40. - // - // The local error estimates are then - // handed to a %parallel version of - // GridRefinement (in namespace - // parallel::distributed::GridRefinement, - // see also step-40) which looks at the - // errors and finds the cells that need - // refinement by comparing the error values - // across processors. As in step-31, we - // want to limit the maximum grid level. So - // in case some cells have been marked that - // are already at the finest level, we - // simply clear the refine flags. + // @sect4{BoussinesqFlowProblem::refine_mesh} + + // This function isn't really new + // either. Since the + // setup_dofs function that we + // call in the middle has its own timer + // section, we split timing this function + // into two sections. It will also allow us + // to easily identify which of the two is + // more expensive. + // + // One thing of note, however, is that we + // only want to compute error indicators on + // the locally owned subdomain. In order to + // achieve this, we pass one additional + // argument to the + // KellyErrorEstimator::estimate + // function. 
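
The per-processor output file names built just above ("solution-<step>.<subdomain>.vtu", both numbers zero-padded) can also be assembled with nothing but the standard library; the sketch below mimics Utilities::int_to_string with std::setw/std::setfill and made-up index values:

#include <iomanip>
#include <sstream>
#include <string>
#include <iostream>

// Plain C++ analogue of the zero-padded file names used above:
// "solution-<step, 5 digits>.<subdomain, 4 digits>.vtu"
std::string vtu_file_name (const unsigned int out_index,
                           const unsigned int subdomain)
{
  std::ostringstream name;
  name << "solution-" << std::setw(5) << std::setfill('0') << out_index
       << '.'          << std::setw(4) << std::setfill('0') << subdomain
       << ".vtu";
  return name.str();
}

int main ()
{
  std::cout << vtu_file_name (12, 3) << std::endl;   // solution-00012.0003.vtu
}
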
Note that the vector for error + // estimates is resized to the number of + // active cells present on the current + // process, which is less than the total + // number of active cells on all processors + // (but more than the number of locally + // owned active cells); each processor only + // has a few coarse cells around the + // locally owned ones, as also explained in + // step-40. + // + // The local error estimates are then + // handed to a %parallel version of + // GridRefinement (in namespace + // parallel::distributed::GridRefinement, + // see also step-40) which looks at the + // errors and finds the cells that need + // refinement by comparing the error values + // across processors. As in step-31, we + // want to limit the maximum grid level. So + // in case some cells have been marked that + // are already at the finest level, we + // simply clear the refine flags. template void BoussinesqFlowProblem::refine_mesh (const unsigned int max_grid_level) { @@ -4147,40 +4147,40 @@ namespace Step32 Vector estimated_error_per_cell (triangulation.n_active_cells()); KellyErrorEstimator::estimate (temperature_dof_handler, - QGauss(parameters.temperature_degree+1), - typename FunctionMap::type(), - temperature_solution, - estimated_error_per_cell, - std::vector(), - 0, - 0, - triangulation.locally_owned_subdomain()); + QGauss(parameters.temperature_degree+1), + typename FunctionMap::type(), + temperature_solution, + estimated_error_per_cell, + std::vector(), + 0, + 0, + triangulation.locally_owned_subdomain()); parallel::distributed::GridRefinement:: refine_and_coarsen_fixed_fraction (triangulation, - estimated_error_per_cell, - 0.3, 0.1); + estimated_error_per_cell, + 0.3, 0.1); if (triangulation.n_levels() > max_grid_level) for (typename Triangulation::active_cell_iterator - cell = triangulation.begin_active(max_grid_level); - cell != triangulation.end(); ++cell) - cell->clear_refine_flag (); - - // With all flags marked as necessary, we - // set up the - // parallel::distributed::SolutionTransfer - // object to transfer the solutions for - // the current time level and the next - // older one. The syntax is similar to - // the non-%parallel solution transfer - // (with the exception that here a - // pointer to the vector entries is - // enough). The remainder of the function - // is concerned with setting up the data - // structures again after mesh refinement - // and restoring the solution vectors on - // the new mesh. + cell = triangulation.begin_active(max_grid_level); + cell != triangulation.end(); ++cell) + cell->clear_refine_flag (); + + // With all flags marked as necessary, we + // set up the + // parallel::distributed::SolutionTransfer + // object to transfer the solutions for + // the current time level and the next + // older one. The syntax is similar to + // the non-%parallel solution transfer + // (with the exception that here a + // pointer to the vector entries is + // enough). The remainder of the function + // is concerned with setting up the data + // structures again after mesh refinement + // and restoring the solution vectors on + // the new mesh. std::vector x_temperature (2); x_temperature[0] = &temperature_solution; x_temperature[1] = &old_temperature_solution; @@ -4235,28 +4235,28 @@ namespace Step32 - // @sect4{BoussinesqFlowProblem::run} + // @sect4{BoussinesqFlowProblem::run} - // This is the final and controlling - // function in this class. It, in fact, - // runs the entire rest of the program and - // is, once more, very similar to - // step-31. 
We use a different mesh now (a - // GridGenerator::hyper_shell instead of a - // simple cube geometry), and use the - // project_temperature_field() - // function instead of the library function - // VectorTools::project, the - // rest is as before. + // This is the final and controlling + // function in this class. It, in fact, + // runs the entire rest of the program and + // is, once more, very similar to + // step-31. We use a different mesh now (a + // GridGenerator::hyper_shell instead of a + // simple cube geometry), and use the + // project_temperature_field() + // function instead of the library function + // VectorTools::project, the + // rest is as before. template void BoussinesqFlowProblem::run () { GridGenerator::hyper_shell (triangulation, - Point(), - EquationData::R0, - EquationData::R1, - (dim==3) ? 96 : 12, - true); + Point(), + EquationData::R0, + EquationData::R1, + (dim==3) ? 96 : 12, + true); static HyperShellBoundary boundary; triangulation.set_boundary (0, boundary); triangulation.set_boundary (1, boundary); @@ -4280,117 +4280,117 @@ namespace Step32 do { - pcout << "Timestep " << timestep_number - << ": t=" << time/EquationData::year_in_seconds - << " years" - << std::endl; - - assemble_stokes_system (); - build_stokes_preconditioner (); - assemble_temperature_matrix (); - - solve (); - - pcout << std::endl; - - if ((timestep_number == 0) && - (pre_refinement_step < parameters.initial_adaptive_refinement)) - { - refine_mesh (parameters.initial_global_refinement + - parameters.initial_adaptive_refinement); - ++pre_refinement_step; - goto start_time_iteration; - } - else if ((timestep_number > 0) - && - (timestep_number % parameters.adaptive_refinement_interval == 0)) - refine_mesh (parameters.initial_global_refinement + - parameters.initial_adaptive_refinement); - - if ((parameters.generate_graphical_output == true) - && - (timestep_number % parameters.graphical_output_interval == 0)) - output_results (); - - // In order to speed up linear - // solvers, we extrapolate the - // solutions from the old time levels - // to the new one. This gives a very - // good initial guess, cutting the - // number of iterations needed in - // solvers by more than one half. We - // do not need to extrapolate in the - // last iteration, so if we reached - // the final time, we stop here. - // - // As the last thing during a - // time step (before actually - // bumping up the number of - // the time step), we check - // whether the current time - // step number is divisible - // by 100, and if so we let - // the computing timer print - // a summary of CPU times - // spent so far. 
- if (time > parameters.end_time * EquationData::year_in_seconds) - break; - - TrilinosWrappers::MPI::BlockVector old_old_stokes_solution; - old_old_stokes_solution = old_stokes_solution; - old_stokes_solution = stokes_solution; - old_old_temperature_solution = old_temperature_solution; - old_temperature_solution = temperature_solution; - if (old_time_step > 0) - { - stokes_solution.sadd (1.+time_step/old_time_step, -time_step/old_time_step, - old_old_stokes_solution); - temperature_solution.sadd (1.+time_step/old_time_step, - -time_step/old_time_step, - old_old_temperature_solution); - } - - if ((timestep_number > 0) && (timestep_number % 100 == 0)) - computing_timer.print_summary (); - - time += time_step; - ++timestep_number; + pcout << "Timestep " << timestep_number + << ": t=" << time/EquationData::year_in_seconds + << " years" + << std::endl; + + assemble_stokes_system (); + build_stokes_preconditioner (); + assemble_temperature_matrix (); + + solve (); + + pcout << std::endl; + + if ((timestep_number == 0) && + (pre_refinement_step < parameters.initial_adaptive_refinement)) + { + refine_mesh (parameters.initial_global_refinement + + parameters.initial_adaptive_refinement); + ++pre_refinement_step; + goto start_time_iteration; + } + else if ((timestep_number > 0) + && + (timestep_number % parameters.adaptive_refinement_interval == 0)) + refine_mesh (parameters.initial_global_refinement + + parameters.initial_adaptive_refinement); + + if ((parameters.generate_graphical_output == true) + && + (timestep_number % parameters.graphical_output_interval == 0)) + output_results (); + + // In order to speed up linear + // solvers, we extrapolate the + // solutions from the old time levels + // to the new one. This gives a very + // good initial guess, cutting the + // number of iterations needed in + // solvers by more than one half. We + // do not need to extrapolate in the + // last iteration, so if we reached + // the final time, we stop here. + // + // As the last thing during a + // time step (before actually + // bumping up the number of + // the time step), we check + // whether the current time + // step number is divisible + // by 100, and if so we let + // the computing timer print + // a summary of CPU times + // spent so far. 
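
A standalone sketch of the time extrapolation just described -- x <- (1 + k_n/k_{n-1}) x_n - (k_n/k_{n-1}) x_{n-1}, which is what the sadd() calls in the code that follows implement for the Trilinos block vectors -- written here with a plain std::vector and made-up values:

#include <vector>
#include <iostream>

// x <- (1 + k/k_old) * x - (k/k_old) * x_old, i.e. linear extrapolation from
// the two previous time levels; this is what sadd(a, b, y) does for a
// deal.II/Trilinos vector (x = a*x + b*y).
void extrapolate (std::vector<double>       &x,
                  const std::vector<double> &x_old,
                  const double               time_step,
                  const double               old_time_step)
{
  const double a = 1. + time_step/old_time_step;
  const double b = -time_step/old_time_step;
  for (unsigned int i=0; i<x.size(); ++i)
    x[i] = a*x[i] + b*x_old[i];
}

int main ()
{
  // Made-up values of one unknown at t_{n-1} and t_n, equal step sizes:
  std::vector<double> old_solution (1, 1.0), solution (1, 2.0);
  extrapolate (solution, old_solution, 0.01, 0.01);
  std::cout << "extrapolated value: " << solution[0] << std::endl;   // prints 3
}
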
+ if (time > parameters.end_time * EquationData::year_in_seconds) + break; + + TrilinosWrappers::MPI::BlockVector old_old_stokes_solution; + old_old_stokes_solution = old_stokes_solution; + old_stokes_solution = stokes_solution; + old_old_temperature_solution = old_temperature_solution; + old_temperature_solution = temperature_solution; + if (old_time_step > 0) + { + stokes_solution.sadd (1.+time_step/old_time_step, -time_step/old_time_step, + old_old_stokes_solution); + temperature_solution.sadd (1.+time_step/old_time_step, + -time_step/old_time_step, + old_old_temperature_solution); + } + + if ((timestep_number > 0) && (timestep_number % 100 == 0)) + computing_timer.print_summary (); + + time += time_step; + ++timestep_number; } while (true); - // If we are generating graphical - // output, do so also for the last - // time step unless we had just - // done so before we left the - // do-while loop + // If we are generating graphical + // output, do so also for the last + // time step unless we had just + // done so before we left the + // do-while loop if ((parameters.generate_graphical_output == true) - && - !((timestep_number-1) % parameters.graphical_output_interval == 0)) + && + !((timestep_number-1) % parameters.graphical_output_interval == 0)) output_results (); } } - // @sect3{The main function} - - // The main function is short as usual and - // very similar to the one in step-31. Since - // we use a parameter file which is specified - // as an argument in the command line, we - // have to read it in here and pass it on to - // the Parameters class for parsing. If no - // filename is given in the command line, we - // simply use the \step-32.prm - // file which is distributed together with - // the program. - // - // Because 3d computations are simply - // very slow unless you throw a lot - // of processors at them, the program - // defaults to 2d. You can get the 3d - // version by changing the constant - // dimension below to 3. + // @sect3{The main function} + + // The main function is short as usual and + // very similar to the one in step-31. Since + // we use a parameter file which is specified + // as an argument in the command line, we + // have to read it in here and pass it on to + // the Parameters class for parsing. If no + // filename is given in the command line, we + // simply use the \step-32.prm + // file which is distributed together with + // the program. + // + // Because 3d computations are simply + // very slow unless you throw a lot + // of processors at them, the program + // defaults to 2d. You can get the 3d + // version by changing the constant + // dimension below to 3. int main (int argc, char *argv[]) { using namespace Step32; diff --git a/deal.II/examples/step-33/step-33.cc b/deal.II/examples/step-33/step-33.cc index 936da35231..d95ffcced5 100644 --- a/deal.II/examples/step-33/step-33.cc +++ b/deal.II/examples/step-33/step-33.cc @@ -9,7 +9,7 @@ /* to the file deal.II/doc/license.html for the text and */ /* further information on this license. */ - // @sect3{Include files} + // @sect3{Include files} // First a standard set of deal.II // includes. 
Nothing special to comment on @@ -69,663 +69,663 @@ #include - // And this again is C++: + // And this again is C++: #include #include #include #include - // To end this section, introduce everything - // in the dealii library into the namespace - // into which the contents of this program - // will go: + // To end this section, introduce everything + // in the dealii library into the namespace + // into which the contents of this program + // will go: namespace Step33 { using namespace dealii; - // @sect3{Euler equation specifics} - - // Here we define the flux function for this - // particular system of conservation laws, as - // well as pretty much everything else that's - // specific to the Euler equations for gas - // dynamics, for reasons discussed in the - // introduction. We group all this into a - // structure that defines everything that has - // to do with the flux. All members of this - // structure are static, i.e. the structure - // has no actual state specified by instance - // member variables. The better way to do - // this, rather than a structure with all - // static members would be to use a namespace - // -- but namespaces can't be templatized and - // we want some of the member variables of - // the structure to depend on the space - // dimension, which we in our usual way - // introduce using a template parameter. + // @sect3{Euler equation specifics} + + // Here we define the flux function for this + // particular system of conservation laws, as + // well as pretty much everything else that's + // specific to the Euler equations for gas + // dynamics, for reasons discussed in the + // introduction. We group all this into a + // structure that defines everything that has + // to do with the flux. All members of this + // structure are static, i.e. the structure + // has no actual state specified by instance + // member variables. The better way to do + // this, rather than a structure with all + // static members would be to use a namespace + // -- but namespaces can't be templatized and + // we want some of the member variables of + // the structure to depend on the space + // dimension, which we in our usual way + // introduce using a template parameter. template struct EulerEquations { - // @sect4{Component description} - - // First a few variables that - // describe the various components of our - // solution vector in a generic way. This - // includes the number of components in the - // system (Euler's equations have one entry - // for momenta in each spatial direction, - // plus the energy and density components, - // for a total of dim+2 - // components), as well as functions that - // describe the index within the solution - // vector of the first momentum component, - // the density component, and the energy - // density component. Note that all these - // %numbers depend on the space dimension; - // defining them in a generic way (rather - // than by implicit convention) makes our - // code more flexible and makes it easier - // to later extend it, for example by - // adding more components to the equations. + // @sect4{Component description} + + // First a few variables that + // describe the various components of our + // solution vector in a generic way. 
This + // includes the number of components in the + // system (Euler's equations have one entry + // for momenta in each spatial direction, + // plus the energy and density components, + // for a total of dim+2 + // components), as well as functions that + // describe the index within the solution + // vector of the first momentum component, + // the density component, and the energy + // density component. Note that all these + // %numbers depend on the space dimension; + // defining them in a generic way (rather + // than by implicit convention) makes our + // code more flexible and makes it easier + // to later extend it, for example by + // adding more components to the equations. static const unsigned int n_components = dim + 2; static const unsigned int first_momentum_component = 0; static const unsigned int density_component = dim; static const unsigned int energy_component = dim+1; - // When generating graphical - // output way down in this - // program, we need to specify - // the names of the solution - // variables as well as how the - // various components group into - // vector and scalar fields. We - // could describe this there, but - // in order to keep things that - // have to do with the Euler - // equation localized here and - // the rest of the program as - // generic as possible, we - // provide this sort of - // information in the following - // two functions: + // When generating graphical + // output way down in this + // program, we need to specify + // the names of the solution + // variables as well as how the + // various components group into + // vector and scalar fields. We + // could describe this there, but + // in order to keep things that + // have to do with the Euler + // equation localized here and + // the rest of the program as + // generic as possible, we + // provide this sort of + // information in the following + // two functions: static std::vector component_names () - { - std::vector names (dim, "momentum"); - names.push_back ("density"); - names.push_back ("energy_density"); + { + std::vector names (dim, "momentum"); + names.push_back ("density"); + names.push_back ("energy_density"); - return names; - } + return names; + } static std::vector component_interpretation () - { - std::vector - data_component_interpretation - (dim, DataComponentInterpretation::component_is_part_of_vector); - data_component_interpretation - .push_back (DataComponentInterpretation::component_is_scalar); - data_component_interpretation - .push_back (DataComponentInterpretation::component_is_scalar); - - return data_component_interpretation; - } - - - // @sect4{Transformations between variables} - - // Next, we define the gas - // constant. We will set it to 1.4 - // in its definition immediately - // following the declaration of - // this class (unlike integer - // variables, like the ones above, - // static const floating point - // member variables cannot be - // initialized within the class - // declaration in C++). This value - // of 1.4 is representative of a - // gas that consists of molecules - // composed of two atoms, such as - // air which consists up to small - // traces almost entirely of $N_2$ - // and $O_2$. 
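
The language point made above -- that, under the pre-C++11 rules, a static const double member can only be declared inside the class and must be defined, with its value, at namespace scope, unlike the integral constants -- is illustrated by this self-contained snippet with made-up names:

#include <iostream>

struct EulerConstants
{
  // Integral constants may be initialized right here (pre-C++11 rules)...
  static const unsigned int n_components = 4;
  // ...but a floating-point constant may only be declared here
  static const double gas_gamma;
};

// ...and must be defined (and initialized) at namespace scope:
const double EulerConstants::gas_gamma = 1.4;

int main ()
{
  std::cout << EulerConstants::n_components << ' '
            << EulerConstants::gas_gamma << std::endl;
}
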
+ { + std::vector + data_component_interpretation + (dim, DataComponentInterpretation::component_is_part_of_vector); + data_component_interpretation + .push_back (DataComponentInterpretation::component_is_scalar); + data_component_interpretation + .push_back (DataComponentInterpretation::component_is_scalar); + + return data_component_interpretation; + } + + + // @sect4{Transformations between variables} + + // Next, we define the gas + // constant. We will set it to 1.4 + // in its definition immediately + // following the declaration of + // this class (unlike integer + // variables, like the ones above, + // static const floating point + // member variables cannot be + // initialized within the class + // declaration in C++). This value + // of 1.4 is representative of a + // gas that consists of molecules + // composed of two atoms, such as + // air which consists up to small + // traces almost entirely of $N_2$ + // and $O_2$. static const double gas_gamma; - // In the following, we will need to - // compute the kinetic energy and the - // pressure from a vector of conserved - // variables. This we can do based on the - // energy density and the kinetic energy - // $\frac 12 \rho |\mathbf v|^2 = - // \frac{|\rho \mathbf v|^2}{2\rho}$ - // (note that the independent variables - // contain the momentum components $\rho - // v_i$, not the velocities $v_i$). - // - // There is one slight problem: We will - // need to call the following functions - // with input arguments of type - // std::vector@ and - // Vector@. The - // problem is that the former has an - // access operator - // operator[] whereas the - // latter, for historical reasons, has - // operator(). We wouldn't - // be able to write the function in a - // generic way if we were to use one or - // the other of these. Fortunately, we - // can use the following trick: instead - // of writing v[i] or - // v(i), we can use - // *(v.begin() + i), i.e. we - // generate an iterator that points to - // the ith element, and then - // dereference it. This works for both - // kinds of vectors -- not the prettiest - // solution, but one that works. + // In the following, we will need to + // compute the kinetic energy and the + // pressure from a vector of conserved + // variables. This we can do based on the + // energy density and the kinetic energy + // $\frac 12 \rho |\mathbf v|^2 = + // \frac{|\rho \mathbf v|^2}{2\rho}$ + // (note that the independent variables + // contain the momentum components $\rho + // v_i$, not the velocities $v_i$). + // + // There is one slight problem: We will + // need to call the following functions + // with input arguments of type + // std::vector@ and + // Vector@. The + // problem is that the former has an + // access operator + // operator[] whereas the + // latter, for historical reasons, has + // operator(). We wouldn't + // be able to write the function in a + // generic way if we were to use one or + // the other of these. Fortunately, we + // can use the following trick: instead + // of writing v[i] or + // v(i), we can use + // *(v.begin() + i), i.e. we + // generate an iterator that points to + // the ith element, and then + // dereference it. This works for both + // kinds of vectors -- not the prettiest + // solution, but one that works. 
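
The two points just discussed -- the relations E_kin = |rho v|^2/(2 rho) and p = (gamma-1)(E - E_kin), and the *(W.begin()+i) trick that lets one template serve containers with operator[] as well as operator() -- can be combined into a small standalone sketch. ParenVector below is a made-up stand-in for dealii::Vector:

#include <vector>
#include <iostream>

const unsigned int dim = 2;
const unsigned int first_momentum_component = 0;
const unsigned int density_component        = dim;
const unsigned int energy_component         = dim + 1;
const double gas_gamma = 1.4;

// Made-up stand-in for dealii::Vector: element access only via operator(),
// but it provides iterators, which is all the templates below rely on.
struct ParenVector
{
  std::vector<double> data;
  double operator() (const unsigned int i) const { return data[i]; }
  std::vector<double>::const_iterator begin () const { return data.begin(); }
};

// 1/2 rho |v|^2 = |rho v|^2 / (2 rho), written with *(W.begin()+i) so that it
// compiles for containers with operator[] as well as with operator().
template <typename InputVector>
double compute_kinetic_energy (const InputVector &W)
{
  double kinetic_energy = 0;
  for (unsigned int d=0; d<dim; ++d)
    kinetic_energy += *(W.begin() + first_momentum_component + d) *
                      *(W.begin() + first_momentum_component + d);
  return kinetic_energy / (2 * *(W.begin() + density_component));
}

template <typename InputVector>
double compute_pressure (const InputVector &W)
{
  return (gas_gamma-1.0) *
         (*(W.begin() + energy_component) - compute_kinetic_energy(W));
}

int main ()
{
  // Made-up state: rho v = (1,0), rho = 1, E = 2.5  ->  p = 0.4*(2.5-0.5) = 0.8
  std::vector<double> W(dim+2);
  W[0] = 1.;  W[1] = 0.;  W[density_component] = 1.;  W[energy_component] = 2.5;

  ParenVector V;
  V.data = W;

  std::cout << compute_pressure(W) << ' ' << compute_pressure(V) << std::endl;
}
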
template static number compute_kinetic_energy (const InputVector &W) - { - number kinetic_energy = 0; - for (unsigned int d=0; d static number compute_pressure (const InputVector &W) - { - return ((gas_gamma-1.0) * - (*(W.begin() + energy_component) - - compute_kinetic_energy(W))); - } - - - // @sect4{EulerEquations::compute_flux_matrix} - - // We define the flux function - // $F(W)$ as one large matrix. - // Each row of this matrix - // represents a scalar - // conservation law for the - // component in that row. The - // exact form of this matrix is - // given in the - // introduction. Note that we - // know the size of the matrix: - // it has as many rows as the - // system has components, and - // dim columns; - // rather than using a FullMatrix - // object for such a matrix - // (which has a variable number - // of rows and columns and must - // therefore allocate memory on - // the heap each time such a - // matrix is created), we use a - // rectangular array of numbers - // right away. - // - // We templatize the numerical type of - // the flux function so that we may use - // the automatic differentiation type - // here. Similarly, we will call the - // function with different input vector - // data types, so we templatize on it as - // well: + { + return ((gas_gamma-1.0) * + (*(W.begin() + energy_component) - + compute_kinetic_energy(W))); + } + + + // @sect4{EulerEquations::compute_flux_matrix} + + // We define the flux function + // $F(W)$ as one large matrix. + // Each row of this matrix + // represents a scalar + // conservation law for the + // component in that row. The + // exact form of this matrix is + // given in the + // introduction. Note that we + // know the size of the matrix: + // it has as many rows as the + // system has components, and + // dim columns; + // rather than using a FullMatrix + // object for such a matrix + // (which has a variable number + // of rows and columns and must + // therefore allocate memory on + // the heap each time such a + // matrix is created), we use a + // rectangular array of numbers + // right away. + // + // We templatize the numerical type of + // the flux function so that we may use + // the automatic differentiation type + // here. 
Similarly, we will call the + // function with different input vector + // data types, so we templatize on it as + // well: template static void compute_flux_matrix (const InputVector &W, - number (&flux)[n_components][dim]) - { - // First compute the pressure that - // appears in the flux matrix, and - // then compute the first - // dim columns of the - // matrix that correspond to the - // momentum terms: - const number pressure = compute_pressure (W); - - for (unsigned int d=0; ddim columns of the + // matrix that correspond to the + // momentum terms: + const number pressure = compute_pressure (W); + + for (unsigned int d=0; d static void numerical_normal_flux (const Point &normal, - const InputVector &Wplus, - const InputVector &Wminus, - const double alpha, - Sacado::Fad::DFad (&normal_flux)[n_components]) - { - Sacado::Fad::DFad iflux[n_components][dim]; - Sacado::Fad::DFad oflux[n_components][dim]; - - compute_flux_matrix (Wplus, iflux); - compute_flux_matrix (Wminus, oflux); - - for (unsigned int di=0; di (&normal_flux)[n_components]) + { + Sacado::Fad::DFad iflux[n_components][dim]; + Sacado::Fad::DFad oflux[n_components][dim]; + + compute_flux_matrix (Wplus, iflux); + compute_flux_matrix (Wminus, oflux); + + for (unsigned int di=0; di static void compute_forcing_vector (const InputVector &W, - number (&forcing)[n_components]) - { - const double gravity = -1.0; - - for (unsigned int c=0; cWminus will of course be - // modified, so it shouldn't be a - // const argument. Yet it is - // in the implementation below, and needs - // to be in order to allow the code to - // compile. The reason is that we call - // this function at a place where - // Wminus is of type - // Table@<2,Sacado::Fad::DFad@ - // @>, this being 2d table with - // indices representing the quadrature - // point and the vector component, - // respectively. We call this function - // with Wminus[q] as last - // argument; subscripting a 2d table - // yields a temporary accessor object - // representing a 1d vector, just what we - // want here. The problem is that a - // temporary accessor object can't be - // bound to a non-const reference - // argument of a function, as we would - // like here, according to the C++ 1998 - // and 2003 standards (something that - // will be fixed with the next standard - // in the form of rvalue references). We - // get away with making the output - // argument here a constant because it is - // the accessor object that's - // constant, not the table it points to: - // that one can still be written to. The - // hack is unpleasant nevertheless - // because it restricts the kind of data - // types that may be used as template - // argument to this function: a regular - // vector isn't going to do because that - // one can not be written to when marked - // const. With no good - // solution around at the moment, we'll - // go with the pragmatic, even if not - // pretty, solution shown here: + // The next part is to actually decide + // what to do at each kind of + // boundary. To this end, remember from + // the introduction that boundary + // conditions are specified by choosing a + // value $\mathbf w^-$ on the outside of + // a boundary given an inhomogeneity + // $\mathbf j$ and possibly the + // solution's value $\mathbf w^+$ on the + // inside. Both are then passed to the + // numerical flux $\mathbf + // H(\mathbf{w}^+, \mathbf{w}^-, + // \mathbf{n})$ to define boundary + // contributions to the bilinear form. 
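
A one-component, one-dimensional analogue of the Lax-Friedrichs-type numerical flux defined above, H(w+, w-, n) = 1/2 (f(w+) n + f(w-) n) + alpha (w+ - w-). Burgers' flux f(w) = w^2/2 is only a made-up stand-in for the Euler flux matrix, and alpha plays the role of the stabilization parameter:

#include <iostream>

// Physical flux of the (made-up) scalar model problem, Burgers' equation.
double physical_flux (const double w) { return 0.5 * w * w; }

// Same structure as EulerEquations::numerical_normal_flux above, just with
// one component and one space dimension (normal = +1 or -1).
double numerical_normal_flux (const double wplus,
                              const double wminus,
                              const double normal,
                              const double alpha)
{
  return 0.5 * (physical_flux(wplus)*normal + physical_flux(wminus)*normal)
         + alpha * (wplus - wminus);
}

int main ()
{
  // A right-moving front between w=1 (inside) and w=0 (outside):
  std::cout << numerical_normal_flux (/*wplus=*/1.0, /*wminus=*/0.0,
                                      /*normal=*/+1.0, /*alpha=*/0.5)
            << std::endl;   // prints 0.75
}
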
+ // + // Boundary conditions can in some cases + // be specified for each component of the + // solution vector independently. For + // example, if component $c$ is marked + // for inflow, then $w^-_c = j_c$. If it + // is an outflow, then $w^-_c = + // w^+_c$. These two simple cases are + // handled first in the function below. + // + // There is a little snag that makes this + // function unpleasant from a C++ + // language viewpoint: The output vector + // Wminus will of course be + // modified, so it shouldn't be a + // const argument. Yet it is + // in the implementation below, and needs + // to be in order to allow the code to + // compile. The reason is that we call + // this function at a place where + // Wminus is of type + // Table@<2,Sacado::Fad::DFad@ + // @>, this being 2d table with + // indices representing the quadrature + // point and the vector component, + // respectively. We call this function + // with Wminus[q] as last + // argument; subscripting a 2d table + // yields a temporary accessor object + // representing a 1d vector, just what we + // want here. The problem is that a + // temporary accessor object can't be + // bound to a non-const reference + // argument of a function, as we would + // like here, according to the C++ 1998 + // and 2003 standards (something that + // will be fixed with the next standard + // in the form of rvalue references). We + // get away with making the output + // argument here a constant because it is + // the accessor object that's + // constant, not the table it points to: + // that one can still be written to. The + // hack is unpleasant nevertheless + // because it restricts the kind of data + // types that may be used as template + // argument to this function: a regular + // vector isn't going to do because that + // one can not be written to when marked + // const. With no good + // solution around at the moment, we'll + // go with the pragmatic, even if not + // pretty, solution shown here: template static void compute_Wminus (const BoundaryKind (&boundary_kind)[n_components], - const Point &normal_vector, - const DataVector &Wplus, - const Vector &boundary_values, - const DataVector &Wminus) - { - for (unsigned int c = 0; c < n_components; c++) - switch (boundary_kind[c]) - { - case inflow_boundary: - { - Wminus[c] = boundary_values(c); - break; - } - - case outflow_boundary: - { - Wminus[c] = Wplus[c]; - break; - } - - // Prescribed pressure boundary - // conditions are a bit more - // complicated by the fact that - // even though the pressure is - // prescribed, we really are - // setting the energy component - // here, which will depend on - // velocity and pressure. So - // even though this seems like - // a Dirichlet type boundary - // condition, we get - // sensitivities of energy to - // velocity and density (unless - // these are also prescribed): - case pressure_boundary: - { - const typename DataVector::value_type - density = (boundary_kind[density_component] == - inflow_boundary - ? - boundary_values(density_component) - : - Wplus[density_component]); - - typename DataVector::value_type kinetic_energy = 0; - for (unsigned int d=0; d vdotn = 0; - for (unsigned int d = 0; d < dim; d++) { - vdotn += Wplus[d]*normal_vector[d]; - } - - Wminus[c] = Wplus[c] - 2.0*vdotn*normal_vector[c]; - break; - } - - default: - Assert (false, ExcNotImplemented()); - } - } - - - // @sect4{EulerEquations::compute_refinement_indicators} - - // In this class, we also want to specify - // how to refine the mesh. 
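
The no-penetration (mirror) boundary treatment above sets the exterior momentum to m- = m+ - 2 (m+ . n) n while simply copying density and energy; the following sketch evaluates that reflection for a made-up 2d state and unit normal:

#include <iostream>

int main ()
{
  const unsigned int dim = 2;
  const double m_plus[dim] = {1.0, -2.0};   // made-up interior momentum
  const double normal[dim] = {0.0, 1.0};    // boundary normal pointing in +y

  double vdotn = 0;
  for (unsigned int d=0; d<dim; ++d)
    vdotn += m_plus[d] * normal[d];

  // m^- = m^+ - 2 (m^+ . n) n : the normal component flips sign, the
  // tangential component is left untouched.
  double m_minus[dim];
  for (unsigned int d=0; d<dim; ++d)
    m_minus[d] = m_plus[d] - 2.0 * vdotn * normal[d];

  std::cout << m_minus[0] << ' ' << m_minus[1] << std::endl;   // prints 1 2
}
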
The class - // ConservationLaw that will - // use all the information we provide - // here in the EulerEquation - // class is pretty agnostic about the - // particular conservation law it solves: - // as doesn't even really care how many - // components a solution vector - // has. Consequently, it can't know what - // a reasonable refinement indicator - // would be. On the other hand, here we - // do, or at least we can come up with a - // reasonable choice: we simply look at - // the gradient of the density, and - // compute - // $\eta_K=\log\left(1+|\nabla\rho(x_K)|\right)$, - // where $x_K$ is the center of cell $K$. - // - // There are certainly a number of - // equally reasonable refinement - // indicators, but this one does, and it - // is easy to compute: + const Point &normal_vector, + const DataVector &Wplus, + const Vector &boundary_values, + const DataVector &Wminus) + { + for (unsigned int c = 0; c < n_components; c++) + switch (boundary_kind[c]) + { + case inflow_boundary: + { + Wminus[c] = boundary_values(c); + break; + } + + case outflow_boundary: + { + Wminus[c] = Wplus[c]; + break; + } + + // Prescribed pressure boundary + // conditions are a bit more + // complicated by the fact that + // even though the pressure is + // prescribed, we really are + // setting the energy component + // here, which will depend on + // velocity and pressure. So + // even though this seems like + // a Dirichlet type boundary + // condition, we get + // sensitivities of energy to + // velocity and density (unless + // these are also prescribed): + case pressure_boundary: + { + const typename DataVector::value_type + density = (boundary_kind[density_component] == + inflow_boundary + ? + boundary_values(density_component) + : + Wplus[density_component]); + + typename DataVector::value_type kinetic_energy = 0; + for (unsigned int d=0; d vdotn = 0; + for (unsigned int d = 0; d < dim; d++) { + vdotn += Wplus[d]*normal_vector[d]; + } + + Wminus[c] = Wplus[c] - 2.0*vdotn*normal_vector[c]; + break; + } + + default: + Assert (false, ExcNotImplemented()); + } + } + + + // @sect4{EulerEquations::compute_refinement_indicators} + + // In this class, we also want to specify + // how to refine the mesh. The class + // ConservationLaw that will + // use all the information we provide + // here in the EulerEquation + // class is pretty agnostic about the + // particular conservation law it solves: + // as doesn't even really care how many + // components a solution vector + // has. Consequently, it can't know what + // a reasonable refinement indicator + // would be. On the other hand, here we + // do, or at least we can come up with a + // reasonable choice: we simply look at + // the gradient of the density, and + // compute + // $\eta_K=\log\left(1+|\nabla\rho(x_K)|\right)$, + // where $x_K$ is the center of cell $K$. 
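Just to get a feeling for how this indicator behaves, here is a tiny stand-alone computation (purely illustrative, not part of the program): because of the logarithm, a cell sitting on a strong shock does not completely drown out cells with only moderate density gradients.

  #include <cmath>
  #include <cstdio>

  int main ()
  {
    // eta_K = log(1 + |grad rho(x_K)|) for a few sample gradient magnitudes
    const double grad_rho[4] = { 0.0, 1.0, 10.0, 100.0 };
    for (unsigned int i=0; i<4; ++i)
      std::printf ("|grad rho| = %6.1f   eta_K = %f\n",
                   grad_rho[i], std::log (1 + grad_rho[i]));
    return 0;
  }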
+ // + // There are certainly a number of + // equally reasonable refinement + // indicators, but this one does, and it + // is easy to compute: static void compute_refinement_indicators (const DoFHandler &dof_handler, - const Mapping &mapping, - const Vector &solution, - Vector &refinement_indicators) - { - const unsigned int dofs_per_cell = dof_handler.get_fe().dofs_per_cell; - std::vector dofs (dofs_per_cell); - - const QMidpoint quadrature_formula; - const UpdateFlags update_flags = update_gradients; - FEValues fe_v (mapping, dof_handler.get_fe(), - quadrature_formula, update_flags); - - std::vector > > - dU (1, std::vector >(n_components)); - - typename DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(), - endc = dof_handler.end(); - for (unsigned int cell_no=0; cell!=endc; ++cell, ++cell_no) - { - fe_v.reinit(cell); - fe_v.get_function_grads (solution, dU); - - refinement_indicators(cell_no) - = std::log(1+ - std::sqrt(dU[0][density_component] * - dU[0][density_component])); - } - } - - - - // @sect4{EulerEquations::Postprocessor} - - // Finally, we declare a class that - // implements a postprocessing of data - // components. The problem this class - // solves is that the variables in the - // formulation of the Euler equations we - // use are in conservative rather than - // physical form: they are momentum - // densities $\mathbf m=\rho\mathbf v$, - // density $\rho$, and energy density - // $E$. What we would like to also put - // into our output file are velocities - // $\mathbf v=\frac{\mathbf m}{\rho}$ and - // pressure $p=(\gamma-1)(E-\frac{1}{2} - // \rho |\mathbf v|^2)$. - // - // In addition, we would like to add the - // possibility to generate schlieren - // plots. Schlieren plots are a way to - // visualize shocks and other sharp - // interfaces. The word "schlieren" is a - // German word that may be translated as - // "striae" -- it may be simpler to - // explain it by an example, however: - // schlieren is what you see when you, - // for example, pour highly concentrated - // alcohol, or a transparent saline - // solution, into water; the two have the - // same color, but they have different - // refractive indices and so before they - // are fully mixed light goes through the - // mixture along bent rays that lead to - // brightness variations if you look at - // it. That's "schlieren". A similar - // effect happens in compressible flow - // because the refractive index - // depends on the pressure (and therefore - // the density) of the gas. - // - // The origin of the word refers to - // two-dimensional projections of a - // three-dimensional volume (we see a 2d - // picture of the 3d fluid). In - // computational fluid dynamics, we can - // get an idea of this effect by - // considering what causes it: density - // variations. Schlieren plots are - // therefore produced by plotting - // $s=|\nabla \rho|^2$; obviously, $s$ is - // large in shocks and at other highly - // dynamic places. If so desired by the - // user (by specifying this in the input - // file), we would like to generate these - // schlieren plots in addition to the - // other derived quantities listed above. - // - // The implementation of the algorithms - // to compute derived quantities from the - // ones that solve our problem, and to - // output them into data file, rests on - // the DataPostprocessor class. It has - // extensive documentation, and other - // uses of the class can also be found in - // step-29. We therefore refrain from - // extensive comments. 
+ const Mapping &mapping, + const Vector &solution, + Vector &refinement_indicators) + { + const unsigned int dofs_per_cell = dof_handler.get_fe().dofs_per_cell; + std::vector dofs (dofs_per_cell); + + const QMidpoint quadrature_formula; + const UpdateFlags update_flags = update_gradients; + FEValues fe_v (mapping, dof_handler.get_fe(), + quadrature_formula, update_flags); + + std::vector > > + dU (1, std::vector >(n_components)); + + typename DoFHandler::active_cell_iterator + cell = dof_handler.begin_active(), + endc = dof_handler.end(); + for (unsigned int cell_no=0; cell!=endc; ++cell, ++cell_no) + { + fe_v.reinit(cell); + fe_v.get_function_grads (solution, dU); + + refinement_indicators(cell_no) + = std::log(1+ + std::sqrt(dU[0][density_component] * + dU[0][density_component])); + } + } + + + + // @sect4{EulerEquations::Postprocessor} + + // Finally, we declare a class that + // implements a postprocessing of data + // components. The problem this class + // solves is that the variables in the + // formulation of the Euler equations we + // use are in conservative rather than + // physical form: they are momentum + // densities $\mathbf m=\rho\mathbf v$, + // density $\rho$, and energy density + // $E$. What we would like to also put + // into our output file are velocities + // $\mathbf v=\frac{\mathbf m}{\rho}$ and + // pressure $p=(\gamma-1)(E-\frac{1}{2} + // \rho |\mathbf v|^2)$. + // + // In addition, we would like to add the + // possibility to generate schlieren + // plots. Schlieren plots are a way to + // visualize shocks and other sharp + // interfaces. The word "schlieren" is a + // German word that may be translated as + // "striae" -- it may be simpler to + // explain it by an example, however: + // schlieren is what you see when you, + // for example, pour highly concentrated + // alcohol, or a transparent saline + // solution, into water; the two have the + // same color, but they have different + // refractive indices and so before they + // are fully mixed light goes through the + // mixture along bent rays that lead to + // brightness variations if you look at + // it. That's "schlieren". A similar + // effect happens in compressible flow + // because the refractive index + // depends on the pressure (and therefore + // the density) of the gas. + // + // The origin of the word refers to + // two-dimensional projections of a + // three-dimensional volume (we see a 2d + // picture of the 3d fluid). In + // computational fluid dynamics, we can + // get an idea of this effect by + // considering what causes it: density + // variations. Schlieren plots are + // therefore produced by plotting + // $s=|\nabla \rho|^2$; obviously, $s$ is + // large in shocks and at other highly + // dynamic places. If so desired by the + // user (by specifying this in the input + // file), we would like to generate these + // schlieren plots in addition to the + // other derived quantities listed above. + // + // The implementation of the algorithms + // to compute derived quantities from the + // ones that solve our problem, and to + // output them into data file, rests on + // the DataPostprocessor class. It has + // extensive documentation, and other + // uses of the class can also be found in + // step-29. We therefore refrain from + // extensive comments. 
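In formulas, the quantities this postprocessor produces are $\mathbf v = \mathbf m/\rho$, $p = (\gamma-1)(E - \frac{1}{2}\rho|\mathbf v|^2)$ and, if requested, $s = |\nabla\rho|^2$. A free-standing sketch of that computation with plain doubles, assuming 2d, the component ordering (momenta, density, energy), and $\gamma = 1.4$ -- the function and array names here are made up and are not part of the program:

  // Sketch only: conserved variables w = (m_x, m_y, rho, E) in 2d.
  void derived_quantities_2d (const double w[4],
                              const double grad_rho[2],
                              double       velocity[2],
                              double      &pressure,
                              double      &schlieren)
  {
    const double gamma = 1.4;
    const double rho   = w[2];

    double kinetic_energy = 0;
    for (unsigned int d=0; d<2; ++d)
      {
        velocity[d]     = w[d] / rho;        // v = m / rho
        kinetic_energy += w[d] * w[d];
      }
    kinetic_energy *= 0.5 / rho;             // 1/2 rho |v|^2 = |m|^2 / (2 rho)

    pressure  = (gamma - 1) * (w[3] - kinetic_energy);
    schlieren = grad_rho[0]*grad_rho[0] + grad_rho[1]*grad_rho[1];
  }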
class Postprocessor : public DataPostprocessor { - public: - Postprocessor (const bool do_schlieren_plot); + public: + Postprocessor (const bool do_schlieren_plot); - virtual - void - compute_derived_quantities_vector (const std::vector > &uh, - const std::vector > > &duh, - const std::vector > > &dduh, - const std::vector > &normals, - const std::vector > &evaluation_points, - std::vector > &computed_quantities) const; + virtual + void + compute_derived_quantities_vector (const std::vector > &uh, + const std::vector > > &duh, + const std::vector > > &dduh, + const std::vector > &normals, + const std::vector > &evaluation_points, + std::vector > &computed_quantities) const; - virtual std::vector get_names () const; + virtual std::vector get_names () const; - virtual - std::vector - get_data_component_interpretation () const; + virtual + std::vector + get_data_component_interpretation () const; - virtual UpdateFlags get_needed_update_flags () const; + virtual UpdateFlags get_needed_update_flags () const; - private: - const bool do_schlieren_plot; + private: + const bool do_schlieren_plot; }; }; @@ -738,97 +738,97 @@ namespace Step33 template EulerEquations::Postprocessor:: Postprocessor (const bool do_schlieren_plot) - : - do_schlieren_plot (do_schlieren_plot) + : + do_schlieren_plot (do_schlieren_plot) {} - // This is the only function worth commenting - // on. When generating graphical output, the - // DataOut and related classes will call this - // function on each cell, with values, - // gradients, hessians, and normal vectors - // (in case we're working on faces) at each - // quadrature point. Note that the data at - // each quadrature point is itself - // vector-valued, namely the conserved - // variables. What we're going to do here is - // to compute the quantities we're interested - // in at each quadrature point. Note that for - // this we can ignore the hessians ("dduh") - // and normal vectors; to avoid compiler - // warnings about unused variables, we - // comment out their names. + // This is the only function worth commenting + // on. When generating graphical output, the + // DataOut and related classes will call this + // function on each cell, with values, + // gradients, hessians, and normal vectors + // (in case we're working on faces) at each + // quadrature point. Note that the data at + // each quadrature point is itself + // vector-valued, namely the conserved + // variables. What we're going to do here is + // to compute the quantities we're interested + // in at each quadrature point. Note that for + // this we can ignore the hessians ("dduh") + // and normal vectors; to avoid compiler + // warnings about unused variables, we + // comment out their names. 
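For context, a postprocessor like this is typically handed to a DataOut object when graphical output is generated. The following is only a hedged sketch of that pattern in 2d; the output file name is made up, and the program's actual output routine may differ in its details:

  EulerEquations<2>::Postprocessor postprocessor (/*do_schlieren_plot=*/ true);

  DataOut<2> data_out;
  data_out.attach_dof_handler (dof_handler);
  data_out.add_data_vector (current_solution, postprocessor);
  data_out.build_patches ();

  std::ofstream output ("solution.vtk");
  data_out.write_vtk (output);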
template void EulerEquations::Postprocessor:: compute_derived_quantities_vector (const std::vector > &uh, - const std::vector > > &duh, - const std::vector > > &/*dduh*/, - const std::vector > &/*normals*/, - const std::vector > &/*evaluation_points*/, - std::vector > &computed_quantities) const + const std::vector > > &duh, + const std::vector > > &/*dduh*/, + const std::vector > &/*normals*/, + const std::vector > &/*evaluation_points*/, + std::vector > &computed_quantities) const { - // At the beginning of the function, let us - // make sure that all variables have the - // correct sizes, so that we can access - // individual vector elements without - // having to wonder whether we might read - // or write invalid elements; we also check - // that the duh vector only - // contains data if we really need it (the - // system knows about this because we say - // so in the - // get_needed_update_flags() - // function below). For the inner vectors, - // we check that at least the first element - // of the outer vector has the correct - // inner size: + // At the beginning of the function, let us + // make sure that all variables have the + // correct sizes, so that we can access + // individual vector elements without + // having to wonder whether we might read + // or write invalid elements; we also check + // that the duh vector only + // contains data if we really need it (the + // system knows about this because we say + // so in the + // get_needed_update_flags() + // function below). For the inner vectors, + // we check that at least the first element + // of the outer vector has the correct + // inner size: const unsigned int n_quadrature_points = uh.size(); if (do_schlieren_plot == true) Assert (duh.size() == n_quadrature_points, - ExcInternalError()) + ExcInternalError()) else - Assert (duh.size() == 0, - ExcInternalError()); + Assert (duh.size() == 0, + ExcInternalError()); Assert (computed_quantities.size() == n_quadrature_points, - ExcInternalError()); + ExcInternalError()); Assert (uh[0].size() == n_components, - ExcInternalError()); + ExcInternalError()); if (do_schlieren_plot == true) Assert (computed_quantities[0].size() == dim+2, ExcInternalError()) else - Assert (computed_quantities[0].size() == dim+1, ExcInternalError()); - - // Then loop over all quadrature points and - // do our work there. The code should be - // pretty self-explanatory. The order of - // output variables is first - // dim velocities, then the - // pressure, and if so desired the - // schlieren plot. Note that we try to be - // generic about the order of variables in - // the input vector, using the - // first_momentum_component - // and density_component - // information: + Assert (computed_quantities[0].size() == dim+1, ExcInternalError()); + + // Then loop over all quadrature points and + // do our work there. The code should be + // pretty self-explanatory. The order of + // output variables is first + // dim velocities, then the + // pressure, and if so desired the + // schlieren plot. 
Note that we try to be + // generic about the order of variables in + // the input vector, using the + // first_momentum_component + // and density_component + // information: for (unsigned int q=0; q (uh[q]); + computed_quantities[q](dim) = compute_pressure (uh[q]); - if (do_schlieren_plot == true) - computed_quantities[q](dim+1) = duh[q][density_component] * - duh[q][density_component]; + if (do_schlieren_plot == true) + computed_quantities[q](dim+1) = duh[q][density_component] * + duh[q][density_component]; } } @@ -857,14 +857,14 @@ namespace Step33 { std::vector interpretation (dim, - DataComponentInterpretation::component_is_part_of_vector); + DataComponentInterpretation::component_is_part_of_vector); interpretation.push_back (DataComponentInterpretation:: - component_is_scalar); + component_is_scalar); if (do_schlieren_plot == true) interpretation.push_back (DataComponentInterpretation:: - component_is_scalar); + component_is_scalar); return interpretation; } @@ -883,143 +883,143 @@ namespace Step33 } - // @sect3{Run time parameter handling} - - // Our next job is to define a few - // classes that will contain run-time - // parameters (for example solver - // tolerances, number of iterations, - // stabilization parameter, and the - // like). One could do this in the - // main class, but we separate it - // from that one to make the program - // more modular and easier to read: - // Everything that has to do with - // run-time parameters will be in the - // following namespace, whereas the - // program logic is in the main - // class. - // - // We will split the run-time - // parameters into a few separate - // structures, which we will all put - // into a namespace - // Parameters. Of these - // classes, there are a few that - // group the parameters for - // individual groups, such as for - // solvers, mesh refinement, or - // output. Each of these classes have - // functions - // declare_parameters() - // and - // parse_parameters() - // that declare parameter subsections - // and entries in a ParameterHandler - // object, and retrieve actual - // parameter values from such an - // object, respectively. These - // classes declare all their - // parameters in subsections of the - // ParameterHandler. - // - // The final class of the following - // namespace combines all the - // previous classes by deriving from - // them and taking care of a few more - // entries at the top level of the - // input file, as well as a few odd - // other entries in subsections that - // are too short to warrent a - // structure by themselves. - // - // It is worth pointing out one thing here: - // None of the classes below have a - // constructor that would initialize the - // various member variables. This isn't a - // problem, however, since we will read all - // variables declared in these classes from - // the input file (or indirectly: a - // ParameterHandler object will read it from - // there, and we will get the values from - // this object), and they will be initialized - // this way. In case a certain variable is - // not specified at all in the input file, - // this isn't a problem either: The - // ParameterHandler class will in this case - // simply take the default value that was - // specified when declaring an entry in the - // declare_parameters() - // functions of the classes below. 
+ // @sect3{Run time parameter handling} + + // Our next job is to define a few + // classes that will contain run-time + // parameters (for example solver + // tolerances, number of iterations, + // stabilization parameter, and the + // like). One could do this in the + // main class, but we separate it + // from that one to make the program + // more modular and easier to read: + // Everything that has to do with + // run-time parameters will be in the + // following namespace, whereas the + // program logic is in the main + // class. + // + // We will split the run-time + // parameters into a few separate + // structures, which we will all put + // into a namespace + // Parameters. Of these + // classes, there are a few that + // group the parameters for + // individual groups, such as for + // solvers, mesh refinement, or + // output. Each of these classes have + // functions + // declare_parameters() + // and + // parse_parameters() + // that declare parameter subsections + // and entries in a ParameterHandler + // object, and retrieve actual + // parameter values from such an + // object, respectively. These + // classes declare all their + // parameters in subsections of the + // ParameterHandler. + // + // The final class of the following + // namespace combines all the + // previous classes by deriving from + // them and taking care of a few more + // entries at the top level of the + // input file, as well as a few odd + // other entries in subsections that + // are too short to warrent a + // structure by themselves. + // + // It is worth pointing out one thing here: + // None of the classes below have a + // constructor that would initialize the + // various member variables. This isn't a + // problem, however, since we will read all + // variables declared in these classes from + // the input file (or indirectly: a + // ParameterHandler object will read it from + // there, and we will get the values from + // this object), and they will be initialized + // this way. In case a certain variable is + // not specified at all in the input file, + // this isn't a problem either: The + // ParameterHandler class will in this case + // simply take the default value that was + // specified when declaring an entry in the + // declare_parameters() + // functions of the classes below. namespace Parameters { - // @sect4{Parameters::Solver} - // - // The first of these classes deals - // with parameters for the linear - // inner solver. It offers - // parameters that indicate which - // solver to use (GMRES as a solver - // for general non-symmetric - // indefinite systems, or a sparse - // direct solver), the amount of - // output to be produced, as well - // as various parameters that tweak - // the thresholded incomplete LU - // decomposition (ILUT) that we use - // as a preconditioner for GMRES. - // - // In particular, the ILUT takes - // the following parameters: - // - ilut_fill: the number of extra - // entries to add when forming the ILU - // decomposition - // - ilut_atol, ilut_rtol: When - // forming the preconditioner, for - // certain problems bad conditioning - // (or just bad luck) can cause the - // preconditioner to be very poorly - // conditioned. Hence it can help to - // add diagonal perturbations to the - // original matrix and form the - // preconditioner for this slightly - // better matrix. ATOL is an absolute - // perturbation that is added to the - // diagonal before forming the prec, - // and RTOL is a scaling factor $rtol - // \geq 1$. 
- // - ilut_drop: The ILUT will - // drop any values that - // have magnitude less than this value. - // This is a way to manage the amount - // of memory used by this - // preconditioner. - // - // The meaning of each parameter is - // also briefly described in the - // third argument of the - // ParameterHandler::declare_entry - // call in - // declare_parameters(). + // @sect4{Parameters::Solver} + // + // The first of these classes deals + // with parameters for the linear + // inner solver. It offers + // parameters that indicate which + // solver to use (GMRES as a solver + // for general non-symmetric + // indefinite systems, or a sparse + // direct solver), the amount of + // output to be produced, as well + // as various parameters that tweak + // the thresholded incomplete LU + // decomposition (ILUT) that we use + // as a preconditioner for GMRES. + // + // In particular, the ILUT takes + // the following parameters: + // - ilut_fill: the number of extra + // entries to add when forming the ILU + // decomposition + // - ilut_atol, ilut_rtol: When + // forming the preconditioner, for + // certain problems bad conditioning + // (or just bad luck) can cause the + // preconditioner to be very poorly + // conditioned. Hence it can help to + // add diagonal perturbations to the + // original matrix and form the + // preconditioner for this slightly + // better matrix. ATOL is an absolute + // perturbation that is added to the + // diagonal before forming the prec, + // and RTOL is a scaling factor $rtol + // \geq 1$. + // - ilut_drop: The ILUT will + // drop any values that + // have magnitude less than this value. + // This is a way to manage the amount + // of memory used by this + // preconditioner. + // + // The meaning of each parameter is + // also briefly described in the + // third argument of the + // ParameterHandler::declare_entry + // call in + // declare_parameters(). struct Solver { - enum SolverType { gmres, direct }; - SolverType solver; + enum SolverType { gmres, direct }; + SolverType solver; - enum OutputType { quiet, verbose }; - OutputType output; + enum OutputType { quiet, verbose }; + OutputType output; - double linear_residual; - int max_iterations; + double linear_residual; + int max_iterations; - double ilut_fill; - double ilut_atol; - double ilut_rtol; - double ilut_drop; + double ilut_fill; + double ilut_atol; + double ilut_rtol; + double ilut_drop; - static void declare_parameters (ParameterHandler &prm); - void parse_parameters (ParameterHandler &prm); + static void declare_parameters (ParameterHandler &prm); + void parse_parameters (ParameterHandler &prm); }; @@ -1028,32 +1028,32 @@ namespace Step33 { prm.enter_subsection("linear solver"); { - prm.declare_entry("output", "quiet", - Patterns::Selection("quiet|verbose"), - "State whether output from solver runs should be printed. " - "Choices are ."); - prm.declare_entry("method", "gmres", - Patterns::Selection("gmres|direct"), - "The kind of solver for the linear system. 
" - "Choices are ."); - prm.declare_entry("residual", "1e-10", - Patterns::Double(), - "Linear solver residual"); - prm.declare_entry("max iters", "300", - Patterns::Integer(), - "Maximum solver iterations"); - prm.declare_entry("ilut fill", "2", - Patterns::Double(), - "Ilut preconditioner fill"); - prm.declare_entry("ilut absolute tolerance", "1e-9", - Patterns::Double(), - "Ilut preconditioner tolerance"); - prm.declare_entry("ilut relative tolerance", "1.1", - Patterns::Double(), - "Ilut relative tolerance"); - prm.declare_entry("ilut drop tolerance", "1e-10", - Patterns::Double(), - "Ilut drop tolerance"); + prm.declare_entry("output", "quiet", + Patterns::Selection("quiet|verbose"), + "State whether output from solver runs should be printed. " + "Choices are ."); + prm.declare_entry("method", "gmres", + Patterns::Selection("gmres|direct"), + "The kind of solver for the linear system. " + "Choices are ."); + prm.declare_entry("residual", "1e-10", + Patterns::Double(), + "Linear solver residual"); + prm.declare_entry("max iters", "300", + Patterns::Integer(), + "Maximum solver iterations"); + prm.declare_entry("ilut fill", "2", + Patterns::Double(), + "Ilut preconditioner fill"); + prm.declare_entry("ilut absolute tolerance", "1e-9", + Patterns::Double(), + "Ilut preconditioner tolerance"); + prm.declare_entry("ilut relative tolerance", "1.1", + Patterns::Double(), + "Ilut relative tolerance"); + prm.declare_entry("ilut drop tolerance", "1e-10", + Patterns::Double(), + "Ilut drop tolerance"); } prm.leave_subsection(); } @@ -1065,46 +1065,46 @@ namespace Step33 { prm.enter_subsection("linear solver"); { - const std::string op = prm.get("output"); - if (op == "verbose") - output = verbose; - if (op == "quiet") - output = quiet; - - const std::string sv = prm.get("method"); - if (sv == "direct") - solver = direct; - else if (sv == "gmres") - solver = gmres; - - linear_residual = prm.get_double("residual"); - max_iterations = prm.get_integer("max iters"); - ilut_fill = prm.get_double("ilut fill"); - ilut_atol = prm.get_double("ilut absolute tolerance"); - ilut_rtol = prm.get_double("ilut relative tolerance"); - ilut_drop = prm.get_double("ilut drop tolerance"); + const std::string op = prm.get("output"); + if (op == "verbose") + output = verbose; + if (op == "quiet") + output = quiet; + + const std::string sv = prm.get("method"); + if (sv == "direct") + solver = direct; + else if (sv == "gmres") + solver = gmres; + + linear_residual = prm.get_double("residual"); + max_iterations = prm.get_integer("max iters"); + ilut_fill = prm.get_double("ilut fill"); + ilut_atol = prm.get_double("ilut absolute tolerance"); + ilut_rtol = prm.get_double("ilut relative tolerance"); + ilut_drop = prm.get_double("ilut drop tolerance"); } prm.leave_subsection(); } - // @sect4{Parameters::Refinement} - // - // Similarly, here are a few parameters - // that determine how the mesh is to be - // refined (and if it is to be refined at - // all). For what exactly the shock - // parameters do, see the mesh refinement - // functions further down. + // @sect4{Parameters::Refinement} + // + // Similarly, here are a few parameters + // that determine how the mesh is to be + // refined (and if it is to be refined at + // all). For what exactly the shock + // parameters do, see the mesh refinement + // functions further down. 
struct Refinement { - bool do_refine; - double shock_val; - double shock_levels; + bool do_refine; + double shock_val; + double shock_levels; - static void declare_parameters (ParameterHandler &prm); - void parse_parameters (ParameterHandler &prm); + static void declare_parameters (ParameterHandler &prm); + void parse_parameters (ParameterHandler &prm); }; @@ -1114,24 +1114,24 @@ namespace Step33 prm.enter_subsection("refinement"); { - prm.declare_entry("refinement", "true", - Patterns::Bool(), - "Whether to perform mesh refinement or not"); - prm.declare_entry("refinement fraction", "0.1", - Patterns::Double(), - "Fraction of high refinement"); - prm.declare_entry("unrefinement fraction", "0.1", - Patterns::Double(), - "Fraction of low unrefinement"); - prm.declare_entry("max elements", "1000000", - Patterns::Double(), - "maximum number of elements"); - prm.declare_entry("shock value", "4.0", - Patterns::Double(), - "value for shock indicator"); - prm.declare_entry("shock levels", "3.0", - Patterns::Double(), - "number of shock refinement levels"); + prm.declare_entry("refinement", "true", + Patterns::Bool(), + "Whether to perform mesh refinement or not"); + prm.declare_entry("refinement fraction", "0.1", + Patterns::Double(), + "Fraction of high refinement"); + prm.declare_entry("unrefinement fraction", "0.1", + Patterns::Double(), + "Fraction of low unrefinement"); + prm.declare_entry("max elements", "1000000", + Patterns::Double(), + "maximum number of elements"); + prm.declare_entry("shock value", "4.0", + Patterns::Double(), + "value for shock indicator"); + prm.declare_entry("shock levels", "3.0", + Patterns::Double(), + "number of shock refinement levels"); } prm.leave_subsection(); } @@ -1141,43 +1141,43 @@ namespace Step33 { prm.enter_subsection("refinement"); { - do_refine = prm.get_bool ("refinement"); - shock_val = prm.get_double("shock value"); - shock_levels = prm.get_double("shock levels"); + do_refine = prm.get_bool ("refinement"); + shock_val = prm.get_double("shock value"); + shock_levels = prm.get_double("shock levels"); } prm.leave_subsection(); } - // @sect4{Parameters::Flux} - // - // Next a section on flux modifications to - // make it more stable. In particular, two - // options are offered to stabilize the - // Lax-Friedrichs flux: either choose - // $\mathbf{H}(\mathbf{a},\mathbf{b},\mathbf{n}) - // = - // \frac{1}{2}(\mathbf{F}(\mathbf{a})\cdot - // \mathbf{n} + \mathbf{F}(\mathbf{b})\cdot - // \mathbf{n} + \alpha (\mathbf{a} - - // \mathbf{b}))$ where $\alpha$ is either a - // fixed number specified in the input - // file, or where $\alpha$ is a mesh - // dependent value. In the latter case, it - // is chosen as $\frac{h}{2\delta T}$ with - // $h$ the diameter of the face to which - // the flux is applied, and $\delta T$ - // the current time step. + // @sect4{Parameters::Flux} + // + // Next a section on flux modifications to + // make it more stable. In particular, two + // options are offered to stabilize the + // Lax-Friedrichs flux: either choose + // $\mathbf{H}(\mathbf{a},\mathbf{b},\mathbf{n}) + // = + // \frac{1}{2}(\mathbf{F}(\mathbf{a})\cdot + // \mathbf{n} + \mathbf{F}(\mathbf{b})\cdot + // \mathbf{n} + \alpha (\mathbf{a} - + // \mathbf{b}))$ where $\alpha$ is either a + // fixed number specified in the input + // file, or where $\alpha$ is a mesh + // dependent value. In the latter case, it + // is chosen as $\frac{h}{2\delta T}$ with + // $h$ the diameter of the face to which + // the flux is applied, and $\delta T$ + // the current time step. 
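As a small illustration of the two stabilization choices (a sketch with made-up names, not code from this program):

  // Pick the Lax-Friedrichs stabilization parameter alpha: either the
  // constant value from the input file, or the mesh-dependent choice
  // alpha = h / (2 dt) described above.
  double stabilization_alpha (const bool   mesh_dependent,
                              const double constant_value,
                              const double face_diameter,
                              const double time_step)
  {
    if (mesh_dependent)
      return face_diameter / (2.0 * time_step);
    else
      return constant_value;
  }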
struct Flux { - enum StabilizationKind { constant, mesh_dependent }; - StabilizationKind stabilization_kind; + enum StabilizationKind { constant, mesh_dependent }; + StabilizationKind stabilization_kind; - double stabilization_value; + double stabilization_value; - static void declare_parameters (ParameterHandler &prm); - void parse_parameters (ParameterHandler &prm); + static void declare_parameters (ParameterHandler &prm); + void parse_parameters (ParameterHandler &prm); }; @@ -1185,13 +1185,13 @@ namespace Step33 { prm.enter_subsection("flux"); { - prm.declare_entry("stab", "mesh", - Patterns::Selection("constant|mesh"), - "Whether to use a constant stabilization parameter or " - "a mesh-dependent one"); - prm.declare_entry("stab value", "1", - Patterns::Double(), - "alpha stabilization"); + prm.declare_entry("stab", "mesh", + Patterns::Selection("constant|mesh"), + "Whether to use a constant stabilization parameter or " + "a mesh-dependent one"); + prm.declare_entry("stab value", "1", + Patterns::Double(), + "alpha stabilization"); } prm.leave_subsection(); } @@ -1201,37 +1201,37 @@ namespace Step33 { prm.enter_subsection("flux"); { - const std::string stab = prm.get("stab"); - if (stab == "constant") - stabilization_kind = constant; - else if (stab == "mesh") - stabilization_kind = mesh_dependent; - else - AssertThrow (false, ExcNotImplemented()); - - stabilization_value = prm.get_double("stab value"); + const std::string stab = prm.get("stab"); + if (stab == "constant") + stabilization_kind = constant; + else if (stab == "mesh") + stabilization_kind = mesh_dependent; + else + AssertThrow (false, ExcNotImplemented()); + + stabilization_value = prm.get_double("stab value"); } prm.leave_subsection(); } - // @sect4{Parameters::Output} - // - // Then a section on output parameters. We - // offer to produce Schlieren plots (the - // squared gradient of the density, a tool - // to visualize shock fronts), and a time - // interval between graphical output in - // case we don't want an output file every - // time step. + // @sect4{Parameters::Output} + // + // Then a section on output parameters. We + // offer to produce Schlieren plots (the + // squared gradient of the density, a tool + // to visualize shock fronts), and a time + // interval between graphical output in + // case we don't want an output file every + // time step. 
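One plausible way such an output interval is used is sketched below; treating a negative value as "output every time step" is an assumption made for this sketch and is not stated in the parameter declaration itself:

  bool time_for_output (const double time,
                        const double output_step,
                        double      &next_output_time)
  {
    if (output_step < 0)
      return true;                        // no period given: output each step

    if (time >= next_output_time)
      {
        next_output_time += output_step;  // schedule the following output
        return true;
      }

    return false;
  }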
struct Output { - bool schlieren_plot; - double output_step; + bool schlieren_plot; + double output_step; - static void declare_parameters (ParameterHandler &prm); - void parse_parameters (ParameterHandler &prm); + static void declare_parameters (ParameterHandler &prm); + void parse_parameters (ParameterHandler &prm); }; @@ -1240,12 +1240,12 @@ namespace Step33 { prm.enter_subsection("output"); { - prm.declare_entry("schlieren plot", "true", - Patterns::Bool (), - "Whether or not to produce schlieren plots"); - prm.declare_entry("step", "-1", - Patterns::Double(), - "Output once per this period"); + prm.declare_entry("schlieren plot", "true", + Patterns::Bool (), + "Whether or not to produce schlieren plots"); + prm.declare_entry("step", "-1", + Patterns::Double(), + "Output once per this period"); } prm.leave_subsection(); } @@ -1256,144 +1256,144 @@ namespace Step33 { prm.enter_subsection("output"); { - schlieren_plot = prm.get_bool("schlieren plot"); - output_step = prm.get_double("step"); + schlieren_plot = prm.get_bool("schlieren plot"); + output_step = prm.get_double("step"); } prm.leave_subsection(); } - // @sect4{Parameters::AllParameters} - // - // Finally the class that brings it all - // together. It declares a number of - // parameters itself, mostly ones at the - // top level of the parameter file as well - // as several in section too small to - // warrant their own classes. It also - // contains everything that is actually - // space dimension dependent, like initial - // or boundary conditions. - // - // Since this class is derived from all the - // ones above, the - // declare_parameters() and - // parse_parameters() - // functions call the respective functions - // of the base classes as well. - // - // Note that this class also handles the - // declaration of initial and boundary - // conditions specified in the input - // file. To this end, in both cases, - // there are entries like "w_0 value" - // which represent an expression in terms - // of $x,y,z$ that describe the initial - // or boundary condition as a formula - // that will later be parsed by the - // FunctionParser class. Similar - // expressions exist for "w_1", "w_2", - // etc, denoting the dim+2 - // conserved variables of the Euler - // system. Similarly, we allow up to - // max_n_boundaries boundary - // indicators to be used in the input - // file, and each of these boundary - // indicators can be associated with an - // inflow, outflow, or pressure boundary - // condition, with inhomogenous boundary - // conditions being specified for each - // component and each boundary indicator - // separately. - // - // The data structure used to store the - // boundary indicators is a bit - // complicated. It is an array of - // max_n_boundaries elements - // indicating the range of boundary - // indicators that will be accepted. For - // each entry in this array, we store a - // pair of data in the - // BoundaryCondition - // structure: first, an array of size - // n_components that for - // each component of the solution vector - // indicates whether it is an inflow, - // outflow, or other kind of boundary, - // and second a FunctionParser object - // that describes all components of the - // solution vector for this boundary id - // at once. - // - // The BoundaryCondition - // structure requires a constructor since - // we need to tell the function parser - // object at construction time how many - // vector components it is to - // describe. 
This initialization can - // therefore not wait till we actually - // set the formulas the FunctionParser - // object represents later in - // AllParameters::parse_parameters() - // - // For the same reason of having to tell - // Function objects their vector size at - // construction time, we have to have a - // constructor of the - // AllParameters class that - // at least initializes the other - // FunctionParser object, i.e. the one - // describing initial conditions. + // @sect4{Parameters::AllParameters} + // + // Finally the class that brings it all + // together. It declares a number of + // parameters itself, mostly ones at the + // top level of the parameter file as well + // as several in section too small to + // warrant their own classes. It also + // contains everything that is actually + // space dimension dependent, like initial + // or boundary conditions. + // + // Since this class is derived from all the + // ones above, the + // declare_parameters() and + // parse_parameters() + // functions call the respective functions + // of the base classes as well. + // + // Note that this class also handles the + // declaration of initial and boundary + // conditions specified in the input + // file. To this end, in both cases, + // there are entries like "w_0 value" + // which represent an expression in terms + // of $x,y,z$ that describe the initial + // or boundary condition as a formula + // that will later be parsed by the + // FunctionParser class. Similar + // expressions exist for "w_1", "w_2", + // etc, denoting the dim+2 + // conserved variables of the Euler + // system. Similarly, we allow up to + // max_n_boundaries boundary + // indicators to be used in the input + // file, and each of these boundary + // indicators can be associated with an + // inflow, outflow, or pressure boundary + // condition, with inhomogenous boundary + // conditions being specified for each + // component and each boundary indicator + // separately. + // + // The data structure used to store the + // boundary indicators is a bit + // complicated. It is an array of + // max_n_boundaries elements + // indicating the range of boundary + // indicators that will be accepted. For + // each entry in this array, we store a + // pair of data in the + // BoundaryCondition + // structure: first, an array of size + // n_components that for + // each component of the solution vector + // indicates whether it is an inflow, + // outflow, or other kind of boundary, + // and second a FunctionParser object + // that describes all components of the + // solution vector for this boundary id + // at once. + // + // The BoundaryCondition + // structure requires a constructor since + // we need to tell the function parser + // object at construction time how many + // vector components it is to + // describe. This initialization can + // therefore not wait till we actually + // set the formulas the FunctionParser + // object represents later in + // AllParameters::parse_parameters() + // + // For the same reason of having to tell + // Function objects their vector size at + // construction time, we have to have a + // constructor of the + // AllParameters class that + // at least initializes the other + // FunctionParser object, i.e. the one + // describing initial conditions. 
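Putting the pieces together, a (made-up) input file for a 2d computation, i.e. with four components $w_0,\ldots,w_3$, might contain sections like the ones below. The entry names are the ones declared in the declare_parameters() function further down, whereas the values and boundary ids are arbitrary; the linear solver, refinement, flux and output subsections declared earlier are omitted for brevity:

  set mesh            = grid.inp
  set diffusion power = 2.0

  subsection time stepping
    set time step          = 0.02
    set final time         = 10.0
    set theta scheme value = 0.5
  end

  subsection boundary_0
    set no penetration = true
  end

  subsection boundary_1
    set w_0       = inflow
    set w_0 value = 1.0
    set w_2       = inflow
    set w_2 value = 1.0
  end

  subsection initial condition
    set w_0 value = 0.0
    set w_1 value = 0.0
    set w_2 value = 1.0
    set w_3 value = 2.5
  end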
template struct AllParameters : public Solver, - public Refinement, - public Flux, - public Output + public Refinement, + public Flux, + public Output { - static const unsigned int max_n_boundaries = 10; + static const unsigned int max_n_boundaries = 10; - struct BoundaryConditions - { - typename EulerEquations::BoundaryKind - kind[EulerEquations::n_components]; + struct BoundaryConditions + { + typename EulerEquations::BoundaryKind + kind[EulerEquations::n_components]; - FunctionParser values; + FunctionParser values; - BoundaryConditions (); - }; + BoundaryConditions (); + }; - AllParameters (); + AllParameters (); - double diffusion_power; + double diffusion_power; - double time_step, final_time; - double theta; - bool is_stationary; + double time_step, final_time; + double theta; + bool is_stationary; - std::string mesh_filename; + std::string mesh_filename; - FunctionParser initial_conditions; - BoundaryConditions boundary_conditions[max_n_boundaries]; + FunctionParser initial_conditions; + BoundaryConditions boundary_conditions[max_n_boundaries]; - static void declare_parameters (ParameterHandler &prm); - void parse_parameters (ParameterHandler &prm); + static void declare_parameters (ParameterHandler &prm); + void parse_parameters (ParameterHandler &prm); }; template AllParameters::BoundaryConditions::BoundaryConditions () - : - values (EulerEquations::n_components) + : + values (EulerEquations::n_components) {} template AllParameters::AllParameters () - : - initial_conditions (EulerEquations::n_components) + : + initial_conditions (EulerEquations::n_components) {} @@ -1402,63 +1402,63 @@ namespace Step33 AllParameters::declare_parameters (ParameterHandler &prm) { prm.declare_entry("mesh", "grid.inp", - Patterns::Anything(), - "intput file name"); + Patterns::Anything(), + "intput file name"); prm.declare_entry("diffusion power", "2.0", - Patterns::Double(), - "power of mesh size for diffusion"); + Patterns::Double(), + "power of mesh size for diffusion"); prm.enter_subsection("time stepping"); { - prm.declare_entry("time step", "0.1", - Patterns::Double(0), - "simulation time step"); - prm.declare_entry("final time", "10.0", - Patterns::Double(0), - "simulation end time"); - prm.declare_entry("theta scheme value", "0.5", - Patterns::Double(0,1), - "value for theta that interpolated between explicit " - "Euler (theta=0), Crank-Nicolson (theta=0.5), and " - "implicit Euler (theta=1)."); + prm.declare_entry("time step", "0.1", + Patterns::Double(0), + "simulation time step"); + prm.declare_entry("final time", "10.0", + Patterns::Double(0), + "simulation end time"); + prm.declare_entry("theta scheme value", "0.5", + Patterns::Double(0,1), + "value for theta that interpolated between explicit " + "Euler (theta=0), Crank-Nicolson (theta=0.5), and " + "implicit Euler (theta=1)."); } prm.leave_subsection(); for (unsigned int b=0; b::n_components; ++di) - { - prm.declare_entry("w_" + Utilities::int_to_string(di), - "outflow", - Patterns::Selection("inflow|outflow|pressure"), - ""); - - prm.declare_entry("w_" + Utilities::int_to_string(di) + - " value", "0.0", - Patterns::Anything(), - "expression in x,y,z"); - } - } - prm.leave_subsection(); - } + { + prm.enter_subsection("boundary_" + + Utilities::int_to_string(b)); + { + prm.declare_entry("no penetration", "false", + Patterns::Bool(), + "whether the named boundary allows gas to " + "penetrate or is a rigid wall"); + + for (unsigned int di=0; di::n_components; ++di) + { + prm.declare_entry("w_" + Utilities::int_to_string(di), + "outflow", 
+ Patterns::Selection("inflow|outflow|pressure"), + ""); + + prm.declare_entry("w_" + Utilities::int_to_string(di) + + " value", "0.0", + Patterns::Anything(), + "expression in x,y,z"); + } + } + prm.leave_subsection(); + } prm.enter_subsection("initial condition"); { - for (unsigned int di=0; di::n_components; ++di) - prm.declare_entry("w_" + Utilities::int_to_string(di) + " value", - "0.0", - Patterns::Anything(), - "expression in x,y,z"); + for (unsigned int di=0; di::n_components; ++di) + prm.declare_entry("w_" + Utilities::int_to_string(di) + " value", + "0.0", + Patterns::Anything(), + "expression in x,y,z"); } prm.leave_subsection(); @@ -1478,74 +1478,74 @@ namespace Step33 prm.enter_subsection("time stepping"); { - time_step = prm.get_double("time step"); - if (time_step == 0) - { - is_stationary = true; - time_step = 1.0; - final_time = 1.0; - } - else - is_stationary = false; - - final_time = prm.get_double("final time"); - theta = prm.get_double("theta scheme value"); + time_step = prm.get_double("time step"); + if (time_step == 0) + { + is_stationary = true; + time_step = 1.0; + final_time = 1.0; + } + else + is_stationary = false; + + final_time = prm.get_double("final time"); + theta = prm.get_double("theta scheme value"); } prm.leave_subsection(); for (unsigned int boundary_id=0; boundary_id - expressions(EulerEquations::n_components, "0.0"); - - const bool no_penetration = prm.get_bool("no penetration"); - - for (unsigned int di=0; di::n_components; ++di) - { - const std::string boundary_type - = prm.get("w_" + Utilities::int_to_string(di)); - - if ((di < dim) && (no_penetration == true)) - boundary_conditions[boundary_id].kind[di] - = EulerEquations::no_penetration_boundary; - else if (boundary_type == "inflow") - boundary_conditions[boundary_id].kind[di] - = EulerEquations::inflow_boundary; - else if (boundary_type == "pressure") - boundary_conditions[boundary_id].kind[di] - = EulerEquations::pressure_boundary; - else if (boundary_type == "outflow") - boundary_conditions[boundary_id].kind[di] - = EulerEquations::outflow_boundary; - else - AssertThrow (false, ExcNotImplemented()); - - expressions[di] = prm.get("w_" + Utilities::int_to_string(di) + - " value"); - } - - boundary_conditions[boundary_id].values - .initialize (FunctionParser::default_variable_names(), - expressions, - std::map()); - } - prm.leave_subsection(); - } + ++boundary_id) + { + prm.enter_subsection("boundary_" + + Utilities::int_to_string(boundary_id)); + { + std::vector + expressions(EulerEquations::n_components, "0.0"); + + const bool no_penetration = prm.get_bool("no penetration"); + + for (unsigned int di=0; di::n_components; ++di) + { + const std::string boundary_type + = prm.get("w_" + Utilities::int_to_string(di)); + + if ((di < dim) && (no_penetration == true)) + boundary_conditions[boundary_id].kind[di] + = EulerEquations::no_penetration_boundary; + else if (boundary_type == "inflow") + boundary_conditions[boundary_id].kind[di] + = EulerEquations::inflow_boundary; + else if (boundary_type == "pressure") + boundary_conditions[boundary_id].kind[di] + = EulerEquations::pressure_boundary; + else if (boundary_type == "outflow") + boundary_conditions[boundary_id].kind[di] + = EulerEquations::outflow_boundary; + else + AssertThrow (false, ExcNotImplemented()); + + expressions[di] = prm.get("w_" + Utilities::int_to_string(di) + + " value"); + } + + boundary_conditions[boundary_id].values + .initialize (FunctionParser::default_variable_names(), + expressions, + std::map()); + } + 
prm.leave_subsection(); + } prm.enter_subsection("initial condition"); { - std::vector expressions (EulerEquations::n_components, - "0.0"); - for (unsigned int di = 0; di < EulerEquations::n_components; di++) - expressions[di] = prm.get("w_" + Utilities::int_to_string(di) + - " value"); - initial_conditions.initialize (FunctionParser::default_variable_names(), - expressions, - std::map()); + std::vector expressions (EulerEquations::n_components, + "0.0"); + for (unsigned int di = 0; di < EulerEquations::n_components; di++) + expressions[di] = prm.get("w_" + Utilities::int_to_string(di) + + " value"); + initial_conditions.initialize (FunctionParser::default_variable_names(), + expressions, + std::map()); } prm.leave_subsection(); @@ -1559,27 +1559,27 @@ namespace Step33 - // @sect3{Conservation law class} - - // Here finally comes the class that - // actually does something with all - // the Euler equation and parameter - // specifics we've defined above. The - // public interface is pretty much - // the same as always (the - // constructor now takes the name of - // a file from which to read - // parameters, which is passed on the - // command line). The private - // function interface is also pretty - // similar to the usual arrangement, - // with the - // assemble_system - // function split into three parts: - // one that contains the main loop - // over all cells and that then calls - // the other two for integrals over - // cells and faces, respectively. + // @sect3{Conservation law class} + + // Here finally comes the class that + // actually does something with all + // the Euler equation and parameter + // specifics we've defined above. The + // public interface is pretty much + // the same as always (the + // constructor now takes the name of + // a file from which to read + // parameters, which is passed on the + // command line). The private + // function interface is also pretty + // similar to the usual arrangement, + // with the + // assemble_system + // function split into three parts: + // one that contains the main loop + // over all cells and that then calls + // the other two for integrals over + // cells and faces, respectively. template class ConservationLaw { @@ -1592,15 +1592,15 @@ namespace Step33 void assemble_system (); void assemble_cell_term (const FEValues &fe_v, - const std::vector &dofs); + const std::vector &dofs); void assemble_face_term (const unsigned int face_no, - const FEFaceValuesBase &fe_v, - const FEFaceValuesBase &fe_v_neighbor, - const std::vector &dofs, - const std::vector &dofs_neighbor, - const bool external_face, - const unsigned int boundary_id, - const double face_diameter); + const FEFaceValuesBase &fe_v, + const FEFaceValuesBase &fe_v_neighbor, + const std::vector &dofs, + const std::vector &dofs_neighbor, + const bool external_face, + const unsigned int boundary_id, + const double face_diameter); std::pair solve (Vector &solution); @@ -1611,31 +1611,31 @@ namespace Step33 - // The first few member variables - // are also rather standard. Note - // that we define a mapping - // object to be used throughout - // the program when assembling - // terms (we will hand it to - // every FEValues and - // FEFaceValues object); the - // mapping we use is just the - // standard $Q_1$ mapping -- - // nothing fancy, in other words - // -- but declaring one here and - // using it throughout the - // program will make it simpler - // later on to change it if that - // should become necessary. 
This - // is, in fact, rather pertinent: - // it is known that for - // transsonic simulations with - // the Euler equations, - // computations do not converge - // even as $h\rightarrow 0$ if - // the boundary approximation is - // not of sufficiently high - // order. + // The first few member variables + // are also rather standard. Note + // that we define a mapping + // object to be used throughout + // the program when assembling + // terms (we will hand it to + // every FEValues and + // FEFaceValues object); the + // mapping we use is just the + // standard $Q_1$ mapping -- + // nothing fancy, in other words + // -- but declaring one here and + // using it throughout the + // program will make it simpler + // later on to change it if that + // should become necessary. This + // is, in fact, rather pertinent: + // it is known that for + // transsonic simulations with + // the Euler equations, + // computations do not converge + // even as $h\rightarrow 0$ if + // the boundary approximation is + // not of sufficiently high + // order. Triangulation triangulation; const MappingQ1 mapping; @@ -1645,56 +1645,56 @@ namespace Step33 const QGauss quadrature; const QGauss face_quadrature; - // Next come a number of data - // vectors that correspond to the - // solution of the previous time - // step - // (old_solution), - // the best guess of the current - // solution - // (current_solution; - // we say guess because - // the Newton iteration to - // compute it may not have - // converged yet, whereas - // old_solution - // refers to the fully converged - // final result of the previous - // time step), and a predictor - // for the solution at the next - // time step, computed by - // extrapolating the current and - // previous solution one time - // step into the future: + // Next come a number of data + // vectors that correspond to the + // solution of the previous time + // step + // (old_solution), + // the best guess of the current + // solution + // (current_solution; + // we say guess because + // the Newton iteration to + // compute it may not have + // converged yet, whereas + // old_solution + // refers to the fully converged + // final result of the previous + // time step), and a predictor + // for the solution at the next + // time step, computed by + // extrapolating the current and + // previous solution one time + // step into the future: Vector old_solution; Vector current_solution; Vector predictor; Vector right_hand_side; - // This final set of member variables - // (except for the object holding all - // run-time parameters at the very - // bottom and a screen output stream - // that only prints something if - // verbose output has been requested) - // deals with the inteface we have in - // this program to the Trilinos library - // that provides us with linear - // solvers. Similarly to including - // PETSc matrices in step-17, - // step-18, and step-19, all we - // need to do is to create a Trilinos - // sparse matrix instead of the - // standard deal.II class. The system - // matrix is used for the Jacobian in - // each Newton step. Since we do not - // intend to run this program in - // parallel (which wouldn't be too hard - // with Trilinos data structures, - // though), we don't have to think - // about anything else like - // distributing the degrees of freedom. 
+ // This final set of member variables + // (except for the object holding all + // run-time parameters at the very + // bottom and a screen output stream + // that only prints something if + // verbose output has been requested) + // deals with the inteface we have in + // this program to the Trilinos library + // that provides us with linear + // solvers. Similarly to including + // PETSc matrices in step-17, + // step-18, and step-19, all we + // need to do is to create a Trilinos + // sparse matrix instead of the + // standard deal.II class. The system + // matrix is used for the Jacobian in + // each Newton step. Since we do not + // intend to run this program in + // parallel (which wouldn't be too hard + // with Trilinos data structures, + // though), we don't have to think + // about anything else like + // distributing the degrees of freedom. TrilinosWrappers::SparseMatrix system_matrix; Parameters::AllParameters parameters; @@ -1702,22 +1702,22 @@ namespace Step33 }; - // @sect4{ConservationLaw::ConservationLaw} - // - // There is nothing much to say about - // the constructor. Essentially, it - // reads the input file and fills the - // parameter object with the parsed - // values: + // @sect4{ConservationLaw::ConservationLaw} + // + // There is nothing much to say about + // the constructor. Essentially, it + // reads the input file and fills the + // parameter object with the parsed + // values: template ConservationLaw::ConservationLaw (const char *input_filename) - : - mapping (), - fe (FE_Q(1), EulerEquations::n_components), - dof_handler (triangulation), - quadrature (2), - face_quadrature (2), - verbose_cout (std::cout, false) + : + mapping (), + fe (FE_Q(1), EulerEquations::n_components), + dof_handler (triangulation), + quadrature (2), + face_quadrature (2), + verbose_cout (std::cout, false) { ParameterHandler prm; Parameters::AllParameters::declare_parameters (prm); @@ -1730,65 +1730,65 @@ namespace Step33 - // @sect4{ConservationLaw::setup_system} - // - // The following (easy) function is called - // each time the mesh is changed. All it - // does is to resize the Trilinos matrix - // according to a sparsity pattern that we - // generate as in all the previous tutorial - // programs. + // @sect4{ConservationLaw::setup_system} + // + // The following (easy) function is called + // each time the mesh is changed. All it + // does is to resize the Trilinos matrix + // according to a sparsity pattern that we + // generate as in all the previous tutorial + // programs. template void ConservationLaw::setup_system () { CompressedSparsityPattern sparsity_pattern (dof_handler.n_dofs(), - dof_handler.n_dofs()); + dof_handler.n_dofs()); DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); system_matrix.reinit (sparsity_pattern); } - // @sect4{ConservationLaw::assemble_system} - // - // This and the following two - // functions are the meat of this - // program: They assemble the linear - // system that results from applying - // Newton's method to the nonlinear - // system of conservation - // equations. - // - // This first function puts all of - // the assembly pieces together in a - // routine that dispatches the - // correct piece for each cell/face. - // The actual implementation of the - // assembly on these objects is done - // in the following functions. 
- // - // At the top of the function we do the - // usual housekeeping: allocate FEValues, - // FEFaceValues, and FESubfaceValues - // objects necessary to do the integrations - // on cells, faces, and subfaces (in case - // of adjoining cells on different - // refinement levels). Note that we don't - // need all information (like values, - // gradients, or real locations of - // quadrature points) for all of these - // objects, so we only let the FEValues - // classes whatever is actually necessary - // by specifying the minimal set of - // UpdateFlags. For example, when using a - // FEFaceValues object for the neighboring - // cell we only need the shape values: - // Given a specific face, the quadrature - // points and JxW values are - // the same as for the current cells, and - // the normal vectors are known to be the - // negative of the normal vectors of the - // current cell. + // @sect4{ConservationLaw::assemble_system} + // + // This and the following two + // functions are the meat of this + // program: They assemble the linear + // system that results from applying + // Newton's method to the nonlinear + // system of conservation + // equations. + // + // This first function puts all of + // the assembly pieces together in a + // routine that dispatches the + // correct piece for each cell/face. + // The actual implementation of the + // assembly on these objects is done + // in the following functions. + // + // At the top of the function we do the + // usual housekeeping: allocate FEValues, + // FEFaceValues, and FESubfaceValues + // objects necessary to do the integrations + // on cells, faces, and subfaces (in case + // of adjoining cells on different + // refinement levels). Note that we don't + // need all information (like values, + // gradients, or real locations of + // quadrature points) for all of these + // objects, so we only let the FEValues + // classes whatever is actually necessary + // by specifying the minimal set of + // UpdateFlags. For example, when using a + // FEFaceValues object for the neighboring + // cell we only need the shape values: + // Given a specific face, the quadrature + // points and JxW values are + // the same as for the current cells, and + // the normal vectors are known to be the + // negative of the normal vectors of the + // current cell. 
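                                   // A short sketch of the "request only what you need" idea behind the
                                   // update flags used below: each flag occupies one bit, flags are
                                   // combined with operator|, and the evaluator computes only the data
                                   // whose bit is set. The names here are made-up stand-ins for
                                   // illustration, not the deal.II enums themselves.

  #include <cstdio>

  namespace update_flags_sketch
  {
    const unsigned int values    = 0x1;
    const unsigned int gradients = 0x2;
    const unsigned int JxW       = 0x4;

    void reinit (const unsigned int flags)
    {
      if (flags & values)    std::printf ("tabulate shape values\n");
      if (flags & gradients) std::printf ("tabulate shape gradients\n");
      if (flags & JxW)       std::printf ("compute Jacobian-times-weight factors\n");
    }
  }

  int main ()
  {
    // The neighbor's face evaluator only needs shape values...
    update_flags_sketch::reinit (update_flags_sketch::values);
    // ...whereas the cell evaluator needs everything:
    update_flags_sketch::reinit (update_flags_sketch::values |
                                 update_flags_sketch::gradients |
                                 update_flags_sketch::JxW);
  }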
template void ConservationLaw::assemble_system () { @@ -1798,313 +1798,313 @@ namespace Step33 std::vector dof_indices_neighbor (dofs_per_cell); const UpdateFlags update_flags = update_values - | update_gradients - | update_q_points - | update_JxW_values, - face_update_flags = update_values - | update_q_points - | update_JxW_values - | update_normal_vectors, - neighbor_face_update_flags = update_values; + | update_gradients + | update_q_points + | update_JxW_values, + face_update_flags = update_values + | update_q_points + | update_JxW_values + | update_normal_vectors, + neighbor_face_update_flags = update_values; FEValues fe_v (mapping, fe, quadrature, - update_flags); + update_flags); FEFaceValues fe_v_face (mapping, fe, face_quadrature, - face_update_flags); + face_update_flags); FESubfaceValues fe_v_subface (mapping, fe, face_quadrature, - face_update_flags); + face_update_flags); FEFaceValues fe_v_face_neighbor (mapping, fe, face_quadrature, - neighbor_face_update_flags); + neighbor_face_update_flags); FESubfaceValues fe_v_subface_neighbor (mapping, fe, face_quadrature, - neighbor_face_update_flags); + neighbor_face_update_flags); - // Then loop over all cells, initialize the - // FEValues object for the current cell and - // call the function that assembles the - // problem on this cell. + // Then loop over all cells, initialize the + // FEValues object for the current cell and + // call the function that assembles the + // problem on this cell. typename DoFHandler::active_cell_iterator cell = dof_handler.begin_active(), endc = dof_handler.end(); for (; cell!=endc; ++cell) { - fe_v.reinit (cell); - cell->get_dof_indices (dof_indices); - - assemble_cell_term(fe_v, dof_indices); - - // Then loop over all the faces of this - // cell. If a face is part of the - // external boundary, then assemble - // boundary conditions there (the fifth - // argument to - // assemble_face_terms - // indicates whether we are working on - // an external or internal face; if it - // is an external face, the fourth - // argument denoting the degrees of - // freedom indices of the neighbor is - // ignored, so we pass an empty - // vector): - for (unsigned int face_no=0; face_no::faces_per_cell; - ++face_no) - if (cell->at_boundary(face_no)) - { - fe_v_face.reinit (cell, face_no); - assemble_face_term (face_no, fe_v_face, - fe_v_face, - dof_indices, - std::vector(), - true, - cell->face(face_no)->boundary_indicator(), - cell->face(face_no)->diameter()); - } - - // The alternative is that we are - // dealing with an internal face. There - // are two cases that we need to - // distinguish: that this is a normal - // face between two cells at the same - // refinement level, and that it is a - // face between two cells of the - // different refinement levels. - // - // In the first case, there is nothing - // we need to do: we are using a - // continuous finite element, and face - // terms do not appear in the bilinear - // form in this case. The second case - // usually does not lead to face terms - // either if we enforce hanging node - // constraints strongly (as in all - // previous tutorial programs so far - // whenever we used continuous finite - // elements -- this enforcement is done - // by the ConstraintMatrix class - // together with - // DoFTools::make_hanging_node_constraints). 
In - // the current program, however, we opt - // to enforce continuity weakly at - // faces between cells of different - // refinement level, for two reasons: - // (i) because we can, and more - // importantly (ii) because we would - // have to thread the automatic - // differentiation we use to compute - // the elements of the Newton matrix - // from the residual through the - // operations of the ConstraintMatrix - // class. This would be possible, but - // is not trivial, and so we choose - // this alternative approach. - // - // What needs to be decided is which - // side of an interface between two - // cells of different refinement level - // we are sitting on. - // - // Let's take the case where the - // neighbor is more refined first. We - // then have to loop over the children - // of the face of the current cell and - // integrate on each of them. We - // sprinkle a couple of assertions into - // the code to ensure that our - // reasoning trying to figure out which - // of the neighbor's children's faces - // coincides with a given subface of - // the current cell's faces is correct - // -- a bit of defensive programming - // never hurts. - // - // We then call the function that - // integrates over faces; since this is - // an internal face, the fifth argument - // is false, and the sixth one is - // ignored so we pass an invalid value - // again: - else - { - if (cell->neighbor(face_no)->has_children()) - { - const unsigned int neighbor2= - cell->neighbor_of_neighbor(face_no); - - for (unsigned int subface_no=0; - subface_no < cell->face(face_no)->n_children(); - ++subface_no) - { - const typename DoFHandler::active_cell_iterator - neighbor_child - = cell->neighbor_child_on_subface (face_no, subface_no); - - Assert (neighbor_child->face(neighbor2) == - cell->face(face_no)->child(subface_no), - ExcInternalError()); - Assert (neighbor_child->has_children() == false, - ExcInternalError()); - - fe_v_subface.reinit (cell, face_no, subface_no); - fe_v_face_neighbor.reinit (neighbor_child, neighbor2); - - neighbor_child->get_dof_indices (dof_indices_neighbor); - - assemble_face_term (face_no, fe_v_subface, - fe_v_face_neighbor, - dof_indices, - dof_indices_neighbor, - false, - numbers::invalid_unsigned_int, - neighbor_child->face(neighbor2)->diameter()); - } - } - - // The other possibility we have - // to care for is if the neighbor - // is coarser than the current - // cell (in particular, because - // of the usual restriction of - // only one hanging node per - // face, the neighbor must be - // exactly one level coarser than - // the current cell, something - // that we check with an - // assertion). 
Again, we then - // integrate over this interface: - else if (cell->neighbor(face_no)->level() != cell->level()) - { - const typename DoFHandler::cell_iterator - neighbor = cell->neighbor(face_no); - Assert(neighbor->level() == cell->level()-1, - ExcInternalError()); - - neighbor->get_dof_indices (dof_indices_neighbor); - - const std::pair - faceno_subfaceno = cell->neighbor_of_coarser_neighbor(face_no); - const unsigned int neighbor_face_no = faceno_subfaceno.first, - neighbor_subface_no = faceno_subfaceno.second; - - Assert (neighbor->neighbor_child_on_subface (neighbor_face_no, - neighbor_subface_no) - == cell, - ExcInternalError()); - - fe_v_face.reinit (cell, face_no); - fe_v_subface_neighbor.reinit (neighbor, - neighbor_face_no, - neighbor_subface_no); - - assemble_face_term (face_no, fe_v_face, - fe_v_subface_neighbor, - dof_indices, - dof_indices_neighbor, - false, - numbers::invalid_unsigned_int, - cell->face(face_no)->diameter()); - } - } + fe_v.reinit (cell); + cell->get_dof_indices (dof_indices); + + assemble_cell_term(fe_v, dof_indices); + + // Then loop over all the faces of this + // cell. If a face is part of the + // external boundary, then assemble + // boundary conditions there (the fifth + // argument to + // assemble_face_terms + // indicates whether we are working on + // an external or internal face; if it + // is an external face, the fourth + // argument denoting the degrees of + // freedom indices of the neighbor is + // ignored, so we pass an empty + // vector): + for (unsigned int face_no=0; face_no::faces_per_cell; + ++face_no) + if (cell->at_boundary(face_no)) + { + fe_v_face.reinit (cell, face_no); + assemble_face_term (face_no, fe_v_face, + fe_v_face, + dof_indices, + std::vector(), + true, + cell->face(face_no)->boundary_indicator(), + cell->face(face_no)->diameter()); + } + + // The alternative is that we are + // dealing with an internal face. There + // are two cases that we need to + // distinguish: that this is a normal + // face between two cells at the same + // refinement level, and that it is a + // face between two cells of the + // different refinement levels. + // + // In the first case, there is nothing + // we need to do: we are using a + // continuous finite element, and face + // terms do not appear in the bilinear + // form in this case. The second case + // usually does not lead to face terms + // either if we enforce hanging node + // constraints strongly (as in all + // previous tutorial programs so far + // whenever we used continuous finite + // elements -- this enforcement is done + // by the ConstraintMatrix class + // together with + // DoFTools::make_hanging_node_constraints). In + // the current program, however, we opt + // to enforce continuity weakly at + // faces between cells of different + // refinement level, for two reasons: + // (i) because we can, and more + // importantly (ii) because we would + // have to thread the automatic + // differentiation we use to compute + // the elements of the Newton matrix + // from the residual through the + // operations of the ConstraintMatrix + // class. This would be possible, but + // is not trivial, and so we choose + // this alternative approach. + // + // What needs to be decided is which + // side of an interface between two + // cells of different refinement level + // we are sitting on. + // + // Let's take the case where the + // neighbor is more refined first. We + // then have to loop over the children + // of the face of the current cell and + // integrate on each of them. 
We + // sprinkle a couple of assertions into + // the code to ensure that our + // reasoning trying to figure out which + // of the neighbor's children's faces + // coincides with a given subface of + // the current cell's faces is correct + // -- a bit of defensive programming + // never hurts. + // + // We then call the function that + // integrates over faces; since this is + // an internal face, the fifth argument + // is false, and the sixth one is + // ignored so we pass an invalid value + // again: + else + { + if (cell->neighbor(face_no)->has_children()) + { + const unsigned int neighbor2= + cell->neighbor_of_neighbor(face_no); + + for (unsigned int subface_no=0; + subface_no < cell->face(face_no)->n_children(); + ++subface_no) + { + const typename DoFHandler::active_cell_iterator + neighbor_child + = cell->neighbor_child_on_subface (face_no, subface_no); + + Assert (neighbor_child->face(neighbor2) == + cell->face(face_no)->child(subface_no), + ExcInternalError()); + Assert (neighbor_child->has_children() == false, + ExcInternalError()); + + fe_v_subface.reinit (cell, face_no, subface_no); + fe_v_face_neighbor.reinit (neighbor_child, neighbor2); + + neighbor_child->get_dof_indices (dof_indices_neighbor); + + assemble_face_term (face_no, fe_v_subface, + fe_v_face_neighbor, + dof_indices, + dof_indices_neighbor, + false, + numbers::invalid_unsigned_int, + neighbor_child->face(neighbor2)->diameter()); + } + } + + // The other possibility we have + // to care for is if the neighbor + // is coarser than the current + // cell (in particular, because + // of the usual restriction of + // only one hanging node per + // face, the neighbor must be + // exactly one level coarser than + // the current cell, something + // that we check with an + // assertion). Again, we then + // integrate over this interface: + else if (cell->neighbor(face_no)->level() != cell->level()) + { + const typename DoFHandler::cell_iterator + neighbor = cell->neighbor(face_no); + Assert(neighbor->level() == cell->level()-1, + ExcInternalError()); + + neighbor->get_dof_indices (dof_indices_neighbor); + + const std::pair + faceno_subfaceno = cell->neighbor_of_coarser_neighbor(face_no); + const unsigned int neighbor_face_no = faceno_subfaceno.first, + neighbor_subface_no = faceno_subfaceno.second; + + Assert (neighbor->neighbor_child_on_subface (neighbor_face_no, + neighbor_subface_no) + == cell, + ExcInternalError()); + + fe_v_face.reinit (cell, face_no); + fe_v_subface_neighbor.reinit (neighbor, + neighbor_face_no, + neighbor_subface_no); + + assemble_face_term (face_no, fe_v_face, + fe_v_subface_neighbor, + dof_indices, + dof_indices_neighbor, + false, + numbers::invalid_unsigned_int, + cell->face(face_no)->diameter()); + } + } } - // After all this assembling, notify the - // Trilinos matrix object that the matrix - // is done: + // After all this assembling, notify the + // Trilinos matrix object that the matrix + // is done: system_matrix.compress(); } - // @sect4{ConservationLaw::assemble_cell_term} - // - // This function assembles the cell term by - // computing the cell part of the residual, - // adding its negative to the right hand side - // vector, and adding its derivative with - // respect to the local variables to the - // Jacobian (i.e. the Newton matrix). 
Recall - // that the cell contributions to the - // residual read $F_i = - // \left(\frac{\mathbf{w}_{n+1} - - // \mathbf{w}_n}{\delta - // t},\mathbf{z}_i\right)_K - - // \left(\mathbf{F}(\tilde{\mathbf{w}}), - // \nabla\mathbf{z}_i\right)_K + - // h^{\eta}(\nabla \mathbf{w} , \nabla - // \mathbf{z}_i)_K - - // (\mathbf{G}(\tilde{\mathbf w}), - // \mathbf{z}_i)_K$ where $\tilde{\mathbf w}$ - // is represented by the variable - // W_theta, $\mathbf{z}_i$ is - // the $i$th test function, and the scalar - // product - // $\left(\mathbf{F}(\tilde{\mathbf{w}}), - // \nabla\mathbf{z}\right)_K$ is understood - // as $\int_K - // \sum_{c=1}^{\text{n\_components}} - // \sum_{d=1}^{\text{dim}} - // \mathbf{F}(\tilde{\mathbf{w}})_{cd} - // \frac{\partial z_c}{x_d}$. - // - // At the top of this function, we do the - // usual housekeeping in terms of allocating - // some local variables that we will need - // later. In particular, we will allocate - // variables that will hold the values of the - // current solution $W_{n+1}^k$ after the - // $k$th Newton iteration (variable - // W), the previous time step's - // solution $W_{n}$ (variable - // W_old), as well as the linear - // combination $\theta W_{n+1}^k + - // (1-\theta)W_n$ that results from choosing - // different time stepping schemes (variable - // W_theta). - // - // In addition to these, we need the - // gradients of the current variables. It is - // a bit of a shame that we have to compute - // these; we almost don't. The nice thing - // about a simple conservation law is that - // the flux doesn't generally involve any - // gradients. We do need these, however, for - // the diffusion stabilization. - // - // The actual format in which we store these - // variables requires some - // explanation. First, we need values at each - // quadrature point for each of the - // EulerEquations::n_components - // components of the solution vector. This - // makes for a two-dimensional table for - // which we use deal.II's Table class (this - // is more efficient than - // std::vector@ - // @> because it only needs to - // allocate memory once, rather than once for - // each element of the outer - // vector). Similarly, the gradient is a - // three-dimensional table, which the Table - // class also supports. - // - // Secondly, we want to use automatic - // differentiation. To this end, we use the - // Sacado::Fad::DFad template for everything - // that is a computed from the variables with - // respect to which we would like to compute - // derivatives. This includes the current - // solution and gradient at the quadrature - // points (which are linear combinations of - // the degrees of freedom) as well as - // everything that is computed from them such - // as the residual, but not the previous time - // step's solution. These variables are all - // found in the first part of the function, - // along with a variable that we will use to - // store the derivatives of a single - // component of the residual: + // @sect4{ConservationLaw::assemble_cell_term} + // + // This function assembles the cell term by + // computing the cell part of the residual, + // adding its negative to the right hand side + // vector, and adding its derivative with + // respect to the local variables to the + // Jacobian (i.e. the Newton matrix). 
Recall + // that the cell contributions to the + // residual read $F_i = + // \left(\frac{\mathbf{w}_{n+1} - + // \mathbf{w}_n}{\delta + // t},\mathbf{z}_i\right)_K - + // \left(\mathbf{F}(\tilde{\mathbf{w}}), + // \nabla\mathbf{z}_i\right)_K + + // h^{\eta}(\nabla \mathbf{w} , \nabla + // \mathbf{z}_i)_K - + // (\mathbf{G}(\tilde{\mathbf w}), + // \mathbf{z}_i)_K$ where $\tilde{\mathbf w}$ + // is represented by the variable + // W_theta, $\mathbf{z}_i$ is + // the $i$th test function, and the scalar + // product + // $\left(\mathbf{F}(\tilde{\mathbf{w}}), + // \nabla\mathbf{z}\right)_K$ is understood + // as $\int_K + // \sum_{c=1}^{\text{n\_components}} + // \sum_{d=1}^{\text{dim}} + // \mathbf{F}(\tilde{\mathbf{w}})_{cd} + // \frac{\partial z_c}{x_d}$. + // + // At the top of this function, we do the + // usual housekeeping in terms of allocating + // some local variables that we will need + // later. In particular, we will allocate + // variables that will hold the values of the + // current solution $W_{n+1}^k$ after the + // $k$th Newton iteration (variable + // W), the previous time step's + // solution $W_{n}$ (variable + // W_old), as well as the linear + // combination $\theta W_{n+1}^k + + // (1-\theta)W_n$ that results from choosing + // different time stepping schemes (variable + // W_theta). + // + // In addition to these, we need the + // gradients of the current variables. It is + // a bit of a shame that we have to compute + // these; we almost don't. The nice thing + // about a simple conservation law is that + // the flux doesn't generally involve any + // gradients. We do need these, however, for + // the diffusion stabilization. + // + // The actual format in which we store these + // variables requires some + // explanation. First, we need values at each + // quadrature point for each of the + // EulerEquations::n_components + // components of the solution vector. This + // makes for a two-dimensional table for + // which we use deal.II's Table class (this + // is more efficient than + // std::vector@ + // @> because it only needs to + // allocate memory once, rather than once for + // each element of the outer + // vector). Similarly, the gradient is a + // three-dimensional table, which the Table + // class also supports. + // + // Secondly, we want to use automatic + // differentiation. To this end, we use the + // Sacado::Fad::DFad template for everything + // that is a computed from the variables with + // respect to which we would like to compute + // derivatives. This includes the current + // solution and gradient at the quadrature + // points (which are linear combinations of + // the degrees of freedom) as well as + // everything that is computed from them such + // as the residual, but not the previous time + // step's solution. These variables are all + // found in the first part of the function, + // along with a variable that we will use to + // store the derivatives of a single + // component of the residual: template void ConservationLaw:: assemble_cell_term (const FEValues &fe_v, - const std::vector &dof_indices) + const std::vector &dof_indices) { const unsigned int dofs_per_cell = fe_v.dofs_per_cell; const unsigned int n_q_points = fe_v.n_quadrature_points; @@ -2123,109 +2123,109 @@ namespace Step33 std::vector residual_derivatives (dofs_per_cell); - // Next, we have to define the independent - // variables that we will try to determine - // by solving a Newton step. 
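                                   // A scalar sketch, in plain C++, of the role the blended state
                                   // $\tilde{\mathbf w}=\theta W_{n+1}+(1-\theta)W_n$ plays above: for an
                                   // ODE $dw/dt = g(w)$, one step of this scheme seeks the root of
                                   // $r(w_{n+1}) = (w_{n+1}-w_n)/\delta t
                                   //   - g(\theta w_{n+1} + (1-\theta)w_n)$,
                                   // i.e. the right hand side is evaluated at the blend, exactly as the
                                   // fluxes above are evaluated at W_theta. The function below is a
                                   // hypothetical illustration, not part of the program; $\theta=0$,
                                   // $1/2$, $1$ recover explicit Euler, an implicit midpoint rule, and
                                   // implicit Euler, respectively.

  double theta_step_residual (const double w_new,
                              const double w_old,
                              const double dt,
                              const double theta,
                              double (*g)(double))
  {
    const double w_theta = theta * w_new + (1.0 - theta) * w_old;
    return (w_new - w_old) / dt - g (w_theta);
  }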
These - // independent variables are the values of - // the local degrees of freedom which we - // extract here: + // Next, we have to define the independent + // variables that we will try to determine + // by solving a Newton step. These + // independent variables are the values of + // the local degrees of freedom which we + // extract here: std::vector > independent_local_dof_values(dofs_per_cell); for (unsigned int i=0; iindependent_local_dof_values[i] - // as the $i$th independent variable out of - // a total of dofs_per_cell: + // The next step incorporates all the + // magic: we declare a subset of the + // autodifferentiation variables as + // independent degrees of freedom, whereas + // all the other ones remain dependent + // functions. These are precisely the local + // degrees of freedom just extracted. All + // calculations that reference them (either + // directly or indirectly) will accumulate + // sensitivies with respect to these + // variables. + // + // In order to mark the variables as + // independent, the following does the + // trick, marking + // independent_local_dof_values[i] + // as the $i$th independent variable out of + // a total of dofs_per_cell: for (unsigned int i=0; iW, - // W_old, - // W_theta, and - // grad_W, which we can - // compute from the local DoF values by - // using the formula $W(x_q)=\sum_i \mathbf - // W_i \Phi_i(x_q)$, where $\mathbf W_i$ is - // the $i$th entry of the (local part of - // the) solution vector, and $\Phi_i(x_q)$ - // the value of the $i$th vector-valued - // shape function evaluated at quadrature - // point $x_q$. The gradient can be - // computed in a similar way. - // - // Ideally, we could compute this - // information using a call into something - // like FEValues::get_function_values and - // FEValues::get_function_grads, but since - // (i) we would have to extend the FEValues - // class for this, and (ii) we don't want - // to make the entire - // old_solution vector fad - // types, only the local cell variables, we - // explicitly code the loop above. Before - // this, we add another loop that - // initializes all the fad variables to - // zero: + // After all these declarations, let us + // actually compute something. First, the + // values of W, + // W_old, + // W_theta, and + // grad_W, which we can + // compute from the local DoF values by + // using the formula $W(x_q)=\sum_i \mathbf + // W_i \Phi_i(x_q)$, where $\mathbf W_i$ is + // the $i$th entry of the (local part of + // the) solution vector, and $\Phi_i(x_q)$ + // the value of the $i$th vector-valued + // shape function evaluated at quadrature + // point $x_q$. The gradient can be + // computed in a similar way. + // + // Ideally, we could compute this + // information using a call into something + // like FEValues::get_function_values and + // FEValues::get_function_grads, but since + // (i) we would have to extend the FEValues + // class for this, and (ii) we don't want + // to make the entire + // old_solution vector fad + // types, only the local cell variables, we + // explicitly code the loop above. 
Before + // this, we add another loop that + // initializes all the fad variables to + // zero: for (unsigned int q=0; q::n_components; ++c) - { - W[q][c] = 0; - W_old[q][c] = 0; - W_theta[q][c] = 0; - for (unsigned int d=0; d FluxMatrix[EulerEquations::n_components][dim]; FluxMatrix *flux = new FluxMatrix[n_q_points]; @@ -2234,111 +2234,111 @@ namespace Step33 for (unsigned int q=0; q::compute_flux_matrix (W_theta[q], flux[q]); - EulerEquations::compute_forcing_vector (W_theta[q], forcing[q]); + EulerEquations::compute_flux_matrix (W_theta[q], flux[q]); + EulerEquations::compute_forcing_vector (W_theta[q], forcing[q]); } - // We now have all of the pieces in place, - // so perform the assembly. We have an - // outer loop through the components of the - // system, and an inner loop over the - // quadrature points, where we accumulate - // contributions to the $i$th residual - // $F_i$. The general formula for this - // residual is given in the introduction - // and at the top of this function. We can, - // however, simplify it a bit taking into - // account that the $i$th (vector-valued) - // test function $\mathbf{z}_i$ has in - // reality only a single nonzero component - // (more on this topic can be found in the - // @ref vector_valued module). It will be - // represented by the variable - // component_i below. With - // this, the residual term can be - // re-written as $F_i = - // \left(\frac{(\mathbf{w}_{n+1} - - // \mathbf{w}_n)_{\text{component\_i}}}{\delta - // t},(\mathbf{z}_i)_{\text{component\_i}}\right)_K$ - // $- \sum_{d=1}^{\text{dim}} - // \left(\mathbf{F} - // (\tilde{\mathbf{w}})_{\text{component\_i},d}, - // \frac{\partial(\mathbf{z}_i)_{\text{component\_i}}} - // {\partial x_d}\right)_K$ $+ - // \sum_{d=1}^{\text{dim}} h^{\eta} - // \left(\frac{\partial - // \mathbf{w}_{\text{component\_i}}}{\partial - // x_d} , \frac{\partial - // (\mathbf{z}_i)_{\text{component\_i}}}{\partial - // x_d} \right)_K$ - // $-(\mathbf{G}(\tilde{\mathbf{w}} - // )_{\text{component\_i}}, - // (\mathbf{z}_i)_{\text{component\_i}})_K$, - // where integrals are understood to be - // evaluated through summation over - // quadrature points. - // - // We initialy sum all contributions of the - // residual in the positive sense, so that - // we don't need to negative the Jacobian - // entries. Then, when we sum into the - // right_hand_side vector, - // we negate this residual. + // We now have all of the pieces in place, + // so perform the assembly. We have an + // outer loop through the components of the + // system, and an inner loop over the + // quadrature points, where we accumulate + // contributions to the $i$th residual + // $F_i$. The general formula for this + // residual is given in the introduction + // and at the top of this function. We can, + // however, simplify it a bit taking into + // account that the $i$th (vector-valued) + // test function $\mathbf{z}_i$ has in + // reality only a single nonzero component + // (more on this topic can be found in the + // @ref vector_valued module). It will be + // represented by the variable + // component_i below. 
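                                   // The accumulation loops above implement $W(x_q)=\sum_i \mathbf W_i
                                   // \Phi_i(x_q)$ by hand. For reference, here is the same interpolation
                                   // written out for a single scalar field in plain C++ with made-up
                                   // names (shape_values[i][q] standing for $\Phi_i(x_q)$, with one row
                                   // per local degree of freedom); this is only a sketch of the formula,
                                   // not code used by the program:

  #include <cstddef>
  #include <vector>

  std::vector<double>
  values_at_quadrature_points (const std::vector<double>               &local_dof_values,
                               const std::vector<std::vector<double> > &shape_values)
  {
    const std::size_t n_q_points = shape_values.empty () ? 0 : shape_values[0].size ();
    std::vector<double> W (n_q_points, 0.0);

    for (std::size_t q = 0; q < n_q_points; ++q)
      for (std::size_t i = 0; i < local_dof_values.size (); ++i)
        W[q] += local_dof_values[i] * shape_values[i][q];

    return W;
  }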
With + // this, the residual term can be + // re-written as $F_i = + // \left(\frac{(\mathbf{w}_{n+1} - + // \mathbf{w}_n)_{\text{component\_i}}}{\delta + // t},(\mathbf{z}_i)_{\text{component\_i}}\right)_K$ + // $- \sum_{d=1}^{\text{dim}} + // \left(\mathbf{F} + // (\tilde{\mathbf{w}})_{\text{component\_i},d}, + // \frac{\partial(\mathbf{z}_i)_{\text{component\_i}}} + // {\partial x_d}\right)_K$ $+ + // \sum_{d=1}^{\text{dim}} h^{\eta} + // \left(\frac{\partial + // \mathbf{w}_{\text{component\_i}}}{\partial + // x_d} , \frac{\partial + // (\mathbf{z}_i)_{\text{component\_i}}}{\partial + // x_d} \right)_K$ + // $-(\mathbf{G}(\tilde{\mathbf{w}} + // )_{\text{component\_i}}, + // (\mathbf{z}_i)_{\text{component\_i}})_K$, + // where integrals are understood to be + // evaluated through summation over + // quadrature points. + // + // We initialy sum all contributions of the + // residual in the positive sense, so that + // we don't need to negative the Jacobian + // entries. Then, when we sum into the + // right_hand_side vector, + // we negate this residual. for (unsigned int i=0; i F_i = 0; - - const unsigned int - component_i = fe_v.get_fe().system_to_component_index(i).first; - - // The residual for each row (i) will be accumulating - // into this fad variable. At the end of the assembly - // for this row, we will query for the sensitivities - // to this variable and add them into the Jacobian. - - for (unsigned int point=0; pointdiameter(), - parameters.diffusion_power) * - grad_W[point][component_i][d] * - fe_v.shape_grad_component(i, point, component_i)[d] * - fe_v.JxW(point); - - F_i -= forcing[point][component_i] * - fe_v.shape_value_component(i, point, component_i) * - fe_v.JxW(point); - } - - // At the end of the loop, we have to - // add the sensitivities to the - // matrix and subtract the residual - // from the right hand side. Trilinos - // FAD data type gives us access to - // the derivatives using - // F_i.fastAccessDx(k), - // so we store the data in a - // temporary array. This information - // about the whole row of local dofs - // is then added to the Trilinos - // matrix at once (which supports the - // data types we have chosen). - for (unsigned int k=0; k F_i = 0; + + const unsigned int + component_i = fe_v.get_fe().system_to_component_index(i).first; + + // The residual for each row (i) will be accumulating + // into this fad variable. At the end of the assembly + // for this row, we will query for the sensitivities + // to this variable and add them into the Jacobian. + + for (unsigned int point=0; pointdiameter(), + parameters.diffusion_power) * + grad_W[point][component_i][d] * + fe_v.shape_grad_component(i, point, component_i)[d] * + fe_v.JxW(point); + + F_i -= forcing[point][component_i] * + fe_v.shape_value_component(i, point, component_i) * + fe_v.JxW(point); + } + + // At the end of the loop, we have to + // add the sensitivities to the + // matrix and subtract the residual + // from the right hand side. Trilinos + // FAD data type gives us access to + // the derivatives using + // F_i.fastAccessDx(k), + // so we store the data in a + // temporary array. This information + // about the whole row of local dofs + // is then added to the Trilinos + // matrix at once (which supports the + // data types we have chosen). 
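                                   // Since the whole Jacobian computation above rests on forward-mode
                                   // automatic differentiation, here is a minimal self-contained sketch
                                   // in plain C++ (not Sacado) of what a type like Sacado::Fad::DFad
                                   // does: a "dual number" carries a value plus its derivatives with
                                   // respect to a chosen set of independent variables. Marking variable
                                   // i as independent corresponds to diff(i,n) above, and reading the
                                   // k-th derivative corresponds to fastAccessDx(k). All names below are
                                   // made up for illustration.

  #include <cstdio>
  #include <vector>

  struct Dual
  {
    double              value;
    std::vector<double> dx;   // derivatives w.r.t. the independent variables

    Dual (const double v, const unsigned int n_independent)
      : value (v), dx (n_independent, 0.0) {}

    void mark_independent (const unsigned int i) { dx[i] = 1.0; }
  };

  // Product rule; both operands are assumed to use the same set of
  // independent variables.
  Dual operator* (const Dual &a, const Dual &b)
  {
    Dual result (a.value * b.value, a.dx.size ());
    for (unsigned int k = 0; k < a.dx.size (); ++k)
      result.dx[k] = a.dx[k] * b.value + a.value * b.dx[k];
    return result;
  }

  int main ()
  {
    Dual u0 (3.0, 2), u1 (4.0, 2);   // two independent variables
    u0.mark_independent (0);
    u1.mark_independent (1);

    const Dual f = u0 * u1;          // f(u0,u1) = u0*u1
    std::printf ("f=%g  df/du0=%g  df/du1=%g\n", f.value, f.dx[0], f.dx[1]);
    // prints: f=12  df/du0=4  df/du1=3
  }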
+ for (unsigned int k=0; k void ConservationLaw::assemble_face_term(const unsigned int face_no, - const FEFaceValuesBase &fe_v, - const FEFaceValuesBase &fe_v_neighbor, - const std::vector &dof_indices, - const std::vector &dof_indices_neighbor, - const bool external_face, - const unsigned int boundary_id, - const double face_diameter) + const FEFaceValuesBase &fe_v, + const FEFaceValuesBase &fe_v_neighbor, + const std::vector &dof_indices, + const std::vector &dof_indices_neighbor, + const bool external_face, + const unsigned int boundary_id, + const double face_diameter) { const unsigned int n_q_points = fe_v.n_quadrature_points; const unsigned int dofs_per_cell = fe_v.dofs_per_cell; @@ -2374,130 +2374,130 @@ namespace Step33 std::vector > independent_local_dof_values (dofs_per_cell), independent_neighbor_dof_values (external_face == false ? - dofs_per_cell : - 0); + dofs_per_cell : + 0); const unsigned int n_independent_variables = (external_face == false ? - 2 * dofs_per_cell : - dofs_per_cell); + 2 * dofs_per_cell : + dofs_per_cell); for (unsigned int i = 0; i < dofs_per_cell; i++) { - independent_local_dof_values[i] = current_solution(dof_indices[i]); - independent_local_dof_values[i].diff(i, n_independent_variables); + independent_local_dof_values[i] = current_solution(dof_indices[i]); + independent_local_dof_values[i].diff(i, n_independent_variables); } if (external_face == false) for (unsigned int i = 0; i < dofs_per_cell; i++) - { - independent_neighbor_dof_values[i] - = current_solution(dof_indices_neighbor[i]); - independent_neighbor_dof_values[i] - .diff(i+dofs_per_cell, n_independent_variables); - } - - - // Next, we need to define the values of - // the conservative variables $\tilde - // {\mathbf W}$ on this side of the face - // ($\tilde {\mathbf W}^+$) and on the - // opposite side ($\tilde {\mathbf - // W}^-$). The former can be computed in - // exactly the same way as in the previous - // function, but note that the - // fe_v variable now is of - // type FEFaceValues or FESubfaceValues: + { + independent_neighbor_dof_values[i] + = current_solution(dof_indices_neighbor[i]); + independent_neighbor_dof_values[i] + .diff(i+dofs_per_cell, n_independent_variables); + } + + + // Next, we need to define the values of + // the conservative variables $\tilde + // {\mathbf W}$ on this side of the face + // ($\tilde {\mathbf W}^+$) and on the + // opposite side ($\tilde {\mathbf + // W}^-$). 
The former can be computed in + // exactly the same way as in the previous + // function, but note that the + // fe_v variable now is of + // type FEFaceValues or FESubfaceValues: Table<2,Sacado::Fad::DFad > Wplus (n_q_points, EulerEquations::n_components), Wminus (n_q_points, EulerEquations::n_components); for (unsigned int q=0; q::max_n_boundaries, - ExcIndexRange (boundary_id, 0, - Parameters::AllParameters::max_n_boundaries)); - - std::vector > - boundary_values(n_q_points, Vector(EulerEquations::n_components)); - parameters.boundary_conditions[boundary_id] - .values.vector_value_list(fe_v.get_quadrature_points(), - boundary_values); - - for (unsigned int q = 0; q < n_q_points; q++) - EulerEquations::compute_Wminus (parameters.boundary_conditions[boundary_id].kind, - fe_v.normal_vector(q), - Wplus[q], - boundary_values[q], - Wminus[q]); + Assert (boundary_id < Parameters::AllParameters::max_n_boundaries, + ExcIndexRange (boundary_id, 0, + Parameters::AllParameters::max_n_boundaries)); + + std::vector > + boundary_values(n_q_points, Vector(EulerEquations::n_components)); + parameters.boundary_conditions[boundary_id] + .values.vector_value_list(fe_v.get_quadrature_points(), + boundary_values); + + for (unsigned int q = 0; q < n_q_points; q++) + EulerEquations::compute_Wminus (parameters.boundary_conditions[boundary_id].kind, + fe_v.normal_vector(q), + Wplus[q], + boundary_values[q], + Wminus[q]); } - // Now that we have $\mathbf w^+$ and - // $\mathbf w^-$, we can go about computing - // the numerical flux function $\mathbf - // H(\mathbf w^+,\mathbf w^-, \mathbf n)$ - // for each quadrature point. Before - // calling the function that does so, we - // also need to determine the - // Lax-Friedrich's stability parameter: + // Now that we have $\mathbf w^+$ and + // $\mathbf w^-$, we can go about computing + // the numerical flux function $\mathbf + // H(\mathbf w^+,\mathbf w^-, \mathbf n)$ + // for each quadrature point. Before + // calling the function that does so, we + // also need to determine the + // Lax-Friedrich's stability parameter: typedef Sacado::Fad::DFad NormalFlux[EulerEquations::n_components]; NormalFlux *normal_fluxes = new NormalFlux[n_q_points]; @@ -2505,76 +2505,76 @@ namespace Step33 switch(parameters.stabilization_kind) { - case Parameters::Flux::constant: - alpha = parameters.stabilization_value; - break; - case Parameters::Flux::mesh_dependent: - alpha = face_diameter/(2.0*parameters.time_step); - break; - default: - Assert (false, ExcNotImplemented()); - alpha = 1; + case Parameters::Flux::constant: + alpha = parameters.stabilization_value; + break; + case Parameters::Flux::mesh_dependent: + alpha = face_diameter/(2.0*parameters.time_step); + break; + default: + Assert (false, ExcNotImplemented()); + alpha = 1; } for (unsigned int q=0; q::numerical_normal_flux(fe_v.normal_vector(q), - Wplus[q], Wminus[q], alpha, - normal_fluxes[q]); - - // Now assemble the face term in exactly - // the same way as for the cell - // contributions in the previous - // function. The only difference is that if - // this is an internal face, we also have - // to take into account the sensitivies of - // the residual contributions to the - // degrees of freedom on the neighboring - // cell: + Wplus[q], Wminus[q], alpha, + normal_fluxes[q]); + + // Now assemble the face term in exactly + // the same way as for the cell + // contributions in the previous + // function. 
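                                   // For reference, a self-contained scalar analogue (1d Burgers
                                   // equation, not the Euler system) of the Lax-Friedrichs-type flux
                                   // computed here:
                                   // $H(w^+,w^-) = \frac 12 (f(w^+)+f(w^-)) + \frac\alpha 2 (w^+-w^-)$,
                                   // with the convention that the normal points from the $w^+$ side
                                   // towards the $w^-$ side. In the program, $\alpha$ is either the
                                   // constant from the parameter file or $h/(2\delta t)$; the maximal
                                   // local wave speed shown below is a common choice for the scalar
                                   // case and is given here purely as an assumption.

  #include <algorithm>
  #include <cmath>

  double burgers_flux (const double w)
  {
    return 0.5 * w * w;
  }

  double lax_friedrichs_flux (const double w_plus,
                              const double w_minus,
                              const double alpha)
  {
    return 0.5 * (burgers_flux (w_plus) + burgers_flux (w_minus))
           + 0.5 * alpha * (w_plus - w_minus);
  }

  double local_wave_speed_alpha (const double w_plus, const double w_minus)
  {
    return std::max (std::fabs (w_plus), std::fabs (w_minus));
  }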
The only difference is that if + // this is an internal face, we also have + // to take into account the sensitivies of + // the residual contributions to the + // degrees of freedom on the neighboring + // cell: std::vector residual_derivatives (dofs_per_cell); for (unsigned int i=0; i F_i = 0; - - for (unsigned int point=0; point F_i = 0; + + for (unsigned int point=0; point std::pair @@ -2582,126 +2582,126 @@ namespace Step33 { switch (parameters.solver) { - // If the parameter file specified - // that a direct solver shall be - // used, then we'll get here. The - // process is straightforward, since - // deal.II provides a wrapper class - // to the Amesos direct solver within - // Trilinos. All we have to do is to - // create a solver control object - // (which is just a dummy object - // here, since we won't perform any - // iterations), and then create the - // direct solver object. When - // actually doing the solve, note - // that we don't pass a - // preconditioner. That wouldn't make - // much sense for a direct solver - // anyway. At the end we return the - // solver control statistics — - // which will tell that no iterations - // have been performed and that the - // final linear residual is zero, - // absent any better information that - // may be provided here: - case Parameters::Solver::direct: - { - SolverControl solver_control (1,0); - TrilinosWrappers::SolverDirect direct (solver_control, - parameters.output == - Parameters::Solver::verbose); - - direct.solve (system_matrix, newton_update, right_hand_side); - - return std::pair (solver_control.last_step(), - solver_control.last_value()); - } - - // Likewise, if we are to use an - // iterative solver, we use Aztec's - // GMRES solver. We could use the - // Trilinos wrapper classes for - // iterative solvers and - // preconditioners here as well, but - // we choose to use an Aztec solver - // directly. For the given problem, - // Aztec's internal preconditioner - // implementations are superior over - // the ones deal.II has wrapper - // classes to, so we use ILU-T - // preconditioning within the AztecOO - // solver and set a bunch of options - // that can be changed from the - // parameter file. - // - // There are two more practicalities: - // Since we have built our right hand - // side and solution vector as - // deal.II Vector objects (as opposed - // to the matrix, which is a Trilinos - // object), we must hand the solvers - // Trilinos Epetra vectors. Luckily, - // they support the concept of a - // 'view', so we just send in a - // pointer to our deal.II vectors. We - // have to provide an Epetra_Map for - // the vector that sets the parallel - // distribution, which is just a - // dummy object in serial. The - // easiest way is to ask the matrix - // for its map, and we're going to be - // ready for matrix-vector products - // with it. - // - // Secondly, the Aztec solver wants - // us to pass a Trilinos - // Epetra_CrsMatrix in, not the - // deal.II wrapper class itself. So - // we access to the actual Trilinos - // matrix in the Trilinos wrapper - // class by the command - // trilinos_matrix(). Trilinos wants - // the matrix to be non-constant, so - // we have to manually remove the - // constantness using a const_cast. 
- case Parameters::Solver::gmres: - { - Epetra_Vector x(View, system_matrix.domain_partitioner(), - newton_update.begin()); - Epetra_Vector b(View, system_matrix.range_partitioner(), - right_hand_side.begin()); - - AztecOO solver; - solver.SetAztecOption(AZ_output, - (parameters.output == - Parameters::Solver::quiet - ? - AZ_none - : - AZ_all)); - solver.SetAztecOption(AZ_solver, AZ_gmres); - solver.SetRHS(&b); - solver.SetLHS(&x); - - solver.SetAztecOption(AZ_precond, AZ_dom_decomp); - solver.SetAztecOption(AZ_subdomain_solve, AZ_ilut); - solver.SetAztecOption(AZ_overlap, 0); - solver.SetAztecOption(AZ_reorder, 0); - - solver.SetAztecParam(AZ_drop, parameters.ilut_drop); - solver.SetAztecParam(AZ_ilut_fill, parameters.ilut_fill); - solver.SetAztecParam(AZ_athresh, parameters.ilut_atol); - solver.SetAztecParam(AZ_rthresh, parameters.ilut_rtol); - - solver.SetUserMatrix(const_cast - (&system_matrix.trilinos_matrix())); - - solver.Iterate(parameters.max_iterations, parameters.linear_residual); - - return std::pair (solver.NumIters(), - solver.TrueResidual()); - } + // If the parameter file specified + // that a direct solver shall be + // used, then we'll get here. The + // process is straightforward, since + // deal.II provides a wrapper class + // to the Amesos direct solver within + // Trilinos. All we have to do is to + // create a solver control object + // (which is just a dummy object + // here, since we won't perform any + // iterations), and then create the + // direct solver object. When + // actually doing the solve, note + // that we don't pass a + // preconditioner. That wouldn't make + // much sense for a direct solver + // anyway. At the end we return the + // solver control statistics — + // which will tell that no iterations + // have been performed and that the + // final linear residual is zero, + // absent any better information that + // may be provided here: + case Parameters::Solver::direct: + { + SolverControl solver_control (1,0); + TrilinosWrappers::SolverDirect direct (solver_control, + parameters.output == + Parameters::Solver::verbose); + + direct.solve (system_matrix, newton_update, right_hand_side); + + return std::pair (solver_control.last_step(), + solver_control.last_value()); + } + + // Likewise, if we are to use an + // iterative solver, we use Aztec's + // GMRES solver. We could use the + // Trilinos wrapper classes for + // iterative solvers and + // preconditioners here as well, but + // we choose to use an Aztec solver + // directly. For the given problem, + // Aztec's internal preconditioner + // implementations are superior over + // the ones deal.II has wrapper + // classes to, so we use ILU-T + // preconditioning within the AztecOO + // solver and set a bunch of options + // that can be changed from the + // parameter file. + // + // There are two more practicalities: + // Since we have built our right hand + // side and solution vector as + // deal.II Vector objects (as opposed + // to the matrix, which is a Trilinos + // object), we must hand the solvers + // Trilinos Epetra vectors. Luckily, + // they support the concept of a + // 'view', so we just send in a + // pointer to our deal.II vectors. We + // have to provide an Epetra_Map for + // the vector that sets the parallel + // distribution, which is just a + // dummy object in serial. The + // easiest way is to ask the matrix + // for its map, and we're going to be + // ready for matrix-vector products + // with it. 
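                                   // The 'view' mentioned above is simply a wrapper that refers to
                                   // memory owned by someone else instead of copying it. A minimal
                                   // sketch of the concept in plain C++ (the struct below is
                                   // hypothetical, not the Epetra_Vector class):

  #include <cstddef>

  struct VectorView
  {
    double      *data;   // borrowed pointer; the view does not own or free it
    std::size_t  size;

    double &operator[] (const std::size_t i) { return data[i]; }
  };

  // Wrapping an existing array v of length n as
  //   VectorView view = {v, n};
  // is cheap, and reads and writes through the view go straight to v --
  // which is why handing the solver views of the deal.II vectors avoids
  // any copying.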
+ // + // Secondly, the Aztec solver wants + // us to pass a Trilinos + // Epetra_CrsMatrix in, not the + // deal.II wrapper class itself. So + // we access to the actual Trilinos + // matrix in the Trilinos wrapper + // class by the command + // trilinos_matrix(). Trilinos wants + // the matrix to be non-constant, so + // we have to manually remove the + // constantness using a const_cast. + case Parameters::Solver::gmres: + { + Epetra_Vector x(View, system_matrix.domain_partitioner(), + newton_update.begin()); + Epetra_Vector b(View, system_matrix.range_partitioner(), + right_hand_side.begin()); + + AztecOO solver; + solver.SetAztecOption(AZ_output, + (parameters.output == + Parameters::Solver::quiet + ? + AZ_none + : + AZ_all)); + solver.SetAztecOption(AZ_solver, AZ_gmres); + solver.SetRHS(&b); + solver.SetLHS(&x); + + solver.SetAztecOption(AZ_precond, AZ_dom_decomp); + solver.SetAztecOption(AZ_subdomain_solve, AZ_ilut); + solver.SetAztecOption(AZ_overlap, 0); + solver.SetAztecOption(AZ_reorder, 0); + + solver.SetAztecParam(AZ_drop, parameters.ilut_drop); + solver.SetAztecParam(AZ_ilut_fill, parameters.ilut_fill); + solver.SetAztecParam(AZ_athresh, parameters.ilut_atol); + solver.SetAztecParam(AZ_rthresh, parameters.ilut_rtol); + + solver.SetUserMatrix(const_cast + (&system_matrix.trilinos_matrix())); + + solver.Iterate(parameters.max_iterations, parameters.linear_residual); + + return std::pair (solver.NumIters(), + solver.TrueResidual()); + } } Assert (false, ExcNotImplemented()); @@ -2709,35 +2709,35 @@ namespace Step33 } - // @sect4{ConservationLaw::compute_refinement_indicators} + // @sect4{ConservationLaw::compute_refinement_indicators} - // This function is real simple: We don't - // pretend that we know here what a good - // refinement indicator would be. Rather, we - // assume that the EulerEquation - // class would know about this, and so we - // simply defer to the respective function - // we've implemented there: + // This function is real simple: We don't + // pretend that we know here what a good + // refinement indicator would be. Rather, we + // assume that the EulerEquation + // class would know about this, and so we + // simply defer to the respective function + // we've implemented there: template void ConservationLaw:: compute_refinement_indicators (Vector &refinement_indicators) const { EulerEquations::compute_refinement_indicators (dof_handler, - mapping, - predictor, - refinement_indicators); + mapping, + predictor, + refinement_indicators); } - // @sect4{ConservationLaw::refine_grid} + // @sect4{ConservationLaw::refine_grid} - // Here, we use the refinement indicators - // computed before and refine the mesh. At - // the beginning, we loop over all cells and - // mark those that we think should be - // refined: + // Here, we use the refinement indicators + // computed before and refine the mesh. 
At + // the beginning, we loop over all cells and + // mark those that we think should be + // refined: template void ConservationLaw::refine_grid (const Vector &refinement_indicators) @@ -2748,31 +2748,31 @@ namespace Step33 for (unsigned int cell_no=0; cell!=endc; ++cell, ++cell_no) { - cell->clear_coarsen_flag(); - cell->clear_refine_flag(); - - if ((cell->level() < parameters.shock_levels) && - (std::fabs(refinement_indicators(cell_no)) > parameters.shock_val)) - cell->set_refine_flag(); - else - if ((cell->level() > 0) && - (std::fabs(refinement_indicators(cell_no)) < 0.75*parameters.shock_val)) - cell->set_coarsen_flag(); + cell->clear_coarsen_flag(); + cell->clear_refine_flag(); + + if ((cell->level() < parameters.shock_levels) && + (std::fabs(refinement_indicators(cell_no)) > parameters.shock_val)) + cell->set_refine_flag(); + else + if ((cell->level() > 0) && + (std::fabs(refinement_indicators(cell_no)) < 0.75*parameters.shock_val)) + cell->set_coarsen_flag(); } - // Then we need to transfer the - // various solution vectors from - // the old to the new grid while we - // do the refinement. The - // SolutionTransfer class is our - // friend here; it has a fairly - // extensive documentation, - // including examples, so we won't - // comment much on the following - // code. The last three lines - // simply re-set the sizes of some - // other vectors to the now correct - // size: + // Then we need to transfer the + // various solution vectors from + // the old to the new grid while we + // do the refinement. The + // SolutionTransfer class is our + // friend here; it has a fairly + // extensive documentation, + // including examples, so we won't + // comment much on the following + // code. The last three lines + // simply re-set the sizes of some + // other vectors to the now correct + // size: std::vector > transfer_in; std::vector > transfer_out; @@ -2813,23 +2813,23 @@ namespace Step33 } - // @sect4{ConservationLaw::output_results} - - // This function now is rather - // straightforward. All the magic, including - // transforming data from conservative - // variables to physical ones has been - // abstracted and moved into the - // EulerEquations class so that it can be - // replaced in case we want to solve some - // other hyperbolic conservation law. - // - // Note that the number of the output file is - // determined by keeping a counter in the - // form of a static variable that is set to - // zero the first time we come to this - // function and is incremented by one at the - // end of each invokation. + // @sect4{ConservationLaw::output_results} + + // This function now is rather + // straightforward. All the magic, including + // transforming data from conservative + // variables to physical ones has been + // abstracted and moved into the + // EulerEquations class so that it can be + // replaced in case we want to solve some + // other hyperbolic conservation law. + // + // Note that the number of the output file is + // determined by keeping a counter in the + // form of a static variable that is set to + // zero the first time we come to this + // function and is incremented by one at the + // end of each invokation. 
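                                   // The numbering scheme just described boils down to a static counter
                                   // and zero padding to three digits. A standalone sketch in plain C++
                                   // (the function name is made up; the program itself uses
                                   // Utilities::int_to_string for the padding):

  #include <cstdio>
  #include <string>

  std::string next_output_file_name ()
  {
    static unsigned int output_file_number = 0;

    char digits[16];
    std::snprintf (digits, sizeof (digits), "%03u", output_file_number);
    ++output_file_number;

    return std::string ("solution-") + digits + ".vtk";
    // successive calls yield solution-000.vtk, solution-001.vtk, ...
  }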
template void ConservationLaw::output_results () const { @@ -2840,9 +2840,9 @@ namespace Step33 data_out.attach_dof_handler (dof_handler); data_out.add_data_vector (current_solution, - EulerEquations::component_names (), - DataOut::type_dof_data, - EulerEquations::component_interpretation ()); + EulerEquations::component_names (), + DataOut::type_dof_data, + EulerEquations::component_interpretation ()); data_out.add_data_vector (current_solution, postprocessor); @@ -2850,8 +2850,8 @@ namespace Step33 static unsigned int output_file_number = 0; std::string filename = "solution-" + - Utilities::int_to_string (output_file_number, 3) + - ".vtk"; + Utilities::int_to_string (output_file_number, 3) + + ".vtk"; std::ofstream output (filename.c_str()); data_out.write_vtk (output); @@ -2861,22 +2861,22 @@ namespace Step33 - // @sect4{ConservationLaw::run} + // @sect4{ConservationLaw::run} - // This function contains the top-level logic - // of this program: initialization, the time - // loop, and the inner Newton iteration. - // - // At the beginning, we read the mesh file - // specified by the parameter file, setup the - // DoFHandler and various vectors, and then - // interpolate the given initial conditions - // on this mesh. We then perform a number of - // mesh refinements, based on the initial - // conditions, to obtain a mesh that is - // already well adapted to the starting - // solution. At the end of this process, we - // output the initial solution. + // This function contains the top-level logic + // of this program: initialization, the time + // loop, and the inner Newton iteration. + // + // At the beginning, we read the mesh file + // specified by the parameter file, setup the + // DoFHandler and various vectors, and then + // interpolate the given initial conditions + // on this mesh. We then perform a number of + // mesh refinements, based on the initial + // conditions, to obtain a mesh that is + // already well adapted to the starting + // solution. At the end of this process, we + // output the initial solution. template void ConservationLaw::run () { @@ -2893,7 +2893,7 @@ namespace Step33 dof_handler.clear(); dof_handler.distribute_dofs (fe); - // Size all of the fields. + // Size all of the fields. old_solution.reinit (dof_handler.n_dofs()); current_solution.reinit (dof_handler.n_dofs()); predictor.reinit (dof_handler.n_dofs()); @@ -2902,35 +2902,35 @@ namespace Step33 setup_system(); VectorTools::interpolate(dof_handler, - parameters.initial_conditions, old_solution); + parameters.initial_conditions, old_solution); current_solution = old_solution; predictor = old_solution; if (parameters.do_refine == true) for (unsigned int i=0; i refinement_indicators (triangulation.n_active_cells()); + { + Vector refinement_indicators (triangulation.n_active_cells()); - compute_refinement_indicators(refinement_indicators); - refine_grid(refinement_indicators); + compute_refinement_indicators(refinement_indicators); + refine_grid(refinement_indicators); - setup_system(); + setup_system(); - VectorTools::interpolate(dof_handler, - parameters.initial_conditions, old_solution); - current_solution = old_solution; - predictor = old_solution; - } + VectorTools::interpolate(dof_handler, + parameters.initial_conditions, old_solution); + current_solution = old_solution; + predictor = old_solution; + } output_results (); - // We then enter into the main time - // stepping loop. 
At the top we simply - // output some status information so one - // can keep track of where a computation - // is, as well as the header for a table - // that indicates progress of the nonlinear - // inner iteration: + // We then enter into the main time + // stepping loop. At the top we simply + // output some status information so one + // can keep track of where a computation + // is, as well as the header for a table + // that indicates progress of the nonlinear + // inner iteration: Vector newton_update (dof_handler.n_dofs()); double time = 0; @@ -2939,164 +2939,164 @@ namespace Step33 predictor = old_solution; while (time < parameters.final_time) { - std::cout << "T=" << time << std::endl - << " Number of active cells: " - << triangulation.n_active_cells() - << std::endl - << " Number of degrees of freedom: " - << dof_handler.n_dofs() - << std::endl - << std::endl; - - std::cout << " NonLin Res Lin Iter Lin Res" << std::endl - << " _____________________________________" << std::endl; - - // Then comes the inner Newton - // iteration to solve the nonlinear - // problem in each time step. The way - // it works is to reset matrix and - // right hand side to zero, then - // assemble the linear system. If the - // norm of the right hand side is small - // enough, then we declare that the - // Newton iteration has - // converged. Otherwise, we solve the - // linear system, update the current - // solution with the Newton increment, - // and output convergence - // information. At the end, we check - // that the number of Newton iterations - // is not beyond a limit of 10 -- if it - // is, it appears likely that - // iterations are diverging and further - // iterations would do no good. If that - // happens, we throw an exception that - // will be caught in - // main() with status - // information being displayed before - // the program aborts. - // - // Note that the way we write the - // AssertThrow macro below is by and - // large equivalent to writing - // something like if - // (!(nonlin_iter @<= 10)) throw - // ExcMessage ("No convergence in - // nonlinear solver");. The only - // significant difference is that - // AssertThrow also makes sure that the - // exception being thrown carries with - // it information about the location - // (file name and line number) where it - // was generated. This is not overly - // critical here, because there is only - // a single place where this sort of - // exception can happen; however, it is - // generally a very useful tool when - // one wants to find out where an error - // occurred. - unsigned int nonlin_iter = 0; - current_solution = predictor; - while (true) - { - system_matrix = 0; - - right_hand_side = 0; - assemble_system (); - - const double res_norm = right_hand_side.l2_norm(); - if (std::fabs(res_norm) < 1e-10) - { - std::printf(" %-16.3e (converged)\n\n", res_norm); - break; - } - else - { - newton_update = 0; - - std::pair convergence - = solve (newton_update); - - current_solution += newton_update; - - std::printf(" %-16.3e %04d %-5.2e\n", - res_norm, convergence.first, convergence.second); - } - - ++nonlin_iter; - AssertThrow (nonlin_iter <= 10, - ExcMessage ("No convergence in nonlinear solver")); - } - - // We only get to this point if the - // Newton iteration has converged, so - // do various post convergence tasks - // here: - // - // First, we update the time - // and produce graphical output - // if so desired. 
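                                   // The structure of the Newton iteration described above carries over
                                   // verbatim to a scalar model problem. The following self-contained
                                   // sketch (plain C++, solving $x^3=2$; everything here is illustrative,
                                   // not program code) shows the same pattern: assemble the residual,
                                   // stop once its norm drops below 1e-10, otherwise solve for the
                                   // update, add it, and give up after ten iterations.

  #include <cmath>
  #include <cstdio>
  #include <stdexcept>

  int main ()
  {
    double       x           = 1.0;   // initial guess, playing the role of the predictor
    unsigned int nonlin_iter = 0;

    while (true)
      {
        const double residual = x * x * x - 2.0;       // "assemble_system"

        if (std::fabs (residual) < 1e-10)
          {
            std::printf ("  %-16.3e (converged)\n", residual);
            break;
          }

        const double newton_update = -residual / (3.0 * x * x);   // solve J dx = -F
        x += newton_update;
        std::printf ("  %-16.3e\n", residual);

        ++nonlin_iter;
        if (nonlin_iter > 10)   // same guard as the AssertThrow below
          throw std::runtime_error ("No convergence in nonlinear solver");
      }
  }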
Then we - // update a predictor for the - // solution at the next time - // step by approximating - // $\mathbf w^{n+1}\approx - // \mathbf w^n + \delta t - // \frac{\partial \mathbf - // w}{\partial t} \approx - // \mathbf w^n + \delta t \; - // \frac{\mathbf w^n-\mathbf - // w^{n-1}}{\delta t} = 2 - // \mathbf w^n - \mathbf - // w^{n-1}$ to try and make - // adaptivity work better. The - // idea is to try and refine - // ahead of a front, rather - // than stepping into a coarse - // set of elements and smearing - // the old_solution. This - // simple time extrapolator - // does the job. With this, we - // then refine the mesh if so - // desired by the user, and - // finally continue on with the - // next time step: - time += parameters.time_step; - - if (parameters.output_step < 0) - output_results (); - else if (time >= next_output) - { - output_results (); - next_output += parameters.output_step; - } - - predictor = current_solution; - predictor.sadd (2.0, -1.0, old_solution); - - old_solution = current_solution; - - if (parameters.do_refine == true) - { - Vector refinement_indicators (triangulation.n_active_cells()); - compute_refinement_indicators(refinement_indicators); - - refine_grid(refinement_indicators); - setup_system(); - - newton_update.reinit (dof_handler.n_dofs()); - } + std::cout << "T=" << time << std::endl + << " Number of active cells: " + << triangulation.n_active_cells() + << std::endl + << " Number of degrees of freedom: " + << dof_handler.n_dofs() + << std::endl + << std::endl; + + std::cout << " NonLin Res Lin Iter Lin Res" << std::endl + << " _____________________________________" << std::endl; + + // Then comes the inner Newton + // iteration to solve the nonlinear + // problem in each time step. The way + // it works is to reset matrix and + // right hand side to zero, then + // assemble the linear system. If the + // norm of the right hand side is small + // enough, then we declare that the + // Newton iteration has + // converged. Otherwise, we solve the + // linear system, update the current + // solution with the Newton increment, + // and output convergence + // information. At the end, we check + // that the number of Newton iterations + // is not beyond a limit of 10 -- if it + // is, it appears likely that + // iterations are diverging and further + // iterations would do no good. If that + // happens, we throw an exception that + // will be caught in + // main() with status + // information being displayed before + // the program aborts. + // + // Note that the way we write the + // AssertThrow macro below is by and + // large equivalent to writing + // something like if + // (!(nonlin_iter @<= 10)) throw + // ExcMessage ("No convergence in + // nonlinear solver");. The only + // significant difference is that + // AssertThrow also makes sure that the + // exception being thrown carries with + // it information about the location + // (file name and line number) where it + // was generated. This is not overly + // critical here, because there is only + // a single place where this sort of + // exception can happen; however, it is + // generally a very useful tool when + // one wants to find out where an error + // occurred. 
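                                         // To make the equivalence just
                                         // mentioned concrete: the check
                                         // used at the end of the loop
                                         // below,
                                         //   AssertThrow (nonlin_iter <= 10,
                                         //                ExcMessage ("No convergence in nonlinear solver"));
                                         // behaves -- as a rough sketch
                                         // that ignores the file and line
                                         // information the macro records --
                                         // like the hand-written test
                                         //   if (!(nonlin_iter <= 10))
                                         //     throw ExcMessage ("No convergence in nonlinear solver");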
+ unsigned int nonlin_iter = 0; + current_solution = predictor; + while (true) + { + system_matrix = 0; + + right_hand_side = 0; + assemble_system (); + + const double res_norm = right_hand_side.l2_norm(); + if (std::fabs(res_norm) < 1e-10) + { + std::printf(" %-16.3e (converged)\n\n", res_norm); + break; + } + else + { + newton_update = 0; + + std::pair convergence + = solve (newton_update); + + current_solution += newton_update; + + std::printf(" %-16.3e %04d %-5.2e\n", + res_norm, convergence.first, convergence.second); + } + + ++nonlin_iter; + AssertThrow (nonlin_iter <= 10, + ExcMessage ("No convergence in nonlinear solver")); + } + + // We only get to this point if the + // Newton iteration has converged, so + // do various post convergence tasks + // here: + // + // First, we update the time + // and produce graphical output + // if so desired. Then we + // update a predictor for the + // solution at the next time + // step by approximating + // $\mathbf w^{n+1}\approx + // \mathbf w^n + \delta t + // \frac{\partial \mathbf + // w}{\partial t} \approx + // \mathbf w^n + \delta t \; + // \frac{\mathbf w^n-\mathbf + // w^{n-1}}{\delta t} = 2 + // \mathbf w^n - \mathbf + // w^{n-1}$ to try and make + // adaptivity work better. The + // idea is to try and refine + // ahead of a front, rather + // than stepping into a coarse + // set of elements and smearing + // the old_solution. This + // simple time extrapolator + // does the job. With this, we + // then refine the mesh if so + // desired by the user, and + // finally continue on with the + // next time step: + time += parameters.time_step; + + if (parameters.output_step < 0) + output_results (); + else if (time >= next_output) + { + output_results (); + next_output += parameters.output_step; + } + + predictor = current_solution; + predictor.sadd (2.0, -1.0, old_solution); + + old_solution = current_solution; + + if (parameters.do_refine == true) + { + Vector refinement_indicators (triangulation.n_active_cells()); + compute_refinement_indicators(refinement_indicators); + + refine_grid(refinement_indicators); + setup_system(); + + newton_update.reinit (dof_handler.n_dofs()); + } } } } // @sect3{main()} - // The following ``main'' function is - // similar to previous examples and - // need not to be commented on. Note - // that the program aborts if no input - // file name is given on the command - // line. + // The following ``main'' function is + // similar to previous examples and + // need not to be commented on. Note + // that the program aborts if no input + // file name is given on the command + // line. int main (int argc, char *argv[]) { try @@ -3106,10 +3106,10 @@ int main (int argc, char *argv[]) deallog.depth_console(0); if (argc != 2) - { - std::cout << "Usage:" << argv[0] << " input_file" << std::endl; - std::exit(1); - } + { + std::cout << "Usage:" << argv[0] << " input_file" << std::endl; + std::exit(1); + } Utilities::System::MPI_InitFinalize mpi_initialization (argc, argv); @@ -3119,24 +3119,24 @@ int main (int argc, char *argv[]) catch (std::exception &exc) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Exception on processing: " << std::endl - << exc.what() << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << exc.what() << std::endl + << "Aborting!" 
<< std::endl + << "----------------------------------------------------" + << std::endl; return 1; } catch (...) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Unknown exception!" << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; }; diff --git a/deal.II/examples/step-34/step-34.cc b/deal.II/examples/step-34/step-34.cc index ebb7399541..d101457575 100644 --- a/deal.II/examples/step-34/step-34.cc +++ b/deal.II/examples/step-34/step-34.cc @@ -9,13 +9,13 @@ /* to the file deal.II/doc/license.html for the text and */ /* further information on this license. */ - // @sect3{Include files} + // @sect3{Include files} - // The program starts with including a bunch - // of include files that we will use in the - // various parts of the program. Most of them - // have been discussed in previous tutorials - // already: + // The program starts with including a bunch + // of include files that we will use in the + // various parts of the program. Most of them + // have been discussed in previous tutorials + // already: #include #include #include @@ -48,53 +48,53 @@ #include #include - // And here are a few C++ standard header - // files that we will need: + // And here are a few C++ standard header + // files that we will need: #include #include #include #include - // The last part of this preamble is to - // import everything in the dealii namespace - // into the one into which everything in this - // program will go: + // The last part of this preamble is to + // import everything in the dealii namespace + // into the one into which everything in this + // program will go: namespace Step34 { using namespace dealii; - // @sect3{Single and double layer operator kernels} + // @sect3{Single and double layer operator kernels} - // First, let us define a bit of the - // boundary integral equation - // machinery. + // First, let us define a bit of the + // boundary integral equation + // machinery. - // The following two functions are - // the actual calculations of the - // single and double layer potential - // kernels, that is $G$ and $\nabla - // G$. They are well defined only if - // the vector $R = - // \mathbf{y}-\mathbf{x}$ is - // different from zero. + // The following two functions are + // the actual calculations of the + // single and double layer potential + // kernels, that is $G$ and $\nabla + // G$. They are well defined only if + // the vector $R = + // \mathbf{y}-\mathbf{x}$ is + // different from zero. 
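  // Written out, the expressions
  // implemented below are, in two and
  // three space dimensions respectively,
  // \f[
  // G(R) = -\frac{1}{2\pi}\log|R|,
  // \qquad
  // G(R) = \frac{1}{4\pi|R|},
  // \f]
  // while double_layer() returns
  // \f[
  // -\frac{R}{2\pi|R|^2},
  // \qquad
  // -\frac{R}{4\pi|R|^3}.
  // \f]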
namespace LaplaceKernel { template double single_layer(const Point &R) { switch(dim) - { - case 2: - return (-std::log(R.norm()) / (2*numbers::PI) ); + { + case 2: + return (-std::log(R.norm()) / (2*numbers::PI) ); - case 3: - return (1./( R.norm()*4*numbers::PI ) ); + case 3: + return (1./( R.norm()*4*numbers::PI ) ); - default: - Assert(false, ExcInternalError()); - return 0.; - } + default: + Assert(false, ExcInternalError()); + return 0.; + } } @@ -103,44 +103,44 @@ namespace Step34 Point double_layer(const Point &R) { switch(dim) - { - case 2: - return R / ( -2*numbers::PI * R.square()); - case 3: - return R / ( -4*numbers::PI * R.square() * R.norm() ); - - default: - Assert(false, ExcInternalError()); - return Point(); - } + { + case 2: + return R / ( -2*numbers::PI * R.square()); + case 3: + return R / ( -4*numbers::PI * R.square() * R.norm() ); + + default: + Assert(false, ExcInternalError()); + return Point(); + } } } - // @sect3{The BEMProblem class} - - // The structure of a boundary - // element method code is very - // similar to the structure of a - // finite element code, and so the - // member functions of this class are - // like those of most of the other - // tutorial programs. In particular, - // by now you should be familiar with - // reading parameters from an - // external file, and with the - // splitting of the different tasks - // into different modules. The same - // applies to boundary element - // methods, and we won't comment too - // much on them, except on the - // differences. + // @sect3{The BEMProblem class} + + // The structure of a boundary + // element method code is very + // similar to the structure of a + // finite element code, and so the + // member functions of this class are + // like those of most of the other + // tutorial programs. In particular, + // by now you should be familiar with + // reading parameters from an + // external file, and with the + // splitting of the different tasks + // into different modules. The same + // applies to boundary element + // methods, and we won't comment too + // much on them, except on the + // differences. template class BEMProblem { public: BEMProblem(const unsigned int fe_degree = 1, - const unsigned int mapping_degree = 1); + const unsigned int mapping_degree = 1); void run(); @@ -152,273 +152,273 @@ namespace Step34 void refine_and_resize(); - // The only really different - // function that we find here is - // the assembly routine. We wrote - // this function in the most - // possible general way, in order - // to allow for easy - // generalization to higher order - // methods and to different - // fundamental solutions (e.g., - // Stokes or Maxwell). - // - // The most noticeable difference - // is the fact that the final - // matrix is full, and that we - // have a nested loop inside the - // usual loop on cells that - // visits all support points of - // the degrees of freedom. - // Moreover, when the support - // point lies inside the cell - // which we are visiting, then - // the integral we perform - // becomes singular. - // - // The practical consequence is - // that we have two sets of - // quadrature formulas, finite - // element values and temporary - // storage, one for standard - // integration and one for the - // singular integration, which - // are used where necessary. + // The only really different + // function that we find here is + // the assembly routine. 
We wrote + // this function in the most + // possible general way, in order + // to allow for easy + // generalization to higher order + // methods and to different + // fundamental solutions (e.g., + // Stokes or Maxwell). + // + // The most noticeable difference + // is the fact that the final + // matrix is full, and that we + // have a nested loop inside the + // usual loop on cells that + // visits all support points of + // the degrees of freedom. + // Moreover, when the support + // point lies inside the cell + // which we are visiting, then + // the integral we perform + // becomes singular. + // + // The practical consequence is + // that we have two sets of + // quadrature formulas, finite + // element values and temporary + // storage, one for standard + // integration and one for the + // singular integration, which + // are used where necessary. void assemble_system(); - // There are two options for the - // solution of this problem. The - // first is to use a direct - // solver, and the second is to - // use an iterative solver. We - // opt for the second option. - // - // The matrix that we assemble is - // not symmetric, and we opt to - // use the GMRES method; however - // the construction of an - // efficient preconditioner for - // boundary element methods is - // not a trivial issue. Here we - // use a non preconditioned GMRES - // solver. The options for the - // iterative solver, such as the - // tolerance, the maximum number - // of iterations, are selected - // through the parameter file. + // There are two options for the + // solution of this problem. The + // first is to use a direct + // solver, and the second is to + // use an iterative solver. We + // opt for the second option. + // + // The matrix that we assemble is + // not symmetric, and we opt to + // use the GMRES method; however + // the construction of an + // efficient preconditioner for + // boundary element methods is + // not a trivial issue. Here we + // use a non preconditioned GMRES + // solver. The options for the + // iterative solver, such as the + // tolerance, the maximum number + // of iterations, are selected + // through the parameter file. void solve_system(); - // Once we obtained the solution, - // we compute the $L^2$ error of - // the computed potential as well - // as the $L^\infty$ error of the - // approximation of the solid - // angle. The mesh we are using - // is an approximation of a - // smooth curve, therefore the - // computed diagonal matrix of - // fraction of angles or solid - // angles $\alpha(\mathbf{x})$ - // should be constantly equal to - // $\frac 12$. In this routine we - // output the error on the - // potential and the error in the - // approximation of the computed - // angle. Notice that the latter - // error is actually not the - // error in the computation of - // the angle, but a measure of - // how well we are approximating - // the sphere and the circle. - // - // Experimenting a little with - // the computation of the angles - // gives very accurate results - // for simpler geometries. To - // verify this you can comment - // out, in the read_domain() - // method, the - // tria.set_boundary(1, boundary) - // line, and check the alpha that - // is generated by the - // program. By removing this - // call, whenever the mesh is - // refined new nodes will be - // placed along the straight - // lines that made up the coarse - // mesh, rather than be pulled - // onto the surface that we - // really want to approximate. 
In - // the three dimensional case, - // the coarse grid of the sphere - // is obtained starting from a - // cube, and the obtained values - // of alphas are exactly $\frac - // 12$ on the nodes of the faces, - // $\frac 34$ on the nodes of the - // edges and $\frac 78$ on the 8 - // nodes of the vertices. + // Once we obtained the solution, + // we compute the $L^2$ error of + // the computed potential as well + // as the $L^\infty$ error of the + // approximation of the solid + // angle. The mesh we are using + // is an approximation of a + // smooth curve, therefore the + // computed diagonal matrix of + // fraction of angles or solid + // angles $\alpha(\mathbf{x})$ + // should be constantly equal to + // $\frac 12$. In this routine we + // output the error on the + // potential and the error in the + // approximation of the computed + // angle. Notice that the latter + // error is actually not the + // error in the computation of + // the angle, but a measure of + // how well we are approximating + // the sphere and the circle. + // + // Experimenting a little with + // the computation of the angles + // gives very accurate results + // for simpler geometries. To + // verify this you can comment + // out, in the read_domain() + // method, the + // tria.set_boundary(1, boundary) + // line, and check the alpha that + // is generated by the + // program. By removing this + // call, whenever the mesh is + // refined new nodes will be + // placed along the straight + // lines that made up the coarse + // mesh, rather than be pulled + // onto the surface that we + // really want to approximate. In + // the three dimensional case, + // the coarse grid of the sphere + // is obtained starting from a + // cube, and the obtained values + // of alphas are exactly $\frac + // 12$ on the nodes of the faces, + // $\frac 34$ on the nodes of the + // edges and $\frac 78$ on the 8 + // nodes of the vertices. void compute_errors(const unsigned int cycle); - // Once we obtained a solution on - // the codimension one domain, we - // want to interpolate it to the - // rest of the space. This is - // done by performing again the - // convolution of the solution - // with the kernel in the - // compute_exterior_solution() - // function. - // - // We would like to plot the - // velocity variable which is the - // gradient of the potential - // solution. The potential - // solution is only known on the - // boundary, but we use the - // convolution with the - // fundamental solution to - // interpolate it on a standard - // dim dimensional continuous - // finite element space. The plot - // of the gradient of the - // extrapolated solution will - // give us the velocity we want. - // - // In addition to the solution on - // the exterior domain, we also - // output the solution on the - // domain's boundary in the - // output_results() function, of - // course. + // Once we obtained a solution on + // the codimension one domain, we + // want to interpolate it to the + // rest of the space. This is + // done by performing again the + // convolution of the solution + // with the kernel in the + // compute_exterior_solution() + // function. + // + // We would like to plot the + // velocity variable which is the + // gradient of the potential + // solution. The potential + // solution is only known on the + // boundary, but we use the + // convolution with the + // fundamental solution to + // interpolate it on a standard + // dim dimensional continuous + // finite element space. 
The plot + // of the gradient of the + // extrapolated solution will + // give us the velocity we want. + // + // In addition to the solution on + // the exterior domain, we also + // output the solution on the + // domain's boundary in the + // output_results() function, of + // course. void compute_exterior_solution(); void output_results(const unsigned int cycle); - // To allow for dimension - // independent programming, we - // specialize this single - // function to extract the - // singular quadrature formula - // needed to integrate the - // singular kernels in the - // interior of the cells. + // To allow for dimension + // independent programming, we + // specialize this single + // function to extract the + // singular quadrature formula + // needed to integrate the + // singular kernels in the + // interior of the cells. const Quadrature & get_singular_quadrature( - const typename DoFHandler::active_cell_iterator &cell, - const unsigned int index) const; - - - // The usual deal.II classes can - // be used for boundary element - // methods by specifying the - // "codimension" of the - // problem. This is done by - // setting the optional second - // template arguments to - // Triangulation, FiniteElement - // and DoFHandler to the - // dimension of the embedding - // space. In our case we generate - // either 1 or 2 dimensional - // meshes embedded in 2 or 3 - // dimensional spaces. - // - // The optional argument by - // default is equal to the first - // argument, and produces the - // usual finite element classes - // that we saw in all previous - // examples. - // - // The class is constructed in a - // way to allow for arbitrary - // order of approximation of both - // the domain (through high order - // mapping) and the finite - // element space. The order of - // the finite element space and - // of the mapping can be selected - // in the constructor of the class. + const typename DoFHandler::active_cell_iterator &cell, + const unsigned int index) const; + + + // The usual deal.II classes can + // be used for boundary element + // methods by specifying the + // "codimension" of the + // problem. This is done by + // setting the optional second + // template arguments to + // Triangulation, FiniteElement + // and DoFHandler to the + // dimension of the embedding + // space. In our case we generate + // either 1 or 2 dimensional + // meshes embedded in 2 or 3 + // dimensional spaces. + // + // The optional argument by + // default is equal to the first + // argument, and produces the + // usual finite element classes + // that we saw in all previous + // examples. + // + // The class is constructed in a + // way to allow for arbitrary + // order of approximation of both + // the domain (through high order + // mapping) and the finite + // element space. The order of + // the finite element space and + // of the mapping can be selected + // in the constructor of the class. Triangulation tria; FE_Q fe; DoFHandler dh; - MappingQ mapping; - - // In BEM methods, the matrix - // that is generated is - // dense. Depending on the size - // of the problem, the final - // system might be solved by - // direct LU decomposition, or by - // iterative methods. In this - // example we use an - // unpreconditioned GMRES - // method. Building a - // preconditioner for BEM method - // is non trivial, and we don't - // treat this subject here. + MappingQ mapping; + + // In BEM methods, the matrix + // that is generated is + // dense. 
Depending on the size + // of the problem, the final + // system might be solved by + // direct LU decomposition, or by + // iterative methods. In this + // example we use an + // unpreconditioned GMRES + // method. Building a + // preconditioner for BEM method + // is non trivial, and we don't + // treat this subject here. FullMatrix system_matrix; Vector system_rhs; - // The next two variables will - // denote the solution $\phi$ as - // well as a vector that will - // hold the values of - // $\alpha(\mathbf x)$ (the - // fraction of $\Omega$ visible - // from a point $\mathbf x$) at - // the support points of our - // shape functions. + // The next two variables will + // denote the solution $\phi$ as + // well as a vector that will + // hold the values of + // $\alpha(\mathbf x)$ (the + // fraction of $\Omega$ visible + // from a point $\mathbf x$) at + // the support points of our + // shape functions. Vector phi; Vector alpha; - // The convergence table is used - // to output errors in the exact - // solution and in the computed - // alphas. - - ConvergenceTable convergence_table; - - // The following variables are - // the ones that we fill through - // a parameter file. The new - // objects that we use in this - // example are the - // Functions::ParsedFunction - // object and the - // QuadratureSelector object. - // - // The Functions::ParsedFunction - // class allows us to easily and - // quickly define new function - // objects via parameter files, - // with custom definitions which - // can be very complex (see the - // documentation of that class - // for all the available - // options). - // - // We will allocate the - // quadrature object using the - // QuadratureSelector class that - // allows us to generate - // quadrature formulas based on - // an identifying string and on - // the possible degree of the - // formula itself. We used this - // to allow custom selection of - // the quadrature formulas for - // the standard integration, and - // to define the order of the - // singular quadrature rule. - // - // We also define a couple of - // parameters which are used in - // case we wanted to extend the - // solution to the entire domain. + // The convergence table is used + // to output errors in the exact + // solution and in the computed + // alphas. + + ConvergenceTable convergence_table; + + // The following variables are + // the ones that we fill through + // a parameter file. The new + // objects that we use in this + // example are the + // Functions::ParsedFunction + // object and the + // QuadratureSelector object. + // + // The Functions::ParsedFunction + // class allows us to easily and + // quickly define new function + // objects via parameter files, + // with custom definitions which + // can be very complex (see the + // documentation of that class + // for all the available + // options). + // + // We will allocate the + // quadrature object using the + // QuadratureSelector class that + // allows us to generate + // quadrature formulas based on + // an identifying string and on + // the possible degree of the + // formula itself. We used this + // to allow custom selection of + // the quadrature formulas for + // the standard integration, and + // to define the order of the + // singular quadrature rule. + // + // We also define a couple of + // parameters which are used in + // case we wanted to extend the + // solution to the entire domain. 
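    // As a small sketch of what this
    // selection mechanism amounts to:
    // with the default values declared
    // in read_parameters() ("gauss" and
    // order 4), the construction
    // performed there boils down to
    // something like
    //   QuadratureSelector<dim-1> quad ("gauss", 4);
    // that is, an ordinary Gauss formula
    // on the codimension one reference
    // cell. (The variable name quad is,
    // of course, only a placeholder
    // here.)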
Functions::ParsedFunction wind; Functions::ParsedFunction exact_solution; @@ -436,42 +436,42 @@ namespace Step34 }; - // @sect4{BEMProblem::BEMProblem and BEMProblem::read_parameters} - - // The constructor initializes the - // variuous object in much the same - // way as done in the finite element - // programs such as step-4 or - // step-6. The only new ingredient - // here is the ParsedFunction object, - // which needs, at construction time, - // the specification of the number of - // components. - // - // For the exact solution the number - // of vector components is one, and - // no action is required since one is - // the default value for a - // ParsedFunction object. The wind, - // however, requires dim components - // to be specified. Notice that when - // declaring entries in a parameter - // file for the expression of the - // Functions::ParsedFunction, we need - // to specify the number of - // components explicitly, since the - // function - // Functions::ParsedFunction::declare_parameters - // is static, and has no knowledge of - // the number of components. + // @sect4{BEMProblem::BEMProblem and BEMProblem::read_parameters} + + // The constructor initializes the + // variuous object in much the same + // way as done in the finite element + // programs such as step-4 or + // step-6. The only new ingredient + // here is the ParsedFunction object, + // which needs, at construction time, + // the specification of the number of + // components. + // + // For the exact solution the number + // of vector components is one, and + // no action is required since one is + // the default value for a + // ParsedFunction object. The wind, + // however, requires dim components + // to be specified. Notice that when + // declaring entries in a parameter + // file for the expression of the + // Functions::ParsedFunction, we need + // to specify the number of + // components explicitly, since the + // function + // Functions::ParsedFunction::declare_parameters + // is static, and has no knowledge of + // the number of components. template BEMProblem::BEMProblem(const unsigned int fe_degree, - const unsigned int mapping_degree) - : - fe(fe_degree), - dh(tria), - mapping(mapping_degree, true), - wind(dim) + const unsigned int mapping_degree) + : + fe(fe_degree), + dh(tria), + mapping(mapping_degree, true), + wind(dim) {} @@ -479,85 +479,85 @@ namespace Step34 void BEMProblem::read_parameters (const std::string &filename) { deallog << std::endl << "Parsing parameter file " << filename << std::endl - << "for a " << dim << " dimensional simulation. " << std::endl; + << "for a " << dim << " dimensional simulation. 
" << std::endl; ParameterHandler prm; prm.declare_entry("Number of cycles", "4", - Patterns::Integer()); + Patterns::Integer()); prm.declare_entry("External refinement", "5", - Patterns::Integer()); + Patterns::Integer()); prm.declare_entry("Extend solution on the -2,2 box", "true", - Patterns::Bool()); + Patterns::Bool()); prm.declare_entry("Run 2d simulation", "true", - Patterns::Bool()); + Patterns::Bool()); prm.declare_entry("Run 3d simulation", "true", - Patterns::Bool()); + Patterns::Bool()); prm.enter_subsection("Quadrature rules"); { prm.declare_entry("Quadrature type", "gauss", - Patterns::Selection(QuadratureSelector<(dim-1)>::get_quadrature_names())); + Patterns::Selection(QuadratureSelector<(dim-1)>::get_quadrature_names())); prm.declare_entry("Quadrature order", "4", Patterns::Integer()); prm.declare_entry("Singular quadrature order", "5", Patterns::Integer()); } prm.leave_subsection(); - // For both two and three - // dimensions, we set the default - // input data to be such that the - // solution is $x+y$ or - // $x+y+z$. The actually computed - // solution will have value zero at - // infinity. In this case, this - // coincide with the exact - // solution, and no additional - // corrections are needed, but you - // should be aware of the fact that - // we arbitrarily set - // $\phi_\infty$, and the exact - // solution we pass to the program - // needs to have the same value at - // infinity for the error to be - // computed correctly. - // - // The use of the - // Functions::ParsedFunction object - // is pretty straight forward. The - // Functions::ParsedFunction::declare_parameters - // function takes an additional - // integer argument that specifies - // the number of components of the - // given function. Its default - // value is one. When the - // corresponding - // Functions::ParsedFunction::parse_parameters - // method is called, the calling - // object has to have the same - // number of components defined - // here, otherwise an exception is - // thrown. - // - // When declaring entries, we - // declare both 2 and three - // dimensional functions. However - // only the dim-dimensional one is - // ultimately parsed. This allows - // us to have only one parameter - // file for both 2 and 3 - // dimensional problems. - // - // Notice that from a mathematical - // point of view, the wind function - // on the boundary should satisfy - // the condition - // $\int_{\partial\Omega} - // \mathbf{v}\cdot \mathbf{n} d - // \Gamma = 0$, for the problem to - // have a solution. If this - // condition is not satisfied, then - // no solution can be found, and - // the solver will not converge. + // For both two and three + // dimensions, we set the default + // input data to be such that the + // solution is $x+y$ or + // $x+y+z$. The actually computed + // solution will have value zero at + // infinity. In this case, this + // coincide with the exact + // solution, and no additional + // corrections are needed, but you + // should be aware of the fact that + // we arbitrarily set + // $\phi_\infty$, and the exact + // solution we pass to the program + // needs to have the same value at + // infinity for the error to be + // computed correctly. + // + // The use of the + // Functions::ParsedFunction object + // is pretty straight forward. The + // Functions::ParsedFunction::declare_parameters + // function takes an additional + // integer argument that specifies + // the number of components of the + // given function. Its default + // value is one. 
When the + // corresponding + // Functions::ParsedFunction::parse_parameters + // method is called, the calling + // object has to have the same + // number of components defined + // here, otherwise an exception is + // thrown. + // + // When declaring entries, we + // declare both 2 and three + // dimensional functions. However + // only the dim-dimensional one is + // ultimately parsed. This allows + // us to have only one parameter + // file for both 2 and 3 + // dimensional problems. + // + // Notice that from a mathematical + // point of view, the wind function + // on the boundary should satisfy + // the condition + // $\int_{\partial\Omega} + // \mathbf{v}\cdot \mathbf{n} d + // \Gamma = 0$, for the problem to + // have a solution. If this + // condition is not satisfied, then + // no solution can be found, and + // the solver will not converge. prm.enter_subsection("Wind function 2d"); { Functions::ParsedFunction<2>::declare_parameters(prm, 2); @@ -587,23 +587,23 @@ namespace Step34 prm.leave_subsection(); - // In the solver section, we set - // all SolverControl - // parameters. The object will then - // be fed to the GMRES solver in - // the solve_system() function. + // In the solver section, we set + // all SolverControl + // parameters. The object will then + // be fed to the GMRES solver in + // the solve_system() function. prm.enter_subsection("Solver"); SolverControl::declare_parameters(prm); prm.leave_subsection(); - // After declaring all these - // parameters to the - // ParameterHandler object, let's - // read an input file that will - // give the parameters their - // values. We then proceed to - // extract these values from the - // ParameterHandler object: + // After declaring all these + // parameters to the + // ParameterHandler object, let's + // read an input file that will + // give the parameters their + // values. We then proceed to + // extract these values from the + // ParameterHandler object: prm.read_input(filename); n_cycles = prm.get_integer("Number of cycles"); @@ -613,22 +613,22 @@ namespace Step34 prm.enter_subsection("Quadrature rules"); { quadrature = - std_cxx1x::shared_ptr > - (new QuadratureSelector (prm.get("Quadrature type"), - prm.get_integer("Quadrature order"))); + std_cxx1x::shared_ptr > + (new QuadratureSelector (prm.get("Quadrature type"), + prm.get_integer("Quadrature order"))); singular_quadrature_order = prm.get_integer("Singular quadrature order"); } prm.leave_subsection(); prm.enter_subsection(std::string("Wind function ")+ - Utilities::int_to_string(dim)+std::string("d")); + Utilities::int_to_string(dim)+std::string("d")); { wind.parse_parameters(prm); } prm.leave_subsection(); prm.enter_subsection(std::string("Exact solution ")+ - Utilities::int_to_string(dim)+std::string("d")); + Utilities::int_to_string(dim)+std::string("d")); { exact_solution.parse_parameters(prm); } @@ -639,74 +639,74 @@ namespace Step34 prm.leave_subsection(); - // Finally, here's another example - // of how to use parameter files in - // dimension independent - // programming. If we wanted to - // switch off one of the two - // simulations, we could do this by - // setting the corresponding "Run - // 2d simulation" or "Run 3d - // simulation" flag to false: + // Finally, here's another example + // of how to use parameter files in + // dimension independent + // programming. 
If we wanted to + // switch off one of the two + // simulations, we could do this by + // setting the corresponding "Run + // 2d simulation" or "Run 3d + // simulation" flag to false: run_in_this_dimension = prm.get_bool("Run " + - Utilities::int_to_string(dim) + - "d simulation"); + Utilities::int_to_string(dim) + + "d simulation"); } - // @sect4{BEMProblem::read_domain} - - // A boundary element method - // triangulation is basically the - // same as a (dim-1) dimensional - // triangulation, with the difference - // that the vertices belong to a - // (dim) dimensional space. - // - // Some of the mesh formats supported - // in deal.II use by default three - // dimensional points to describe - // meshes. These are the formats - // which are compatible with the - // boundary element method - // capabilities of deal.II. In - // particular we can use either UCD - // or GMSH formats. In both cases, we - // have to be particularly careful - // with the orientation of the mesh, - // because, unlike in the standard - // finite element case, no reordering - // or compatibility check is - // performed here. All meshes are - // considered as oriented, because - // they are embedded in a higher - // dimensional space. (See the - // documentation of the GridIn and of - // the Triangulation for further - // details on orientation of cells in - // a triangulation.) In our case, the - // normals to the mesh are external - // to both the circle in 2d or the - // sphere in 3d. - // - // The other detail that is required - // for appropriate refinement of the - // boundary element mesh, is an - // accurate description of the - // manifold that the mesh is - // approximating. We already saw this - // several times for the boundary of - // standard finite element meshes - // (for example in step-5 and - // step-6), and here the principle - // and usage is the same, except that - // the HyperBallBoundary class takes - // an additional template parameter - // that specifies the embedding space - // dimension. The function object - // still has to be static to live at - // least as long as the triangulation - // object to which it is attached. + // @sect4{BEMProblem::read_domain} + + // A boundary element method + // triangulation is basically the + // same as a (dim-1) dimensional + // triangulation, with the difference + // that the vertices belong to a + // (dim) dimensional space. + // + // Some of the mesh formats supported + // in deal.II use by default three + // dimensional points to describe + // meshes. These are the formats + // which are compatible with the + // boundary element method + // capabilities of deal.II. In + // particular we can use either UCD + // or GMSH formats. In both cases, we + // have to be particularly careful + // with the orientation of the mesh, + // because, unlike in the standard + // finite element case, no reordering + // or compatibility check is + // performed here. All meshes are + // considered as oriented, because + // they are embedded in a higher + // dimensional space. (See the + // documentation of the GridIn and of + // the Triangulation for further + // details on orientation of cells in + // a triangulation.) In our case, the + // normals to the mesh are external + // to both the circle in 2d or the + // sphere in 3d. + // + // The other detail that is required + // for appropriate refinement of the + // boundary element mesh, is an + // accurate description of the + // manifold that the mesh is + // approximating. 
We already saw this + // several times for the boundary of + // standard finite element meshes + // (for example in step-5 and + // step-6), and here the principle + // and usage is the same, except that + // the HyperBallBoundary class takes + // an additional template parameter + // that specifies the embedding space + // dimension. The function object + // still has to be static to live at + // least as long as the triangulation + // object to which it is attached. template void BEMProblem::read_domain() @@ -717,16 +717,16 @@ namespace Step34 std::ifstream in; switch (dim) { - case 2: - in.open ("coarse_circle.inp"); - break; + case 2: + in.open ("coarse_circle.inp"); + break; - case 3: - in.open ("coarse_sphere.inp"); - break; + case 3: + in.open ("coarse_sphere.inp"); + break; - default: - Assert (false, ExcNotImplemented()); + default: + Assert (false, ExcNotImplemented()); } GridIn gi; @@ -737,12 +737,12 @@ namespace Step34 } - // @sect4{BEMProblem::refine_and_resize} + // @sect4{BEMProblem::refine_and_resize} - // This function globally refines the - // mesh, distributes degrees of - // freedom, and resizes matrices and - // vectors. + // This function globally refines the + // mesh, distributes degrees of + // freedom, and resizes matrices and + // vectors. template void BEMProblem::refine_and_resize() @@ -761,31 +761,31 @@ namespace Step34 } - // @sect4{BEMProblem::assemble_system} + // @sect4{BEMProblem::assemble_system} - // The following is the main function - // of this program, assembling the - // matrix that corresponds to the - // boundary integral equation. + // The following is the main function + // of this program, assembling the + // matrix that corresponds to the + // boundary integral equation. template void BEMProblem::assemble_system() { - // First we initialize an FEValues - // object with the quadrature - // formula for the integration of - // the kernel in non singular - // cells. This quadrature is - // selected with the parameter - // file, and needs to be quite - // precise, since the functions we - // are integrating are not - // polynomial functions. + // First we initialize an FEValues + // object with the quadrature + // formula for the integration of + // the kernel in non singular + // cells. This quadrature is + // selected with the parameter + // file, and needs to be quite + // precise, since the functions we + // are integrating are not + // polynomial functions. FEValues fe_v(mapping, fe, *quadrature, - update_values | - update_cell_normal_vectors | - update_quadrature_points | - update_JxW_values); + update_values | + update_cell_normal_vectors | + update_quadrature_points | + update_JxW_values); const unsigned int n_q_points = fe_v.n_quadrature_points; @@ -794,232 +794,232 @@ namespace Step34 std::vector > cell_wind(n_q_points, Vector(dim) ); double normal_wind; - // Unlike in finite element - // methods, if we use a collocation - // boundary element method, then in - // each assembly loop we only - // assemble the information that - // refers to the coupling between - // one degree of freedom (the - // degree associated with support - // point $i$) and the current - // cell. This is done using a - // vector of fe.dofs_per_cell - // elements, which will then be - // distributed to the matrix in the - // global row $i$. 
The following - // object will hold this - // information: + // Unlike in finite element + // methods, if we use a collocation + // boundary element method, then in + // each assembly loop we only + // assemble the information that + // refers to the coupling between + // one degree of freedom (the + // degree associated with support + // point $i$) and the current + // cell. This is done using a + // vector of fe.dofs_per_cell + // elements, which will then be + // distributed to the matrix in the + // global row $i$. The following + // object will hold this + // information: Vector local_matrix_row_i(fe.dofs_per_cell); - // The index $i$ runs on the - // collocation points, which are - // the support points of the $i$th - // basis function, while $j$ runs - // on inner integration points. + // The index $i$ runs on the + // collocation points, which are + // the support points of the $i$th + // basis function, while $j$ runs + // on inner integration points. - // We construct a vector - // of support points which will be - // used in the local integrations: + // We construct a vector + // of support points which will be + // used in the local integrations: std::vector > support_points(dh.n_dofs()); DoFTools::map_dofs_to_support_points( mapping, dh, support_points); - // After doing so, we can start the - // integration loop over all cells, - // where we first initialize the - // FEValues object and get the - // values of $\mathbf{\tilde v}$ at - // the quadrature points (this - // vector field should be constant, - // but it doesn't hurt to be more - // general): + // After doing so, we can start the + // integration loop over all cells, + // where we first initialize the + // FEValues object and get the + // values of $\mathbf{\tilde v}$ at + // the quadrature points (this + // vector field should be constant, + // but it doesn't hurt to be more + // general): typename DoFHandler::active_cell_iterator cell = dh.begin_active(), endc = dh.end(); for (cell = dh.begin_active(); cell != endc; ++cell) { - fe_v.reinit(cell); - cell->get_dof_indices(local_dof_indices); - - const std::vector > &q_points = fe_v.get_quadrature_points(); - const std::vector > &normals = fe_v.get_cell_normal_vectors(); - wind.vector_value_list(q_points, cell_wind); - - // We then form the integral over - // the current cell for all - // degrees of freedom (note that - // this includes degrees of - // freedom not located on the - // current cell, a deviation from - // the usual finite element - // integrals). The integral that - // we need to perform is singular - // if one of the local degrees of - // freedom is the same as the - // support point $i$. 
A the - // beginning of the loop we - // therefore check wether this is - // the case, and we store which - // one is the singular index: - for (unsigned int i=0; i R = q_points[q] - support_points[i]; - - system_rhs(i) += ( LaplaceKernel::single_layer(R) * - normal_wind * - fe_v.JxW(q) ); - - for (unsigned int j=0; j & singular_quadrature = - get_singular_quadrature(cell, singular_index); - - FEValues fe_v_singular (mapping, fe, singular_quadrature, - update_jacobians | - update_values | - update_cell_normal_vectors | - update_quadrature_points ); - - fe_v_singular.reinit(cell); - - std::vector > singular_cell_wind( singular_quadrature.size(), - Vector(dim) ); - - const std::vector > &singular_normals = fe_v_singular.get_cell_normal_vectors(); - const std::vector > &singular_q_points = fe_v_singular.get_quadrature_points(); - - wind.vector_value_list(singular_q_points, singular_cell_wind); - - for (unsigned int q=0; q R = singular_q_points[q] - support_points[i]; - double normal_wind = 0; - for (unsigned int d=0; dget_dof_indices(local_dof_indices); + + const std::vector > &q_points = fe_v.get_quadrature_points(); + const std::vector > &normals = fe_v.get_cell_normal_vectors(); + wind.vector_value_list(q_points, cell_wind); + + // We then form the integral over + // the current cell for all + // degrees of freedom (note that + // this includes degrees of + // freedom not located on the + // current cell, a deviation from + // the usual finite element + // integrals). The integral that + // we need to perform is singular + // if one of the local degrees of + // freedom is the same as the + // support point $i$. A the + // beginning of the loop we + // therefore check wether this is + // the case, and we store which + // one is the singular index: + for (unsigned int i=0; i R = q_points[q] - support_points[i]; + + system_rhs(i) += ( LaplaceKernel::single_layer(R) * + normal_wind * + fe_v.JxW(q) ); + + for (unsigned int j=0; j & singular_quadrature = + get_singular_quadrature(cell, singular_index); + + FEValues fe_v_singular (mapping, fe, singular_quadrature, + update_jacobians | + update_values | + update_cell_normal_vectors | + update_quadrature_points ); + + fe_v_singular.reinit(cell); + + std::vector > singular_cell_wind( singular_quadrature.size(), + Vector(dim) ); + + const std::vector > &singular_normals = fe_v_singular.get_cell_normal_vectors(); + const std::vector > &singular_q_points = fe_v_singular.get_quadrature_points(); + + wind.vector_value_list(singular_q_points, singular_cell_wind); + + for (unsigned int q=0; q R = singular_q_points[q] - support_points[i]; + double normal_wind = 0; + for (unsigned int d=0; d ones(dh.n_dofs()); ones.add(-1.); @@ -1030,10 +1030,10 @@ namespace Step34 } - // @sect4{BEMProblem::solve_system} + // @sect4{BEMProblem::solve_system} - // The next function simply solves - // the linear system. + // The next function simply solves + // the linear system. template void BEMProblem::solve_system() { @@ -1042,37 +1042,37 @@ namespace Step34 } - // @sect4{BEMProblem::compute_errors} + // @sect4{BEMProblem::compute_errors} - // The computation of the errors is - // exactly the same in all other - // example programs, and we won't - // comment too much. Notice how the - // same methods that are used in the - // finite element methods can be used - // here. + // The computation of the errors is + // exactly the same in all other + // example programs, and we won't + // comment too much. 
Notice how the + // same methods that are used in the + // finite element methods can be used + // here. template void BEMProblem::compute_errors(const unsigned int cycle) { Vector difference_per_cell (tria.n_active_cells()); VectorTools::integrate_difference (mapping, dh, phi, - exact_solution, - difference_per_cell, - QGauss<(dim-1)>(2*fe.degree+1), - VectorTools::L2_norm); + exact_solution, + difference_per_cell, + QGauss<(dim-1)>(2*fe.degree+1), + VectorTools::L2_norm); const double L2_error = difference_per_cell.l2_norm(); - // The error in the alpha vector - // can be computed directly using - // the Vector::linfty_norm() - // function, since on each node, - // the value should be $\frac - // 12$. All errors are then output - // and appended to our - // ConvergenceTable object for - // later computation of convergence - // rates: + // The error in the alpha vector + // can be computed directly using + // the Vector::linfty_norm() + // function, since on each node, + // the value should be $\frac + // 12$. All errors are then output + // and appended to our + // ConvergenceTable object for + // later computation of convergence + // rates: Vector difference_per_node(alpha); difference_per_node.add(-.5); @@ -1081,13 +1081,13 @@ namespace Step34 const unsigned int n_dofs=dh.n_dofs(); deallog << "Cycle " << cycle << ':' - << std::endl - << " Number of active cells: " - << n_active_cells - << std::endl - << " Number of degrees of freedom: " - << n_dofs - << std::endl; + << std::endl + << " Number of active cells: " + << n_active_cells + << std::endl + << " Number of degrees of freedom: " + << n_dofs + << std::endl; convergence_table.add_value("cycle", cycle); convergence_table.add_value("cells", n_active_cells); @@ -1097,123 +1097,123 @@ namespace Step34 } - // Singular integration requires a - // careful selection of the - // quadrature rules. In particular - // the deal.II library provides - // quadrature rules which are - // taylored for logarithmic - // singularities (QGaussLog, - // QGaussLogR), as well as for 1/R - // singularities (QGaussOneOverR). - // - // Singular integration is typically - // obtained by constructing weighted - // quadrature formulas with singular - // weights, so that it is possible to - // write - // - // \f[ - // \int_K f(x) s(x) dx = \sum_{i=1}^N w_i f(q_i) - // \f] - // - // where $s(x)$ is a given - // singularity, and the weights and - // quadrature points $w_i,q_i$ are - // carefully selected to make the - // formula above an equality for a - // certain class of functions $f(x)$. - // - // In all the finite element examples - // we have seen so far, the weight of - // the quadrature itself (namely, the - // function $s(x)$), was always - // constantly equal to 1. For - // singular integration, we have two - // choices: we can use the definition - // above, factoring out the - // singularity from the integrand - // (i.e., integrating $f(x)$ with the - // special quadrature rule), or we - // can ask the quadrature rule to - // "normalize" the weights $w_i$ with - // $s(q_i)$: - // - // \f[ - // \int_K f(x) s(x) dx = - // \int_K g(x) dx = \sum_{i=1}^N \frac{w_i}{s(q_i)} g(q_i) - // \f] - // - // We use this second option, through - // the @p factor_out_singularity - // parameter of both QGaussLogR and - // QGaussOneOverR. 
- // - // These integrals are somewhat - // delicate, especially in two - // dimensions, due to the - // transformation from the real to - // the reference cell, where the - // variable of integration is scaled - // with the determinant of the - // transformation. - // - // In two dimensions this process - // does not result only in a factor - // appearing as a constant factor on - // the entire integral, but also on - // an additional integral alltogether - // that needs to be evaluated: - // - // \f[ - // \int_0^1 f(x)\ln(x/\alpha) dx = - // \int_0^1 f(x)\ln(x) dx - \int_0^1 f(x) \ln(\alpha) dx. - // \f] - // - // This process is taken care of by - // the constructor of the QGaussLogR - // class, which adds additional - // quadrature points and weights to - // take into consideration also the - // second part of the integral. - // - // A similar reasoning should be done - // in the three dimensional case, - // since the singular quadrature is - // taylored on the inverse of the - // radius $r$ in the reference cell, - // while our singular function lives - // in real space, however in the - // three dimensional case everything - // is simpler because the singularity - // scales linearly with the - // determinant of the - // transformation. This allows us to - // build the singular two dimensional - // quadrature rules only once and, - // reuse them over all cells. - // - // In the one dimensional singular - // integration this is not possible, - // since we need to know the scaling - // parameter for the quadrature, - // which is not known a priori. Here, - // the quadrature rule itself depends - // also on the size of the current - // cell. For this reason, it is - // necessary to create a new - // quadrature for each singular - // integration. - // - // The different quadrature rules are - // built inside the - // get_singular_quadrature, which is - // specialized for dim=2 and dim=3, - // and they are retrieved inside the - // assemble_system function. The - // index given as an argument is the - // index of the unit support point - // where the singularity is located. + // Singular integration requires a + // careful selection of the + // quadrature rules. In particular + // the deal.II library provides + // quadrature rules which are + // taylored for logarithmic + // singularities (QGaussLog, + // QGaussLogR), as well as for 1/R + // singularities (QGaussOneOverR). + // + // Singular integration is typically + // obtained by constructing weighted + // quadrature formulas with singular + // weights, so that it is possible to + // write + // + // \f[ + // \int_K f(x) s(x) dx = \sum_{i=1}^N w_i f(q_i) + // \f] + // + // where $s(x)$ is a given + // singularity, and the weights and + // quadrature points $w_i,q_i$ are + // carefully selected to make the + // formula above an equality for a + // certain class of functions $f(x)$. + // + // In all the finite element examples + // we have seen so far, the weight of + // the quadrature itself (namely, the + // function $s(x)$), was always + // constantly equal to 1. 
For + // singular integration, we have two + // choices: we can use the definition + // above, factoring out the + // singularity from the integrand + // (i.e., integrating $f(x)$ with the + // special quadrature rule), or we + // can ask the quadrature rule to + // "normalize" the weights $w_i$ with + // $s(q_i)$: + // + // \f[ + // \int_K f(x) s(x) dx = + // \int_K g(x) dx = \sum_{i=1}^N \frac{w_i}{s(q_i)} g(q_i) + // \f] + // + // We use this second option, through + // the @p factor_out_singularity + // parameter of both QGaussLogR and + // QGaussOneOverR. + // + // These integrals are somewhat + // delicate, especially in two + // dimensions, due to the + // transformation from the real to + // the reference cell, where the + // variable of integration is scaled + // with the determinant of the + // transformation. + // + // In two dimensions this process + // does not result only in a factor + // appearing as a constant factor on + // the entire integral, but also on + // an additional integral alltogether + // that needs to be evaluated: + // + // \f[ + // \int_0^1 f(x)\ln(x/\alpha) dx = + // \int_0^1 f(x)\ln(x) dx - \int_0^1 f(x) \ln(\alpha) dx. + // \f] + // + // This process is taken care of by + // the constructor of the QGaussLogR + // class, which adds additional + // quadrature points and weights to + // take into consideration also the + // second part of the integral. + // + // A similar reasoning should be done + // in the three dimensional case, + // since the singular quadrature is + // taylored on the inverse of the + // radius $r$ in the reference cell, + // while our singular function lives + // in real space, however in the + // three dimensional case everything + // is simpler because the singularity + // scales linearly with the + // determinant of the + // transformation. This allows us to + // build the singular two dimensional + // quadrature rules only once and, + // reuse them over all cells. + // + // In the one dimensional singular + // integration this is not possible, + // since we need to know the scaling + // parameter for the quadrature, + // which is not known a priori. Here, + // the quadrature rule itself depends + // also on the size of the current + // cell. For this reason, it is + // necessary to create a new + // quadrature for each singular + // integration. + // + // The different quadrature rules are + // built inside the + // get_singular_quadrature, which is + // specialized for dim=2 and dim=3, + // and they are retrieved inside the + // assemble_system function. The + // index given as an argument is the + // index of the unit support point + // where the singularity is located. 
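  // To make the use of such a rule
  // concrete: since the weights returned
  // by these formulas already contain
  // the normalization factor
  // $1/s(q_i)$, integrating the full
  // singular integrand $g(x)=f(x)s(x)$
  // reduces to the usual quadrature
  // sum. As a short sketch (where g
  // stands for whatever singular
  // integrand is at hand, not for a
  // function defined in this program):
  //   const Quadrature<dim-1> &sq = get_singular_quadrature (cell, index);
  //   double integral = 0;
  //   for (unsigned int q=0; q<sq.size(); ++q)
  //     integral += g (sq.point(q)) * sq.weight(q);
  // This is exactly how the assembly
  // routine above uses these formulas,
  // with the kernel evaluations playing
  // the role of g.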
template<> const Quadrature<2> & BEMProblem<3>::get_singular_quadrature( @@ -1221,14 +1221,14 @@ namespace Step34 const unsigned int index) const { Assert(index < fe.dofs_per_cell, - ExcIndexRange(0, fe.dofs_per_cell, index)); + ExcIndexRange(0, fe.dofs_per_cell, index)); static std::vector > quadratures; if (quadratures.size() == 0) for (unsigned int i=0; i(singular_quadrature_order, - fe.get_unit_support_points()[i], - true)); + quadratures.push_back(QGaussOneOverR<2>(singular_quadrature_order, + fe.get_unit_support_points()[i], + true)); return quadratures[index]; } @@ -1239,49 +1239,49 @@ namespace Step34 const unsigned int index) const { Assert(index < fe.dofs_per_cell, - ExcIndexRange(0, fe.dofs_per_cell, index)); + ExcIndexRange(0, fe.dofs_per_cell, index)); static Quadrature<1> * q_pointer = NULL; if (q_pointer) delete q_pointer; q_pointer = new QGaussLogR<1>(singular_quadrature_order, - fe.get_unit_support_points()[index], - 1./cell->measure(), true); + fe.get_unit_support_points()[index], + 1./cell->measure(), true); return (*q_pointer); } - // @sect4{BEMProblem::compute_exterior_solution} - - // We'd like to also know something - // about the value of the potential - // $\phi$ in the exterior domain: - // after all our motivation to - // consider the boundary integral - // problem was that we wanted to know - // the velocity in the exterior - // domain! - // - // To this end, let us assume here - // that the boundary element domain - // is contained in the box - // $[-2,2]^{\text{dim}}$, and we - // extrapolate the actual solution - // inside this box using the - // convolution with the fundamental - // solution. The formula for this is - // given in the introduction. - // - // The reconstruction of the solution - // in the entire space is done on a - // continuous finite element grid of - // dimension dim. These are the usual - // ones, and we don't comment any - // further on them. At the end of the - // function, we output this exterior - // solution in, again, much the usual - // way. + // @sect4{BEMProblem::compute_exterior_solution} + + // We'd like to also know something + // about the value of the potential + // $\phi$ in the exterior domain: + // after all our motivation to + // consider the boundary integral + // problem was that we wanted to know + // the velocity in the exterior + // domain! + // + // To this end, let us assume here + // that the boundary element domain + // is contained in the box + // $[-2,2]^{\text{dim}}$, and we + // extrapolate the actual solution + // inside this box using the + // convolution with the fundamental + // solution. The formula for this is + // given in the introduction. + // + // The reconstruction of the solution + // in the entire space is done on a + // continuous finite element grid of + // dimension dim. These are the usual + // ones, and we don't comment any + // further on them. At the end of the + // function, we output this exterior + // solution in, again, much the usual + // way. 
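The three dimensional get_singular_quadrature above builds one rule per unit support point on the first call and then reuses the cached rules forever, while the two dimensional variant has to rebuild its rule for every cell because the scaling 1/cell->measure() enters the constructor. A stand-alone sketch of the caching half of that pattern follows; Rule and make_rule are placeholders invented here to stand in for QGaussOneOverR<2> and its constructor.
@code
#include <vector>

// Hypothetical stand-ins for the quadrature class and its factory;
// only the caching pattern itself is the point of this sketch.
struct Rule { unsigned int support_point; };
Rule make_rule (const unsigned int i) { Rule r; r.support_point = i; return r; }

const Rule &get_cached_rule (const unsigned int index,
                             const unsigned int n_support_points)
{
  // The function-local static starts out empty and is filled exactly
  // once; every later call only performs the lookup.  As in the
  // original code, the first call is assumed not to race with other
  // threads calling this function concurrently.
  static std::vector<Rule> cache;
  if (cache.empty())
    for (unsigned int i = 0; i < n_support_points; ++i)
      cache.push_back(make_rule(i));
  return cache[index];
}

int main ()
{
  const Rule &r = get_cached_rule(2, 4);   // built on first use ...
  const Rule &s = get_cached_rule(2, 4);   // ... and reused afterwards
  return (&r == &s) ? 0 : 1;
}
@endcode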
template void BEMProblem::compute_exterior_solution() { @@ -1302,10 +1302,10 @@ namespace Step34 FEValues fe_v(mapping, fe, *quadrature, - update_values | - update_cell_normal_vectors | - update_quadrature_points | - update_JxW_values); + update_values | + update_cell_normal_vectors | + update_quadrature_points | + update_JxW_values); const unsigned int n_q_points = fe_v.n_quadrature_points; @@ -1317,40 +1317,40 @@ namespace Step34 std::vector > external_support_points(external_dh.n_dofs()); DoFTools::map_dofs_to_support_points(StaticMappingQ1::mapping, - external_dh, external_support_points); + external_dh, external_support_points); for (cell = dh.begin_active(); cell != endc; ++cell) { - fe_v.reinit(cell); + fe_v.reinit(cell); - const std::vector > &q_points = fe_v.get_quadrature_points(); - const std::vector > &normals = fe_v.get_cell_normal_vectors(); + const std::vector > &q_points = fe_v.get_quadrature_points(); + const std::vector > &normals = fe_v.get_cell_normal_vectors(); - cell->get_dof_indices(dofs); - fe_v.get_function_values(phi, local_phi); + cell->get_dof_indices(dofs); + fe_v.get_function_values(phi, local_phi); - wind.vector_value_list(q_points, local_wind); + wind.vector_value_list(q_points, local_wind); - for (unsigned int q=0; q R = q_points[q] - external_support_points[i]; + const Point R = q_points[q] - external_support_points[i]; - external_phi(i) += ( ( LaplaceKernel::single_layer(R) * - normal_wind[q] - + - (LaplaceKernel::double_layer(R) * - normals[q] ) * - local_phi[q] ) * - fe_v.JxW(q) ); - } + external_phi(i) += ( ( LaplaceKernel::single_layer(R) * + normal_wind[q] + + + (LaplaceKernel::double_layer(R) * + normals[q] ) * + local_phi[q] ) * + fe_v.JxW(q) ); + } } DataOut data_out; @@ -1367,13 +1367,13 @@ namespace Step34 } - // @sect4{BEMProblem::output_results} + // @sect4{BEMProblem::output_results} - // Outputting the results of our - // computations is a rather - // mechanical tasks. All the - // components of this function have - // been discussed before. + // Outputting the results of our + // computations is a rather + // mechanical tasks. All the + // components of this function have + // been discussed before. 
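The accumulation loop above adds, for every surface quadrature point, a single layer and a double layer contribution to the potential at each exterior support point. The sketch below does the same for one quadrature point and one evaluation point, using the textbook free-space kernel $G(R)=1/(4\pi|R|)$ and its normal derivative; the normalization and sign conventions are assumptions of this illustration and are not copied from step-34's LaplaceKernel namespace.
@code
#include <array>
#include <cmath>
#include <cstdio>

typedef std::array<double,3> Pt;
const double pi = 3.14159265358979323846;

double norm (const Pt &v)              { return std::sqrt(v[0]*v[0] + v[1]*v[1] + v[2]*v[2]); }
double dot  (const Pt &a, const Pt &b) { return a[0]*b[0] + a[1]*b[1] + a[2]*b[2]; }

// Free-space Laplace kernel G(R) = 1/(4 pi |R|) and its normal derivative.
double single_layer (const Pt &R)              { return 1.0 / (4.0 * pi * norm(R)); }
double double_layer (const Pt &R, const Pt &n) { const double r = norm(R);
                                                 return dot(R, n) / (4.0 * pi * r * r * r); }

int main ()
{
  // One exterior evaluation point x and a single toy surface quadrature
  // point with its normal, weight and densities; in the program these
  // come from FEValues and the solution vectors.
  const Pt x = {{2.0, 0.0, 0.0}};
  const Pt y = {{0.0, 0.0, 0.0}};
  const Pt n = {{1.0, 0.0, 0.0}};
  const double JxW = 0.1, phi = 1.0, wind_dot_n = 0.5;

  // Same direction convention as in the loop above:
  // R = quadrature point minus evaluation point.
  const Pt R = {{y[0]-x[0], y[1]-x[1], y[2]-x[2]}};

  const double contribution = (single_layer(R) * wind_dot_n
                               + double_layer(R, n) * phi) * JxW;
  std::printf("contribution of this quadrature point: %g\n", contribution);
  return 0;
}
@endcode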
template void BEMProblem::output_results(const unsigned int cycle) { @@ -1383,40 +1383,40 @@ namespace Step34 dataout.add_data_vector(phi, "phi"); dataout.add_data_vector(alpha, "alpha"); dataout.build_patches(mapping, - mapping.get_degree(), - DataOut >::curved_inner_cells); + mapping.get_degree(), + DataOut >::curved_inner_cells); std::string filename = ( Utilities::int_to_string(dim) + - "d_boundary_solution_" + - Utilities::int_to_string(cycle) + - ".vtk" ); + "d_boundary_solution_" + + Utilities::int_to_string(cycle) + + ".vtk" ); std::ofstream file(filename.c_str()); dataout.write_vtk(file); if (cycle == n_cycles-1) { - convergence_table.set_precision("L2(phi)", 3); - convergence_table.set_precision("Linfty(alpha)", 3); - - convergence_table.set_scientific("L2(phi)", true); - convergence_table.set_scientific("Linfty(alpha)", true); - - convergence_table - .evaluate_convergence_rates("L2(phi)", ConvergenceTable::reduction_rate_log2); - convergence_table - .evaluate_convergence_rates("Linfty(alpha)", ConvergenceTable::reduction_rate_log2); - deallog << std::endl; - convergence_table.write_text(std::cout); + convergence_table.set_precision("L2(phi)", 3); + convergence_table.set_precision("Linfty(alpha)", 3); + + convergence_table.set_scientific("L2(phi)", true); + convergence_table.set_scientific("Linfty(alpha)", true); + + convergence_table + .evaluate_convergence_rates("L2(phi)", ConvergenceTable::reduction_rate_log2); + convergence_table + .evaluate_convergence_rates("Linfty(alpha)", ConvergenceTable::reduction_rate_log2); + deallog << std::endl; + convergence_table.write_text(std::cout); } } - // @sect4{BEMProblem::run} + // @sect4{BEMProblem::run} - // This is the main function. It - // should be self explanatory in its - // briefness: + // This is the main function. It + // should be self explanatory in its + // briefness: template void BEMProblem::run() { @@ -1425,21 +1425,21 @@ namespace Step34 if (run_in_this_dimension == false) { - deallog << "Run in dimension " << dim - << " explicitly disabled in parameter file. " - << std::endl; - return; + deallog << "Run in dimension " << dim + << " explicitly disabled in parameter file. " + << std::endl; + return; } read_domain(); for (unsigned int cycle=0; cycle::value (points[i]); + values[i] = Pressure::value (points[i]); } } - // @sect3{The NavierStokesProjection class} + // @sect3{The NavierStokesProjection class} - // Now for the main class of the program. It - // implements the various versions of the - // projection method for Navier-Stokes - // equations. The names for all the methods - // and member variables should be - // self-explanatory, taking into account the - // implementation details given in the - // introduction. + // Now for the main class of the program. It + // implements the various versions of the + // projection method for Navier-Stokes + // equations. The names for all the methods + // and member variables should be + // self-explanatory, taking into account the + // implementation details given in the + // introduction. template class NavierStokesProjection { @@ -423,7 +423,7 @@ namespace Step35 NavierStokesProjection (const RunTimeParameters::Data_Storage &data); void run (const bool verbose = false, - const unsigned int n_plots = 10); + const unsigned int n_plots = 10); protected: RunTimeParameters::MethodFormulation type; @@ -478,10 +478,10 @@ namespace Step35 SparseDirectUMFPACK prec_vel_mass; DeclException2 (ExcInvalidTimeStep, - double, double, - << " The time step " << arg1 << " is out of range." 
- << std::endl - << " The permitted range is (0," << arg2 << "]"); + double, double, + << " The time step " << arg1 << " is out of range." + << std::endl + << " The permitted range is (0," << arg2 << "]"); void create_triangulation_and_dofs (const unsigned int n_refines); @@ -507,54 +507,54 @@ namespace Step35 void initialize_pressure_matrices(); - // The next few structures and functions - // are for doing various things in - // parallel. They follow the scheme laid - // out in @ref threads, using the - // WorkStream class. As explained there, - // this requires us to declare two - // structures for each of the assemblers, - // a per-task data and a scratch data - // structure. These are then handed over - // to functions that assemble local - // contributions and that copy these - // local contributions to the global - // objects. - // - // One of the things that are specific to - // this program is that we don't just - // have a single DoFHandler object that - // represents both the velocities and the - // pressure, but we use individual - // DoFHandler objects for these two kinds - // of variables. We pay for this - // optimization when we want to assemble - // terms that involve both variables, - // such as the divergence of the velocity - // and the gradient of the pressure, - // times the respective test - // functions. When doing so, we can't - // just anymore use a single FEValues - // object, but rather we need two, and - // they need to be initialized with cell - // iterators that point to the same cell - // in the triangulation but different - // DoFHandlers. - // - // To do this in practice, we declare a - // "synchronous" iterator -- an object - // that internally consists of several - // (in our case two) iterators, and each - // time the synchronous iteration is - // moved up one step, each of the - // iterators stored internally is moved - // up one step as well, thereby always - // staying in sync. As it so happens, - // there is a deal.II class that - // facilitates this sort of thing. + // The next few structures and functions + // are for doing various things in + // parallel. They follow the scheme laid + // out in @ref threads, using the + // WorkStream class. As explained there, + // this requires us to declare two + // structures for each of the assemblers, + // a per-task data and a scratch data + // structure. These are then handed over + // to functions that assemble local + // contributions and that copy these + // local contributions to the global + // objects. + // + // One of the things that are specific to + // this program is that we don't just + // have a single DoFHandler object that + // represents both the velocities and the + // pressure, but we use individual + // DoFHandler objects for these two kinds + // of variables. We pay for this + // optimization when we want to assemble + // terms that involve both variables, + // such as the divergence of the velocity + // and the gradient of the pressure, + // times the respective test + // functions. When doing so, we can't + // just anymore use a single FEValues + // object, but rather we need two, and + // they need to be initialized with cell + // iterators that point to the same cell + // in the triangulation but different + // DoFHandlers. 
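The problem described above, evaluating shape functions from two different DoFHandler objects on one and the same cell, is solved below by bundling two iterators into a single object that is advanced as a unit. Stripped of the library machinery, the idea is nothing more than walking two sequences in lockstep, as in this stand-alone sketch (the toy containers stand in for the velocity and pressure DoFHandlers):
@code
#include <vector>
#include <utility>
#include <cstdio>

int main ()
{
  // Two containers standing in for the two DoFHandlers; both are
  // traversed over the same underlying "cells".
  std::vector<int>    velocity_cells;
  std::vector<double> pressure_cells;
  for (int c = 0; c < 3; ++c)
    {
      velocity_cells.push_back(10 * c);
      pressure_cells.push_back(0.5 * c);
    }

  // The "synchronous iterator" idea: keep the two iterators in one
  // object and advance both whenever we move to the next cell.
  std::pair<std::vector<int>::const_iterator,
            std::vector<double>::const_iterator>
    it (velocity_cells.begin(), pressure_cells.begin());

  for (; it.first != velocity_cells.end(); ++it.first, ++it.second)
    std::printf("cell data: %d and %g\n", *it.first, *it.second);
  return 0;
}
@endcode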
+ // + // To do this in practice, we declare a + // "synchronous" iterator -- an object + // that internally consists of several + // (in our case two) iterators, and each + // time the synchronous iteration is + // moved up one step, each of the + // iterators stored internally is moved + // up one step as well, thereby always + // staying in sync. As it so happens, + // there is a deal.II class that + // facilitates this sort of thing. typedef std_cxx1x::tuple< typename DoFHandler::active_cell_iterator, - typename DoFHandler::active_cell_iterator - > IteratorTuple; + typename DoFHandler::active_cell_iterator + > IteratorTuple; typedef SynchronousIterators IteratorPair; @@ -562,119 +562,119 @@ namespace Step35 struct InitGradPerTaskData { - unsigned int d; - unsigned int vel_dpc; - unsigned int pres_dpc; - FullMatrix local_grad; - std::vector vel_local_dof_indices; - std::vector pres_local_dof_indices; - - InitGradPerTaskData (const unsigned int dd, - const unsigned int vdpc, - const unsigned int pdpc) - : - d(dd), - vel_dpc (vdpc), - pres_dpc (pdpc), - local_grad (vdpc, pdpc), - vel_local_dof_indices (vdpc), - pres_local_dof_indices (pdpc) - {} + unsigned int d; + unsigned int vel_dpc; + unsigned int pres_dpc; + FullMatrix local_grad; + std::vector vel_local_dof_indices; + std::vector pres_local_dof_indices; + + InitGradPerTaskData (const unsigned int dd, + const unsigned int vdpc, + const unsigned int pdpc) + : + d(dd), + vel_dpc (vdpc), + pres_dpc (pdpc), + local_grad (vdpc, pdpc), + vel_local_dof_indices (vdpc), + pres_local_dof_indices (pdpc) + {} }; struct InitGradScratchData { - unsigned int nqp; - FEValues fe_val_vel; - FEValues fe_val_pres; - InitGradScratchData (const FE_Q &fe_v, - const FE_Q &fe_p, - const QGauss &quad, - const UpdateFlags flags_v, - const UpdateFlags flags_p) - : - nqp (quad.size()), - fe_val_vel (fe_v, quad, flags_v), - fe_val_pres (fe_p, quad, flags_p) - {} - InitGradScratchData (const InitGradScratchData &data) - : - nqp (data.nqp), - fe_val_vel (data.fe_val_vel.get_fe(), - data.fe_val_vel.get_quadrature(), - data.fe_val_vel.get_update_flags()), - fe_val_pres (data.fe_val_pres.get_fe(), - data.fe_val_pres.get_quadrature(), - data.fe_val_pres.get_update_flags()) - {} + unsigned int nqp; + FEValues fe_val_vel; + FEValues fe_val_pres; + InitGradScratchData (const FE_Q &fe_v, + const FE_Q &fe_p, + const QGauss &quad, + const UpdateFlags flags_v, + const UpdateFlags flags_p) + : + nqp (quad.size()), + fe_val_vel (fe_v, quad, flags_v), + fe_val_pres (fe_p, quad, flags_p) + {} + InitGradScratchData (const InitGradScratchData &data) + : + nqp (data.nqp), + fe_val_vel (data.fe_val_vel.get_fe(), + data.fe_val_vel.get_quadrature(), + data.fe_val_vel.get_update_flags()), + fe_val_pres (data.fe_val_pres.get_fe(), + data.fe_val_pres.get_quadrature(), + data.fe_val_pres.get_update_flags()) + {} }; void assemble_one_cell_of_gradient (const IteratorPair &SI, - InitGradScratchData &scratch, - InitGradPerTaskData &data); + InitGradScratchData &scratch, + InitGradPerTaskData &data); void copy_gradient_local_to_global (const InitGradPerTaskData &data); - // The same general layout also applies - // to the following classes and functions - // implementing the assembly of the - // advection term: + // The same general layout also applies + // to the following classes and functions + // implementing the assembly of the + // advection term: void assemble_advection_term(); struct AdvectionPerTaskData { - FullMatrix local_advection; - std::vector local_dof_indices; - 
AdvectionPerTaskData (const unsigned int dpc) - : - local_advection (dpc, dpc), - local_dof_indices (dpc) - {} + FullMatrix local_advection; + std::vector local_dof_indices; + AdvectionPerTaskData (const unsigned int dpc) + : + local_advection (dpc, dpc), + local_dof_indices (dpc) + {} }; struct AdvectionScratchData { - unsigned int nqp; - unsigned int dpc; - std::vector< Point > u_star_local; - std::vector< Tensor<1,dim> > grad_u_star; - std::vector u_star_tmp; - FEValues fe_val; - AdvectionScratchData (const FE_Q &fe, - const QGauss &quad, - const UpdateFlags flags) - : - nqp (quad.size()), - dpc (fe.dofs_per_cell), - u_star_local (nqp), - grad_u_star (nqp), - u_star_tmp (nqp), - fe_val (fe, quad, flags) - {} - - AdvectionScratchData (const AdvectionScratchData &data) - : - nqp (data.nqp), - dpc (data.dpc), - u_star_local (nqp), - grad_u_star (nqp), - u_star_tmp (nqp), - fe_val (data.fe_val.get_fe(), - data.fe_val.get_quadrature(), - data.fe_val.get_update_flags()) - {} + unsigned int nqp; + unsigned int dpc; + std::vector< Point > u_star_local; + std::vector< Tensor<1,dim> > grad_u_star; + std::vector u_star_tmp; + FEValues fe_val; + AdvectionScratchData (const FE_Q &fe, + const QGauss &quad, + const UpdateFlags flags) + : + nqp (quad.size()), + dpc (fe.dofs_per_cell), + u_star_local (nqp), + grad_u_star (nqp), + u_star_tmp (nqp), + fe_val (fe, quad, flags) + {} + + AdvectionScratchData (const AdvectionScratchData &data) + : + nqp (data.nqp), + dpc (data.dpc), + u_star_local (nqp), + grad_u_star (nqp), + u_star_tmp (nqp), + fe_val (data.fe_val.get_fe(), + data.fe_val.get_quadrature(), + data.fe_val.get_update_flags()) + {} }; void assemble_one_cell_of_advection (const typename DoFHandler::active_cell_iterator &cell, - AdvectionScratchData &scratch, - AdvectionPerTaskData &data); + AdvectionScratchData &scratch, + AdvectionPerTaskData &data); void copy_advection_local_to_global (const AdvectionPerTaskData &data); - // The final few functions implement the - // diffusion solve as well as - // postprocessing the output, including - // computing the curl of the velocity: + // The final few functions implement the + // diffusion solve as well as + // postprocessing the output, including + // computing the curl of the velocity: void diffusion_component_solve (const unsigned int d); void output_results (const unsigned int step); @@ -684,44 +684,44 @@ namespace Step35 - // @sect4{ NavierStokesProjection::NavierStokesProjection } + // @sect4{ NavierStokesProjection::NavierStokesProjection } - // In the constructor, we just read - // all the data from the - // Data_Storage object - // that is passed as an argument, - // verify that the data we read is - // reasonable and, finally, create - // the triangulation and load the - // initial data. + // In the constructor, we just read + // all the data from the + // Data_Storage object + // that is passed as an argument, + // verify that the data we read is + // reasonable and, finally, create + // the triangulation and load the + // initial data. 
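The per-task and scratch structures above follow the general WorkStream protocol: a worker function computes a local contribution into the per-task object, reusing the scratch object from cell to cell, and a copier function transfers the local result into the global object. WorkStream runs the workers in parallel and the copier sequentially; the following purely sequential, stand-alone sketch (invented names, and a plain vector playing the role of the global matrix) shows only that division of labor. The constructor of NavierStokesProjection then follows below.
@code
#include <vector>
#include <numeric>
#include <cstdio>

struct PerTaskData { std::vector<double> local_values; std::vector<unsigned int> indices; };
struct ScratchData { std::vector<double> workspace; };     // reused from cell to cell

// "Worker": computes the local contribution of one cell.  WorkStream
// would call this in parallel on many cells at once.
void local_assemble (const unsigned int cell, ScratchData &scratch, PerTaskData &data)
{
  const unsigned int dofs_per_cell = 2;
  data.local_values.assign(dofs_per_cell, 1.0 * (cell + 1));
  data.indices.resize(dofs_per_cell);
  for (unsigned int i = 0; i < dofs_per_cell; ++i)
    data.indices[i] = cell + i;            // toy local-to-global map
  (void) scratch;                          // a real worker would use the workspace
}

// "Copier": writes the local result into the global object.  WorkStream
// guarantees that this runs sequentially, so no locking is needed here.
void copy_local_to_global (const PerTaskData &data, std::vector<double> &global)
{
  for (unsigned int i = 0; i < data.indices.size(); ++i)
    global[data.indices[i]] += data.local_values[i];
}

int main ()
{
  const unsigned int n_cells = 4;
  std::vector<double> global (n_cells + 1, 0.0);
  ScratchData scratch;
  PerTaskData data;
  for (unsigned int cell = 0; cell < n_cells; ++cell)
    {
      local_assemble(cell, scratch, data);
      copy_local_to_global(data, global);
    }
  std::printf("sum of global entries = %g\n",
              std::accumulate(global.begin(), global.end(), 0.0));
  return 0;
}
@endcode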
template NavierStokesProjection::NavierStokesProjection(const RunTimeParameters::Data_Storage &data) - : - type (data.form), - deg (data.pressure_degree), - dt (data.dt), - t_0 (data.initial_time), - T (data.final_time), - Re (data.Reynolds), - vel_exact (data.initial_time), - fe_velocity (deg+1), - fe_pressure (deg), - dof_handler_velocity (triangulation), - dof_handler_pressure (triangulation), - quadrature_pressure (deg+1), - quadrature_velocity (deg+2), - vel_max_its (data.vel_max_iterations), - vel_Krylov_size (data.vel_Krylov_size), - vel_off_diagonals (data.vel_off_diagonals), - vel_update_prec (data.vel_update_prec), - vel_eps (data.vel_eps), - vel_diag_strength (data.vel_diag_strength) + : + type (data.form), + deg (data.pressure_degree), + dt (data.dt), + t_0 (data.initial_time), + T (data.final_time), + Re (data.Reynolds), + vel_exact (data.initial_time), + fe_velocity (deg+1), + fe_pressure (deg), + dof_handler_velocity (triangulation), + dof_handler_pressure (triangulation), + quadrature_pressure (deg+1), + quadrature_velocity (deg+2), + vel_max_its (data.vel_max_iterations), + vel_Krylov_size (data.vel_Krylov_size), + vel_off_diagonals (data.vel_off_diagonals), + vel_update_prec (data.vel_update_prec), + vel_eps (data.vel_eps), + vel_diag_strength (data.vel_diag_strength) { if(deg < 1) std::cout << " WARNING: The chosen pair of finite element spaces is not stable." - << std::endl - << " The obtained results will be nonsense" - << std::endl; + << std::endl + << " The obtained results will be nonsense" + << std::endl; AssertThrow (! ( (dt <= 0.) || (dt > .5*T)), ExcInvalidTimeStep (dt, .5*T)); @@ -730,17 +730,17 @@ namespace Step35 } - // @sect4{ NavierStokesProjection::create_triangulation_and_dofs } + // @sect4{ NavierStokesProjection::create_triangulation_and_dofs } - // The method that creates the - // triangulation and refines it the - // needed number of times. After - // creating the triangulation, it - // creates the mesh dependent data, - // i.e. it distributes degrees of - // freedom and renumbers them, and - // initializes the matrices and - // vectors that we will use. + // The method that creates the + // triangulation and refines it the + // needed number of times. After + // creating the triangulation, it + // creates the mesh dependent data, + // i.e. it distributes degrees of + // freedom and renumbers them, and + // initializes the matrices and + // vectors that we will use. template void NavierStokesProjection:: @@ -757,10 +757,10 @@ namespace Step35 } std::cout << "Number of refines = " << n_refines - << std::endl; + << std::endl; triangulation.refine_global (n_refines); std::cout << "Number of active cells: " << triangulation.n_active_cells() - << std::endl; + << std::endl; boundary_indicators = triangulation.get_boundary_indicators(); @@ -780,29 +780,29 @@ namespace Step35 pres_tmp.reinit (dof_handler_pressure.n_dofs()); for(unsigned int d=0; dNavierStokesProjection::run } + + // This is the time marching + // function, which starting at + // t_0 advances in time + // using the projection method with + // time step dt until + // T. + // + // Its second parameter, verbose + // indicates whether the function should + // output information what it is doing at any + // given moment: for example, it will say + // whether we are working on the diffusion, + // projection substep; updating + // preconditioners etc. 
Rather than + // implementing this output using code like + // @code + // if (verbose) + // std::cout << "something"; + // @endcode + // we use the ConditionalOStream class to + // do that for us. That class takes an + // output stream and a condition that + // indicates whether the things you pass + // to it should be passed through to the + // given output stream, or should just + // be ignored. This way, above code + // simply becomes + // @code + // verbose_cout << "something"; + // @endcode + // and does the right thing in either + // case. template void NavierStokesProjection::run (const bool verbose, - const unsigned int output_interval) + const unsigned int output_interval) { ConditionalOStream verbose_cout (std::cout, verbose); @@ -1040,25 +1040,25 @@ namespace Step35 output_results(1); for (unsigned int n = 2; n<=n_steps; ++n) { - if (n % output_interval == 0) - { - verbose_cout << "Plotting Solution" << std::endl; - output_results(n); - } - std::cout << "Step = " << n << " Time = " << (n*dt) << std::endl; - verbose_cout << " Interpolating the velocity " << std::endl; - - interpolate_velocity(); - verbose_cout << " Diffusion Step" << std::endl; - if (n % vel_update_prec == 0) - verbose_cout << " With reinitialization of the preconditioner" - << std::endl; - diffusion_step ((n%vel_update_prec == 0) || (n == 2)); - verbose_cout << " Projection Step" << std::endl; - projection_step ( (n == 2)); - verbose_cout << " Updating the Pressure" << std::endl; - update_pressure ( (n == 2)); - vel_exact.advance_time(dt); + if (n % output_interval == 0) + { + verbose_cout << "Plotting Solution" << std::endl; + output_results(n); + } + std::cout << "Step = " << n << " Time = " << (n*dt) << std::endl; + verbose_cout << " Interpolating the velocity " << std::endl; + + interpolate_velocity(); + verbose_cout << " Diffusion Step" << std::endl; + if (n % vel_update_prec == 0) + verbose_cout << " With reinitialization of the preconditioner" + << std::endl; + diffusion_step ((n%vel_update_prec == 0) || (n == 2)); + verbose_cout << " Projection Step" << std::endl; + projection_step ( (n == 2)); + verbose_cout << " Updating the Pressure" << std::endl; + update_pressure ( (n == 2)); + vel_exact.advance_time(dt); } output_results (n_steps); } @@ -1074,28 +1074,28 @@ namespace Step35 } - // @sect4{NavierStokesProjection::diffusion_step} - - // The implementation of a diffusion - // step. Note that the expensive operation is - // the diffusion solve at the end of the - // function, which we have to do once for - // each velocity component. To accellerate - // things a bit, we allow to do this in - // %parallel, using the Threads::new_task - // function which makes sure that the - // dim solves are all taken care - // of and are scheduled to available - // processors: if your machine has more than - // one processor core and no other parts of - // this program are using resources - // currently, then the diffusion solves will - // run in %parallel. On the other hand, if - // your system has only one processor core - // then running things in %parallel would be - // inefficient (since it leads, for example, - // to cache congestion) and things will be - // executed sequentially. + // @sect4{NavierStokesProjection::diffusion_step} + + // The implementation of a diffusion + // step. Note that the expensive operation is + // the diffusion solve at the end of the + // function, which we have to do once for + // each velocity component. 
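As the comment continues below, the dim diffusion solves are mutually independent and are therefore handed out as tasks. The same idea in a stand-alone form, with std::async taking the place of deal.II's Threads::new_task and a toy fixed-point iteration standing in for the per-component GMRES solve:
@code
#include <future>
#include <vector>
#include <cstdio>

// Stand-in for a per-component solve: each component d can be solved
// without looking at the others.
double solve_component (const unsigned int d)
{
  double x = 1.0;
  for (unsigned int iter = 0; iter < 1000; ++iter)   // pretend iterative solve
    x = 0.5 * (x + (d + 1.0) / x);                   // converges to sqrt(d+1)
  return x;
}

int main ()
{
  const unsigned int dim = 2;
  std::vector<std::future<double> > tasks;
  for (unsigned int d = 0; d < dim; ++d)
    tasks.push_back(std::async(std::launch::async, solve_component, d));

  for (unsigned int d = 0; d < dim; ++d)              // the analogue of tasks.join_all()
    std::printf("component %u -> %g\n", d, tasks[d].get());
  return 0;
}
@endcode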
To accellerate + // things a bit, we allow to do this in + // %parallel, using the Threads::new_task + // function which makes sure that the + // dim solves are all taken care + // of and are scheduled to available + // processors: if your machine has more than + // one processor core and no other parts of + // this program are using resources + // currently, then the diffusion solves will + // run in %parallel. On the other hand, if + // your system has only one processor core + // then running things in %parallel would be + // inefficient (since it leads, for example, + // to cache congestion) and things will be + // executed sequentially. template void NavierStokesProjection::diffusion_step (const bool reinit_prec) @@ -1106,76 +1106,76 @@ namespace Step35 for (unsigned int d=0; d::const_iterator - boundaries = boundary_indicators.begin(); - boundaries != boundary_indicators.end(); - ++boundaries) - { - switch (*boundaries) - { - case 1: - VectorTools:: - interpolate_boundary_values (dof_handler_velocity, - *boundaries, - ZeroFunction(), - boundary_values); - break; - case 2: - VectorTools:: - interpolate_boundary_values (dof_handler_velocity, - *boundaries, - vel_exact, - boundary_values); - break; - case 3: - if (d != 0) - VectorTools:: - interpolate_boundary_values (dof_handler_velocity, - *boundaries, - ZeroFunction(), - boundary_values); - break; - case 4: - VectorTools:: - interpolate_boundary_values (dof_handler_velocity, - *boundaries, - ZeroFunction(), - boundary_values); - break; - default: - Assert (false, ExcNotImplemented()); - } - } - MatrixTools::apply_boundary_values (boundary_values, - vel_it_matrix[d], - u_n[d], - force[d]); + force[d] = 0.; + v_tmp.equ (2./dt,u_n[d],-.5/dt,u_n_minus_1[d]); + vel_Mass.vmult_add (force[d], v_tmp); + + pres_Diff[d].vmult_add (force[d], pres_tmp); + u_n_minus_1[d] = u_n[d]; + + vel_it_matrix[d].copy_from (vel_Laplace_plus_Mass); + vel_it_matrix[d].add (1., vel_Advection); + + vel_exact.set_component(d); + boundary_values.clear(); + for (std::vector::const_iterator + boundaries = boundary_indicators.begin(); + boundaries != boundary_indicators.end(); + ++boundaries) + { + switch (*boundaries) + { + case 1: + VectorTools:: + interpolate_boundary_values (dof_handler_velocity, + *boundaries, + ZeroFunction(), + boundary_values); + break; + case 2: + VectorTools:: + interpolate_boundary_values (dof_handler_velocity, + *boundaries, + vel_exact, + boundary_values); + break; + case 3: + if (d != 0) + VectorTools:: + interpolate_boundary_values (dof_handler_velocity, + *boundaries, + ZeroFunction(), + boundary_values); + break; + case 4: + VectorTools:: + interpolate_boundary_values (dof_handler_velocity, + *boundaries, + ZeroFunction(), + boundary_values); + break; + default: + Assert (false, ExcNotImplemented()); + } + } + MatrixTools::apply_boundary_values (boundary_values, + vel_it_matrix[d], + u_n[d], + force[d]); } Threads::TaskGroup tasks; for(unsigned int d=0; d:: - AdditionalData (vel_diag_strength, - vel_off_diagonals)); - tasks += Threads::new_task (&NavierStokesProjection:: - diffusion_component_solve, - *this, d); + if (reinit_prec) + prec_velocity[d].initialize (vel_it_matrix[d], + SparseILU:: + AdditionalData (vel_diag_strength, + vel_off_diagonals)); + tasks += Threads::new_task (&NavierStokesProjection:: + diffusion_component_solve, + *this, d); } tasks.join_all(); } @@ -1188,21 +1188,21 @@ namespace Step35 { SolverControl solver_control (vel_max_its, vel_eps*force[d].l2_norm()); SolverGMRES<> gmres (solver_control, - 
SolverGMRES<>::AdditionalData (vel_Krylov_size)); + SolverGMRES<>::AdditionalData (vel_Krylov_size)); gmres.solve (vel_it_matrix[d], u_n[d], force[d], prec_velocity[d]); } - // @sect4{ The NavierStokesProjection::assemble_advection_term method and related} + // @sect4{ The NavierStokesProjection::assemble_advection_term method and related} - // The following few functions deal with - // assembling the advection terms, which is the part of the - // system matrix for the diffusion step that changes - // at every time step. As mentioned above, we - // will run the assembly loop over all cells - // in %parallel, using the WorkStream class - // and other facilities as described in the - // documentation module on @ref threads. + // The following few functions deal with + // assembling the advection terms, which is the part of the + // system matrix for the diffusion step that changes + // at every time step. As mentioned above, we + // will run the assembly loop over all cells + // in %parallel, using the WorkStream class + // and other facilities as described in the + // documentation module on @ref threads. template void NavierStokesProjection::assemble_advection_term() @@ -1210,15 +1210,15 @@ namespace Step35 vel_Advection = 0.; AdvectionPerTaskData data (fe_velocity.dofs_per_cell); AdvectionScratchData scratch (fe_velocity, quadrature_velocity, - update_values | - update_JxW_values | - update_gradients); + update_values | + update_JxW_values | + update_gradients); WorkStream::run (dof_handler_velocity.begin_active(), - dof_handler_velocity.end(), *this, - &NavierStokesProjection::assemble_one_cell_of_advection, - &NavierStokesProjection::copy_advection_local_to_global, - scratch, - data); + dof_handler_velocity.end(), *this, + &NavierStokesProjection::assemble_one_cell_of_advection, + &NavierStokesProjection::copy_advection_local_to_global, + scratch, + data); } @@ -1227,43 +1227,43 @@ namespace Step35 void NavierStokesProjection:: assemble_one_cell_of_advection(const typename DoFHandler::active_cell_iterator &cell, - AdvectionScratchData &scratch, - AdvectionPerTaskData &data) + AdvectionScratchData &scratch, + AdvectionPerTaskData &data) { scratch.fe_val.reinit(cell); cell->get_dof_indices (data.local_dof_indices); for (unsigned int d=0; dNavierStokesProjection::projection_step} + // @sect4{NavierStokesProjection::projection_step} - // This implements the projection step: + // This implements the projection step: template void NavierStokesProjection::projection_step (const bool reinit_prec) @@ -1300,14 +1300,14 @@ namespace Step35 static std::map bval; if (reinit_prec) VectorTools::interpolate_boundary_values (dof_handler_pressure, 3, - ZeroFunction(), bval); + ZeroFunction(), bval); MatrixTools::apply_boundary_values (bval, pres_iterative, phi_n, pres_tmp); if (reinit_prec) prec_pres_Laplace.initialize(pres_iterative, - SparseILU::AdditionalData (vel_diag_strength, - vel_off_diagonals) ); + SparseILU::AdditionalData (vel_diag_strength, + vel_off_diagonals) ); SolverControl solvercontrol (vel_max_its, vel_eps*pres_tmp.l2_norm()); SolverCG<> cg (solvercontrol); @@ -1317,19 +1317,19 @@ namespace Step35 } - // @sect4{ NavierStokesProjection::update_pressure } + // @sect4{ NavierStokesProjection::update_pressure } - // This is the pressure update step - // of the projection method. 
It - // implements the standard - // formulation of the method, that is - // @f[ - // p^{n+1} = p^n + \phi^{n+1}, - // @f] - // or the rotational form, which is - // @f[ - // p^{n+1} = p^n + \phi^{n+1} - \frac{1}{Re} \nabla\cdot u^{n+1}. - // @f] + // This is the pressure update step + // of the projection method. It + // implements the standard + // formulation of the method, that is + // @f[ + // p^{n+1} = p^n + \phi^{n+1}, + // @f] + // or the rotational form, which is + // @f[ + // p^{n+1} = p^n + \phi^{n+1} - \frac{1}{Re} \nabla\cdot u^{n+1}. + // @f] template void NavierStokesProjection::update_pressure (const bool reinit_prec) @@ -1337,69 +1337,69 @@ namespace Step35 pres_n_minus_1 = pres_n; switch (type) { - case RunTimeParameters::METHOD_STANDARD: - pres_n += phi_n; - break; - case RunTimeParameters::METHOD_ROTATIONAL: - if (reinit_prec) - prec_mass.initialize (pres_Mass); - pres_n = pres_tmp; - prec_mass.solve (pres_n); - pres_n.sadd(1./Re, 1., pres_n_minus_1, 1., phi_n); - break; - default: - Assert (false, ExcNotImplemented()); + case RunTimeParameters::METHOD_STANDARD: + pres_n += phi_n; + break; + case RunTimeParameters::METHOD_ROTATIONAL: + if (reinit_prec) + prec_mass.initialize (pres_Mass); + pres_n = pres_tmp; + prec_mass.solve (pres_n); + pres_n.sadd(1./Re, 1., pres_n_minus_1, 1., phi_n); + break; + default: + Assert (false, ExcNotImplemented()); }; } - // @sect4{ NavierStokesProjection::output_results } - - // This method plots the current - // solution. The main difficulty is that we - // want to create a single output file that - // contains the data for all velocity - // components, the pressure, and also the - // vorticity of the flow. On the other hand, - // velocities and the pressure live on - // separate DoFHandler objects, and so can't - // be written to the same file using a single - // DataOut object. As a consequence, we have - // to work a bit harder to get the various - // pieces of data into a single DoFHandler - // object, and then use that to drive - // graphical output. - // - // We will not elaborate on this process - // here, but rather refer to step-31 and - // step-32, where a similar procedure is used - // (and is documented) to create a joint - // DoFHandler object for all variables. - // - // Let us also note that we here compute the - // vorticity as a scalar quantity in a - // separate function, using the $L^2$ - // projection of the quantity $\text{curl} u$ - // onto the finite element space used for the - // components of the velocity. In principle, - // however, we could also have computed as a - // pointwise quantity from the velocity, and - // do so through the DataPostprocessor - // mechanism discussed in step-29 and - // step-33. + // @sect4{ NavierStokesProjection::output_results } + + // This method plots the current + // solution. The main difficulty is that we + // want to create a single output file that + // contains the data for all velocity + // components, the pressure, and also the + // vorticity of the flow. On the other hand, + // velocities and the pressure live on + // separate DoFHandler objects, and so can't + // be written to the same file using a single + // DataOut object. As a consequence, we have + // to work a bit harder to get the various + // pieces of data into a single DoFHandler + // object, and then use that to drive + // graphical output. 
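Reduced to plain vector arithmetic, the two pressure updates quoted above look as follows; p, phi and div_u are toy nodal vectors, and in the program the divergence term is in fact obtained through a mass-matrix solve rather than being given directly (that shortcut and the concrete numbers are illustration only). The discussion of graphical output then resumes below.
@code
#include <vector>
#include <cstdio>

int main ()
{
  const double Re = 100.0;
  std::vector<double> p     (3, 1.0);     // p^n
  std::vector<double> phi   (3, 0.2);     // phi^{n+1}, the pressure correction
  std::vector<double> div_u (3, 0.05);    // nodal representation of div u^{n+1}

  std::vector<double> p_standard(p), p_rotational(p);
  for (unsigned int i = 0; i < p.size(); ++i)
    {
      // standard form:    p^{n+1} = p^n + phi^{n+1}
      p_standard[i]   = p[i] + phi[i];
      // rotational form:  p^{n+1} = p^n + phi^{n+1} - (1/Re) div u^{n+1}
      p_rotational[i] = p[i] + phi[i] - div_u[i] / Re;
    }

  std::printf("standard: %g   rotational: %g\n", p_standard[0], p_rotational[0]);
  return 0;
}
@endcode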
+ // + // We will not elaborate on this process + // here, but rather refer to step-31 and + // step-32, where a similar procedure is used + // (and is documented) to create a joint + // DoFHandler object for all variables. + // + // Let us also note that we here compute the + // vorticity as a scalar quantity in a + // separate function, using the $L^2$ + // projection of the quantity $\text{curl} u$ + // onto the finite element space used for the + // components of the velocity. In principle, + // however, we could also have computed as a + // pointwise quantity from the velocity, and + // do so through the DataPostprocessor + // mechanism discussed in step-29 and + // step-33. template void NavierStokesProjection::output_results (const unsigned int step) { assemble_vorticity ( (step == 1)); const FESystem joint_fe (fe_velocity, dim, - fe_pressure, 1, - fe_velocity, 1); + fe_pressure, 1, + fe_velocity, 1); DoFHandler joint_dof_handler (triangulation); joint_dof_handler.distribute_dofs (joint_fe); Assert (joint_dof_handler.n_dofs() == - ((dim + 1)*dof_handler_velocity.n_dofs() + - dof_handler_pressure.n_dofs()), - ExcInternalError()); + ((dim + 1)*dof_handler_velocity.n_dofs() + + dof_handler_pressure.n_dofs()), + ExcInternalError()); static Vector joint_solution (joint_dof_handler.n_dofs()); std::vector loc_joint_dof_indices (joint_fe.dofs_per_cell), loc_vel_dof_indices (fe_velocity.dofs_per_cell), @@ -1411,34 +1411,34 @@ namespace Step35 pres_cell = dof_handler_pressure.begin_active(); for (; joint_cell != joint_endc; ++joint_cell, ++vel_cell, ++pres_cell) { - joint_cell->get_dof_indices (loc_joint_dof_indices); - vel_cell->get_dof_indices (loc_vel_dof_indices), - pres_cell->get_dof_indices (loc_pres_dof_indices); - for (unsigned int i=0; iget_dof_indices (loc_joint_dof_indices); + vel_cell->get_dof_indices (loc_vel_dof_indices), + pres_cell->get_dof_indices (loc_pres_dof_indices); + for (unsigned int i=0; i joint_solution_names (dim, "v"); joint_solution_names.push_back ("p"); @@ -1447,38 +1447,38 @@ namespace Step35 data_out.attach_dof_handler (joint_dof_handler); std::vector< DataComponentInterpretation::DataComponentInterpretation > component_interpretation (dim+2, - DataComponentInterpretation::component_is_part_of_vector); + DataComponentInterpretation::component_is_part_of_vector); component_interpretation[dim] = DataComponentInterpretation::component_is_scalar; component_interpretation[dim+1] = DataComponentInterpretation::component_is_scalar; data_out.add_data_vector (joint_solution, - joint_solution_names, - DataOut::type_dof_data, - component_interpretation); + joint_solution_names, + DataOut::type_dof_data, + component_interpretation); data_out.build_patches (deg + 1); std::ofstream output (("solution-" + - Utilities::int_to_string (step, 5) + - ".vtk").c_str()); + Utilities::int_to_string (step, 5) + + ".vtk").c_str()); data_out.write_vtk (output); } - // Following is the helper function that - // computes the vorticity by projecting the - // term $\text{curl} u$ onto the finite - // element space used for the components of - // the velocity. The function is only called - // whenever we generate graphical output, so - // not very often, and as a consequence we - // didn't bother parallelizing it using the - // WorkStream concept as we do for the other - // assembly functions. That should not be - // overly complicated, however, if - // needed. Moreover, the implementation that - // we have here only works for 2d, so we bail - // if that is not the case. 
+ // Following is the helper function that + // computes the vorticity by projecting the + // term $\text{curl} u$ onto the finite + // element space used for the components of + // the velocity. The function is only called + // whenever we generate graphical output, so + // not very often, and as a consequence we + // didn't bother parallelizing it using the + // WorkStream concept as we do for the other + // assembly functions. That should not be + // overly complicated, however, if + // needed. Moreover, the implementation that + // we have here only works for 2d, so we bail + // if that is not the case. template void NavierStokesProjection::assemble_vorticity (const bool reinit_prec) { @@ -1487,11 +1487,11 @@ namespace Step35 prec_vel_mass.initialize (vel_Mass); FEValues fe_val_vel (fe_velocity, quadrature_velocity, - update_gradients | - update_JxW_values | - update_values); + update_gradients | + update_JxW_values | + update_values); const unsigned int dpc = fe_velocity.dofs_per_cell, - nqp = quadrature_velocity.size(); + nqp = quadrature_velocity.size(); std::vector ldi (dpc); Vector loc_rot (dpc); @@ -1503,19 +1503,19 @@ namespace Step35 end = dof_handler_velocity.end(); for (; cell != end; ++cell) { - fe_val_vel.reinit (cell); - cell->get_dof_indices (ldi); - fe_val_vel.get_function_gradients (u_n[0], grad_u1); - fe_val_vel.get_function_gradients (u_n[1], grad_u2); - loc_rot = 0.; - for (unsigned int q=0; qget_dof_indices (ldi); + fe_val_vel.get_function_gradients (u_n[0], grad_u1); + fe_val_vel.get_function_gradients (u_n[1], grad_u2); + loc_rot = 0.; + for (unsigned int q=0; q #include #include @@ -38,35 +38,35 @@ #include #include - // PETSc appears here because SLEPc - // depends on this library: + // PETSc appears here because SLEPc + // depends on this library: #include #include - // And then we need to actually - // import the interfaces for solvers - // that SLEPc provides: + // And then we need to actually + // import the interfaces for solvers + // that SLEPc provides: #include - // We also need some standard C++: + // We also need some standard C++: #include #include - // Finally, as in previous programs, we - // import all the deal.II class and function - // names into the namespace into which - // everything in this program will go: + // Finally, as in previous programs, we + // import all the deal.II class and function + // names into the namespace into which + // everything in this program will go: namespace Step36 { using namespace dealii; - // @sect3{The EigenvalueProblem class template} + // @sect3{The EigenvalueProblem class template} - // Following is the class declaration - // for the main class template. It - // looks pretty much exactly like - // what has already been shown in - // step-4: + // Following is the class declaration + // for the main class template. It + // looks pretty much exactly like + // what has already been shown in + // step-4: template class EigenvalueProblem { @@ -84,139 +84,139 @@ namespace Step36 FE_Q fe; DoFHandler dof_handler; - // With these exceptions: For our - // eigenvalue problem, we need - // both a stiffness matrix for - // the left hand side as well as - // a mass matrix for the right - // hand side. 
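A stiffness matrix on the left and a mass matrix on the right mean that the problem to be solved is the generalized eigenvalue problem $K x = \lambda M x$ rather than a standard one. For a 2x2 example this can be done by hand; the sketch below (matrices chosen arbitrarily for the illustration) reduces it to the quadratic equation $\det(K - \lambda M)=0$.
@code
#include <cmath>
#include <cstdio>

int main ()
{
  // Arbitrary symmetric positive definite 2x2 "stiffness" and "mass"
  // matrices, standing in for the finite element matrices.
  const double K[2][2] = {{2.0, 1.0}, {1.0, 2.0}};
  const double M[2][2] = {{2.0, 0.0}, {0.0, 1.0}};

  // det(K - lambda*M) = 0 expands to a*lambda^2 + b*lambda + c = 0 with
  const double a = M[0][0]*M[1][1] - M[0][1]*M[1][0];
  const double b = -(K[0][0]*M[1][1] + K[1][1]*M[0][0]
                     - K[0][1]*M[1][0] - K[1][0]*M[0][1]);
  const double c = K[0][0]*K[1][1] - K[0][1]*K[1][0];

  const double disc = std::sqrt(b*b - 4.0*a*c);
  std::printf("eigenvalues: %g and %g\n",
              (-b - disc) / (2.0*a), (-b + disc) / (2.0*a));
  return 0;
}
@endcode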
We also need not - // just one solution function, - // but a whole set of these for - // the eigenfunctions we want to - // compute, along with the - // corresponding eigenvalues: + // With these exceptions: For our + // eigenvalue problem, we need + // both a stiffness matrix for + // the left hand side as well as + // a mass matrix for the right + // hand side. We also need not + // just one solution function, + // but a whole set of these for + // the eigenfunctions we want to + // compute, along with the + // corresponding eigenvalues: PETScWrappers::SparseMatrix stiffness_matrix, mass_matrix; std::vector eigenfunctions; std::vector eigenvalues; - // And then we need an object - // that will store several - // run-time parameters that we - // will specify in an input file: + // And then we need an object + // that will store several + // run-time parameters that we + // will specify in an input file: ParameterHandler parameters; - // Finally, we will have an - // object that contains - // "constraints" on our degrees - // of freedom. This could include - // hanging node constraints if we - // had adaptively refined meshes - // (which we don't have in the - // current program). Here, we - // will store the constraints for - // boundary nodes $U_i=0$. + // Finally, we will have an + // object that contains + // "constraints" on our degrees + // of freedom. This could include + // hanging node constraints if we + // had adaptively refined meshes + // (which we don't have in the + // current program). Here, we + // will store the constraints for + // boundary nodes $U_i=0$. ConstraintMatrix constraints; }; - // @sect3{Implementation of the EigenvalueProblem class} + // @sect3{Implementation of the EigenvalueProblem class} - // @sect4{EigenvalueProblem::EigenvalueProblem} + // @sect4{EigenvalueProblem::EigenvalueProblem} - // First up, the constructor. The - // main new part is handling the - // run-time input parameters. We need - // to declare their existence first, - // and then read their values from - // the input file whose name is - // specified as an argument to this - // function: + // First up, the constructor. The + // main new part is handling the + // run-time input parameters. 
We need + // to declare their existence first, + // and then read their values from + // the input file whose name is + // specified as an argument to this + // function: template EigenvalueProblem::EigenvalueProblem (const std::string &prm_file) - : - fe (1), - dof_handler (triangulation) + : + fe (1), + dof_handler (triangulation) { parameters.declare_entry ("Global mesh refinement steps", "5", - Patterns::Integer (0, 20), - "The number of times the 1-cell coarse mesh should " - "be refined globally for our computations."); + Patterns::Integer (0, 20), + "The number of times the 1-cell coarse mesh should " + "be refined globally for our computations."); parameters.declare_entry ("Number of eigenvalues/eigenfunctions", "5", - Patterns::Integer (0, 100), - "The number of eigenvalues/eigenfunctions " - "to be computed."); + Patterns::Integer (0, 100), + "The number of eigenvalues/eigenfunctions " + "to be computed."); parameters.declare_entry ("Potential", "0", - Patterns::Anything(), - "A functional description of the potential."); + Patterns::Anything(), + "A functional description of the potential."); parameters.read_input (prm_file); } - // @sect4{EigenvalueProblem::make_grid_and_dofs} - - // The next function creates a mesh - // on the domain $[-1,1]^d$, refines - // it as many times as the input file - // calls for, and then attaches a - // DoFHandler to it and initializes - // the matrices and vectors to their - // correct sizes. We also build the - // constraints that correspond to the - // boundary values - // $u|_{\partial\Omega}=0$. - // - // For the matrices, we use the PETSc - // wrappers. These have the ability - // to allocate memory as necessary as - // non-zero entries are added. This - // seems inefficient: we could as - // well first compute the sparsity - // pattern, initialize the matrices - // with it, and as we then insert - // entries we can be sure that we do - // not need to re-allocate memory and - // free the one used previously. One - // way to do that would be to use - // code like this: - // @code - // CompressedSimpleSparsityPattern - // csp (dof_handler.n_dofs(), - // dof_handler.n_dofs()); - // DoFTools::make_sparsity_pattern (dof_handler, csp); - // csp.compress (); - // stiffness_matrix.reinit (csp); - // mass_matrix.reinit (csp); - // @endcode - // instead of the two - // reinit() calls for - // the stiffness and mass matrices - // below. - // - // This doesn't quite work, - // unfortunately. The code above may - // lead to a few entries in the - // non-zero pattern to which we only - // ever write zero entries; most - // notably, this holds true for - // off-diagonal entries for those - // rows and columns that belong to - // boundary nodes. This shouldn't be - // a problem, but for whatever - // reason, PETSc's ILU - // preconditioner, which we use to - // solve linear systems in the - // eigenvalue solver, doesn't like - // these extra entries and aborts - // with an error message. - // - // In the absense of any obvious way - // to avoid this, we simply settle - // for the second best option, which - // is have PETSc allocate memory as - // necessary. That said, since this - // is not a time critical part, this - // whole affair is of no further - // importance. 
+ // @sect4{EigenvalueProblem::make_grid_and_dofs} + + // The next function creates a mesh + // on the domain $[-1,1]^d$, refines + // it as many times as the input file + // calls for, and then attaches a + // DoFHandler to it and initializes + // the matrices and vectors to their + // correct sizes. We also build the + // constraints that correspond to the + // boundary values + // $u|_{\partial\Omega}=0$. + // + // For the matrices, we use the PETSc + // wrappers. These have the ability + // to allocate memory as necessary as + // non-zero entries are added. This + // seems inefficient: we could as + // well first compute the sparsity + // pattern, initialize the matrices + // with it, and as we then insert + // entries we can be sure that we do + // not need to re-allocate memory and + // free the one used previously. One + // way to do that would be to use + // code like this: + // @code + // CompressedSimpleSparsityPattern + // csp (dof_handler.n_dofs(), + // dof_handler.n_dofs()); + // DoFTools::make_sparsity_pattern (dof_handler, csp); + // csp.compress (); + // stiffness_matrix.reinit (csp); + // mass_matrix.reinit (csp); + // @endcode + // instead of the two + // reinit() calls for + // the stiffness and mass matrices + // below. + // + // This doesn't quite work, + // unfortunately. The code above may + // lead to a few entries in the + // non-zero pattern to which we only + // ever write zero entries; most + // notably, this holds true for + // off-diagonal entries for those + // rows and columns that belong to + // boundary nodes. This shouldn't be + // a problem, but for whatever + // reason, PETSc's ILU + // preconditioner, which we use to + // solve linear systems in the + // eigenvalue solver, doesn't like + // these extra entries and aborts + // with an error message. + // + // In the absense of any obvious way + // to avoid this, we simply settle + // for the second best option, which + // is have PETSc allocate memory as + // necessary. That said, since this + // is not a time critical part, this + // whole affair is of no further + // importance. template void EigenvalueProblem::make_grid_and_dofs () { @@ -228,20 +228,20 @@ namespace Step36 constraints.close (); stiffness_matrix.reinit (dof_handler.n_dofs(), - dof_handler.n_dofs(), - dof_handler.max_couplings_between_dofs()); + dof_handler.n_dofs(), + dof_handler.max_couplings_between_dofs()); mass_matrix.reinit (dof_handler.n_dofs(), - dof_handler.n_dofs(), - dof_handler.max_couplings_between_dofs()); - - // The next step is to take care of - // the eigenspectrum. In this case, - // the outputs are eigenvalues and - // eigenfunctions, so we set the - // size of the list of - // eigenfunctions and eigenvalues - // to be as large as we asked for - // in the input file: + dof_handler.n_dofs(), + dof_handler.max_couplings_between_dofs()); + + // The next step is to take care of + // the eigenspectrum. 
In this case, + // the outputs are eigenvalues and + // eigenfunctions, so we set the + // size of the list of + // eigenfunctions and eigenvalues + // to be as large as we asked for + // in the input file: eigenfunctions .resize (parameters.get_integer ("Number of eigenvalues/eigenfunctions")); for (unsigned int i=0; i void EigenvalueProblem::assemble_system () { QGauss quadrature_formula(2); FEValues fe_values (fe, quadrature_formula, - update_values | update_gradients | - update_quadrature_points | update_JxW_values); + update_values | update_gradients | + update_quadrature_points | update_JxW_values); const unsigned int dofs_per_cell = fe.dofs_per_cell; const unsigned int n_q_points = quadrature_formula.size(); @@ -297,8 +297,8 @@ namespace Step36 FunctionParser potential; potential.initialize (FunctionParser::default_variable_names (), - parameters.get ("Potential"), - typename FunctionParser::ConstMap()); + parameters.get ("Potential"), + typename FunctionParser::ConstMap()); std::vector potential_values (n_q_points); @@ -308,172 +308,172 @@ namespace Step36 endc = dof_handler.end (); for (; cell!=endc; ++cell) { - fe_values.reinit (cell); - cell_stiffness_matrix = 0; - cell_mass_matrix = 0; - - potential.value_list (fe_values.get_quadrature_points(), - potential_values); - - for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); - - constraints - .distribute_local_to_global (cell_stiffness_matrix, - local_dof_indices, - stiffness_matrix); - constraints - .distribute_local_to_global (cell_mass_matrix, - local_dof_indices, - mass_matrix); + fe_values.reinit (cell); + cell_stiffness_matrix = 0; + cell_mass_matrix = 0; + + potential.value_list (fe_values.get_quadrature_points(), + potential_values); + + for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); + + constraints + .distribute_local_to_global (cell_stiffness_matrix, + local_dof_indices, + stiffness_matrix); + constraints + .distribute_local_to_global (cell_mass_matrix, + local_dof_indices, + mass_matrix); } - // At the end of the function, we - // tell PETSc that the matrices - // have now been fully assembled - // and that the sparse matrix - // representation can now be - // compressed as no more entries - // will be added: + // At the end of the function, we + // tell PETSc that the matrices + // have now been fully assembled + // and that the sparse matrix + // representation can now be + // compressed as no more entries + // will be added: stiffness_matrix.compress (); mass_matrix.compress (); } - // @sect4{EigenvalueProblem::solve} - - // This is the key new functionality - // of the program. Now that the - // system is set up, here is a good - // time to actually solve the - // problem: As with other examples - // this is done using a "solve" - // routine. Essentially, it works as - // in other programs: you set up a - // SolverControl object that - // describes the accuracy to which we - // want to solve the linear systems, - // and then we select the kind of - // solver we want. Here we choose the - // Krylov-Schur solver of SLEPc, a - // pretty fast and robust choice for - // this kind of problem: + // @sect4{EigenvalueProblem::solve} + + // This is the key new functionality + // of the program. Now that the + // system is set up, here is a good + // time to actually solve the + // problem: As with other examples + // this is done using a "solve" + // routine. 
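The cell loop just shown assembles $\int \nabla\varphi_i\cdot\nabla\varphi_j + V\varphi_i\varphi_j$ into the stiffness matrix and $\int \varphi_i\varphi_j$ into the mass matrix. On a uniform 1d mesh with linear elements the local matrices have a familiar closed form, which the stand-alone sketch below assembles into global matrices (the constant potential and the tiny mesh are simplifications made for this illustration; the solve routine itself follows below).
@code
#include <vector>
#include <cstdio>

int main ()
{
  const unsigned int n_cells = 4;
  const double h = 1.0 / n_cells;        // uniform mesh on [0,1]
  const double V = 0.0;                  // constant potential for simplicity
  const unsigned int n_dofs = n_cells + 1;

  // Local matrices of linear elements on a cell of length h:
  //   stiffness: 1/h * [ 1 -1; -1 1 ]   (plus V times the local mass)
  //   mass:      h/6 * [ 2  1;  1  2 ]
  const double local_K[2][2] = {{ 1.0/h, -1.0/h}, {-1.0/h, 1.0/h}};
  const double local_M[2][2] = {{2.0*h/6.0, h/6.0}, {h/6.0, 2.0*h/6.0}};

  std::vector<std::vector<double> > K(n_dofs, std::vector<double>(n_dofs, 0.0));
  std::vector<std::vector<double> > M(n_dofs, std::vector<double>(n_dofs, 0.0));

  for (unsigned int cell = 0; cell < n_cells; ++cell)
    for (unsigned int i = 0; i < 2; ++i)
      for (unsigned int j = 0; j < 2; ++j)
        {
          K[cell+i][cell+j] += local_K[i][j] + V * local_M[i][j];
          M[cell+i][cell+j] += local_M[i][j];
        }

  std::printf("K(1,1) = %g (expect 2/h = %g),  M(1,1) = %g (expect 2h/3 = %g)\n",
              K[1][1], 2.0/h, M[1][1], 2.0*h/3.0);
  return 0;
}
@endcode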
Essentially, it works as + // in other programs: you set up a + // SolverControl object that + // describes the accuracy to which we + // want to solve the linear systems, + // and then we select the kind of + // solver we want. Here we choose the + // Krylov-Schur solver of SLEPc, a + // pretty fast and robust choice for + // this kind of problem: template void EigenvalueProblem::solve () { - // We start here, as we normally do, - // by assigning convergence control - // we want: + // We start here, as we normally do, + // by assigning convergence control + // we want: SolverControl solver_control (dof_handler.n_dofs(), 1e-9); SLEPcWrappers::SolverKrylovSchur eigensolver (solver_control); - // Before we actually solve for the - // eigenfunctions and -values, we - // have to also select which set of - // eigenvalues to solve for. Lets - // select those eigenvalues and - // corresponding eigenfunctions - // with the smallest real part (in - // fact, the problem we solve here - // is symmetric and so the - // eigenvalues are purely - // real). After that, we can - // actually let SLEPc do its work: + // Before we actually solve for the + // eigenfunctions and -values, we + // have to also select which set of + // eigenvalues to solve for. Lets + // select those eigenvalues and + // corresponding eigenfunctions + // with the smallest real part (in + // fact, the problem we solve here + // is symmetric and so the + // eigenvalues are purely + // real). After that, we can + // actually let SLEPc do its work: eigensolver.set_which_eigenpairs (EPS_SMALLEST_REAL); eigensolver.solve (stiffness_matrix, mass_matrix, - eigenvalues, eigenfunctions, - eigenfunctions.size()); - - // The output of the call above is - // a set of vectors and values. In - // eigenvalue problems, the - // eigenfunctions are only - // determined up to a constant that - // can be fixed pretty - // arbitrarily. Knowing nothing - // about the origin of the - // eigenvalue problem, SLEPc has no - // other choice than to normalize - // the eigenvectors to one in the - // $l_2$ (vector) - // norm. Unfortunately this norm - // has little to do with any norm - // we may be interested from a - // eigenfunction perspective: the - // $L_2(\Omega)$ norm, or maybe the - // $L_\infty(\Omega)$ norm. - // - // Let us choose the latter and - // rescale eigenfunctions so that - // they have $\|\phi_i(\mathbf - // x)\|_{L^\infty(\Omega)}=1$ - // instead of $\|\Phi\|_{l_2}=1$ - // (where $\phi_i$ is the $i$th - // eigenfunction and - // $\Phi_i$ the corresponding - // vector of nodal values). For the - // $Q_1$ elements chosen here, we - // know that the maximum of the - // function $\phi_i(\mathbf x)$ is - // attained at one of the nodes, so - // $\max_{\mathbf x}\phi_i(\mathbf - // x)=\max_j (\Phi_i)_j$, making - // the normalization in the - // $L_\infty$ norm trivial. Note - // that this doesn't work as easily - // if we had chosen $Q_k$ elements - // with $k>1$: there, the maximum - // of a function does not - // necessarily have to be attained - // at a node, and so $\max_{\mathbf - // x}\phi_i(\mathbf x)\ge\max_j - // (\Phi_i)_j$ (although the - // equality is usually nearly - // true). + eigenvalues, eigenfunctions, + eigenfunctions.size()); + + // The output of the call above is + // a set of vectors and values. In + // eigenvalue problems, the + // eigenfunctions are only + // determined up to a constant that + // can be fixed pretty + // arbitrarily. 
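The scaling ambiguity noted here is resolved below by rescaling each eigenvector so that its largest nodal value is one in magnitude. As an operation on a plain vector of nodal values (a toy stand-in for the PETSc vectors used in the program), that rescaling is just:
@code
#include <vector>
#include <algorithm>
#include <cmath>
#include <cstdio>

int main ()
{
  // Toy nodal values of one eigenfunction, in whatever scaling the
  // eigensolver happened to return.
  std::vector<double> Phi;
  Phi.push_back(0.3); Phi.push_back(-1.6); Phi.push_back(0.8);

  // l_infty norm: the largest absolute entry.
  double linfty = 0;
  for (unsigned int j = 0; j < Phi.size(); ++j)
    linfty = std::max(linfty, std::fabs(Phi[j]));

  // Rescale so that the largest nodal value is one in magnitude; for Q1
  // elements this equals normalizing the function in the L^infty norm.
  for (unsigned int j = 0; j < Phi.size(); ++j)
    Phi[j] /= linfty;

  std::printf("largest |entry| after rescaling: %g\n", std::fabs(Phi[1]));
  return 0;
}
@endcode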
Knowing nothing + // about the origin of the + // eigenvalue problem, SLEPc has no + // other choice than to normalize + // the eigenvectors to one in the + // $l_2$ (vector) + // norm. Unfortunately this norm + // has little to do with any norm + // we may be interested from a + // eigenfunction perspective: the + // $L_2(\Omega)$ norm, or maybe the + // $L_\infty(\Omega)$ norm. + // + // Let us choose the latter and + // rescale eigenfunctions so that + // they have $\|\phi_i(\mathbf + // x)\|_{L^\infty(\Omega)}=1$ + // instead of $\|\Phi\|_{l_2}=1$ + // (where $\phi_i$ is the $i$th + // eigenfunction and + // $\Phi_i$ the corresponding + // vector of nodal values). For the + // $Q_1$ elements chosen here, we + // know that the maximum of the + // function $\phi_i(\mathbf x)$ is + // attained at one of the nodes, so + // $\max_{\mathbf x}\phi_i(\mathbf + // x)=\max_j (\Phi_i)_j$, making + // the normalization in the + // $L_\infty$ norm trivial. Note + // that this doesn't work as easily + // if we had chosen $Q_k$ elements + // with $k>1$: there, the maximum + // of a function does not + // necessarily have to be attained + // at a node, and so $\max_{\mathbf + // x}\phi_i(\mathbf x)\ge\max_j + // (\Phi_i)_j$ (although the + // equality is usually nearly + // true). for (unsigned int i=0; i void EigenvalueProblem::output_results () const { @@ -483,31 +483,31 @@ namespace Step36 for (unsigned int i=0; i projected_potential (dof_handler.n_dofs()); { FunctionParser potential; potential.initialize (FunctionParser::default_variable_names (), - parameters.get ("Potential"), - typename FunctionParser::ConstMap()); + parameters.get ("Potential"), + typename FunctionParser::ConstMap()); VectorTools::interpolate (dof_handler, potential, projected_potential); } data_out.add_data_vector (projected_potential, "interpolated_potential"); @@ -519,24 +519,24 @@ namespace Step36 } - // @sect4{EigenvalueProblem::run} + // @sect4{EigenvalueProblem::run} - // This is the function which has the - // top-level control over - // everything. It is almost exactly - // the same as in step-4: + // This is the function which has the + // top-level control over + // everything. It is almost exactly + // the same as in step-4: template void EigenvalueProblem::run () { make_grid_and_dofs (); std::cout << " Number of active cells: " - << triangulation.n_active_cells () - << std::endl - << " Number of degrees of freedom: " - << dof_handler.n_dofs () - << std::endl - << std::endl; + << triangulation.n_active_cells () + << std::endl + << " Number of degrees of freedom: " + << dof_handler.n_dofs () + << std::endl + << std::endl; assemble_system (); solve (); @@ -544,8 +544,8 @@ namespace Step36 for (unsigned int i=0; i problem ("step-36.prm"); - problem.run (); + EigenvalueProblem<2> problem ("step-36.prm"); + problem.run (); } SlepcFinalize (); } - // All the while, we are watching - // out if any exceptions should - // have been generated. If that is - // so, we panic... + // All the while, we are watching + // out if any exceptions should + // have been generated. If that is + // so, we panic... catch (std::exception &exc) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Exception on processing: " << std::endl - << exc.what() << std::endl - << "Aborting!" 
<< std::endl - << "----------------------------------------------------" - << std::endl; + << exc.what() << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } catch (...) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Unknown exception!" << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } - // If no exceptions are thrown, - // then we tell the program to stop - // monkeying around and exit - // nicely: + // If no exceptions are thrown, + // then we tell the program to stop + // monkeying around and exit + // nicely: std::cout << std::endl - << "Job done." - << std::endl; + << "Job done." + << std::endl; return 0; } diff --git a/deal.II/examples/step-37/step-37.cc b/deal.II/examples/step-37/step-37.cc index de83d5bda0..dd638c7f18 100644 --- a/deal.II/examples/step-37/step-37.cc +++ b/deal.II/examples/step-37/step-37.cc @@ -10,8 +10,8 @@ /* further information on this license. */ - // To start with the include files are more - // or less the same as in step-16: + // To start with the include files are more + // or less the same as in step-16: #include #include #include @@ -50,16 +50,16 @@ using namespace dealii; - // @sect3{Equation data} + // @sect3{Equation data} - // We define a variable coefficient function - // for the Poisson problem. It is similar to - // the function in step-5 but we use the form - // $a(\mathbf x)=\frac{1}{0.1 + \|\bf x\|^2}$ - // instead of a discontinuous one. It is - // merely to demonstrate the possibilities of - // this implementation, rather than making - // much sense physically. + // We define a variable coefficient function + // for the Poisson problem. It is similar to + // the function in step-5 but we use the form + // $a(\mathbf x)=\frac{1}{0.1 + \|\bf x\|^2}$ + // instead of a discontinuous one. It is + // merely to demonstrate the possibilities of + // this implementation, rather than making + // much sense physically. 
template class Coefficient : public Function { @@ -67,18 +67,18 @@ class Coefficient : public Function Coefficient () : Function() {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; virtual void value_list (const std::vector > &points, - std::vector &values, - const unsigned int component = 0) const; + std::vector &values, + const unsigned int component = 0) const; }; template double Coefficient::value (const Point &p, - const unsigned int /*component*/) const + const unsigned int /*component*/) const { return 1./(0.1+p.square()); } @@ -87,13 +87,13 @@ double Coefficient::value (const Point &p, template void Coefficient::value_list (const std::vector > &points, - std::vector &values, - const unsigned int component) const + std::vector &values, + const unsigned int component) const { Assert (values.size() == points.size(), - ExcDimensionMismatch (values.size(), points.size())); + ExcDimensionMismatch (values.size(), points.size())); Assert (component == 0, - ExcIndexRange (component, 0, 1)); + ExcIndexRange (component, 0, 1)); const unsigned int n_points = points.size(); @@ -103,37 +103,37 @@ void Coefficient::value_list (const std::vector > &points, - // @sect3{Matrix-free implementation} - - // In this program, we want to make - // use of the ability of deal.II to - // runs things in %parallel if compute - // resources are available. We will - // follow the general framework laid - // out in the @ref threads module and - // use the WorkStream class to do - // operations on the range of all - // cells. - // - // To this end, we first have to have - // a few declarations that we use for - // defining the %parallel layout of - // the vector multiplication function - // with the WorkStream concept in the - // Matrix-free class. These comprise - // so-called scratch data that we use - // for calculating cell-related - // information, and copy data that is - // eventually used in a separate - // function for writing local data - // into the global vector. The reason - // for this split-up definition is - // that many threads at a time can - // execute the local multiplications - // (and filling up the copy data), - // but than that copy data needs to - // be worked on by one process at a - // time. + // @sect3{Matrix-free implementation} + + // In this program, we want to make + // use of the ability of deal.II to + // runs things in %parallel if compute + // resources are available. We will + // follow the general framework laid + // out in the @ref threads module and + // use the WorkStream class to do + // operations on the range of all + // cells. + // + // To this end, we first have to have + // a few declarations that we use for + // defining the %parallel layout of + // the vector multiplication function + // with the WorkStream concept in the + // Matrix-free class. These comprise + // so-called scratch data that we use + // for calculating cell-related + // information, and copy data that is + // eventually used in a separate + // function for writing local data + // into the global vector. The reason + // for this split-up definition is + // that many threads at a time can + // execute the local multiplications + // (and filling up the copy data), + // but than that copy data needs to + // be worked on by one process at a + // time. 
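  // Before defining the actual scratch and copy objects of this program,
  // here is a minimal, self-contained sketch of the WorkStream pattern just
  // described. The structs and the summation task are made up purely for
  // illustration and are not used anywhere below; the sketch relies only on
  // the WorkStream header already included at the top of this file. Many
  // threads execute the worker on different items and fill their own copy
  // object, while the copier runs serialized and is the only place that
  // touches global state.
namespace WorkStreamSketch
{
  struct Scratch {};                       // per-thread temporary storage
  struct Copy    { double local_value; };  // hand-over from worker to copier

  double global_sum = 0;                   // written only from the copier

  void worker (const std::vector<double>::const_iterator &item,
               Scratch &, Copy &copy)
  {
    copy.local_value = (*item) * (*item);  // runs on many threads at once
  }

  void copier (const Copy &copy)
  {
    global_sum += copy.local_value;        // serialized: no data race
  }

  void run_sketch (const std::vector<double> &data)
  {
    WorkStream::run (data.begin(), data.end(),
                     &worker, &copier,
                     Scratch(), Copy());
  }
}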
namespace WorkStreamData { template @@ -146,14 +146,14 @@ namespace WorkStreamData template ScratchData::ScratchData () - : - solutions () + : + solutions () {} template ScratchData::ScratchData (const ScratchData &) - : - solutions () + : + solutions () {} template @@ -167,49 +167,49 @@ namespace WorkStreamData template CopyData::CopyData () - : - ScratchData () + : + ScratchData () {} template CopyData::CopyData (const CopyData &) - : - ScratchData () + : + ScratchData () {} } - // Next comes the implementation of the - // matrix-free class. It provides some - // standard information we expect for - // matrices (like returning the dimensions - // of the matrix), it implements - // matrix-vector multiplications in several - // forms, and it provides functions for - // filling the matrix with data. - // - // We choose to make this class generic, - // i.e., we do not implement the actual - // differential operator (here: Laplace - // operator) directly in this class. We - // instead let the actual transformation - // (which happens on the level of quadrature - // points, see the discussion in the - // introduction) be a template parameter that - // is implemented by another class. We then - // only have to store a list of these objects - // for each quadrature point on each cell in - // a big list – we choose a - // Table<2,Transformation> data - // format) – and call a transform - // command of the @p Transformation - // class. This template magic makes it easy - // to reuse this MatrixFree class for other - // problems that are based on a symmetric - // operation without the need for substantial - // changes. + // Next comes the implementation of the + // matrix-free class. It provides some + // standard information we expect for + // matrices (like returning the dimensions + // of the matrix), it implements + // matrix-vector multiplications in several + // forms, and it provides functions for + // filling the matrix with data. + // + // We choose to make this class generic, + // i.e., we do not implement the actual + // differential operator (here: Laplace + // operator) directly in this class. We + // instead let the actual transformation + // (which happens on the level of quadrature + // points, see the discussion in the + // introduction) be a template parameter that + // is implemented by another class. We then + // only have to store a list of these objects + // for each quadrature point on each cell in + // a big list – we choose a + // Table<2,Transformation> data + // format) – and call a transform + // command of the @p Transformation + // class. This template magic makes it easy + // to reuse this MatrixFree class for other + // problems that are based on a symmetric + // operation without the need for substantial + // changes. 
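  // To illustrate the interface that the MatrixFree class below expects
  // from its Transformation template argument, here is a hypothetical (and
  // deliberately trivial) transformation that would turn the class into a
  // mass operator: all it has to provide is a transform() function that
  // modifies the values belonging to one quadrature point in place. It is
  // only a sketch and not used in this program; the real transformation,
  // the LaplaceOperator class, follows further down.
template <typename number>
class MassTransformationSketch
{
  public:
    MassTransformationSketch (const number weight = 1.)
      : weight (weight) {}

    // Called once per quadrature point; a scalar mass operator has a
    // single component per point, which is simply scaled by the JxW
    // value (times a possible coefficient) stored in 'weight'.
    void transform (number *values) const
    {
      values[0] *= weight;
    }

  private:
    number weight;
};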
template class MatrixFree : public Subscriptor { @@ -217,9 +217,9 @@ class MatrixFree : public Subscriptor MatrixFree (); void reinit (const unsigned int n_dofs, - const unsigned int n_cells, - const FullMatrix &cell_matrix, - const unsigned int n_points_per_cell); + const unsigned int n_cells, + const FullMatrix &cell_matrix, + const unsigned int n_points_per_cell); void clear(); unsigned int m () const; @@ -227,59 +227,59 @@ class MatrixFree : public Subscriptor ConstraintMatrix & get_constraints (); void set_local_dof_indices (const unsigned int cell_no, - const std::vector &local_dof_indices); + const std::vector &local_dof_indices); void set_derivative_data (const unsigned int cell_no, - const unsigned int quad_point, - const Transformation &trans_in); + const unsigned int quad_point, + const Transformation &trans_in); template void vmult (Vector &dst, - const Vector &src) const; + const Vector &src) const; template void Tvmult (Vector &dst, - const Vector &src) const; + const Vector &src) const; template void vmult_add (Vector &dst, - const Vector &src) const; + const Vector &src) const; template void Tvmult_add (Vector &dst, - const Vector &src) const; + const Vector &src) const; number el (const unsigned int row, - const unsigned int col) const; + const unsigned int col) const; void calculate_diagonal () const; std::size_t memory_consumption () const; - // The private member variables of the - // @p MatrixFree class are a - // small matrix that does the - // transformation from solution values to - // quadrature points, a list with the - // mapping between local degrees of freedom - // and global degrees of freedom for each - // cell (stored as a two-dimensional array, - // where each row corresponds to one - // cell, and the columns within individual - // cells are the local degrees of freedom), - // the transformation variable for - // implementing derivatives, a constraint - // matrix for handling boundary conditions - // as well as a few other variables that - // store matrix properties. + // The private member variables of the + // @p MatrixFree class are a + // small matrix that does the + // transformation from solution values to + // quadrature points, a list with the + // mapping between local degrees of freedom + // and global degrees of freedom for each + // cell (stored as a two-dimensional array, + // where each row corresponds to one + // cell, and the columns within individual + // cells are the local degrees of freedom), + // the transformation variable for + // implementing derivatives, a constraint + // matrix for handling boundary conditions + // as well as a few other variables that + // store matrix properties. 
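    // Before turning to the private members, note how such an object is
    // meant to be used once it is filled: since it provides m(), n() and
    // vmult(), it can be handed to the iterative solvers like any other
    // matrix. A sketch (the names system_matrix, solution and system_rhs
    // are those used further down in this program; the actual solve()
    // routine additionally brings in the multigrid preconditioner
    // discussed below instead of PreconditionIdentity):
    //
    //   SolverControl solver_control (1000, 1e-12);
    //   SolverCG<>    cg (solver_control);
    //   cg.solve (system_matrix, solution, system_rhs,
    //             PreconditionIdentity());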
private: typedef std::vector >::const_iterator CellChunkIterator; template void local_vmult (CellChunkIterator cell_range, - WorkStreamData::ScratchData &scratch, - WorkStreamData::CopyData ©, - const Vector &src) const; + WorkStreamData::ScratchData &scratch, + WorkStreamData::CopyData ©, + const Vector &src) const; template void copy_local_to_global (const WorkStreamData::CopyData ©, - Vector &dst) const; + Vector &dst) const; FullMatrix B_ref_cell; Table<2,unsigned int> indices_local_to_global; @@ -292,41 +292,41 @@ class MatrixFree : public Subscriptor struct MatrixSizes { - unsigned int n_dofs, n_cells; - unsigned int m, n; - unsigned int n_points, n_comp; - std::vector > chunks; + unsigned int n_dofs, n_cells; + unsigned int m, n; + unsigned int n_points, n_comp; + std::vector > chunks; } matrix_sizes; }; - // This is the constructor of the @p - // MatrixFree class. All it does is to - // subscribe to the general deal.II @p - // Subscriptor scheme that makes sure that we - // do not delete an object of this class as - // long as it used somewhere else, e.g. in a - // preconditioner. + // This is the constructor of the @p + // MatrixFree class. All it does is to + // subscribe to the general deal.II @p + // Subscriptor scheme that makes sure that we + // do not delete an object of this class as + // long as it used somewhere else, e.g. in a + // preconditioner. template MatrixFree::MatrixFree () - : - Subscriptor() + : + Subscriptor() {} - // The next functions return the - // number of rows and columns of the - // global matrix (i.e. the dimensions - // of the operator this class - // represents, the point of this - // tutorial program was, after all, - // that we don't actually store the - // elements of the rows and columns - // of this operator). Since the - // matrix is square, the returned - // numbers are the same. + // The next functions return the + // number of rows and columns of the + // global matrix (i.e. the dimensions + // of the operator this class + // represents, the point of this + // tutorial program was, after all, + // that we don't actually store the + // elements of the rows and columns + // of this operator). Since the + // matrix is square, the returned + // numbers are the same. template unsigned int MatrixFree::m () const @@ -345,11 +345,11 @@ MatrixFree::n () const - // One more function that just returns an - // %internal variable. Note that the user - // will need to change this variable, so it - // returns a non-constant reference to the - // ConstraintMatrix. + // One more function that just returns an + // %internal variable. Note that the user + // will need to change this variable, so it + // returns a non-constant reference to the + // ConstraintMatrix. template ConstraintMatrix & MatrixFree::get_constraints () @@ -359,28 +359,28 @@ MatrixFree::get_constraints () - // The following function takes a vector of - // local dof indices on cell level and writes - // the data into the - // @p indices_local_to_global field - // in order to have fast access to it. It - // performs a few sanity checks like whether - // the sizes in the matrix are set - // correctly. One tiny thing: Whenever we - // enter this function, we probably make some - // modification to the matrix. This means - // that the diagonal of the matrix, which we - // might have computed to have fast access to - // those elements, is invalidated. We set the - // respective flag to @p false. 
+ // The following function takes a vector of + // local dof indices on cell level and writes + // the data into the + // @p indices_local_to_global field + // in order to have fast access to it. It + // performs a few sanity checks like whether + // the sizes in the matrix are set + // correctly. One tiny thing: Whenever we + // enter this function, we probably make some + // modification to the matrix. This means + // that the diagonal of the matrix, which we + // might have computed to have fast access to + // those elements, is invalidated. We set the + // respective flag to @p false. template void MatrixFree:: set_local_dof_indices (const unsigned int cell_no, - const std::vector &local_dof_indices) + const std::vector &local_dof_indices) { Assert (local_dof_indices.size() == matrix_sizes.m, - ExcDimensionMismatch(local_dof_indices.size(), - matrix_sizes.m)); + ExcDimensionMismatch(local_dof_indices.size(), + matrix_sizes.m)); for (unsigned int i=0; i void MatrixFree:: set_derivative_data (const unsigned int cell_no, - const unsigned int quad_point, - const Transformation &trans_in) + const unsigned int quad_point, + const Transformation &trans_in) { Assert (quad_point < matrix_sizes.n_points, ExcInternalError()); derivatives(cell_no,quad_point) = trans_in; @@ -414,136 +414,136 @@ set_derivative_data (const unsigned int cell_no, - // Now finally to the central function of the - // matrix-free class, implementing the - // multiplication of the matrix with a - // vector. This function does not actually - // work on all cells of a mesh, but only the - // subset of cells specified by the first - // argument @p cell_range. Since this - // function operates similarly irrespective - // on which cell chunk we are sitting, we can - // call it simultaneously on many processors, - // but with different cell range data. - // - // The goal of this function is to provide - // the multiplication of a vector with the - // local contributions of a set of cells. As - // mentioned in the introduction, if we were - // to deal with a single cell, this would - // amount to performing the product - // @f{eqnarray*} - // P^T_\mathrm{cell,local-global} A_\mathrm{cell} - // P_\mathrm{cell,local-global} x - // @f} - // where - // @f{eqnarray*} - // A_\mathrm{cell} = - // B_\mathrm{ref\_cell}^T J_\mathrm{cell}^T - // D_\mathrm{cell} - // J_\mathrm{cell} B_\mathrm{ref\_cell} - // @f} - // and Pcell,local-global - // is the transformation from local to global - // indices. - // - // To do this, we would have to do the - // following steps: - //
    - //
  1. Form $x_\mathrm{cell} = - // P_\mathrm{cell,local-global} x$. This is - // done by using the command - // ConstraintMatrix::get_dof_values. - //
  2. Form $x_1 = B_\mathrm{ref\_cell} - // x_\mathrm{cell}$. The vector - // x1 contains the - // reference cell gradient to the local - // cell vector. - //
  3. Form $x_2 = J_\mathrm{cell}^T - // D_\mathrm{cell} J_\mathrm{cell} - // x_1$. This is a block-diagonal - // operation, with the block size equal to - // @p dim. The blocks just - // correspond to the individual quadrature - // points. The operation on each quadrature - // point is implemented by the - // Transformation class object that this - // class is equipped with. Compared to the - // introduction, the matrix - // Dcell now contains the - // @p JxW values and the - // inhomogeneous coefficient. - //
  4. Form $y_\mathrm{cell} = - // B_\mathrm{ref\_cell}^T x_2$. This gives - // the local result of the matrix-vector - // product. - //
  5. Form $y \leftarrow y + - // P_\mathrm{cell,local-global}^T - // y_\mathrm{cell}$. This adds the local - // result to the global vector, which is - // realized using the method - // ConstraintMatrix::distribute_local_to_global. - // Note that we do this in an extra - // function called - // @p copy_local_to_global - // because that operation must not be done - // in %parallel, in order to avoid two or - // more processes trying to add to the same - // positions in the result vector y. - //
- // The steps 1 to 4 can be done in %parallel - // by multiple processes. - - // Now, it turns out that the most expensive - // part of the above is the multiplication - // Bref_cell - // xcell in the second step - // and the transpose operation in step - // 4. Note that the matrix - // JT D J is - // block-diagonal, and hence, its application - // is cheaper. Since the matrix - // Bref_cell is the same - // for all cells, all that changes is the - // vector xcell. Hence, - // nothing prevents us from collecting - // several cell vectors to a (rectangular) - // matrix, and then perform a matrix-matrix - // product. These matrices are both full, but - // not very large, having of the order @p - // dofs_per_cell rows and columns. This is an - // operation that can be much better - // optimized than matrix-vector products. The - // functions @p FullMatrix::mmult and - // @p FullMatrix::mTmult use the BLAS - // dgemm function (as long as BLAS has been - // detected in deal.II configuration), which - // provides optimized kernels for doing this - // product. In our case, a matrix-matrix - // product is between three and five times - // faster than doing the matrix-vector - // product on one cell after the other. The - // variables that hold the solution on the - // respective cell's support points and the - // quadrature points are thus full matrices, - // which we set to the correct size as a - // first action in this function. The number - // of rows in the two matrices @p - // scratch.solutions and @p copy.solutions is - // given by the number of cells they work on, - // and the number of columns is the number of - // degrees of freedom per cell for the first - // and the number of quadrature points times - // the number of components per point for the - // latter. + // Now finally to the central function of the + // matrix-free class, implementing the + // multiplication of the matrix with a + // vector. This function does not actually + // work on all cells of a mesh, but only the + // subset of cells specified by the first + // argument @p cell_range. Since this + // function operates similarly irrespective + // on which cell chunk we are sitting, we can + // call it simultaneously on many processors, + // but with different cell range data. + // + // The goal of this function is to provide + // the multiplication of a vector with the + // local contributions of a set of cells. As + // mentioned in the introduction, if we were + // to deal with a single cell, this would + // amount to performing the product + // @f{eqnarray*} + // P^T_\mathrm{cell,local-global} A_\mathrm{cell} + // P_\mathrm{cell,local-global} x + // @f} + // where + // @f{eqnarray*} + // A_\mathrm{cell} = + // B_\mathrm{ref\_cell}^T J_\mathrm{cell}^T + // D_\mathrm{cell} + // J_\mathrm{cell} B_\mathrm{ref\_cell} + // @f} + // and Pcell,local-global + // is the transformation from local to global + // indices. + // + // To do this, we would have to do the + // following steps: + //
    + //
  1. Form $x_\mathrm{cell} = + // P_\mathrm{cell,local-global} x$. This is + // done by using the command + // ConstraintMatrix::get_dof_values. + //
  2. Form $x_1 = B_\mathrm{ref\_cell} + // x_\mathrm{cell}$. The vector $x_1$ then + // contains the reference cell gradients + // applied to the local cell vector, i.e. the + // gradient values at all quadrature points. + //
  3. Form $x_2 = J_\mathrm{cell}^T + // D_\mathrm{cell} J_\mathrm{cell} + // x_1$. This is a block-diagonal + // operation, with the block size equal to + // @p dim. The blocks just + // correspond to the individual quadrature + // points. The operation on each quadrature + // point is implemented by the + // Transformation class object that this + // class is equipped with. Compared to the + // introduction, the matrix + // $D_\mathrm{cell}$ now contains the + // @p JxW values and the + // inhomogeneous coefficient. + //
  4. Form $y_\mathrm{cell} = + // B_\mathrm{ref\_cell}^T x_2$. This gives + // the local result of the matrix-vector + // product. + //
  5. Form $y \leftarrow y + + // P_\mathrm{cell,local-global}^T + // y_\mathrm{cell}$. This adds the local + // result to the global vector, which is + // realized using the method + // ConstraintMatrix::distribute_local_to_global. + // Note that we do this in an extra + // function called + // @p copy_local_to_global + // because that operation must not be done + // in %parallel, in order to avoid two or + // more processes trying to add to the same + // positions in the result vector y. + //
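  // Spelled out for a single cell with index 'cell', the five steps amount
  // to the following sketch. It uses the member names introduced above and
  // assumes the source vector 'src' and destination vector 'dst' of the
  // surrounding matrix-vector product; the actual implementation below
  // works on whole chunks of cells at once, which is what turns steps 2
  // and 4 into matrix-matrix products, and defers step 5 to
  // copy_local_to_global for the reason just given:
  //
  //   Vector<number> x_cell (matrix_sizes.m), x_quad (matrix_sizes.n);
  //
  //   // step 1: gather the local values from the source vector
  //   constraints.get_dof_values (src, &indices_local_to_global(cell,0),
  //                               x_cell.begin(), x_cell.end());
  //
  //   // step 2: gradients on the reference cell (B_ref_cell is stored
  //   // with dofs_per_cell rows, hence the transpose here)
  //   B_ref_cell.Tvmult (x_quad, x_cell);
  //
  //   // step 3: apply the quadrature point transformation J^T D J
  //   for (unsigned int q=0; q<matrix_sizes.n_points; ++q)
  //     derivatives(cell,q).transform (&x_quad(q*matrix_sizes.n_comp));
  //
  //   // step 4: test with the reference cell gradients again
  //   B_ref_cell.vmult (x_cell, x_quad);
  //
  //   // step 5: add the local result into the global destination vector
  //   constraints.distribute_local_to_global (x_cell.begin(), x_cell.end(),
  //                                           &indices_local_to_global(cell,0),
  //                                           dst);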
+ // The steps 1 to 4 can be done in %parallel + // by multiple processes. + + // Now, it turns out that the most expensive + // part of the above is the multiplication + // Bref_cell + // xcell in the second step + // and the transpose operation in step + // 4. Note that the matrix + // JT D J is + // block-diagonal, and hence, its application + // is cheaper. Since the matrix + // Bref_cell is the same + // for all cells, all that changes is the + // vector xcell. Hence, + // nothing prevents us from collecting + // several cell vectors to a (rectangular) + // matrix, and then perform a matrix-matrix + // product. These matrices are both full, but + // not very large, having of the order @p + // dofs_per_cell rows and columns. This is an + // operation that can be much better + // optimized than matrix-vector products. The + // functions @p FullMatrix::mmult and + // @p FullMatrix::mTmult use the BLAS + // dgemm function (as long as BLAS has been + // detected in deal.II configuration), which + // provides optimized kernels for doing this + // product. In our case, a matrix-matrix + // product is between three and five times + // faster than doing the matrix-vector + // product on one cell after the other. The + // variables that hold the solution on the + // respective cell's support points and the + // quadrature points are thus full matrices, + // which we set to the correct size as a + // first action in this function. The number + // of rows in the two matrices @p + // scratch.solutions and @p copy.solutions is + // given by the number of cells they work on, + // and the number of columns is the number of + // degrees of freedom per cell for the first + // and the number of quadrature points times + // the number of components per point for the + // latter. template template void MatrixFree:: local_vmult (CellChunkIterator cell_range, - WorkStreamData::ScratchData &scratch, - WorkStreamData::CopyData ©, - const Vector &src) const + WorkStreamData::ScratchData &scratch, + WorkStreamData::CopyData ©, + const Vector &src) const { const unsigned int chunk_size = cell_range->second - cell_range->first; @@ -553,8 +553,8 @@ local_vmult (CellChunkIterator cell_range, copy.n_dofs = chunk_size*matrix_sizes.m; constraints.get_dof_values(src, &indices_local_to_global(copy.first_cell,0), - ©.solutions(0,0), - ©.solutions(0,0)+copy.n_dofs); + ©.solutions(0,0), + ©.solutions(0,0)+copy.n_dofs); copy.solutions.mmult (scratch.solutions, B_ref_cell); @@ -572,25 +572,25 @@ template void MatrixFree:: copy_local_to_global (const WorkStreamData::CopyData ©, - Vector &dst) const + Vector &dst) const { constraints.distribute_local_to_global (©.solutions(0,0), - ©.solutions(0,0)+copy.n_dofs, - &indices_local_to_global(copy.first_cell,0), - dst); + ©.solutions(0,0)+copy.n_dofs, + &indices_local_to_global(copy.first_cell,0), + dst); } - // Now to the @p vmult function that is - // called externally: In addition to what we - // do in a @p vmult_add function, we set the - // destination to zero first. + // Now to the @p vmult function that is + // called externally: In addition to what we + // do in a @p vmult_add function, we set the + // destination to zero first. template template void MatrixFree::vmult (Vector &dst, - const Vector &src) const + const Vector &src) const { dst = 0; vmult_add (dst, src); @@ -598,16 +598,16 @@ MatrixFree::vmult (Vector &dst, - // Transposed matrix-vector products (needed - // for the multigrid operations to be - // well-defined): do the same. 
Since we - // implement a symmetric operation, we can - // refer to the @p vmult_add operation. + // Transposed matrix-vector products (needed + // for the multigrid operations to be + // well-defined): do the same. Since we + // implement a symmetric operation, we can + // refer to the @p vmult_add operation. template template void MatrixFree::Tvmult (Vector &dst, - const Vector &src) const + const Vector &src) const { dst = 0; Tvmult_add (dst,src); @@ -619,96 +619,96 @@ template template void MatrixFree::Tvmult_add (Vector &dst, - const Vector &src) const + const Vector &src) const { vmult_add (dst,src); } - // This is the @p vmult_add function that - // multiplies the matrix with vector @p src - // and adds the result to vector @p dst. We - // include a few sanity checks to make sure - // that the size of the vectors is the same - // as the dimension of the matrix. We call a - // %parallel function that applies the - // multiplication on a chunk of cells at once - // using the WorkStream module (cf. also the - // @ref threads module). The subdivision into - // chunks will be performed in the reinit - // function and is stored in the field @p - // matrix_sizes.chunks. What the rather - // cryptic command to @p std_cxx1x::bind does - // is to transform a function that has - // several arguments (source vector, chunk - // information) into a function which has - // three arguments (in the first case) or one - // argument (in the second), which is what - // the WorkStream::run function expects. The - // placeholders _1, std_cxx1x::_2, _3 in - // the local vmult specify variable input - // values, given by the chunk information, - // scratch data and copy data that the - // WorkStream::run function will provide, - // whereas the other arguments to the @p - // local_vmult function are bound: to @p this - // and a constant reference to the @p src in - // the first case, and @p this and a - // reference to the output vector in the - // second. Similarly, the placeholder - // @p _1 argument in the - // @p copy_local_to_global function - // sets the first explicit argument of that - // function, which is of class - // @p CopyData. We need to - // abstractly specify these arguments because - // the tasks defined by different cell chunks - // will be scheduled by the WorkStream class, - // and we will reuse available scratch and - // copy data. + // This is the @p vmult_add function that + // multiplies the matrix with vector @p src + // and adds the result to vector @p dst. We + // include a few sanity checks to make sure + // that the size of the vectors is the same + // as the dimension of the matrix. We call a + // %parallel function that applies the + // multiplication on a chunk of cells at once + // using the WorkStream module (cf. also the + // @ref threads module). The subdivision into + // chunks will be performed in the reinit + // function and is stored in the field @p + // matrix_sizes.chunks. What the rather + // cryptic command to @p std_cxx1x::bind does + // is to transform a function that has + // several arguments (source vector, chunk + // information) into a function which has + // three arguments (in the first case) or one + // argument (in the second), which is what + // the WorkStream::run function expects. 
The + // placeholders _1, std_cxx1x::_2, _3 in + // the local vmult specify variable input + // values, given by the chunk information, + // scratch data and copy data that the + // WorkStream::run function will provide, + // whereas the other arguments to the @p + // local_vmult function are bound: to @p this + // and a constant reference to the @p src in + // the first case, and @p this and a + // reference to the output vector in the + // second. Similarly, the placeholder + // @p _1 argument in the + // @p copy_local_to_global function + // sets the first explicit argument of that + // function, which is of class + // @p CopyData. We need to + // abstractly specify these arguments because + // the tasks defined by different cell chunks + // will be scheduled by the WorkStream class, + // and we will reuse available scratch and + // copy data. template template void MatrixFree::vmult_add (Vector &dst, - const Vector &src) const + const Vector &src) const { Assert (src.size() == n(), ExcDimensionMismatch(src.size(), n())); Assert (dst.size() == m(), ExcDimensionMismatch(dst.size(), m())); WorkStream::run (matrix_sizes.chunks.begin(), matrix_sizes.chunks.end(), - std_cxx1x::bind(&MatrixFree:: - template local_vmult, - this, std_cxx1x::_1, std_cxx1x::_2, std_cxx1x::_3, boost::cref(src)), - std_cxx1x::bind(&MatrixFree:: - template copy_local_to_global, - this, std_cxx1x::_1, boost::ref(dst)), - WorkStreamData::ScratchData(), - WorkStreamData::CopyData(), - 2*multithread_info.n_default_threads,1); - - // One thing to be cautious about: - // The deal.II classes expect that - // the matrix still contains a - // diagonal entry for constrained - // dofs (otherwise, the matrix - // would be singular, which is not - // what we want). Since the - // distribute_local_to_global - // command of the constraint matrix - // which we used for adding the - // local elements into the global - // vector does not do anything with - // constrained elements, we have to - // circumvent that problem by - // artificially setting the - // diagonal to some non-zero value - // and adding the source values. We - // simply set it to one, which - // corresponds to copying the - // respective elements of the - // source vector into the matching - // entry of the destination vector. + std_cxx1x::bind(&MatrixFree:: + template local_vmult, + this, std_cxx1x::_1, std_cxx1x::_2, std_cxx1x::_3, boost::cref(src)), + std_cxx1x::bind(&MatrixFree:: + template copy_local_to_global, + this, std_cxx1x::_1, boost::ref(dst)), + WorkStreamData::ScratchData(), + WorkStreamData::CopyData(), + 2*multithread_info.n_default_threads,1); + + // One thing to be cautious about: + // The deal.II classes expect that + // the matrix still contains a + // diagonal entry for constrained + // dofs (otherwise, the matrix + // would be singular, which is not + // what we want). Since the + // distribute_local_to_global + // command of the constraint matrix + // which we used for adding the + // local elements into the global + // vector does not do anything with + // constrained elements, we have to + // circumvent that problem by + // artificially setting the + // diagonal to some non-zero value + // and adding the source values. We + // simply set it to one, which + // corresponds to copying the + // respective elements of the + // source vector into the matching + // entry of the destination vector. for (unsigned int i=0; i::vmult_add (Vector &dst, - // The next function initializes the - // structures of the matrix. 
It writes the - // number of total degrees of freedom in the - // problem as well as the number of cells to - // the MatrixSizes struct and copies the - // small matrix that transforms the solution - // from support points to quadrature - // points. It uses the small matrix for - // determining the number of degrees of - // freedom per cell (number of rows in @p - // B_ref_cell). The number of quadrature - // points needs to be passed through the last - // variable @p n_points_per_cell, since the - // number of columns in the small matrix is - // @p dim*n_points_per_cell for the Laplace - // problem (the Laplacian is a tensor and has - // @p dim components). In this function, we - // also give the fields containing the - // derivative information and the local dof - // indices the correct sizes. They will be - // filled by calling the respective set - // function defined above. + // The next function initializes the + // structures of the matrix. It writes the + // number of total degrees of freedom in the + // problem as well as the number of cells to + // the MatrixSizes struct and copies the + // small matrix that transforms the solution + // from support points to quadrature + // points. It uses the small matrix for + // determining the number of degrees of + // freedom per cell (number of rows in @p + // B_ref_cell). The number of quadrature + // points needs to be passed through the last + // variable @p n_points_per_cell, since the + // number of columns in the small matrix is + // @p dim*n_points_per_cell for the Laplace + // problem (the Laplacian is a tensor and has + // @p dim components). In this function, we + // also give the fields containing the + // derivative information and the local dof + // indices the correct sizes. They will be + // filled by calling the respective set + // function defined above. template void MatrixFree:: reinit (const unsigned int n_dofs_in, - const unsigned int n_cells_in, - const FullMatrix &B_ref_cell_in, - const unsigned int n_points_per_cell) + const unsigned int n_cells_in, + const FullMatrix &B_ref_cell_in, + const unsigned int n_points_per_cell) { B_ref_cell = B_ref_cell_in; @@ -759,73 +759,73 @@ reinit (const unsigned int n_dofs_in, matrix_sizes.n_points = n_points_per_cell; matrix_sizes.n_comp = B_ref_cell.n()/matrix_sizes.n_points; Assert(matrix_sizes.n_comp * n_points_per_cell == B_ref_cell.n(), - ExcInternalError()); - - // One thing to make the matrix-vector - // product with this class efficient is to - // decide how many cells should be combined - // to one chunk, which will determine the - // size of the full matrix that we work - // on. If we choose too few cells, then the - // gains from using the matrix-matrix - // product will not be fully utilized - // (dgemm tends to provide more efficiency - // the larger the matrix dimensions get), - // so we choose at least 60 cells for one - // chunk (except when there are very few - // cells, like on the coarse levels of the - // multigrid scheme). If we choose too - // many, we will degrade parallelization - // (we need to have sufficiently - // independent tasks). We need to also - // think about the fact that most high - // performance BLAS implementations - // internally work with square - // sub-matrices. Choosing as many cells in - // a chunk as there are degrees of freedom - // on each cell (coded in @p - // matrix_sizes.m) respects the BLAS GEMM - // design, whenever we exceed 60. 
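    // To make the chunking below concrete with some made-up numbers: for
    // $Q_1$ elements in 3d one has matrix_sizes.m == 8, so the divisor is
    // max(60,8) = 60; with, say, 100000 cells and 8 default threads this
    // yields n_chunks = max(100000/60 + 1, 2*8) = 1667 chunks and a chunk
    // size of 100000/1667 + 1 = 60 cells, so the matrix-matrix products
    // described above then act on matrices with roughly 60 rows at a time.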
Clearly, - // the chunk size is an - // architecture-dependent value and the - // interested user can squeeze out some - // extra performance by hand-tuning this - // parameter. Once we have chosen the - // number of cells we collect in one chunk, - // we determine how many chunks we have on - // the given cell range and recalculate the - // actual chunk size in order to evenly - // distribute the chunks. + ExcInternalError()); + + // One thing to make the matrix-vector + // product with this class efficient is to + // decide how many cells should be combined + // to one chunk, which will determine the + // size of the full matrix that we work + // on. If we choose too few cells, then the + // gains from using the matrix-matrix + // product will not be fully utilized + // (dgemm tends to provide more efficiency + // the larger the matrix dimensions get), + // so we choose at least 60 cells for one + // chunk (except when there are very few + // cells, like on the coarse levels of the + // multigrid scheme). If we choose too + // many, we will degrade parallelization + // (we need to have sufficiently + // independent tasks). We need to also + // think about the fact that most high + // performance BLAS implementations + // internally work with square + // sub-matrices. Choosing as many cells in + // a chunk as there are degrees of freedom + // on each cell (coded in @p + // matrix_sizes.m) respects the BLAS GEMM + // design, whenever we exceed 60. Clearly, + // the chunk size is an + // architecture-dependent value and the + // interested user can squeeze out some + // extra performance by hand-tuning this + // parameter. Once we have chosen the + // number of cells we collect in one chunk, + // we determine how many chunks we have on + // the given cell range and recalculate the + // actual chunk size in order to evenly + // distribute the chunks. const unsigned int divisor = std::max(60U, matrix_sizes.m); const unsigned int n_chunks = std::max (matrix_sizes.n_cells/divisor + 1, - 2*multithread_info.n_default_threads); + 2*multithread_info.n_default_threads); const unsigned int chunk_size = (matrix_sizes.n_cells/n_chunks + - (matrix_sizes.n_cells%n_chunks>0)); + (matrix_sizes.n_cells%n_chunks>0)); std::pair chunk; for (unsigned int i=0; i matrix_sizes.n_cells) - chunk.second = matrix_sizes.n_cells; + chunk.second = matrix_sizes.n_cells; else - chunk.second = (i+1)*chunk_size; + chunk.second = (i+1)*chunk_size; if (chunk.second > chunk.first) - matrix_sizes.chunks.push_back(chunk); + matrix_sizes.chunks.push_back(chunk); else - break; + break; } } - // Then we need a function if we want to - // delete the content of the matrix, - // e.g. when we are finished with one grid - // level and continue to the next one. Just - // set all the field sizes to 0. + // Then we need a function if we want to + // delete the content of the matrix, + // e.g. when we are finished with one grid + // level and continue to the next one. Just + // set all the field sizes to 0. template void MatrixFree::clear () @@ -846,22 +846,22 @@ MatrixFree::clear () - // The next function returns the entries of the - // matrix. Since this class is intended not - // to store the matrix entries, it would make - // no sense to provide all those - // elements. However, diagonal entries are - // explicitly needed for the implementation - // of the Chebyshev smoother that we intend - // to use in the multigrid - // preconditioner. 
This matrix is equipped - // with a vector that stores the diagonal, - // and we compute it when this function is - // called for the first time. + // The next function returns the entries of the + // matrix. Since this class is intended not + // to store the matrix entries, it would make + // no sense to provide all those + // elements. However, diagonal entries are + // explicitly needed for the implementation + // of the Chebyshev smoother that we intend + // to use in the multigrid + // preconditioner. This matrix is equipped + // with a vector that stores the diagonal, + // and we compute it when this function is + // called for the first time. template number MatrixFree::el (const unsigned int row, - const unsigned int col) const + const unsigned int col) const { Assert (row == col, ExcNotImplemented()); if (diagonal_is_calculated == false) @@ -872,26 +872,26 @@ MatrixFree::el (const unsigned int row, - // Regarding the calculation of the diagonal, - // remember that this is as simple (or - // complicated) as assembling a right hand - // side in deal.II. Well, it is a bit easier - // to do this within this class since we have - // all the derivative information - // available. What we do is to go through all - // the cells (now in serial, since this - // function should not be called very often - // anyway), then all the degrees of - // freedom. At this place, we first copy the - // first basis functions in all the - // quadrature points to a temporary array, - // apply the derivatives from the Jacobian - // matrix, and finally multiply with the - // second basis function. This is exactly the - // value that would be written into the - // diagonal of a sparse matrix. Note that we - // need to condense hanging node constraints - // and set the constrained diagonals to one. + // Regarding the calculation of the diagonal, + // remember that this is as simple (or + // complicated) as assembling a right hand + // side in deal.II. Well, it is a bit easier + // to do this within this class since we have + // all the derivative information + // available. What we do is to go through all + // the cells (now in serial, since this + // function should not be called very often + // anyway), then all the degrees of + // freedom. At this place, we first copy the + // first basis functions in all the + // quadrature points to a temporary array, + // apply the derivatives from the Jacobian + // matrix, and finally multiply with the + // second basis function. This is exactly the + // value that would be written into the + // diagonal of a sparse matrix. Note that we + // need to condense hanging node constraints + // and set the constrained diagonals to one. template void MatrixFree::calculate_diagonal() const @@ -901,14 +901,14 @@ MatrixFree::calculate_diagonal() const for (unsigned int cell=0; cell::calculate_diagonal() const - // Eventually, we provide a function that - // calculates how much memory this class - // uses. We just need to sum up the memory - // consumption of the arrays, the - // constraints, the small matrix and of the - // local variables. Just as a remark: In 2D - // and with data type @p double, - // about 80 per cent of the memory - // consumption is due to the - // @p derivatives array, while in 3D - // this number is even 85 per cent. + // Eventually, we provide a function that + // calculates how much memory this class + // uses. We just need to sum up the memory + // consumption of the arrays, the + // constraints, the small matrix and of the + // local variables. 
Just as a remark: In 2D + // and with data type @p double, + // about 80 per cent of the memory + // consumption is due to the + // @p derivatives array, while in 3D + // this number is even 85 per cent. template std::size_t MatrixFree::memory_consumption () const { std::size_t glob_size = derivatives.memory_consumption() + - indices_local_to_global.memory_consumption() + - constraints.memory_consumption() + - B_ref_cell.memory_consumption() + - diagonal_values.memory_consumption() + - matrix_sizes.chunks.size()*2*sizeof(unsigned int) + - sizeof(*this); + indices_local_to_global.memory_consumption() + + constraints.memory_consumption() + + B_ref_cell.memory_consumption() + + diagonal_values.memory_consumption() + + matrix_sizes.chunks.size()*2*sizeof(unsigned int) + + sizeof(*this); return glob_size; } - // @sect3{Laplace operator implementation} - - // This class implements the local action of - // a Laplace operator on a quadrature - // point. This is a very basic class - // implementation, providing functions for - // initialization with a Tensor of rank 2 and - // implementing the @p transform operation - // needed by the @p MatrixFree class. There - // is one point worth noting: The - // quadrature-point related action of the - // Laplace operator is a tensor of rank - // two. It is symmetric since it is the - // product of the inverse Jacobian - // transformation between unit and real cell - // with its transpose (times quadrature - // weights and a coefficient, which are - // scalar), so we can just save the diagonal - // and upper diagonal part. We could use the - // SymmetricTensor<2,dim> class for doing - // this, however, that class is only based on - // @p double %numbers. Since we also want to - // use @p float %numbers for the multigrid - // preconditioner (in order to save memory - // and computing time), we manually implement - // this operator. Note that @p dim is a - // template argument and hence known at - // compile-time, so the compiler knows that - // this symmetric rank-2 tensor has 3 entries - // if used in 2D and 6 entries if used in 3D. + // @sect3{Laplace operator implementation} + + // This class implements the local action of + // a Laplace operator on a quadrature + // point. This is a very basic class + // implementation, providing functions for + // initialization with a Tensor of rank 2 and + // implementing the @p transform operation + // needed by the @p MatrixFree class. There + // is one point worth noting: The + // quadrature-point related action of the + // Laplace operator is a tensor of rank + // two. It is symmetric since it is the + // product of the inverse Jacobian + // transformation between unit and real cell + // with its transpose (times quadrature + // weights and a coefficient, which are + // scalar), so we can just save the diagonal + // and upper diagonal part. We could use the + // SymmetricTensor<2,dim> class for doing + // this, however, that class is only based on + // @p double %numbers. Since we also want to + // use @p float %numbers for the multigrid + // preconditioner (in order to save memory + // and computing time), we manually implement + // this operator. Note that @p dim is a + // template argument and hence known at + // compile-time, so the compiler knows that + // this symmetric rank-2 tensor has 3 entries + // if used in 2D and 6 entries if used in 3D. 
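  // To make the storage scheme concrete: in 2d the three numbers kept per
  // quadrature point are $t_{00}$, $t_{01}$ and $t_{11}$, and applying the
  // tensor to the two gradient components $(g_0,g_1)$ of that point gives
  // $(t_{00}g_0+t_{01}g_1,\ t_{01}g_0+t_{11}g_1)$; in 3d the six stored
  // numbers are $t_{00},t_{01},t_{02},t_{11},t_{12},t_{22}$ and the
  // contraction works analogously. This is, in essence, what the
  // transform() function of the following class implements.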
template class LaplaceOperator { @@ -1006,35 +1006,35 @@ LaplaceOperator::LaplaceOperator(const Tensor<2,dim> &tensor) *this = tensor; } - // Now implement the transformation, which is - // just a so-called contraction - // operation between a tensor of rank two and a - // tensor of rank one. Unfortunately, we - // need to implement this by hand, since we - // chose not to use the - // SymmetricTensor<2,dim> class (note that - // the resulting values are entries in a full - // matrix that consists of doubles or - // floats). It feels a bit unsafe to operate - // on a pointer to the data, but that is the - // only possibility if we do not want to copy - // data back and forth, which is expensive - // since this is the innermost position of - // the loop in the @p vmult - // operation of the MatrixFree class. We need - // to pay attention to the fact that we only - // saved half of the (symmetric) rank-two - // tensor. - // - // At first sight, it seems inefficient that - // we have an @p if clause at this position - // in the code at the innermost loop, but - // note once again that @p dim is known when - // this piece of code is compiled, so the - // compiler can optimize away the @p if - // statement (and actually even inline these - // few lines of code into the @p MatrixFree - // class). + // Now implement the transformation, which is + // just a so-called contraction + // operation between a tensor of rank two and a + // tensor of rank one. Unfortunately, we + // need to implement this by hand, since we + // chose not to use the + // SymmetricTensor<2,dim> class (note that + // the resulting values are entries in a full + // matrix that consists of doubles or + // floats). It feels a bit unsafe to operate + // on a pointer to the data, but that is the + // only possibility if we do not want to copy + // data back and forth, which is expensive + // since this is the innermost position of + // the loop in the @p vmult + // operation of the MatrixFree class. We need + // to pay attention to the fact that we only + // saved half of the (symmetric) rank-two + // tensor. + // + // At first sight, it seems inefficient that + // we have an @p if clause at this position + // in the code at the innermost loop, but + // note once again that @p dim is known when + // this piece of code is compiled, so the + // compiler can optimize away the @p if + // statement (and actually even inline these + // few lines of code into the @p MatrixFree + // class). template void LaplaceOperator::transform (number* result) const { @@ -1049,28 +1049,28 @@ void LaplaceOperator::transform (number* result) const const number temp1 = result[0]; const number temp2 = result[1]; result[0] = transformation[0] * temp1 + transformation[1] * temp2 + - transformation[2] * result[2]; + transformation[2] * result[2]; result[1] = transformation[1] * temp1 + transformation[3] * temp2 + - transformation[4] * result[2]; + transformation[4] * result[2]; result[2] = transformation[2] * temp1 + transformation[4] * temp2 + - transformation[5] * result[2]; + transformation[5] * result[2]; } else ExcNotImplemented(); } - // The final function in this group - // takes the content of a rank-2 - // tensor and writes it to the field - // @p transformation of - // this class. We save the upper part - // of the symmetric tensor row-wise: - // we first take the (0,0)-entry, - // then the (0,1)-entry, and so - // on. 
We only implement this for - // dimensions two and three, which - // for the moment should do just - // fine: + // The final function in this group + // takes the content of a rank-2 + // tensor and writes it to the field + // @p transformation of + // this class. We save the upper part + // of the symmetric tensor row-wise: + // we first take the (0,0)-entry, + // then the (0,1)-entry, and so + // on. We only implement this for + // dimensions two and three, which + // for the moment should do just + // fine: template LaplaceOperator& LaplaceOperator::operator=(const Tensor<2,dim> &tensor) @@ -1081,7 +1081,7 @@ LaplaceOperator::operator=(const Tensor<2,dim> &tensor) transformation[1] = tensor[0][1]; transformation[2] = tensor[1][1]; Assert (std::fabs(tensor[1][0]-tensor[0][1])<1e-15, - ExcInternalError()); + ExcInternalError()); } else if (dim == 3) { @@ -1092,11 +1092,11 @@ LaplaceOperator::operator=(const Tensor<2,dim> &tensor) transformation[4] = tensor[1][2]; transformation[5] = tensor[2][2]; Assert (std::fabs(tensor[1][0]-tensor[0][1])<1e-15, - ExcInternalError()); + ExcInternalError()); Assert (std::fabs(tensor[2][0]-tensor[0][2])<1e-15, - ExcInternalError()); + ExcInternalError()); Assert (std::fabs(tensor[2][1]-tensor[1][2])<1e-15, - ExcInternalError()); + ExcInternalError()); } else ExcNotImplemented(); @@ -1113,15 +1113,15 @@ LaplaceOperator::memory_consumption () const - // @sect3{LaplaceProblem class} + // @sect3{LaplaceProblem class} - // This class is based on the same - // class in step-16. However, we - // replaced the SparseMatrix - // class by our matrix-free - // implementation, which means that - // we can also skip the sparsity - // patterns. + // This class is based on the same + // class in step-16. However, we + // replaced the SparseMatrix + // class by our matrix-free + // implementation, which means that + // we can also skip the sparsity + // patterns. template class LaplaceProblem { @@ -1153,39 +1153,39 @@ class LaplaceProblem template LaplaceProblem::LaplaceProblem (const unsigned int degree) - : + : fe (degree), - mg_dof_handler (triangulation) + mg_dof_handler (triangulation) {} - // @sect4{LaplaceProblem::setup_system} - - // This is the function of step-16 with - // relevant changes due to the MatrixFree - // class. What we need to do is to somehow - // create a local gradient matrix that does - // not contain any cell-related data - // (gradient on the reference cell). The - // way to get to this matrix is to create - // an FEValues object with gradient - // information on a cell that corresponds - // to the reference cell, which is a cube - // with side length 1. So we create a - // pseudo triangulation, initialize the - // FEValues to the only cell of that - // triangulation, and read off the - // gradients (which we put in a - // FullMatrix). That full matrix is then - // passed to the reinit function of the - // MatrixFree class used as a system matrix - // and, further down, as multigrid matrices - // on the individual levels. We need to - // implement Dirichlet boundary conditions - // here, which is done with the - // ConstraintMatrix function as shown, - // e.g., in step-22. + // @sect4{LaplaceProblem::setup_system} + + // This is the function of step-16 with + // relevant changes due to the MatrixFree + // class. What we need to do is to somehow + // create a local gradient matrix that does + // not contain any cell-related data + // (gradient on the reference cell). 
The + // way to get to this matrix is to create + // an FEValues object with gradient + // information on a cell that corresponds + // to the reference cell, which is a cube + // with side length 1. So we create a + // pseudo triangulation, initialize the + // FEValues to the only cell of that + // triangulation, and read off the + // gradients (which we put in a + // FullMatrix). That full matrix is then + // passed to the reinit function of the + // MatrixFree class used as a system matrix + // and, further down, as multigrid matrices + // on the individual levels. We need to + // implement Dirichlet boundary conditions + // here, which is done with the + // ConstraintMatrix function as shown, + // e.g., in step-22. template void LaplaceProblem::setup_system () { @@ -1195,113 +1195,113 @@ void LaplaceProblem::setup_system () mg_dof_handler.distribute_dofs (fe); std::cout << "Number of degrees of freedom: " - << mg_dof_handler.n_dofs() - << std::endl; + << mg_dof_handler.n_dofs() + << std::endl; const unsigned int nlevels = triangulation.n_levels(); mg_matrices.resize(0, nlevels-1); QGauss quadrature_formula(fe.degree+1); FEValues fe_values_reference (fe, quadrature_formula, - update_gradients); + update_gradients); Triangulation reference_cell; GridGenerator::hyper_cube (reference_cell, 0, 1); fe_values_reference.reinit (reference_cell.begin()); FullMatrix ref_cell_gradients (fe.dofs_per_cell, - quadrature_formula.size()*dim); + quadrature_formula.size()*dim); for (unsigned int i=0; i(), - system_matrix.get_constraints()); + 0, + ZeroFunction(), + system_matrix.get_constraints()); system_matrix.get_constraints().close(); std::cout.precision(4); std::cout << "System matrix memory consumption: " - << system_matrix.memory_consumption()/double(1<<20) - << " MiB." - << std::endl; + << system_matrix.memory_consumption()/double(1<<20) + << " MiB." + << std::endl; solution.reinit (mg_dof_handler.n_dofs()); system_rhs.reinit (mg_dof_handler.n_dofs()); - // Next, initialize the matrices for the - // multigrid method on all the - // levels. Unfortunately, the function - // MGTools::make_boundary_list cannot write - // Dirichlet boundary conditions into a - // ConstraintMatrix object directly, so we - // first have to make the boundary list and - // then manually fill the boundary - // conditions using the command - // ConstraintMatrix::add_line. Once this is - // done, we close the ConstraintMatrix so - // it can be used for matrix-vector - // products. + // Next, initialize the matrices for the + // multigrid method on all the + // levels. Unfortunately, the function + // MGTools::make_boundary_list cannot write + // Dirichlet boundary conditions into a + // ConstraintMatrix object directly, so we + // first have to make the boundary list and + // then manually fill the boundary + // conditions using the command + // ConstraintMatrix::add_line. Once this is + // done, we close the ConstraintMatrix so + // it can be used for matrix-vector + // products. 
typename FunctionMap::type dirichlet_boundary; ZeroFunction homogeneous_dirichlet_bc (1); dirichlet_boundary[0] = &homogeneous_dirichlet_bc; std::vector > boundary_indices(triangulation.n_levels()); MGTools::make_boundary_list (mg_dof_handler, - dirichlet_boundary, - boundary_indices); + dirichlet_boundary, + boundary_indices); for (unsigned int level=0;level::iterator bc_it = boundary_indices[level].begin(); for ( ; bc_it != boundary_indices[level].end(); ++bc_it) - mg_matrices[level].get_constraints().add_line(*bc_it); + mg_matrices[level].get_constraints().add_line(*bc_it); mg_matrices[level].get_constraints().close(); } coarse_matrix.reinit (mg_dof_handler.n_dofs(0), - mg_dof_handler.n_dofs(0)); + mg_dof_handler.n_dofs(0)); } - // @sect4{LaplaceProblem::assemble_system} - - // The assemble function is significantly - // reduced compared to step-16. All we need - // to do is to assemble the right hand side - // and to calculate the cell-dependent part - // of the Laplace operator. The first task is - // standard. The second is also not too hard - // given the discussion in the introduction: - // We need to take the inverse of the - // Jacobian of the transformation from unit - // to real cell, multiply it with its - // transpose and multiply the resulting - // rank-2 tensor with the quadrature weights - // and the coefficient values at the - // quadrature points. To make this work, we - // add the update flag @p - // update_inverse_jacobians to the FEValues - // constructor, and query the inverse of the - // Jacobian in a loop over the quadrature - // points (note that the Jacobian is not - // related to any kind of degrees of freedom - // directly). In the end, we condense the - // constraints from Dirichlet boundary - // conditions away from the right hand side. + // @sect4{LaplaceProblem::assemble_system} + + // The assemble function is significantly + // reduced compared to step-16. All we need + // to do is to assemble the right hand side + // and to calculate the cell-dependent part + // of the Laplace operator. The first task is + // standard. The second is also not too hard + // given the discussion in the introduction: + // We need to take the inverse of the + // Jacobian of the transformation from unit + // to real cell, multiply it with its + // transpose and multiply the resulting + // rank-2 tensor with the quadrature weights + // and the coefficient values at the + // quadrature points. To make this work, we + // add the update flag @p + // update_inverse_jacobians to the FEValues + // constructor, and query the inverse of the + // Jacobian in a loop over the quadrature + // points (note that the Jacobian is not + // related to any kind of degrees of freedom + // directly). In the end, we condense the + // constraints from Dirichlet boundary + // conditions away from the right hand side. 
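  // In formula form, the merged coefficient described above is, at each
  // quadrature point $x_q$, the symmetric rank-two tensor
  // $T_q = a(x_q)\, w_q |\det J_q|\, J_q^{-1} J_q^{-T}$, of which only the
  // upper triangle is stored. The following is a minimal sketch of this
  // computation for dim=2, written with plain arrays rather than the
  // deal.II tensor classes; the function name and its arguments (inv_jac
  // for $J_q^{-1}$, jxw for the quadrature weight times the Jacobian
  // determinant as returned by FEValues::JxW, coefficient for $a(x_q)$)
  // are chosen for illustration only.

void merged_coefficient_2d (const double inv_jac[2][2],
                            const double jxw,
                            const double coefficient,
                            double       t[3])
{
  // T = coefficient * jxw * J^{-1} J^{-T}, stored as (T_00, T_01, T_11),
  // i.e. in the same row-wise order as LaplaceOperator::operator= above.
  t[0] = coefficient * jxw *
         (inv_jac[0][0]*inv_jac[0][0] + inv_jac[0][1]*inv_jac[0][1]);
  t[1] = coefficient * jxw *
         (inv_jac[0][0]*inv_jac[1][0] + inv_jac[0][1]*inv_jac[1][1]);
  t[2] = coefficient * jxw *
         (inv_jac[1][0]*inv_jac[1][0] + inv_jac[1][1]*inv_jac[1][1]);
}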
template void LaplaceProblem::assemble_system () { QGauss quadrature_formula(fe.degree+1); MappingQ mapping (fe.degree); FEValues fe_values (mapping, fe, quadrature_formula, - update_values | update_inverse_jacobians | + update_values | update_inverse_jacobians | update_quadrature_points | update_JxW_values); const unsigned int dofs_per_cell = fe.dofs_per_cell; @@ -1314,56 +1314,56 @@ void LaplaceProblem::assemble_system () unsigned int cell_no = 0; typename DoFHandler::active_cell_iterator cell = mg_dof_handler.begin_active(), - endc = mg_dof_handler.end(); + endc = mg_dof_handler.end(); for (; cell!=endc; ++cell, ++cell_no) { cell->get_dof_indices (local_dof_indices); fe_values.reinit (cell); coefficient.value_list (fe_values.get_quadrature_points(), - coefficient_values); + coefficient_values); for (unsigned int i=0; i void LaplaceProblem::assemble_multigrid () { @@ -1371,7 +1371,7 @@ void LaplaceProblem::assemble_multigrid () QGauss quadrature_formula(fe.degree+1); MappingQ mapping (fe.degree); FEValues fe_values (mapping, fe, quadrature_formula, - update_gradients | update_inverse_jacobians | + update_gradients | update_inverse_jacobians | update_quadrature_points | update_JxW_values); const unsigned int dofs_per_cell = fe.dofs_per_cell; @@ -1383,76 +1383,76 @@ void LaplaceProblem::assemble_multigrid () std::vector cell_no(triangulation.n_levels()); typename MGDoFHandler::cell_iterator cell = mg_dof_handler.begin(), - endc = mg_dof_handler.end(); + endc = mg_dof_handler.end(); for (; cell!=endc; ++cell) { const unsigned int level = cell->level(); cell->get_mg_dof_indices (local_dof_indices); fe_values.reinit (cell); coefficient.value_list (fe_values.get_quadrature_points(), - coefficient_values); + coefficient_values); mg_matrices[level].set_local_dof_indices (cell_no[level], - local_dof_indices); + local_dof_indices); for (unsigned int q=0; q void LaplaceProblem::solve () { @@ -1468,27 +1468,27 @@ void LaplaceProblem::solve () MGSmootherPrecondition > mg_smoother(vector_memory); - // Then, we initialize the smoother - // with our level matrices and the - // required, additional data for - // the Chebyshev smoother. In - // particular, we use a higher - // polynomial degree for higher - // order elements, since smoothing - // gets more difficult for - // these. Smooth out a range of - // $[\lambda_{\max}/10,\lambda_{\max}]$. In - // order to compute the maximum - // eigenvalue of the corresponding - // matrix, the Chebyshev - // initializations performs a few - // steps of a CG algorithm. Since - // all we need is a rough estimate, - // we choose some eight iterations - // (more if the finite element - // polynomial degree is larger, - // less if it is smaller than - // quadratic). + // Then, we initialize the smoother + // with our level matrices and the + // required, additional data for + // the Chebyshev smoother. In + // particular, we use a higher + // polynomial degree for higher + // order elements, since smoothing + // gets more difficult for + // these. Smooth out a range of + // $[\lambda_{\max}/10,\lambda_{\max}]$. In + // order to compute the maximum + // eigenvalue of the corresponding + // matrix, the Chebyshev + // initializations performs a few + // steps of a CG algorithm. Since + // all we need is a rough estimate, + // we choose some eight iterations + // (more if the finite element + // polynomial degree is larger, + // less if it is smaller than + // quadratic). 
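  // Stated a little more formally (this is a schematic description of
  // Chebyshev smoothing in general, not a quote of the implementation): a
  // degree-$k$ Chebyshev smoother acts on the error as $e \mapsto p_k(A)e$,
  // where $p_k(\lambda) = T_k\bigl((b+a-2\lambda)/(b-a)\bigr) /
  // T_k\bigl((b+a)/(b-a)\bigr)$ is the Chebyshev polynomial of degree $k$
  // rescaled to the interval $[a,b] = [\lambda_{\max}/10, \lambda_{\max}]$
  // and normalized to one at $\lambda = 0$. Eigenmodes with eigenvalues in
  // $[a,b]$ are damped uniformly, while the modes with $\lambda < a$ are
  // left to the coarse grid correction; this is also why a rough estimate
  // of $\lambda_{\max}$ is sufficient.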
typename SMOOTHER::AdditionalData smoother_data; smoother_data.smoothing_range = 10.; smoother_data.degree = fe.degree; @@ -1499,54 +1499,54 @@ void LaplaceProblem::solve () mg_matrix(&mg_matrices); Multigrid > mg(mg_dof_handler, - mg_matrix, - mg_coarse, - mg_transfer, - mg_smoother, - mg_smoother); + mg_matrix, + mg_coarse, + mg_transfer, + mg_smoother, + mg_smoother); PreconditionMG, MGTransferPrebuilt > > preconditioner(mg_dof_handler, mg, mg_transfer); - // Finally, write out the memory - // consumption of the Multigrid object - // (or rather, of its most significant - // components, since there is no built-in - // function for the total multigrid - // object), then create the solver object - // and solve the system. This is very - // easy, and we didn't even see any - // difference in the solve process - // compared to step-16. The magic is all - // hidden behind the implementation of - // the MatrixFree::vmult operation. + // Finally, write out the memory + // consumption of the Multigrid object + // (or rather, of its most significant + // components, since there is no built-in + // function for the total multigrid + // object), then create the solver object + // and solve the system. This is very + // easy, and we didn't even see any + // difference in the solve process + // compared to step-16. The magic is all + // hidden behind the implementation of + // the MatrixFree::vmult operation. const unsigned int multigrid_memory = (mg_matrices.memory_consumption() + mg_transfer.memory_consumption() + coarse_matrix.memory_consumption()); std::cout << "Multigrid objects memory consumption: " - << multigrid_memory/double(1<<20) - << " MiB." - << std::endl; + << multigrid_memory/double(1<<20) + << " MiB." + << std::endl; SolverControl solver_control (1000, 1e-12); SolverCG<> cg (solver_control); cg.solve (system_matrix, solution, system_rhs, - preconditioner); + preconditioner); std::cout << "Convergence in " << solver_control.last_step() - << " CG iterations." << std::endl; + << " CG iterations." << std::endl; } - // @sect4{LaplaceProblem::output_results} + // @sect4{LaplaceProblem::output_results} - // Here is the data output, which is a - // simplified version of step-5. We use the - // standard VTK output for each grid - // produced in the refinement process. + // Here is the data output, which is a + // simplified version of step-5. We use the + // standard VTK output for each grid + // produced in the refinement process. template void LaplaceProblem::output_results (const unsigned int cycle) const { @@ -1558,8 +1558,8 @@ void LaplaceProblem::output_results (const unsigned int cycle) const std::ostringstream filename; filename << "solution-" - << cycle - << ".vtk"; + << cycle + << ".vtk"; std::ofstream output (filename.str().c_str()); data_out.write_vtk (output); @@ -1567,12 +1567,12 @@ void LaplaceProblem::output_results (const unsigned int cycle) const - // @sect4{LaplaceProblem::run} + // @sect4{LaplaceProblem::run} - // The function that runs the program is - // very similar to the one in step-16. We - // make less refinement steps in 3D - // compared to 2D, but that's it. + // The function that runs the program is + // very similar to the one in step-16. We + // make less refinement steps in 3D + // compared to 2D, but that's it. 
template void LaplaceProblem::run () { @@ -1581,12 +1581,12 @@ void LaplaceProblem::run () std::cout << "Cycle " << cycle << std::endl; if (cycle == 0) - { - GridGenerator::hyper_ball(triangulation); - static const HyperBallBoundary boundary; - triangulation.set_boundary (0, boundary); - triangulation.refine_global (3-dim); - } + { + GridGenerator::hyper_ball(triangulation); + static const HyperBallBoundary boundary; + triangulation.set_boundary (0, boundary); + triangulation.refine_global (3-dim); + } triangulation.refine_global (1); setup_system (); assemble_system (); @@ -1599,9 +1599,9 @@ void LaplaceProblem::run () - // @sect3{The main function} + // @sect3{The main function} - // This is as in all other programs: + // This is as in all other programs: int main () { try @@ -1613,24 +1613,24 @@ int main () catch (std::exception &exc) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Exception on processing: " << std::endl - << exc.what() << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << exc.what() << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } catch (...) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Unknown exception!" << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } diff --git a/deal.II/examples/step-38/step-38.cc b/deal.II/examples/step-38/step-38.cc index f64c443043..72c85c8787 100644 --- a/deal.II/examples/step-38/step-38.cc +++ b/deal.II/examples/step-38/step-38.cc @@ -396,8 +396,8 @@ namespace Step38 dof_handler.distribute_dofs (fe); std::cout << "Surface mesh has " << dof_handler.n_dofs() - << " degrees of freedom." - << std::endl; + << " degrees of freedom." + << std::endl; CompressedSparsityPattern csp (dof_handler.n_dofs(), dof_handler.n_dofs()); DoFTools::make_sparsity_pattern (dof_handler, csp); @@ -410,22 +410,22 @@ namespace Step38 } - // @sect4{LaplaceBeltramiProblem::assemble_system} - - // The following is the central function of - // this program, assembling the matrix that - // corresponds to the surface Laplacian - // (Laplace-Beltrami operator). Maybe - // surprisingly, it actually looks exactly - // the same as for the regular Laplace - // operator discussed in, for example, - // step-4. The key is that the - // FEValues::shape_gradient function does the - // magic: It returns the surface gradient - // $\nabla_K \phi_i(x_q)$ of the $i$th shape - // function at the $q$th quadrature - // point. The rest then does not need any - // changes either: + // @sect4{LaplaceBeltramiProblem::assemble_system} + + // The following is the central function of + // this program, assembling the matrix that + // corresponds to the surface Laplacian + // (Laplace-Beltrami operator). Maybe + // surprisingly, it actually looks exactly + // the same as for the regular Laplace + // operator discussed in, for example, + // step-4. 
The key is that the + // FEValues::shape_gradient function does the + // magic: It returns the surface gradient + // $\nabla_K \phi_i(x_q)$ of the $i$th shape + // function at the $q$th quadrature + // point. The rest then does not need any + // changes either: template void LaplaceBeltramiProblem::assemble_system () { @@ -434,10 +434,10 @@ namespace Step38 const QGauss quadrature_formula(2*fe.degree); FEValues fe_values (mapping, fe, quadrature_formula, - update_values | - update_gradients | - update_quadrature_points | - update_JxW_values); + update_values | + update_gradients | + update_quadrature_points | + update_JxW_values); const unsigned int dofs_per_cell = fe.dofs_per_cell; const unsigned int n_q_points = quadrature_formula.size(); @@ -451,132 +451,132 @@ namespace Step38 const RightHandSide rhs; for (typename DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(), - endc = dof_handler.end(); - cell!=endc; ++cell) + cell = dof_handler.begin_active(), + endc = dof_handler.end(); + cell!=endc; ++cell) { - cell_matrix = 0; - cell_rhs = 0; - - fe_values.reinit (cell); - - rhs.value_list (fe_values.get_quadrature_points(), rhs_values); - - for (unsigned int i=0; iget_dof_indices (local_dof_indices); - for (unsigned int i=0; iget_dof_indices (local_dof_indices); + for (unsigned int i=0; i boundary_values; VectorTools::interpolate_boundary_values (mapping, - dof_handler, - 0, - Solution(), - boundary_values); + dof_handler, + 0, + Solution(), + boundary_values); MatrixTools::apply_boundary_values (boundary_values, - system_matrix, - solution, - system_rhs,false); + system_matrix, + solution, + system_rhs,false); } - // @sect4{LaplaceBeltramiProblem::solve} + // @sect4{LaplaceBeltramiProblem::solve} - // The next function is the one that solves - // the linear system. Here, too, no changes - // are necessary: + // The next function is the one that solves + // the linear system. Here, too, no changes + // are necessary: template void LaplaceBeltramiProblem::solve () { SolverControl solver_control (solution.size(), - 1e-7 * system_rhs.l2_norm()); + 1e-7 * system_rhs.l2_norm()); SolverCG<> cg (solver_control); PreconditionSSOR<> preconditioner; preconditioner.initialize(system_matrix, 1.2); cg.solve (system_matrix, solution, system_rhs, - preconditioner); + preconditioner); } - // @sect4{LaplaceBeltramiProblem::output_result} - - // This is the function that generates - // graphical output from the solution. Most - // of it is boilerplate code, but there are - // two points worth pointing out: - // - // - The DataOut::add_data_vector function - // can take two kinds of vectors: Either - // vectors that have one value per degree - // of freedom defined by the DoFHandler - // object previously attached via - // DataOut::attach_dof_handler; and vectors - // that have one value for each cell of the - // triangulation, for example to output - // estimated errors for each - // cell. Typically, the DataOut class knows - // to tell these two kinds of vectors - // apart: there are almost always more - // degrees of freedom than cells, so we can - // differentiate by the two kinds looking - // at the length of a vector. We could do - // the same here, but only because we got - // lucky: we use a half sphere. If we had - // used the whole sphere as domain and - // $Q_1$ elements, we would have the same - // number of cells as vertices and - // consequently the two kinds of vectors - // would have the same number of - // elements. 
To avoid the resulting - // confusion, we have to tell the - // DataOut::add_data_vector function which - // kind of vector we have: DoF data. This - // is what the third argument to the - // function does. - // - The DataOut::build_patches function can - // generate output that subdivides each - // cell so that visualization programs can - // resolve curved manifolds or higher - // polynomial degree shape functions - // better. We here subdivide each element - // in each coordinate direction as many - // times as the polynomial degree of the - // finite element in use. + // @sect4{LaplaceBeltramiProblem::output_result} + + // This is the function that generates + // graphical output from the solution. Most + // of it is boilerplate code, but there are + // two points worth pointing out: + // + // - The DataOut::add_data_vector function + // can take two kinds of vectors: Either + // vectors that have one value per degree + // of freedom defined by the DoFHandler + // object previously attached via + // DataOut::attach_dof_handler; and vectors + // that have one value for each cell of the + // triangulation, for example to output + // estimated errors for each + // cell. Typically, the DataOut class knows + // to tell these two kinds of vectors + // apart: there are almost always more + // degrees of freedom than cells, so we can + // differentiate by the two kinds looking + // at the length of a vector. We could do + // the same here, but only because we got + // lucky: we use a half sphere. If we had + // used the whole sphere as domain and + // $Q_1$ elements, we would have the same + // number of cells as vertices and + // consequently the two kinds of vectors + // would have the same number of + // elements. To avoid the resulting + // confusion, we have to tell the + // DataOut::add_data_vector function which + // kind of vector we have: DoF data. This + // is what the third argument to the + // function does. + // - The DataOut::build_patches function can + // generate output that subdivides each + // cell so that visualization programs can + // resolve curved manifolds or higher + // polynomial degree shape functions + // better. We here subdivide each element + // in each coordinate direction as many + // times as the polynomial degree of the + // finite element in use. template void LaplaceBeltramiProblem::output_results () const { DataOut > data_out; data_out.attach_dof_handler (dof_handler); data_out.add_data_vector (solution, - "solution", - DataOut >::type_dof_data); + "solution", + DataOut >::type_dof_data); data_out.build_patches (mapping, - mapping.get_degree()); + mapping.get_degree()); std::string filename ("solution-"); filename += ('0'+spacedim); @@ -587,40 +587,40 @@ namespace Step38 - // @sect4{LaplaceBeltramiProblem::compute_error} + // @sect4{LaplaceBeltramiProblem::compute_error} - // This is the last piece of functionality: - // we want to compute the error in the - // numerical solution. It is a verbatim copy - // of the code previously shown and discussed - // in step-7. As mentioned in the - // introduction, the Solution - // class provides the (tangential) gradient - // of the solution. To avoid evaluating the - // error only a superconvergence points, we - // choose a quadrature rule of sufficiently - // high order. + // This is the last piece of functionality: + // we want to compute the error in the + // numerical solution. It is a verbatim copy + // of the code previously shown and discussed + // in step-7. 
As mentioned in the + // introduction, the Solution + // class provides the (tangential) gradient + // of the solution. To avoid evaluating the + // error only a superconvergence points, we + // choose a quadrature rule of sufficiently + // high order. template void LaplaceBeltramiProblem::compute_error () const { Vector difference_per_cell (triangulation.n_active_cells()); VectorTools::integrate_difference (mapping, dof_handler, solution, - Solution(), - difference_per_cell, - QGauss(2*fe.degree+1), - VectorTools::H1_norm); + Solution(), + difference_per_cell, + QGauss(2*fe.degree+1), + VectorTools::H1_norm); std::cout << "H1 error = " - << difference_per_cell.l2_norm() - << std::endl; + << difference_per_cell.l2_norm() + << std::endl; } - // @sect4{LaplaceBeltramiProblem::run} + // @sect4{LaplaceBeltramiProblem::run} - // The last function provides the top-level - // logic. Its contents are self-explanatory: + // The last function provides the top-level + // logic. Its contents are self-explanatory: template void LaplaceBeltramiProblem::run () { @@ -635,11 +635,11 @@ namespace Step38 // @sect3{The main() function} - // The remainder of the program is taken up - // by the main() function. It - // follows exactly the general layout first - // introduced in step-6 and used in all - // following tutorial programs: + // The remainder of the program is taken up + // by the main() function. It + // follows exactly the general layout first + // introduced in step-6 and used in all + // following tutorial programs: int main () { try @@ -655,24 +655,24 @@ int main () catch (std::exception &exc) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Exception on processing: " << std::endl - << exc.what() << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << exc.what() << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } catch (...) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Unknown exception!" << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } diff --git a/deal.II/examples/step-39/step-39.cc b/deal.II/examples/step-39/step-39.cc index d1a1607417..e29c8ff782 100644 --- a/deal.II/examples/step-39/step-39.cc +++ b/deal.II/examples/step-39/step-39.cc @@ -9,11 +9,11 @@ /* to the file deal.II/doc/license.html for the text and */ /* further information on this license. */ - // The include files for the linear - // algebra: A regular SparseMatrix, - // which in turn will include the - // necessary files for - // SparsityPattern and Vector classes. + // The include files for the linear + // algebra: A regular SparseMatrix, + // which in turn will include the + // necessary files for + // SparsityPattern and Vector classes. #include #include #include @@ -21,32 +21,32 @@ #include #include - // Include files for setting up the - // mesh + // Include files for setting up the + // mesh #include #include - // Include files for FiniteElement - // classes and DoFHandler. 
+ // Include files for FiniteElement + // classes and DoFHandler. #include #include #include #include #include - // The include files for using the - // MeshWorker framework + // The include files for using the + // MeshWorker framework #include #include #include #include - // The include file for local - // integrators associated with the - // Laplacian + // The include file for local + // integrators associated with the + // Laplacian #include - // Support for multigrid methods + // Support for multigrid methods #include #include #include @@ -54,9 +54,9 @@ #include #include - // Finally, we take our exact - // solution from the library as well - // as quadrature and additional tools. + // Finally, we take our exact + // solution from the library as well + // as quadrature and additional tools. #include #include #include @@ -65,79 +65,79 @@ #include #include - // All classes of the deal.II library - // are in the namespace dealii. In - // order to save typing, we tell the - // compiler to search names in there - // as well. + // All classes of the deal.II library + // are in the namespace dealii. In + // order to save typing, we tell the + // compiler to search names in there + // as well. namespace Step39 { using namespace dealii; - // This is the function we use to set - // the boundary values and also the - // exact solution we compare to. + // This is the function we use to set + // the boundary values and also the + // exact solution we compare to. Functions::SlitSingularityFunction<2> exact_solution; - // @sect3{The local integrators} - - // MeshWorker separates local - // integration from the loops over - // cells and faces. Thus, we have to - // write local integration classes - // for generating matrices, the right - // hand side and the error - // estimator. - - // All these classes have the same - // three functions for integrating - // over cells, boundary faces and - // interior faces, respectively. All - // the information needed for the - // local integration is provided by - // MeshWorker::IntegrationInfo. Note - // that the signature of the functions cannot - // be changed, because it is expected - // by MeshWorker::integration_loop(). - - // The first class defining local - // integrators is responsible for - // computing cell and face - // matrices. It is used to assemble - // the global matrix as well as the - // level matrices. + // @sect3{The local integrators} + + // MeshWorker separates local + // integration from the loops over + // cells and faces. Thus, we have to + // write local integration classes + // for generating matrices, the right + // hand side and the error + // estimator. + + // All these classes have the same + // three functions for integrating + // over cells, boundary faces and + // interior faces, respectively. All + // the information needed for the + // local integration is provided by + // MeshWorker::IntegrationInfo. Note + // that the signature of the functions cannot + // be changed, because it is expected + // by MeshWorker::integration_loop(). + + // The first class defining local + // integrators is responsible for + // computing cell and face + // matrices. It is used to assemble + // the global matrix as well as the + // level matrices. 
template class MatrixIntegrator : public Subscriptor { public: static void cell(MeshWorker::DoFInfo& dinfo, - typename MeshWorker::IntegrationInfo& info); + typename MeshWorker::IntegrationInfo& info); static void boundary(MeshWorker::DoFInfo& dinfo, - typename MeshWorker::IntegrationInfo& info); + typename MeshWorker::IntegrationInfo& info); static void face(MeshWorker::DoFInfo& dinfo1, - MeshWorker::DoFInfo& dinfo2, - typename MeshWorker::IntegrationInfo& info1, - typename MeshWorker::IntegrationInfo& info2); + MeshWorker::DoFInfo& dinfo2, + typename MeshWorker::IntegrationInfo& info1, + typename MeshWorker::IntegrationInfo& info2); }; - // On each cell, we integrate the - // Dirichlet form. We use the library - // of ready made integrals in - // LocalIntegrators to avoid writing - // these loops ourselves. Similarly, - // we implement Nitsche boundary - // conditions and the interior - // penalty fluxes between cells. - // - // The boundary und flux terms need a - // penalty parameter, which should be - // adjusted to the cell size and the - // polynomial degree. A safe choice - // of this parameter for constant - // coefficients can be found in - // LocalIntegrators::Laplace::compute_penalty() - // and we use this below. + // On each cell, we integrate the + // Dirichlet form. We use the library + // of ready made integrals in + // LocalIntegrators to avoid writing + // these loops ourselves. Similarly, + // we implement Nitsche boundary + // conditions and the interior + // penalty fluxes between cells. + // + // The boundary und flux terms need a + // penalty parameter, which should be + // adjusted to the cell size and the + // polynomial degree. A safe choice + // of this parameter for constant + // coefficients can be found in + // LocalIntegrators::Laplace::compute_penalty() + // and we use this below. template void MatrixIntegrator::cell( MeshWorker::DoFInfo& dinfo, @@ -158,8 +158,8 @@ namespace Step39 LocalIntegrators::Laplace::compute_penalty(dinfo, dinfo, deg, deg)); } - // Interior faces use the interior - // penalty method + // Interior faces use the interior + // penalty method template void MatrixIntegrator::face( MeshWorker::DoFInfo& dinfo1, @@ -175,12 +175,12 @@ namespace Step39 LocalIntegrators::Laplace::compute_penalty(dinfo1, dinfo2, deg, deg)); } - // The second local integrator builds - // the right hand side. In our - // example, the right hand side - // function is zero, such that only - // the boundary condition is set here - // in weak form. + // The second local integrator builds + // the right hand side. In our + // example, the right hand side + // function is zero, such that only + // the boundary condition is set here + // in weak form. 
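  // For reference, the cell integral delegated to the
  // LocalIntegrators::Laplace functions above is nothing but the usual
  // Dirichlet (Laplace) bilinear form
  // $\int_K \nabla\varphi_i \cdot \nabla\varphi_j\,dx$. A minimal
  // hand-written sketch of that loop is given below; the function name is
  // made up for illustration, and it assumes an already reinitialized
  // FEValuesBase object together with a square local matrix of size
  // dofs_per_cell.

#include <deal.II/fe/fe_values.h>
#include <deal.II/lac/full_matrix.h>

template <int dim>
void laplace_cell_matrix_sketch (const dealii::FEValuesBase<dim> &fe,
                                 dealii::FullMatrix<double>      &M)
{
  // The standard quadrature loop over grad(phi_i) . grad(phi_j).
  for (unsigned int q=0; q<fe.n_quadrature_points; ++q)
    for (unsigned int i=0; i<fe.dofs_per_cell; ++i)
      for (unsigned int j=0; j<fe.dofs_per_cell; ++j)
        M(i,j) += (fe.shape_grad(i,q) * fe.shape_grad(j,q)) * fe.JxW(q);
}

  // The boundary (Nitsche) and interior face terms have, schematically, the
  // usual symmetric interior penalty form
  // $-\int_F \bigl(\{\partial_n u\}[v] + \{\partial_n v\}[u]\bigr)\,ds
  // + \sigma_F \int_F [u][v]\,ds$, with jumps $[\cdot]$ and averages
  // $\{\cdot\}$ across the face $F$; on the boundary, the jump is simply
  // the trace itself and the boundary values enter the right hand side.
  // The penalty scales like $\sigma_F \sim \mathrm{deg}(\mathrm{deg}+1)/h$,
  // which is the kind of value
  // LocalIntegrators::Laplace::compute_penalty() provides.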
template class RHSIntegrator : public Subscriptor { @@ -188,9 +188,9 @@ namespace Step39 static void cell(MeshWorker::DoFInfo& dinfo, typename MeshWorker::IntegrationInfo& info); static void boundary(MeshWorker::DoFInfo& dinfo, typename MeshWorker::IntegrationInfo& info); static void face(MeshWorker::DoFInfo& dinfo1, - MeshWorker::DoFInfo& dinfo2, - typename MeshWorker::IntegrationInfo& info1, - typename MeshWorker::IntegrationInfo& info2); + MeshWorker::DoFInfo& dinfo2, + typename MeshWorker::IntegrationInfo& info1, + typename MeshWorker::IntegrationInfo& info2); }; @@ -213,25 +213,25 @@ namespace Step39 for (unsigned k=0;k void RHSIntegrator::face(MeshWorker::DoFInfo&, - MeshWorker::DoFInfo&, - typename MeshWorker::IntegrationInfo&, - typename MeshWorker::IntegrationInfo&) + MeshWorker::DoFInfo&, + typename MeshWorker::IntegrationInfo&, + typename MeshWorker::IntegrationInfo&) {} - // The third local integrator is - // responsible for the contributions - // to the error estimate. This is the - // standard energy estimator due to - // Karakashian and Pascal (2003). + // The third local integrator is + // responsible for the contributions + // to the error estimate. This is the + // standard energy estimator due to + // Karakashian and Pascal (2003). template class Estimator : public Subscriptor { @@ -239,16 +239,16 @@ namespace Step39 static void cell(MeshWorker::DoFInfo& dinfo, typename MeshWorker::IntegrationInfo& info); static void boundary(MeshWorker::DoFInfo& dinfo, typename MeshWorker::IntegrationInfo& info); static void face(MeshWorker::DoFInfo& dinfo1, - MeshWorker::DoFInfo& dinfo2, - typename MeshWorker::IntegrationInfo& info1, - typename MeshWorker::IntegrationInfo& info2); + MeshWorker::DoFInfo& dinfo2, + typename MeshWorker::IntegrationInfo& info1, + typename MeshWorker::IntegrationInfo& info2); }; - // The cell contribution is the - // Laplacian of the discrete - // solution, since the right hand - // side is zero. + // The cell contribution is the + // Laplacian of the discrete + // solution, since the right hand + // side is zero. template void Estimator::cell(MeshWorker::DoFInfo& dinfo, typename MeshWorker::IntegrationInfo& info) { @@ -257,18 +257,18 @@ namespace Step39 const std::vector >& DDuh = info.hessians[0][0]; for (unsigned k=0;kdiameter() * trace(DDuh[k]); - dinfo.value(0) += t*t * fe.JxW(k); + const double t = dinfo.cell->diameter() * trace(DDuh[k]); + dinfo.value(0) += t*t * fe.JxW(k); } dinfo.value(0) = std::sqrt(dinfo.value(0)); } - // At the boundary, we use simply a - // weighted form of the boundary - // residual, namely the norm of the - // difference between the finite - // element solution and the correct - // boundary condition. + // At the boundary, we use simply a + // weighted form of the boundary + // residual, namely the norm of the + // difference between the finite + // element solution and the correct + // boundary condition. 
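  // Summed over the mesh, the three contributions of this class (cells,
  // boundary faces, interior faces) amount, schematically and with right
  // hand side $f=0$, to the estimator
  // $\eta^2 = \sum_K h_K^2 \|\Delta u_h\|_K^2
  // + \sum_{F \subset \partial\Omega} \sigma_F \|u_h - g\|_F^2
  // + \sum_{F\ \mathrm{interior}} \bigl(\sigma_F \|[u_h]\|_F^2
  // + h_F \|[\partial_n u_h]\|_F^2\bigr)$,
  // where $g$ denotes the boundary values of the exact solution and
  // $\sigma_F$ is a penalty of the same kind as in the bilinear form; the
  // precise weighting of the jump terms is the one implemented in the
  // boundary and face functions below.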
template void Estimator::boundary(MeshWorker::DoFInfo& dinfo, typename MeshWorker::IntegrationInfo& info) { @@ -284,20 +284,20 @@ namespace Step39 for (unsigned k=0;k void Estimator::face(MeshWorker::DoFInfo& dinfo1, - MeshWorker::DoFInfo& dinfo2, - typename MeshWorker::IntegrationInfo& info1, - typename MeshWorker::IntegrationInfo& info2) + MeshWorker::DoFInfo& dinfo2, + typename MeshWorker::IntegrationInfo& info1, + typename MeshWorker::IntegrationInfo& info2) { const FEValuesBase& fe = info1.fe_values(); const std::vector& uh1 = info1.values[0][0]; @@ -313,43 +313,43 @@ namespace Step39 for (unsigned k=0;k class ErrorIntegrator : public Subscriptor @@ -358,34 +358,34 @@ namespace Step39 static void cell(MeshWorker::DoFInfo& dinfo, typename MeshWorker::IntegrationInfo& info); static void boundary(MeshWorker::DoFInfo& dinfo, typename MeshWorker::IntegrationInfo& info); static void face(MeshWorker::DoFInfo& dinfo1, - MeshWorker::DoFInfo& dinfo2, - typename MeshWorker::IntegrationInfo& info1, - typename MeshWorker::IntegrationInfo& info2); + MeshWorker::DoFInfo& dinfo2, + typename MeshWorker::IntegrationInfo& info1, + typename MeshWorker::IntegrationInfo& info2); }; - // Here we have the integration on - // cells. There is currently no good - // interfce in MeshWorker that would - // allow us to access values of - // regular functions in the - // quadrature points. Thus, we have - // to create the vectors for the - // exact function's values and - // gradients inside the cell - // integrator. After that, everything - // is as before and we just add up - // the squares of the differences. - - // Additionally to computing the error - // in the energy norm, we use the - // capability of the mesh worker to - // compute two functionals at the - // same time and compute the - // L2-error in the - // same loop. Obviously, this one - // does not have any jump terms and - // only appears in the integration on - // cells. + // Here we have the integration on + // cells. There is currently no good + // interfce in MeshWorker that would + // allow us to access values of + // regular functions in the + // quadrature points. Thus, we have + // to create the vectors for the + // exact function's values and + // gradients inside the cell + // integrator. After that, everything + // is as before and we just add up + // the squares of the differences. + + // Additionally to computing the error + // in the energy norm, we use the + // capability of the mesh worker to + // compute two functionals at the + // same time and compute the + // L2-error in the + // same loop. Obviously, this one + // does not have any jump terms and + // only appears in the integration on + // cells. template void ErrorIntegrator::cell( MeshWorker::DoFInfo& dinfo, @@ -403,15 +403,15 @@ namespace Step39 for (unsigned k=0;k class InteriorPenaltyProblem { @@ -497,236 +497,236 @@ namespace Step39 void solve (); void output_results (const unsigned int cycle) const; - // The member objects related to - // the discretization are here. + // The member objects related to + // the discretization are here. Triangulation triangulation; const MappingQ1 mapping; const FiniteElement& fe; MGDoFHandler mg_dof_handler; DoFHandler& dof_handler; - // Then, we have the matrices and - // vectors related to the global - // discrete system. + // Then, we have the matrices and + // vectors related to the global + // discrete system. 
SparsityPattern sparsity; SparseMatrix matrix; Vector solution; Vector right_hand_side; BlockVector estimates; - // Finally, we have a group of - // sparsity patterns and sparse - // matrices related to the - // multilevel preconditioner. - // First, we have a level matrix - // and its sparsity pattern. + // Finally, we have a group of + // sparsity patterns and sparse + // matrices related to the + // multilevel preconditioner. + // First, we have a level matrix + // and its sparsity pattern. MGLevelObject mg_sparsity; MGLevelObject > mg_matrix; - // When we perform multigrid with - // local smoothing on locally - // refined meshes, additional - // matrices are required; see - // Kanschat (2004). Here is the - // sparsity pattern for these - // edge matrices. We only need - // one, because the pattern of - // the up matrix is the - // transpose of that of the down - // matrix. Actually, we do not - // care too much about these - // details, since the MeshWorker - // is filling these matrices. + // When we perform multigrid with + // local smoothing on locally + // refined meshes, additional + // matrices are required; see + // Kanschat (2004). Here is the + // sparsity pattern for these + // edge matrices. We only need + // one, because the pattern of + // the up matrix is the + // transpose of that of the down + // matrix. Actually, we do not + // care too much about these + // details, since the MeshWorker + // is filling these matrices. MGLevelObject mg_sparsity_dg_interface; - // The flux matrix at the - // refinement edge, coupling fine - // level degrees of freedom to - // coarse level. + // The flux matrix at the + // refinement edge, coupling fine + // level degrees of freedom to + // coarse level. MGLevelObject > mg_matrix_dg_down; - // The transpose of the flux - // matrix at the refinement edge, - // coupling coarse level degrees - // of freedom to fine level. + // The transpose of the flux + // matrix at the refinement edge, + // coupling coarse level degrees + // of freedom to fine level. MGLevelObject > mg_matrix_dg_up; }; - // The constructor simply sets up the - // coarse grid and the - // DoFHandler. The FiniteElement is - // provided as a parameter to allow - // flexibility. + // The constructor simply sets up the + // coarse grid and the + // DoFHandler. The FiniteElement is + // provided as a parameter to allow + // flexibility. template InteriorPenaltyProblem::InteriorPenaltyProblem(const FiniteElement& fe) - : - mapping(), - fe(fe), - mg_dof_handler(triangulation), - dof_handler(mg_dof_handler), - estimates(1) + : + mapping(), + fe(fe), + mg_dof_handler(triangulation), + dof_handler(mg_dof_handler), + estimates(1) { GridGenerator::hyper_cube_slit(triangulation, -1, 1); } - // In this function, we set up the - // dimension of the linear system and - // the sparsity patterns for the - // global matrix as well as the level - // matrices. + // In this function, we set up the + // dimension of the linear system and + // the sparsity patterns for the + // global matrix as well as the level + // matrices. template void InteriorPenaltyProblem::setup_system() { - // First, we use the finite element - // to distribute degrees of - // freedom over the mesh and number - // them. + // First, we use the finite element + // to distribute degrees of + // freedom over the mesh and number + // them. dof_handler.distribute_dofs(fe); unsigned int n_dofs = dof_handler.n_dofs(); - // Then, we already know the size - // of the vectors representing - // finite element functions. 
+ // Then, we already know the size + // of the vectors representing + // finite element functions. solution.reinit(n_dofs); right_hand_side.reinit(n_dofs); - // Next, we set up the sparsity - // pattern for the global - // matrix. Since we do not know the - // row sizes in advance, we first - // fill a temporary - // CompressedSparsityPattern object - // and copy it to the regular - // SparsityPattern once it is - // complete. + // Next, we set up the sparsity + // pattern for the global + // matrix. Since we do not know the + // row sizes in advance, we first + // fill a temporary + // CompressedSparsityPattern object + // and copy it to the regular + // SparsityPattern once it is + // complete. CompressedSparsityPattern c_sparsity(n_dofs); DoFTools::make_flux_sparsity_pattern(dof_handler, c_sparsity); sparsity.copy_from(c_sparsity); matrix.reinit(sparsity); const unsigned int n_levels = triangulation.n_levels(); - // The global system is set up, now - // we attend to the level - // matrices. We resize all matrix - // objects to hold one matrix per level. + // The global system is set up, now + // we attend to the level + // matrices. We resize all matrix + // objects to hold one matrix per level. mg_matrix.resize(0, n_levels-1); mg_matrix.clear(); mg_matrix_dg_up.resize(0, n_levels-1); mg_matrix_dg_up.clear(); mg_matrix_dg_down.resize(0, n_levels-1); mg_matrix_dg_down.clear(); - // It is important to update the - // sparsity patterns after - // clear() was called for - // the level matrices, since the - // matrices lock the sparsity - // pattern through the Smartpointer - // ans Subscriptor mechanism. + // It is important to update the + // sparsity patterns after + // clear() was called for + // the level matrices, since the + // matrices lock the sparsity + // pattern through the Smartpointer + // ans Subscriptor mechanism. mg_sparsity.resize(0, n_levels-1); mg_sparsity_dg_interface.resize(0, n_levels-1); - // Now all objects are prepared to - // hold one sparsity pattern or - // matrix per level. What's left is - // setting up the sparsity patterns - // on each level. + // Now all objects are prepared to + // hold one sparsity pattern or + // matrix per level. What's left is + // setting up the sparsity patterns + // on each level. for (unsigned int level=mg_sparsity.get_minlevel(); - level<=mg_sparsity.get_maxlevel();++level) + level<=mg_sparsity.get_maxlevel();++level) { - // These are roughly the same - // lines as above for the - // global matrix, now for each - // level. - CompressedSparsityPattern c_sparsity(mg_dof_handler.n_dofs(level)); - MGTools::make_flux_sparsity_pattern(mg_dof_handler, c_sparsity, level); - mg_sparsity[level].copy_from(c_sparsity); - mg_matrix[level].reinit(mg_sparsity[level]); - - // Additionally, we need to - // initialize the transfer - // matrices at the refinement - // edge between levels. They - // are stored at the index - // referring to the finer of - // the two indices, thus there - // is no such object on level - // 0. - if (level>0) - { - CompressedSparsityPattern ci_sparsity; - ci_sparsity.reinit(mg_dof_handler.n_dofs(level-1), mg_dof_handler.n_dofs(level)); - MGTools::make_flux_sparsity_pattern_edge(mg_dof_handler, ci_sparsity, level); - mg_sparsity_dg_interface[level].copy_from(ci_sparsity); - mg_matrix_dg_up[level].reinit(mg_sparsity_dg_interface[level]); - mg_matrix_dg_down[level].reinit(mg_sparsity_dg_interface[level]); - } + // These are roughly the same + // lines as above for the + // global matrix, now for each + // level. 
+ CompressedSparsityPattern c_sparsity(mg_dof_handler.n_dofs(level)); + MGTools::make_flux_sparsity_pattern(mg_dof_handler, c_sparsity, level); + mg_sparsity[level].copy_from(c_sparsity); + mg_matrix[level].reinit(mg_sparsity[level]); + + // Additionally, we need to + // initialize the transfer + // matrices at the refinement + // edge between levels. They + // are stored at the index + // referring to the finer of + // the two indices, thus there + // is no such object on level + // 0. + if (level>0) + { + CompressedSparsityPattern ci_sparsity; + ci_sparsity.reinit(mg_dof_handler.n_dofs(level-1), mg_dof_handler.n_dofs(level)); + MGTools::make_flux_sparsity_pattern_edge(mg_dof_handler, ci_sparsity, level); + mg_sparsity_dg_interface[level].copy_from(ci_sparsity); + mg_matrix_dg_up[level].reinit(mg_sparsity_dg_interface[level]); + mg_matrix_dg_down[level].reinit(mg_sparsity_dg_interface[level]); + } } } - // In this function, we assemble the - // global system matrix, where by - // global we indicate that this is - // the matrix of the discrete system - // we solve and it is covering the - // whole mesh. + // In this function, we assemble the + // global system matrix, where by + // global we indicate that this is + // the matrix of the discrete system + // we solve and it is covering the + // whole mesh. template void InteriorPenaltyProblem::assemble_matrix() { - // First, we need t set up the - // object providing the values we - // integrate. This object contains - // all FEValues and FEFaceValues - // objects needed and also - // maintains them automatically - // such that they always point to - // the current cell. To this end, - // we need to tell it first, where - // and what to compute. Since we - // are not doing anything fancy, we - // can rely on their standard - // choice for quadrature rules. - // - // Since their default update flags - // are minimal, we add what we need - // additionally, namely the values - // and gradients of shape functions - // on all objects (cells, boundary - // and interior faces). Afterwards, - // we are ready to initialize the - // container, which will create all - // necessary FEValuesBase objects - // for integration. + // First, we need t set up the + // object providing the values we + // integrate. This object contains + // all FEValues and FEFaceValues + // objects needed and also + // maintains them automatically + // such that they always point to + // the current cell. To this end, + // we need to tell it first, where + // and what to compute. Since we + // are not doing anything fancy, we + // can rely on their standard + // choice for quadrature rules. + // + // Since their default update flags + // are minimal, we add what we need + // additionally, namely the values + // and gradients of shape functions + // on all objects (cells, boundary + // and interior faces). Afterwards, + // we are ready to initialize the + // container, which will create all + // necessary FEValuesBase objects + // for integration. MeshWorker::IntegrationInfoBox info_box; UpdateFlags update_flags = update_values | update_gradients; info_box.add_update_flags_all(update_flags); info_box.initialize(fe, mapping); - // This is the object into which we - // integrate local data. It is - // filled by the local integration - // routines in MatrixIntegrator and - // then used by the assembler to - // distribute the information into - // the global matrix. + // This is the object into which we + // integrate local data. 
It is + // filled by the local integration + // routines in MatrixIntegrator and + // then used by the assembler to + // distribute the information into + // the global matrix. MeshWorker::DoFInfo dof_info(dof_handler); - // Finally, we need an object that - // assembles the local matrix into - // the global matrix. + // Finally, we need an object that + // assembles the local matrix into + // the global matrix. MeshWorker::Assembler::MatrixSimple > assembler; assembler.initialize(matrix); - // Now, we throw everything into a - // MeshWorker::loop(), which here - // traverses all active cells of - // the mesh, computes cell and face - // matrices and assembles them into - // the global matrix. We use the - // variable dof_handler - // here in order to use the global - // numbering of degrees of freedom. + // Now, we throw everything into a + // MeshWorker::loop(), which here + // traverses all active cells of + // the mesh, computes cell and face + // matrices and assembles them into + // the global matrix. We use the + // variable dof_handler + // here in order to use the global + // numbering of degrees of freedom. MeshWorker::integration_loop( dof_handler.begin_active(), dof_handler.end(), dof_info, info_box, @@ -737,11 +737,11 @@ namespace Step39 } - // Now, we do the same for the level - // matrices. Not too surprisingly, - // this function looks like a twin of - // the previous one. Indeed, there - // are only two minor differences. + // Now, we do the same for the level + // matrices. Not too surprisingly, + // this function looks like a twin of + // the previous one. Indeed, there + // are only two minor differences. template void InteriorPenaltyProblem::assemble_mg_matrix() @@ -753,23 +753,23 @@ namespace Step39 MeshWorker::DoFInfo dof_info(mg_dof_handler); - // Obviously, the assembler needs - // to be replaced by one filling - // level matrices. Note that it - // automatically fills the edge - // matrices as well. + // Obviously, the assembler needs + // to be replaced by one filling + // level matrices. Note that it + // automatically fills the edge + // matrices as well. MeshWorker::Assembler::MGMatrixSimple > assembler; assembler.initialize(mg_matrix); assembler.initialize_fluxes(mg_matrix_dg_up, mg_matrix_dg_down); - // Here is the other difference to - // the previous function: we run - // over all cells, not only the - // active ones. And we use - // mg_dof_handler, since - // we need the degrees of freedom - // on each level, not the global - // numbering. + // Here is the other difference to + // the previous function: we run + // over all cells, not only the + // active ones. And we use + // mg_dof_handler, since + // we need the degrees of freedom + // on each level, not the global + // numbering. MeshWorker::integration_loop ( mg_dof_handler.begin(), mg_dof_handler.end(), dof_info, info_box, @@ -780,11 +780,11 @@ namespace Step39 } - // Here we have another clone of the - // assemble function. The difference - // to assembling the system matrix - // consists in that we assemble a - // vector here. + // Here we have another clone of the + // assemble function. The difference + // to assembling the system matrix + // consists in that we assemble a + // vector here. template void InteriorPenaltyProblem::assemble_right_hand_side() @@ -796,16 +796,16 @@ namespace Step39 MeshWorker::DoFInfo dof_info(dof_handler); - // Since this assembler alows us to - // fill several vectors, the - // interface is a little more - // complicated as above. 
The - // pointers to the vectors have to - // be stored in a NamedData - // object. While this seems to - // cause two extra lines of code - // here, it actually comes handy in - // more complex applications. + // Since this assembler alows us to + // fill several vectors, the + // interface is a little more + // complicated as above. The + // pointers to the vectors have to + // be stored in a NamedData + // object. While this seems to + // cause two extra lines of code + // here, it actually comes handy in + // more complex applications. MeshWorker::Assembler::ResidualSimple > assembler; NamedData* > data; Vector* rhs = &right_hand_side; @@ -824,43 +824,43 @@ namespace Step39 } - // Now that we have coded all - // functions building the discrete - // linear system, it is about time - // that we actually solve it. + // Now that we have coded all + // functions building the discrete + // linear system, it is about time + // that we actually solve it. template void InteriorPenaltyProblem::solve() { - // The solver of choice is - // conjugate gradient. + // The solver of choice is + // conjugate gradient. SolverControl control(1000, 1.e-12); SolverCG > solver(control); - // Now we are setting up the - // components of the multilevel - // preconditioner. First, we need - // transfer between grid - // levels. The object we are using - // here generates sparse matrices - // for these transfers. + // Now we are setting up the + // components of the multilevel + // preconditioner. First, we need + // transfer between grid + // levels. The object we are using + // here generates sparse matrices + // for these transfers. MGTransferPrebuilt > mg_transfer; mg_transfer.build_matrices(mg_dof_handler); - // Then, we need an exact solver - // for the matrix on the coarsest - // level. + // Then, we need an exact solver + // for the matrix on the coarsest + // level. FullMatrix coarse_matrix; coarse_matrix.copy_from (mg_matrix[0]); MGCoarseGridHouseholder > mg_coarse; mg_coarse.initialize(coarse_matrix); - // While transfer and coarse grid - // solver are pretty much generic, - // more flexibility is offered for - // the smoother. First, we choose - // Gauss-Seidel as our smoothing - // method. + // While transfer and coarse grid + // solver are pretty much generic, + // more flexibility is offered for + // the smoother. First, we choose + // Gauss-Seidel as our smoothing + // method. GrowingVectorMemory > mem; typedef PreconditionSOR > RELAXATION; MGSmootherRelaxation, RELAXATION, Vector > @@ -868,73 +868,73 @@ namespace Step39 RELAXATION::AdditionalData smoother_data(1.); mg_smoother.initialize(mg_matrix, smoother_data); - // Do two smoothing steps on each - // level. + // Do two smoothing steps on each + // level. mg_smoother.set_steps(2); - // Since the SOR method is not - // symmetric, but we use conjugate - // gradient iteration below, here - // is a trick to make the - // multilevel preconditioner a - // symmetric operator even for - // nonsymmetric smoothers. + // Since the SOR method is not + // symmetric, but we use conjugate + // gradient iteration below, here + // is a trick to make the + // multilevel preconditioner a + // symmetric operator even for + // nonsymmetric smoothers. mg_smoother.set_symmetric(true); - // The smoother class optionally - // implements the variable V-cycle, - // which we do not want here. + // The smoother class optionally + // implements the variable V-cycle, + // which we do not want here. 
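  // Stated a little more explicitly (schematically, not as a description of
  // the implementation): CG requires a symmetric preconditioner. A single
  // SOR sweep applies a nonsymmetric operator $S$, but if every relaxation
  // sweep is paired with a sweep of the transposed relaxation $S^T$, then
  // the smoother as a whole, and with it the V-cycle operator $B$, satisfies
  // $B = B^T$, which is what makes the multilevel preconditioner admissible
  // for the conjugate gradient method.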
mg_smoother.set_variable(false); - // Finally, we must wrap our - // matrices in an object having the - // required multiplication - // functions. + // Finally, we must wrap our + // matrices in an object having the + // required multiplication + // functions. MGMatrix, Vector > mgmatrix(&mg_matrix); MGMatrix, Vector > mgdown(&mg_matrix_dg_down); MGMatrix, Vector > mgup(&mg_matrix_dg_up); - // Now, we are ready to set up the - // V-cycle operator and the - // multilevel preconditioner. + // Now, we are ready to set up the + // V-cycle operator and the + // multilevel preconditioner. Multigrid > mg(mg_dof_handler, mgmatrix, - mg_coarse, mg_transfer, - mg_smoother, mg_smoother); - // Let us not forget the edge - // matrices needed because of the - // adaptive refinement. + mg_coarse, mg_transfer, + mg_smoother, mg_smoother); + // Let us not forget the edge + // matrices needed because of the + // adaptive refinement. mg.set_edge_flux_matrices(mgdown, mgup); - // After all preparations, wrap the - // Multigrid object into another - // object, which can be used as a - // regular preconditioner, + // After all preparations, wrap the + // Multigrid object into another + // object, which can be used as a + // regular preconditioner, PreconditionMG, - MGTransferPrebuilt > > + MGTransferPrebuilt > > preconditioner(mg_dof_handler, mg, mg_transfer); - // and use it to solve the system. + // and use it to solve the system. solver.solve(matrix, solution, right_hand_side, preconditioner); } - // Another clone of the assemble - // function. The big difference to - // the previous ones is here that we - // also have an input vector. + // Another clone of the assemble + // function. The big difference to + // the previous ones is here that we + // also have an input vector. template double InteriorPenaltyProblem::estimate() { - // The results of the estimator are - // stored in a vector with one - // entry per cell. Since cells in - // deal.II are not numbered, we - // have to create our own numbering - // in order to use this vector. - // - // On the other hand, somebody - // might have used the user indices - // already. So, let's be good - // citizens and save them before - // tampering with them. + // The results of the estimator are + // stored in a vector with one + // entry per cell. Since cells in + // deal.II are not numbered, we + // have to create our own numbering + // in order to use this vector. + // + // On the other hand, somebody + // might have used the user indices + // already. So, let's be good + // citizens and save them before + // tampering with them. std::vector old_user_indices; triangulation.save_user_indices(old_user_indices); @@ -944,53 +944,53 @@ InteriorPenaltyProblem::estimate() cell != triangulation.end();++cell,++i) cell->set_user_index(i); - // This starts like before, + // This starts like before, MeshWorker::IntegrationInfoBox info_box; const unsigned int n_gauss_points = dof_handler.get_fe().tensor_degree()+1; info_box.initialize_gauss_quadrature(n_gauss_points, n_gauss_points+1, n_gauss_points); - // but now we need to notify the - // info box of the finite element - // functio we want to evaluate in - // the quadrature points. First, we - // create a NamedData object with - // this vector, which is the - // solution we just computed. + // but now we need to notify the + // info box of the finite element + // functio we want to evaluate in + // the quadrature points. 
First, we + // create a NamedData object with + // this vector, which is the + // solution we just computed. NamedData* > solution_data; solution_data.add(&solution, "solution"); - // Then, we tell the Meshworker::VectorSelector - // for cells, that we need the - // second derivatives of this - // solution (to compute the - // Laplacian). Therefore, the - // boolean arguments selecting - // function values and first - // derivatives a false, only the - // last one selecting second - // derivatives is true. + // Then, we tell the Meshworker::VectorSelector + // for cells, that we need the + // second derivatives of this + // solution (to compute the + // Laplacian). Therefore, the + // boolean arguments selecting + // function values and first + // derivatives a false, only the + // last one selecting second + // derivatives is true. info_box.cell_selector.add("solution", false, false, true); - // On interior and boundary faces, - // we need the function values and - // the first derivatives, but not - // second derivatives. + // On interior and boundary faces, + // we need the function values and + // the first derivatives, but not + // second derivatives. info_box.boundary_selector.add("solution", true, true, false); info_box.face_selector.add("solution", true, true, false); - // And we continue as before, with - // the exception that the default - // update flags are already - // adjusted to the values and - // derivatives we requested above. + // And we continue as before, with + // the exception that the default + // update flags are already + // adjusted to the values and + // derivatives we requested above. info_box.add_update_flags_boundary(update_quadrature_points); info_box.initialize(fe, mapping, solution_data); MeshWorker::DoFInfo dof_info(dof_handler); - // The assembler stores one number - // per cell, but else this is the - // same as in the computation of - // the right hand side. + // The assembler stores one number + // per cell, but else this is the + // same as in the computation of + // the right hand side. MeshWorker::Assembler::CellsAndFaces assembler; NamedData* > out_data; BlockVector* est = &estimates; @@ -1005,26 +1005,26 @@ InteriorPenaltyProblem::estimate() &Estimator::face, assembler); - // Right before we return the - // result of the error estimate, we - // restore the old user indices. + // Right before we return the + // result of the error estimate, we + // restore the old user indices. triangulation.load_user_indices(old_user_indices); return estimates.block(0).l2_norm(); } - // Here we compare our finite element - // solution with the (known) exact - // solution and compute the mean - // quadratic error of the gradient - // and the function itself. This - // function is a clone of the - // estimation function right above. - - // Since we compute the error in the - // energy and the - // L2-norm, - // respectively, our block vector - // needs two blocks here. + // Here we compare our finite element + // solution with the (known) exact + // solution and compute the mean + // quadratic error of the gradient + // and the function itself. This + // function is a clone of the + // estimation function right above. + + // Since we compute the error in the + // energy and the + // L2-norm, + // respectively, our block vector + // needs two blocks here. 
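  // (A compact, hedged recap of the estimator set-up a few lines above,
  // with the template arguments that the rendering strips restored as
  // they presumably appear in step-39. The three booleans of each
  // selector call request, in this order, function values, first
  // derivatives and second derivatives.)
  NamedData<Vector<double>* > solution_data;
  solution_data.add(&solution, "solution");
  info_box.cell_selector.add    ("solution", false, false, true);  // Hessians only, for the Laplacian
  info_box.boundary_selector.add("solution", true,  true,  false); // values and gradients
  info_box.face_selector.add    ("solution", true,  true,  false);
  info_box.initialize(fe, mapping, solution_data);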
template void InteriorPenaltyProblem::error() @@ -1073,19 +1073,19 @@ InteriorPenaltyProblem::error() } - // Some graphical output + // Some graphical output template void InteriorPenaltyProblem::output_results (const unsigned int cycle) const { - // Output of the solution in - // gnuplot format. + // Output of the solution in + // gnuplot format. char * fn = new char[100]; sprintf(fn, "sol-%02d", cycle); std::string filename(fn); filename += ".gnuplot"; deallog << "Writing solution to <" << filename << ">..." - << std::endl << std::endl; + << std::endl << std::endl; std::ofstream gnuplot_output (filename.c_str()); DataOut data_out; @@ -1098,9 +1098,9 @@ void InteriorPenaltyProblem::output_results (const unsigned int cycle) cons data_out.write_gnuplot(gnuplot_output); } - // And finally the adaptive loop, - // more or less like in previous - // examples. + // And finally the adaptive loop, + // more or less like in previous + // examples. template void InteriorPenaltyProblem::run(unsigned int n_steps) @@ -1110,23 +1110,23 @@ InteriorPenaltyProblem::run(unsigned int n_steps) { deallog << "Step " << s << std::endl; if (estimates.block(0).size() == 0) - triangulation.refine_global(1); + triangulation.refine_global(1); else - { - GridRefinement::refine_and_coarsen_fixed_fraction (triangulation, - estimates.block(0), - 0.5, 0.0); - triangulation.execute_coarsening_and_refinement (); - } + { + GridRefinement::refine_and_coarsen_fixed_fraction (triangulation, + estimates.block(0), + 0.5, 0.0); + triangulation.execute_coarsening_and_refinement (); + } deallog << "Triangulation " - << triangulation.n_active_cells() << " cells, " - << triangulation.n_levels() << " levels" << std::endl; + << triangulation.n_active_cells() << " cells, " + << triangulation.n_levels() << " levels" << std::endl; setup_system(); deallog << "DoFHandler " << dof_handler.n_dofs() << " dofs, level dofs"; for (unsigned int l=0;l #include #include @@ -40,39 +40,39 @@ #include #include - // This is new, however: in the previous - // example we got some unwanted output from - // the linear solvers. If we want to suppress - // it, we have to include this file and add a - // single line somewhere to the program (see - // the main() function below for that): + // This is new, however: in the previous + // example we got some unwanted output from + // the linear solvers. If we want to suppress + // it, we have to include this file and add a + // single line somewhere to the program (see + // the main() function below for that): #include - // The final step, as in previous - // programs, is to import all the - // deal.II class and function names - // into the global namespace: + // The final step, as in previous + // programs, is to import all the + // deal.II class and function names + // into the global namespace: using namespace dealii; // @sect3{The Step4 class template} - // This is again the same - // Step4 class as in the - // previous example. The only - // difference is that we have now - // declared it as a class with a - // template parameter, and the - // template parameter is of course - // the spatial dimension in which we - // would like to solve the Laplace - // equation. Of course, several of - // the member variables depend on - // this dimension as well, in - // particular the Triangulation - // class, which has to represent - // quadrilaterals or hexahedra, - // respectively. Apart from this, - // everything is as before. + // This is again the same + // Step4 class as in the + // previous example. 
The only + // difference is that we have now + // declared it as a class with a + // template parameter, and the + // template parameter is of course + // the spatial dimension in which we + // would like to solve the Laplace + // equation. Of course, several of + // the member variables depend on + // this dimension as well, in + // particular the Triangulation + // class, which has to represent + // quadrilaterals or hexahedra, + // respectively. Apart from this, + // everything is as before. template class Step4 { @@ -101,55 +101,55 @@ class Step4 // @sect3{Right hand side and boundary values} - // In the following, we declare two more - // classes denoting the right hand side and - // the non-homogeneous Dirichlet boundary - // values. Both are functions of a - // dim-dimensional space variable, so we - // declare them as templates as well. - // - // Each of these classes is derived from a - // common, abstract base class Function, - // which declares the common interface which - // all functions have to follow. In - // particular, concrete classes have to - // overload the value function, - // which takes a point in dim-dimensional - // space as parameters and shall return the - // value at that point as a - // double variable. - // - // The value function takes a - // second argument, which we have here named - // component: This is only meant - // for vector valued functions, where you may - // want to access a certain component of the - // vector at the point - // p. However, our functions are - // scalar, so we need not worry about this - // parameter and we will not use it in the - // implementation of the functions. Inside - // the library's header files, the Function - // base class's declaration of the - // value function has a default - // value of zero for the component, so we - // will access the value - // function of the right hand side with only - // one parameter, namely the point where we - // want to evaluate the function. A value for - // the component can then simply be omitted - // for scalar functions. - // - // Note that the C++ language forces - // us to declare and define a - // constructor to the following - // classes even though they are - // empty. This is due to the fact - // that the base class has no default - // constructor (i.e. one without - // arguments), even though it has a - // constructor which has default - // values for all arguments. + // In the following, we declare two more + // classes denoting the right hand side and + // the non-homogeneous Dirichlet boundary + // values. Both are functions of a + // dim-dimensional space variable, so we + // declare them as templates as well. + // + // Each of these classes is derived from a + // common, abstract base class Function, + // which declares the common interface which + // all functions have to follow. In + // particular, concrete classes have to + // overload the value function, + // which takes a point in dim-dimensional + // space as parameters and shall return the + // value at that point as a + // double variable. + // + // The value function takes a + // second argument, which we have here named + // component: This is only meant + // for vector valued functions, where you may + // want to access a certain component of the + // vector at the point + // p. However, our functions are + // scalar, so we need not worry about this + // parameter and we will not use it in the + // implementation of the functions. 
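  // (A sketch, not taken verbatim from the patch: the rendering strips
  // the template arguments from the class declarations that follow.
  // Restored, the right hand side class of step-4 presumably looks like
  // this; note the default value for the component argument discussed
  // above.)
  template <int dim>
  class RightHandSide : public Function<dim>
  {
    public:
      RightHandSide () : Function<dim>() {}

      virtual double value (const Point<dim>   &p,
                            const unsigned int  component = 0) const;
  };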
Inside + // the library's header files, the Function + // base class's declaration of the + // value function has a default + // value of zero for the component, so we + // will access the value + // function of the right hand side with only + // one parameter, namely the point where we + // want to evaluate the function. A value for + // the component can then simply be omitted + // for scalar functions. + // + // Note that the C++ language forces + // us to declare and define a + // constructor to the following + // classes even though they are + // empty. This is due to the fact + // that the base class has no default + // constructor (i.e. one without + // arguments), even though it has a + // constructor which has default + // values for all arguments. template class RightHandSide : public Function { @@ -157,7 +157,7 @@ class RightHandSide : public Function RightHandSide () : Function() {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; }; @@ -169,48 +169,48 @@ class BoundaryValues : public Function BoundaryValues () : Function() {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; }; - // For this example, we choose as right hand - // side function to function $4(x^4+y^4)$ in - // 2D, or $4(x^4+y^4+z^4)$ in 3D. We could - // write this distinction using an - // if-statement on the space dimension, but - // here is a simple way that also allows us - // to use the same function in 1D (or in 4D, - // if you should desire to do so), by using a - // short loop. Fortunately, the compiler - // knows the size of the loop at compile time - // (remember that at the time when you define - // the template, the compiler doesn't know - // the value of dim, but when it later - // encounters a statement or declaration - // RightHandSide@<2@>, it will take the - // template, replace all occurrences of dim - // by 2 and compile the resulting function); - // in other words, at the time of compiling - // this function, the number of times the - // body will be executed is known, and the - // compiler can optimize away the overhead - // needed for the loop and the result will be - // as fast as if we had used the formulas - // above right away. - // - // The last thing to note is that a - // Point@ denotes a point in - // dim-dimensionsal space, and its individual - // components (i.e. $x$, $y$, - // ... coordinates) can be accessed using the - // () operator (in fact, the [] operator will - // work just as well) with indices starting - // at zero as usual in C and C++. + // For this example, we choose as right hand + // side function to function $4(x^4+y^4)$ in + // 2D, or $4(x^4+y^4+z^4)$ in 3D. We could + // write this distinction using an + // if-statement on the space dimension, but + // here is a simple way that also allows us + // to use the same function in 1D (or in 4D, + // if you should desire to do so), by using a + // short loop. 
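  // (In the implementation that follows, the rendering eats the loop
  // bound -- everything after a '<' -- so the "short loop" referred to
  // above is hard to see. It presumably reads:)
  template <int dim>
  double RightHandSide<dim>::value (const Point<dim> &p,
                                    const unsigned int /*component*/) const
  {
    double return_value = 0;
    // sum 4*x_i^4 over all dim coordinates of the evaluation point
    for (unsigned int i=0; i<dim; ++i)
      return_value += 4*std::pow(p(i), 4);

    return return_value;
  }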
Fortunately, the compiler + // knows the size of the loop at compile time + // (remember that at the time when you define + // the template, the compiler doesn't know + // the value of dim, but when it later + // encounters a statement or declaration + // RightHandSide@<2@>, it will take the + // template, replace all occurrences of dim + // by 2 and compile the resulting function); + // in other words, at the time of compiling + // this function, the number of times the + // body will be executed is known, and the + // compiler can optimize away the overhead + // needed for the loop and the result will be + // as fast as if we had used the formulas + // above right away. + // + // The last thing to note is that a + // Point@ denotes a point in + // dim-dimensionsal space, and its individual + // components (i.e. $x$, $y$, + // ... coordinates) can be accessed using the + // () operator (in fact, the [] operator will + // work just as well) with indices starting + // at zero as usual in C and C++. template double RightHandSide::value (const Point &p, - const unsigned int /*component*/) const + const unsigned int /*component*/) const { double return_value = 0; for (unsigned int i=0; i::value (const Point &p, } - // As boundary values, we choose x*x+y*y in - // 2D, and x*x+y*y+z*z in 3D. This happens to - // be equal to the square of the vector from - // the origin to the point at which we would - // like to evaluate the function, - // irrespective of the dimension. So that is - // what we return: + // As boundary values, we choose x*x+y*y in + // 2D, and x*x+y*y+z*z in 3D. This happens to + // be equal to the square of the vector from + // the origin to the point at which we would + // like to evaluate the function, + // irrespective of the dimension. So that is + // what we return: template double BoundaryValues::value (const Point &p, - const unsigned int /*component*/) const + const unsigned int /*component*/) const { return p.square(); } @@ -281,39 +281,39 @@ double BoundaryValues::value (const Point &p, // @sect4{Step4::Step4} - // After this introduction, here is the - // constructor of the Step4 - // class. It specifies the desired polynomial - // degree of the finite elements and - // associates the DoFHandler to the - // triangulation just as in the previous - // example program, step-3: + // After this introduction, here is the + // constructor of the Step4 + // class. It specifies the desired polynomial + // degree of the finite elements and + // associates the DoFHandler to the + // triangulation just as in the previous + // example program, step-3: template Step4::Step4 () - : + : fe (1), - dof_handler (triangulation) + dof_handler (triangulation) {} // @sect4{Step4::make_grid} - // Grid creation is something inherently - // dimension dependent. However, as long as - // the domains are sufficiently similar in 2D - // or 3D, the library can abstract for - // you. In our case, we would like to again - // solve on the square $[-1,1]\times [-1,1]$ - // in 2D, or on the cube $[-1,1] \times - // [-1,1] \times [-1,1]$ in 3D; both can be - // termed GridGenerator::hyper_cube(), so we may - // use the same function in whatever - // dimension we are. Of course, the functions - // that create a hypercube in two and three - // dimensions are very much different, but - // that is something you need not care - // about. Let the library handle the - // difficult things. + // Grid creation is something inherently + // dimension dependent. 
However, as long as + // the domains are sufficiently similar in 2D + // or 3D, the library can abstract for + // you. In our case, we would like to again + // solve on the square $[-1,1]\times [-1,1]$ + // in 2D, or on the cube $[-1,1] \times + // [-1,1] \times [-1,1]$ in 3D; both can be + // termed GridGenerator::hyper_cube(), so we may + // use the same function in whatever + // dimension we are. Of course, the functions + // that create a hypercube in two and three + // dimensions are very much different, but + // that is something you need not care + // about. Let the library handle the + // difficult things. template void Step4::make_grid () { @@ -321,32 +321,32 @@ void Step4::make_grid () triangulation.refine_global (4); std::cout << " Number of active cells: " - << triangulation.n_active_cells() - << std::endl - << " Total number of cells: " - << triangulation.n_cells() - << std::endl; + << triangulation.n_active_cells() + << std::endl + << " Total number of cells: " + << triangulation.n_cells() + << std::endl; } // @sect4{Step4::setup_system} - // This function looks - // exactly like in the previous example, - // although it performs actions that in their - // details are quite different if - // dim happens to be 3. The only - // significant difference from a user's - // perspective is the number of cells - // resulting, which is much higher in three - // than in two space dimensions! + // This function looks + // exactly like in the previous example, + // although it performs actions that in their + // details are quite different if + // dim happens to be 3. The only + // significant difference from a user's + // perspective is the number of cells + // resulting, which is much higher in three + // than in two space dimensions! template void Step4::setup_system () { dof_handler.distribute_dofs (fe); std::cout << " Number of degrees of freedom: " - << dof_handler.n_dofs() - << std::endl; + << dof_handler.n_dofs() + << std::endl; CompressedSparsityPattern c_sparsity(dof_handler.n_dofs()); DoFTools::make_sparsity_pattern (dof_handler, c_sparsity); @@ -361,76 +361,76 @@ void Step4::setup_system () // @sect4{Step4::assemble_system} - // Unlike in the previous example, we - // would now like to use a - // non-constant right hand side - // function and non-zero boundary - // values. Both are tasks that are - // readily achieved with a only a few - // new lines of code in the - // assemblage of the matrix and right - // hand side. - // - // More interesting, though, is the - // way we assemble matrix and right - // hand side vector dimension - // independently: there is simply no - // difference to the - // two-dimensional case. Since the - // important objects used in this - // function (quadrature formula, - // FEValues) depend on the dimension - // by way of a template parameter as - // well, they can take care of - // setting up properly everything for - // the dimension for which this - // function is compiled. By declaring - // all classes which might depend on - // the dimension using a template - // parameter, the library can make - // nearly all work for you and you - // don't have to care about most - // things. + // Unlike in the previous example, we + // would now like to use a + // non-constant right hand side + // function and non-zero boundary + // values. Both are tasks that are + // readily achieved with a only a few + // new lines of code in the + // assemblage of the matrix and right + // hand side. 
+ // + // More interesting, though, is the + // way we assemble matrix and right + // hand side vector dimension + // independently: there is simply no + // difference to the + // two-dimensional case. Since the + // important objects used in this + // function (quadrature formula, + // FEValues) depend on the dimension + // by way of a template parameter as + // well, they can take care of + // setting up properly everything for + // the dimension for which this + // function is compiled. By declaring + // all classes which might depend on + // the dimension using a template + // parameter, the library can make + // nearly all work for you and you + // don't have to care about most + // things. template void Step4::assemble_system () { QGauss quadrature_formula(2); - // We wanted to have a non-constant right - // hand side, so we use an object of the - // class declared above to generate the - // necessary data. Since this right hand - // side object is only used locally in the - // present function, we declare it here as - // a local variable: + // We wanted to have a non-constant right + // hand side, so we use an object of the + // class declared above to generate the + // necessary data. Since this right hand + // side object is only used locally in the + // present function, we declare it here as + // a local variable: const RightHandSide right_hand_side; - // Compared to the previous example, in - // order to evaluate the non-constant right - // hand side function we now also need the - // quadrature points on the cell we are - // presently on (previously, we only - // required values and gradients of the - // shape function from the - // FEValues object, as well as - // the quadrature weights, - // FEValues::JxW() ). We can tell the - // FEValues object to do for - // us by also giving it the - // #update_quadrature_points - // flag: + // Compared to the previous example, in + // order to evaluate the non-constant right + // hand side function we now also need the + // quadrature points on the cell we are + // presently on (previously, we only + // required values and gradients of the + // shape function from the + // FEValues object, as well as + // the quadrature weights, + // FEValues::JxW() ). We can tell the + // FEValues object to do for + // us by also giving it the + // #update_quadrature_points + // flag: FEValues fe_values (fe, quadrature_formula, - update_values | update_gradients | + update_values | update_gradients | update_quadrature_points | update_JxW_values); - // We then again define a few - // abbreviations. The values of these - // variables of course depend on the - // dimension which we are presently - // using. However, the FE and Quadrature - // classes do all the necessary work for - // you and you don't have to care about the - // dimension dependent parts: + // We then again define a few + // abbreviations. The values of these + // variables of course depend on the + // dimension which we are presently + // using. However, the FE and Quadrature + // classes do all the necessary work for + // you and you don't have to care about the + // dimension dependent parts: const unsigned int dofs_per_cell = fe.dofs_per_cell; const unsigned int n_q_points = quadrature_formula.size(); @@ -440,18 +440,18 @@ void Step4::assemble_system () std::vector local_dof_indices (dofs_per_cell); // Next, we again have to loop over all - // cells and assemble local contributions. - // Note, that a cell is a quadrilateral in - // two space dimensions, but a hexahedron - // in 3D. 
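  // (With the stripped template arguments restored, the quadrature and
  // FEValues objects set up above presumably read as follows; the update
  // flags request exactly the data needed to evaluate the non-constant
  // right hand side at the quadrature points. The local objects are
  // reconstructed here under the same assumption.)
  QGauss<dim>   quadrature_formula(2);
  FEValues<dim> fe_values (fe, quadrature_formula,
                           update_values            |
                           update_gradients         |
                           update_quadrature_points |
                           update_JxW_values);

  FullMatrix<double>        cell_matrix (dofs_per_cell, dofs_per_cell);
  Vector<double>            cell_rhs (dofs_per_cell);
  std::vector<unsigned int> local_dof_indices (dofs_per_cell);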
In fact, the - // active_cell_iterator data - // type is something different, depending - // on the dimension we are in, but to the - // outside world they look alike and you - // will probably never see a difference - // although the classes that this typedef - // stands for are in fact completely - // unrelated: + // cells and assemble local contributions. + // Note, that a cell is a quadrilateral in + // two space dimensions, but a hexahedron + // in 3D. In fact, the + // active_cell_iterator data + // type is something different, depending + // on the dimension we are in, but to the + // outside world they look alike and you + // will probably never see a difference + // although the classes that this typedef + // stands for are in fact completely + // unrelated: typename DoFHandler::active_cell_iterator cell = dof_handler.begin_active(), endc = dof_handler.end(); @@ -462,19 +462,19 @@ void Step4::assemble_system () cell_matrix = 0; cell_rhs = 0; - // Now we have to assemble the - // local matrix and right hand - // side. This is done exactly - // like in the previous - // example, but now we revert - // the order of the loops - // (which we can safely do - // since they are independent - // of each other) and merge the - // loops for the local matrix - // and the local vector as far - // as possible to make - // things a bit faster. + // Now we have to assemble the + // local matrix and right hand + // side. This is done exactly + // like in the previous + // example, but now we revert + // the order of the loops + // (which we can safely do + // since they are independent + // of each other) and merge the + // loops for the local matrix + // and the local vector as far + // as possible to make + // things a bit faster. // // Assembling the right hand side // presents the only significant @@ -485,17 +485,17 @@ void Step4::assemble_system () // hand side and evaluate it at the // quadrature points: for (unsigned int q_point=0; q_point::assemble_system () // wants to write code dimension // independently. - // With the local systems assembled, - // the transfer into the global matrix - // and right hand side is done exactly - // as before, but here we have again - // merged some loops for efficiency: + // With the local systems assembled, + // the transfer into the global matrix + // and right hand side is done exactly + // as before, but here we have again + // merged some loops for efficiency: cell->get_dof_indices (local_dof_indices); for (unsigned int i=0; iBoundaryValues - // class declared above): + // As the final step in this function, we + // wanted to have non-homogeneous boundary + // values in this example, unlike the one + // before. This is a simple task, we only + // have to replace the + // ZeroFunction used there by + // an object of the class which describes + // the boundary values we would like to use + // (i.e. the BoundaryValues + // class declared above): std::map boundary_values; VectorTools::interpolate_boundary_values (dof_handler, - 0, - BoundaryValues(), - boundary_values); + 0, + BoundaryValues(), + boundary_values); MatrixTools::apply_boundary_values (boundary_values, - system_matrix, - solution, - system_rhs); + system_matrix, + solution, + system_rhs); } // @sect4{Step4::solve} - // Solving the linear system of - // equations is something that looks - // almost identical in most - // programs. In particular, it is - // dimension independent, so this - // function is copied verbatim from the - // previous example. 
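  // (The loop bounds in the assembly code above -- everything following
  // a '<' -- do not survive the rendering. The cell loop of step-4
  // presumably reads:)
  for (unsigned int q_point=0; q_point<n_q_points; ++q_point)
    for (unsigned int i=0; i<dofs_per_cell; ++i)
      {
        for (unsigned int j=0; j<dofs_per_cell; ++j)
          cell_matrix(i,j) += (fe_values.shape_grad (i, q_point) *
                               fe_values.shape_grad (j, q_point) *
                               fe_values.JxW (q_point));

        // evaluate the right hand side at the quadrature point
        cell_rhs(i) += (fe_values.shape_value (i, q_point) *
                        right_hand_side.value (fe_values.quadrature_point (q_point)) *
                        fe_values.JxW (q_point));
      }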
+ // Solving the linear system of + // equations is something that looks + // almost identical in most + // programs. In particular, it is + // dimension independent, so this + // function is copied verbatim from the + // previous example. template void Step4::solve () { SolverControl solver_control (1000, 1e-12); SolverCG<> solver (solver_control); solver.solve (system_matrix, solution, system_rhs, - PreconditionIdentity()); + PreconditionIdentity()); - // We have made one addition, - // though: since we suppress output - // from the linear solvers, we have - // to print the number of - // iterations by hand. + // We have made one addition, + // though: since we suppress output + // from the linear solvers, we have + // to print the number of + // iterations by hand. std::cout << " " << solver_control.last_step() - << " CG iterations needed to obtain convergence." - << std::endl; + << " CG iterations needed to obtain convergence." + << std::endl; } // @sect4{Step4::output_results} - // This function also does what the - // respective one did in step-3. No changes - // here for dimension independence either. + // This function also does what the + // respective one did in step-3. No changes + // here for dimension independence either. // // The only difference to the previous // example is that we want to write output in @@ -635,8 +635,8 @@ void Step4::output_results () const data_out.build_patches (); std::ofstream output (dim == 2 ? - "solution-2d.vtk" : - "solution-3d.vtk"); + "solution-2d.vtk" : + "solution-3d.vtk"); data_out.write_vtk (output); } @@ -645,10 +645,10 @@ void Step4::output_results () const // @sect4{Step4::run} // This is the function which has the - // top-level control over - // everything. Apart from one line of - // additional output, it is the same - // as for the previous example. + // top-level control over + // everything. Apart from one line of + // additional output, it is the same + // as for the previous example. template void Step4::run () { @@ -664,47 +664,47 @@ void Step4::run () // @sect3{The main function} - // And this is the main function. It also - // looks mostly like in step-3, but if you - // look at the code below, note how we first - // create a variable of type - // Step4@<2@> (forcing - // the compiler to compile the class template - // with dim replaced by - // 2) and run a 2d simulation, - // and then we do the whole thing over in 3d. - // - // In practice, this is probably not what you - // would do very frequently (you probably - // either want to solve a 2d problem, or one - // in 3d, but not both at the same - // time). However, it demonstrates the - // mechanism by which we can simply change - // which dimension we want in a single place, - // and thereby force the compiler to - // recompile the dimension independent class - // templates for the dimension we - // request. The emphasis here lies on the - // fact that we only need to change a single - // place. This makes it rather trivial to - // debug the program in 2d where computations - // are fast, and then switch a single place - // to a 3 to run the much more computing - // intensive program in 3d for `real' - // computations. - // - // Each of the two blocks is enclosed in - // braces to make sure that the - // laplace_problem_2d variable - // goes out of scope (and releases the memory - // it holds) before we move on to allocate - // memory for the 3d case. 
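  // (A sketch, not taken verbatim from the patch, of the main() function
  // whose scoping is being described here; each block destroys its
  // problem object before the next one is built.)
  int main ()
  {
    deallog.depth_console (0);   // suppress solver output, as discussed below

    {
      Step4<2> laplace_problem_2d;
      laplace_problem_2d.run ();
    }

    {
      Step4<3> laplace_problem_3d;
      laplace_problem_3d.run ();
    }

    return 0;
  }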
Without the - // additional braces, the - // laplace_problem_2d variable - // would only be destroyed at the end of the - // function, i.e. after running the 3d - // problem, and would needlessly hog memory - // while the 3d run could actually use it. + // And this is the main function. It also + // looks mostly like in step-3, but if you + // look at the code below, note how we first + // create a variable of type + // Step4@<2@> (forcing + // the compiler to compile the class template + // with dim replaced by + // 2) and run a 2d simulation, + // and then we do the whole thing over in 3d. + // + // In practice, this is probably not what you + // would do very frequently (you probably + // either want to solve a 2d problem, or one + // in 3d, but not both at the same + // time). However, it demonstrates the + // mechanism by which we can simply change + // which dimension we want in a single place, + // and thereby force the compiler to + // recompile the dimension independent class + // templates for the dimension we + // request. The emphasis here lies on the + // fact that we only need to change a single + // place. This makes it rather trivial to + // debug the program in 2d where computations + // are fast, and then switch a single place + // to a 3 to run the much more computing + // intensive program in 3d for `real' + // computations. + // + // Each of the two blocks is enclosed in + // braces to make sure that the + // laplace_problem_2d variable + // goes out of scope (and releases the memory + // it holds) before we move on to allocate + // memory for the 3d case. Without the + // additional braces, the + // laplace_problem_2d variable + // would only be destroyed at the end of the + // function, i.e. after running the 3d + // problem, and would needlessly hog memory + // while the 3d run could actually use it. // // Finally, the first line of the function is // used to suppress some output. Remember diff --git a/deal.II/examples/step-40/step-40.cc b/deal.II/examples/step-40/step-40.cc index 5f1daba4cb..bc536fc339 100644 --- a/deal.II/examples/step-40/step-40.cc +++ b/deal.II/examples/step-40/step-40.cc @@ -12,12 +12,12 @@ // @sect3{Include files} - // - // Most of the include files we need for this - // program have already been discussed in - // previous programs. In particular, all of - // the following should already be familiar - // friends: + // + // Most of the include files we need for this + // program have already been discussed in + // previous programs. In particular, all of + // the following should already be familiar + // friends: #include #include #include @@ -43,81 +43,81 @@ #include #include - // The following, however, will be new or be - // used in new roles. Let's walk through - // them. The first of these will provide the - // tools of the Utilities::System namespace - // that we will use to query things like the - // number of processors associated with the - // current MPI universe, or the number within - // this universe the processor this job runs - // on is: + // The following, however, will be new or be + // used in new roles. Let's walk through + // them. 
The first of these will provide the + // tools of the Utilities::System namespace + // that we will use to query things like the + // number of processors associated with the + // current MPI universe, or the number within + // this universe the processor this job runs + // on is: #include - // The next one provides a class, - // ConditionOStream that allows us to write - // code that would output things to a stream - // (such as std::cout on every - // processor but throws the text away on all - // but one of them. We could achieve the same - // by simply putting an if - // statement in front of each place where we - // may generate output, but this doesn't make - // the code any prettier. In addition, the - // condition whether this processor should or - // should not produce output to the screen is - // the same every time -- and consequently it - // should be simple enough to put it into the - // statements that generate output itself. + // The next one provides a class, + // ConditionOStream that allows us to write + // code that would output things to a stream + // (such as std::cout on every + // processor but throws the text away on all + // but one of them. We could achieve the same + // by simply putting an if + // statement in front of each place where we + // may generate output, but this doesn't make + // the code any prettier. In addition, the + // condition whether this processor should or + // should not produce output to the screen is + // the same every time -- and consequently it + // should be simple enough to put it into the + // statements that generate output itself. #include - // After these preliminaries, here is where - // it becomes more interesting. As mentioned - // in the @ref distributed module, one of the - // fundamental truths of solving problems on - // large numbers of processors is that there - // is no way for any processor to store - // everything (e.g. information about all - // cells in the mesh, all degrees of freedom, - // or the values of all elements of the - // solution vector). Rather, every processor - // will own a few of each of these - // and, if necessary, may know about a - // few more, for example the ones that are - // located on cells adjacent to the ones this - // processor owns itself. We typically call - // the latter ghost cells, ghost - // nodes or ghost elements of a - // vector. The point of this discussion - // here is that we need to have a way to - // indicate which elements a particular - // processor owns or need to know of. This is - // the realm of the IndexSet class: if there - // are a total of $N$ cells, degrees of - // freedom, or vector elements, associated - // with (non-negative) integral indices - // $[0,N)$, then both the set of elements the - // current processor owns as well as the - // (possibly larger) set of indices it needs - // to know about are subsets of the set - // $[0,N)$. IndexSet is a class that stores - // subsets of this set in an efficient - // format: + // After these preliminaries, here is where + // it becomes more interesting. As mentioned + // in the @ref distributed module, one of the + // fundamental truths of solving problems on + // large numbers of processors is that there + // is no way for any processor to store + // everything (e.g. information about all + // cells in the mesh, all degrees of freedom, + // or the values of all elements of the + // solution vector). 
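  // (The class referred to above as "ConditionOStream" is spelled
  // ConditionalOStream in the library. A minimal, hedged usage sketch of
  // the pattern described here:)
  ConditionalOStream pcout (std::cout,
                            Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0);
  pcout << "this line appears only on processor 0" << std::endl;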
Rather, every processor + // will own a few of each of these + // and, if necessary, may know about a + // few more, for example the ones that are + // located on cells adjacent to the ones this + // processor owns itself. We typically call + // the latter ghost cells, ghost + // nodes or ghost elements of a + // vector. The point of this discussion + // here is that we need to have a way to + // indicate which elements a particular + // processor owns or need to know of. This is + // the realm of the IndexSet class: if there + // are a total of $N$ cells, degrees of + // freedom, or vector elements, associated + // with (non-negative) integral indices + // $[0,N)$, then both the set of elements the + // current processor owns as well as the + // (possibly larger) set of indices it needs + // to know about are subsets of the set + // $[0,N)$. IndexSet is a class that stores + // subsets of this set in an efficient + // format: #include - // The next header file is necessary for a - // single function, - // SparsityTools::distribute_sparsity_pattern. The - // role of this function will be explained - // below. + // The next header file is necessary for a + // single function, + // SparsityTools::distribute_sparsity_pattern. The + // role of this function will be explained + // below. #include - // The final two, new header files provide - // the class - // parallel::distributed::Triangulation that - // provides meshes distributed across a - // potentially very large number of - // processors, while the second provides the - // namespace - // parallel::distributed::GridRefinement that - // offers functions that can adaptively - // refine such distributed meshes: + // The final two, new header files provide + // the class + // parallel::distributed::Triangulation that + // provides meshes distributed across a + // potentially very large number of + // processors, while the second provides the + // namespace + // parallel::distributed::GridRefinement that + // offers functions that can adaptively + // refine such distributed meshes: #include #include @@ -128,49 +128,49 @@ namespace Step40 { using namespace dealii; - // @sect3{The LaplaceProblem class template} - - // Next let's declare the main class of this - // program. Its structure is almost exactly - // that of the step-6 tutorial program. The - // only significant differences are: - // - The mpi_communicator - // variable that describes the set of - // processors we want this code to run - // on. In practice, this will be - // MPI_COMM_WORLD, i.e. all processors the - // batch scheduling system has assigned to - // this particular job. - // - The presence of the pcout - // variable of type ConditionOStream. - // - The obvious use of - // parallel::distributed::Triangulation - // instead of Triangulation. - // - The presence of two IndexSet objects - // that denote which sets of degrees of - // freedom (and associated elements of - // solution and right hand side vectors) we - // own on the current processor and which - // we need (as ghost elements) for the - // algorithms in this program to work. - // - The fact that all matrices and - // vectors are now distributed. We - // use their PETScWrapper versions - // for this since deal.II's own - // classes do not provide %parallel - // functionality. 
Note that as part - // of this class, we store a - // solution vector that does not - // only contain the degrees of - // freedom the current processor - // owns, but also (as ghost - // elements) all those vector - // elements that correspond to - // "locally relevant" degrees of - // freedom (i.e. all those that - // live on locally owned cells or - // the layer of ghost cells that - // surround it). + // @sect3{The LaplaceProblem class template} + + // Next let's declare the main class of this + // program. Its structure is almost exactly + // that of the step-6 tutorial program. The + // only significant differences are: + // - The mpi_communicator + // variable that describes the set of + // processors we want this code to run + // on. In practice, this will be + // MPI_COMM_WORLD, i.e. all processors the + // batch scheduling system has assigned to + // this particular job. + // - The presence of the pcout + // variable of type ConditionOStream. + // - The obvious use of + // parallel::distributed::Triangulation + // instead of Triangulation. + // - The presence of two IndexSet objects + // that denote which sets of degrees of + // freedom (and associated elements of + // solution and right hand side vectors) we + // own on the current processor and which + // we need (as ghost elements) for the + // algorithms in this program to work. + // - The fact that all matrices and + // vectors are now distributed. We + // use their PETScWrapper versions + // for this since deal.II's own + // classes do not provide %parallel + // functionality. Note that as part + // of this class, we store a + // solution vector that does not + // only contain the degrees of + // freedom the current processor + // owns, but also (as ghost + // elements) all those vector + // elements that correspond to + // "locally relevant" degrees of + // freedom (i.e. all those that + // live on locally owned cells or + // the layer of ghost cells that + // surround it). template class LaplaceProblem { @@ -207,33 +207,33 @@ namespace Step40 }; - // @sect3{The LaplaceProblem class implementation} + // @sect3{The LaplaceProblem class implementation} - // @sect4{Constructors and destructors} + // @sect4{Constructors and destructors} - // Constructors and destructors are rather - // trivial. In addition to what we do in - // step-6, we set the set of processors we - // want to work on to all machines available - // (MPI_COMM_WORLD); ask the triangulation to - // ensure that the mesh remains smooth and - // free to refined islands, for example; and - // initialize the pcout variable - // to only allow processor zero to output - // anything: + // Constructors and destructors are rather + // trivial. 
In addition to what we do in + // step-6, we set the set of processors we + // want to work on to all machines available + // (MPI_COMM_WORLD); ask the triangulation to + // ensure that the mesh remains smooth and + // free to refined islands, for example; and + // initialize the pcout variable + // to only allow processor zero to output + // anything: template LaplaceProblem::LaplaceProblem () - : - mpi_communicator (MPI_COMM_WORLD), - triangulation (mpi_communicator, - typename Triangulation::MeshSmoothing - (Triangulation::smoothing_on_refinement | - Triangulation::smoothing_on_coarsening)), - dof_handler (triangulation), - fe (2), - pcout (std::cout, - (Utilities::MPI::this_mpi_process(mpi_communicator) - == 0)) + : + mpi_communicator (MPI_COMM_WORLD), + triangulation (mpi_communicator, + typename Triangulation::MeshSmoothing + (Triangulation::smoothing_on_refinement | + Triangulation::smoothing_on_coarsening)), + dof_handler (triangulation), + fe (2), + pcout (std::cout, + (Utilities::MPI::this_mpi_process(mpi_communicator) + == 0)) {} @@ -245,234 +245,234 @@ namespace Step40 } - // @sect4{LaplaceProblem::setup_system} - - // The following function is, arguably, the - // most interesting one in the entire program - // since it goes to the heart of what - // distinguishes %parallel step-40 from - // sequential step-6. - // - // At the top we do what we always do: tell - // the DoFHandler object to distribute - // degrees of freedom. Since the - // triangulation we use here is distributed, - // the DoFHandler object is smart enough to - // recognize that on each processor it can - // only distribute degrees of freedom on - // cells it owns; this is followed by an - // exchange step in which processors tell - // each other about degrees of freedom on - // ghost cell. The result is a DoFHandler - // that knows about the degrees of freedom on - // locally owned cells and ghost cells - // (i.e. cells adjacent to locally owned - // cells) but nothing about cells that are - // further away, consistent with the basic - // philosophy of distributed computing that - // no processor can know everything. + // @sect4{LaplaceProblem::setup_system} + + // The following function is, arguably, the + // most interesting one in the entire program + // since it goes to the heart of what + // distinguishes %parallel step-40 from + // sequential step-6. + // + // At the top we do what we always do: tell + // the DoFHandler object to distribute + // degrees of freedom. Since the + // triangulation we use here is distributed, + // the DoFHandler object is smart enough to + // recognize that on each processor it can + // only distribute degrees of freedom on + // cells it owns; this is followed by an + // exchange step in which processors tell + // each other about degrees of freedom on + // ghost cell. The result is a DoFHandler + // that knows about the degrees of freedom on + // locally owned cells and ghost cells + // (i.e. cells adjacent to locally owned + // cells) but nothing about cells that are + // further away, consistent with the basic + // philosophy of distributed computing that + // no processor can know everything. 
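  // (The function below extracts two IndexSet objects, the locally owned
  // and the locally relevant degrees of freedom. As a minimal
  // illustration of what such an object stores -- the sizes and indices
  // here are made up for the example:)
  IndexSet owned (1000);        // subsets of the global index range [0,1000)
  owned.add_range (200, 300);   // this processor owns indices 200..299
  owned.add_index (512);        // plus one isolated index
  const bool mine     = owned.is_element (250);   // true
  const bool not_mine = owned.is_element (0);     // false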
template void LaplaceProblem::setup_system () { dof_handler.distribute_dofs (fe); - // The next two lines extract some - // informatino we will need later - // on, namely two index sets that - // provide information about which - // degrees of freedom are owned by - // the current processor (this - // information will be used to - // initialize solution and right - // hand side vectors, and the - // system matrix, indicating which - // elements to store on the current - // processor and which to expect to - // be stored somewhere else); and - // an index set that indicates - // which degrees of freedom are - // locally relevant (i.e. live on - // cells that the current processor - // owns or on the layer of ghost - // cells around the locally owned - // cells; we need all of these - // degrees of freedom, for example, - // to estimate the error on the - // local cells). + // The next two lines extract some + // informatino we will need later + // on, namely two index sets that + // provide information about which + // degrees of freedom are owned by + // the current processor (this + // information will be used to + // initialize solution and right + // hand side vectors, and the + // system matrix, indicating which + // elements to store on the current + // processor and which to expect to + // be stored somewhere else); and + // an index set that indicates + // which degrees of freedom are + // locally relevant (i.e. live on + // cells that the current processor + // owns or on the layer of ghost + // cells around the locally owned + // cells; we need all of these + // degrees of freedom, for example, + // to estimate the error on the + // local cells). locally_owned_dofs = dof_handler.locally_owned_dofs (); DoFTools::extract_locally_relevant_dofs (dof_handler, - locally_relevant_dofs); - - // Next, let us initialize the - // solution and right hand side - // vectors. As mentioned above, the - // solution vector we seek does not - // only store elements we own, but - // also ghost entries; on the other - // hand, the right hand side vector - // only needs to have the entries - // the current processor owns since - // all we will ever do is write - // into it, never read from it on - // locally owned cells (of course - // the linear solvers will read - // from it, but they do not care - // about the geometric location of - // degrees of freedom). + locally_relevant_dofs); + + // Next, let us initialize the + // solution and right hand side + // vectors. As mentioned above, the + // solution vector we seek does not + // only store elements we own, but + // also ghost entries; on the other + // hand, the right hand side vector + // only needs to have the entries + // the current processor owns since + // all we will ever do is write + // into it, never read from it on + // locally owned cells (of course + // the linear solvers will read + // from it, but they do not care + // about the geometric location of + // degrees of freedom). locally_relevant_solution.reinit (mpi_communicator, - locally_owned_dofs, - locally_relevant_dofs); + locally_owned_dofs, + locally_relevant_dofs); locally_relevant_solution = 0; system_rhs.reinit (mpi_communicator, - dof_handler.n_dofs(), - dof_handler.n_locally_owned_dofs()); + dof_handler.n_dofs(), + dof_handler.n_locally_owned_dofs()); system_rhs = 0; - // The next step is to compute hanging node - // and boundary value constraints, which we - // combine into a single object storing all - // constraints. 
- // - // As with all other things in %parallel, - // the mantra must be that no processor can - // store all information about the entire - // universe. As a consequence, we need to - // tell the constraints object for which - // degrees of freedom it can store - // constraints and for which it may not - // expect any information to store. In our - // case, as explained in the @ref - // distributed module, the degrees of - // freedom we need to care about on each - // processor are the locally relevant ones, - // so we pass this to the - // ConstraintMatrix::reinit function. As a - // side note, if you forget to pass this - // argument, the ConstraintMatrix class - // will allocate an array with length equal - // to the largest DoF index it has seen so - // far. For processors with high MPI - // process number, this may be very large - // -- maybe on the order of billions. The - // program would then allocate more memory - // than for likely all other operations - // combined for this single array. + // The next step is to compute hanging node + // and boundary value constraints, which we + // combine into a single object storing all + // constraints. + // + // As with all other things in %parallel, + // the mantra must be that no processor can + // store all information about the entire + // universe. As a consequence, we need to + // tell the constraints object for which + // degrees of freedom it can store + // constraints and for which it may not + // expect any information to store. In our + // case, as explained in the @ref + // distributed module, the degrees of + // freedom we need to care about on each + // processor are the locally relevant ones, + // so we pass this to the + // ConstraintMatrix::reinit function. As a + // side note, if you forget to pass this + // argument, the ConstraintMatrix class + // will allocate an array with length equal + // to the largest DoF index it has seen so + // far. For processors with high MPI + // process number, this may be very large + // -- maybe on the order of billions. The + // program would then allocate more memory + // than for likely all other operations + // combined for this single array. constraints.clear (); constraints.reinit (locally_relevant_dofs); DoFTools::make_hanging_node_constraints (dof_handler, constraints); VectorTools::interpolate_boundary_values (dof_handler, - 0, - ZeroFunction(), - constraints); + 0, + ZeroFunction(), + constraints); constraints.close (); - // The last part of this function deals - // with initializing the matrix with - // accompanying sparsity pattern. As in - // previous tutorial programs, we use the - // CompressedSimpleSparsityPattern as an - // intermediate with which we then - // initialize the PETSc matrix. To do so we - // have to tell the sparsity pattern its - // size but as above there is no way the - // resulting object will be able to store - // even a single pointer for each global - // degree of freedom; the best we can hope - // for is that it stores information about - // each locally relevant degree of freedom, - // i.e. all those that we may ever touch in - // the process of assembling the matrix - // (the @ref distributed_paper - // "distributed computing paper" has a long - // discussion why one really needs the - // locally relevant, and not the small set - // of locally active degrees of freedom in - // this context). 
- // - // So we tell the sparsity pattern its size - // and what DoFs to store anything for and - // then ask DoFTools::make_sparsity_pattern - // to fill it (this function ignores all - // cells that are not locally owned, - // mimicking what we will do below in the - // assembly process). After this, we call a - // function that exchanges entries in these - // sparsity pattern between processors so - // that in the end each processor really - // knows about all the entries that will - // exist in that part of the finite element - // matrix that it will own. The final step - // is to initialize the matrix with the - // sparsity pattern. + // The last part of this function deals + // with initializing the matrix with + // accompanying sparsity pattern. As in + // previous tutorial programs, we use the + // CompressedSimpleSparsityPattern as an + // intermediate with which we then + // initialize the PETSc matrix. To do so we + // have to tell the sparsity pattern its + // size but as above there is no way the + // resulting object will be able to store + // even a single pointer for each global + // degree of freedom; the best we can hope + // for is that it stores information about + // each locally relevant degree of freedom, + // i.e. all those that we may ever touch in + // the process of assembling the matrix + // (the @ref distributed_paper + // "distributed computing paper" has a long + // discussion why one really needs the + // locally relevant, and not the small set + // of locally active degrees of freedom in + // this context). + // + // So we tell the sparsity pattern its size + // and what DoFs to store anything for and + // then ask DoFTools::make_sparsity_pattern + // to fill it (this function ignores all + // cells that are not locally owned, + // mimicking what we will do below in the + // assembly process). After this, we call a + // function that exchanges entries in these + // sparsity pattern between processors so + // that in the end each processor really + // knows about all the entries that will + // exist in that part of the finite element + // matrix that it will own. The final step + // is to initialize the matrix with the + // sparsity pattern. CompressedSimpleSparsityPattern csp (dof_handler.n_dofs(), - dof_handler.n_dofs(), - locally_relevant_dofs); + dof_handler.n_dofs(), + locally_relevant_dofs); DoFTools::make_sparsity_pattern (dof_handler, - csp, - constraints, false); + csp, + constraints, false); SparsityTools::distribute_sparsity_pattern (csp, - dof_handler.n_locally_owned_dofs_per_processor(), - mpi_communicator, - locally_relevant_dofs); + dof_handler.n_locally_owned_dofs_per_processor(), + mpi_communicator, + locally_relevant_dofs); system_matrix.reinit (mpi_communicator, - csp, - dof_handler.n_locally_owned_dofs_per_processor(), - dof_handler.n_locally_owned_dofs_per_processor(), - Utilities::MPI::this_mpi_process(mpi_communicator)); + csp, + dof_handler.n_locally_owned_dofs_per_processor(), + dof_handler.n_locally_owned_dofs_per_processor(), + Utilities::MPI::this_mpi_process(mpi_communicator)); } - // @sect4{LaplaceProblem::assemble_system} - - // The function that then assembles the - // linear system is comparatively boring, - // being almost exactly what we've seen - // before. The points to watch out for are: - // - Assembly must only loop over locally - // owned cells. 
There are multiple ways to - // test that; for example, we could - // compare - // a cell's subdomain_id against - // information from the triangulation - // as in cell->subdomain_id() == - // triangulation.locally_owned_subdomain(), - // or skip all cells for which - // the condition cell->is_ghost() - // || cell->is_artificial() is - // true. The simplest way, however, is - // to simply ask the cell whether it is - // owned by the local processor. - // - Copying local contributions into the - // global matrix must include distributing - // constraints and boundary values. In - // other words, we can now (as we did in - // step-6) first copy every local - // contribution into the global matrix and - // only in a later step take care of - // hanging node constraints and boundary - // values. The reason is, as discussed in - // step-17, that PETSc does not provide - // access to arbitrary elements of the - // matrix once they have been assembled - // into it -- in parts because they may - // simple no longer reside on the current - // processor but have instead been shipped - // to a different machine. - // - The way we compute the right hand side - // (given the formula stated in the - // introduction) may not be the most - // elegant but will do for a program whose - // focus lies somewhere entirely different. + // @sect4{LaplaceProblem::assemble_system} + + // The function that then assembles the + // linear system is comparatively boring, + // being almost exactly what we've seen + // before. The points to watch out for are: + // - Assembly must only loop over locally + // owned cells. There are multiple ways to + // test that; for example, we could + // compare + // a cell's subdomain_id against + // information from the triangulation + // as in cell->subdomain_id() == + // triangulation.locally_owned_subdomain(), + // or skip all cells for which + // the condition cell->is_ghost() + // || cell->is_artificial() is + // true. The simplest way, however, is + // to simply ask the cell whether it is + // owned by the local processor. + // - Copying local contributions into the + // global matrix must include distributing + // constraints and boundary values. In + // other words, we can now (as we did in + // step-6) first copy every local + // contribution into the global matrix and + // only in a later step take care of + // hanging node constraints and boundary + // values. The reason is, as discussed in + // step-17, that PETSc does not provide + // access to arbitrary elements of the + // matrix once they have been assembled + // into it -- in parts because they may + // simple no longer reside on the current + // processor but have instead been shipped + // to a different machine. + // - The way we compute the right hand side + // (given the formula stated in the + // introduction) may not be the most + // elegant but will do for a program whose + // focus lies somewhere entirely different. 
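[Editor's note] The three equivalent ownership tests mentioned in the first bullet can be written out side by side. This is an illustrative fragment, not part of the patch, using the cell iterator and triangulation object of the surrounding program:

    // Inside the usual loop over dof_handler.begin_active()..end(),
    // the following three conditions select the same set of cells:
    if (cell->subdomain_id() == triangulation.locally_owned_subdomain())
      { /* assemble on this cell */ }

    if (!(cell->is_ghost() || cell->is_artificial()))
      { /* assemble on this cell */ }

    if (cell->is_locally_owned())   // the shortcut used in the code below
      { /* assemble on this cell */ }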
template void LaplaceProblem::assemble_system () { const QGauss quadrature_formula(3); FEValues fe_values (fe, quadrature_formula, - update_values | update_gradients | - update_quadrature_points | - update_JxW_values); + update_values | update_gradients | + update_quadrature_points | + update_JxW_values); const unsigned int dofs_per_cell = fe.dofs_per_cell; const unsigned int n_q_points = quadrature_formula.size(); @@ -487,42 +487,42 @@ namespace Step40 endc = dof_handler.end(); for (; cell!=endc; ++cell) if (cell->is_locally_owned()) - { - cell_matrix = 0; - cell_rhs = 0; - - fe_values.reinit (cell); - - for (unsigned int q_point=0; q_point - 0.5+0.25*std::sin(4.0 * numbers::PI * - fe_values.quadrature_point(q_point)[0]) - ? 1 : -1); - - for (unsigned int i=0; iget_dof_indices (local_dof_indices); - constraints.distribute_local_to_global (cell_matrix, - cell_rhs, - local_dof_indices, - system_matrix, - system_rhs); - } + { + cell_matrix = 0; + cell_rhs = 0; + + fe_values.reinit (cell); + + for (unsigned int q_point=0; q_point + 0.5+0.25*std::sin(4.0 * numbers::PI * + fe_values.quadrature_point(q_point)[0]) + ? 1 : -1); + + for (unsigned int i=0; iget_dof_indices (local_dof_indices); + constraints.distribute_local_to_global (cell_matrix, + cell_rhs, + local_dof_indices, + system_matrix, + system_rhs); + } system_matrix.compress (); system_rhs.compress (); @@ -530,88 +530,88 @@ namespace Step40 - // @sect4{LaplaceProblem::solve} - - // Even though solving linear systems - // on potentially tens of thousands - // of processors is by far not a - // trivial job, the function that - // does this is -- at least at the - // outside -- relatively simple. Most - // of the parts you've seen - // before. There are really only two - // things worth mentioning: - // - Solvers and preconditioners are - // built on the deal.II wrappers of - // PETSc functionality. It is - // relatively well known that the - // primary bottleneck of massively - // %parallel linear solvers is not - // actually the communication - // between processors, but the fact - // that it is difficult to produce - // preconditioners that scale well - // to large numbers of - // processors. Over the second half - // of the first decade of the 21st - // century, it has become clear - // that algebraic multigrid (AMG) - // methods turn out to be extremely - // efficient in this context, and - // we will use one of them -- the - // BoomerAMG implementation of the - // Hypre package that can be - // interfaced to through PETSc -- - // for the current program. The - // rest of the solver itself is - // boilerplate and has been shown - // before. Since the linear system - // is symmetric and positive - // definite, we can use the CG - // method as the outer solver. - // - Ultimately, we want a vector - // that stores not only the - // elements of the solution for - // degrees of freedom the current - // processor owns, but also all - // other locally relevant degrees - // of freedom. On the other hand, - // the solver itself needs a vector - // that is uniquely split between - // processors, without any - // overlap. We therefore create a - // vector at the beginning of this - // function that has these - // properties, use it to solve the - // linear system, and only assign - // it to the vector we want at the - // very end. This last step ensures - // that all ghost elements are also - // copied as necessary. 
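[Editor's note] The two-vector strategy that the comment above describes for solve() can be summarized briefly. The hunk that follows only shows part of the function, so the final copy into the ghosted vector is reconstructed here from the description; the names follow step-40's conventions and this is a sketch, not the literal program text.

    // solve() works on a vector without ghost entries ...
    PETScWrappers::MPI::Vector
      completely_distributed_solution (mpi_communicator,
                                       dof_handler.n_dofs(),
                                       dof_handler.n_locally_owned_dofs());
    solver.solve (system_matrix, completely_distributed_solution,
                  system_rhs, preconditioner);

    // ... constraints are resolved on that same vector (ghosted vectors
    // are read-only), and only the final assignment copies the result,
    // ghost entries included, into the vector the rest of the program
    // reads from:
    constraints.distribute (completely_distributed_solution);
    locally_relevant_solution = completely_distributed_solution;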
+ // @sect4{LaplaceProblem::solve} + + // Even though solving linear systems + // on potentially tens of thousands + // of processors is by far not a + // trivial job, the function that + // does this is -- at least at the + // outside -- relatively simple. Most + // of the parts you've seen + // before. There are really only two + // things worth mentioning: + // - Solvers and preconditioners are + // built on the deal.II wrappers of + // PETSc functionality. It is + // relatively well known that the + // primary bottleneck of massively + // %parallel linear solvers is not + // actually the communication + // between processors, but the fact + // that it is difficult to produce + // preconditioners that scale well + // to large numbers of + // processors. Over the second half + // of the first decade of the 21st + // century, it has become clear + // that algebraic multigrid (AMG) + // methods turn out to be extremely + // efficient in this context, and + // we will use one of them -- the + // BoomerAMG implementation of the + // Hypre package that can be + // interfaced to through PETSc -- + // for the current program. The + // rest of the solver itself is + // boilerplate and has been shown + // before. Since the linear system + // is symmetric and positive + // definite, we can use the CG + // method as the outer solver. + // - Ultimately, we want a vector + // that stores not only the + // elements of the solution for + // degrees of freedom the current + // processor owns, but also all + // other locally relevant degrees + // of freedom. On the other hand, + // the solver itself needs a vector + // that is uniquely split between + // processors, without any + // overlap. We therefore create a + // vector at the beginning of this + // function that has these + // properties, use it to solve the + // linear system, and only assign + // it to the vector we want at the + // very end. This last step ensures + // that all ghost elements are also + // copied as necessary. template void LaplaceProblem::solve () { PETScWrappers::MPI::Vector completely_distributed_solution (mpi_communicator, - dof_handler.n_dofs(), - dof_handler.n_locally_owned_dofs()); + dof_handler.n_dofs(), + dof_handler.n_locally_owned_dofs()); SolverControl solver_control (dof_handler.n_dofs(), 1e-12); PETScWrappers::SolverCG solver(solver_control, mpi_communicator); - // Ask for a symmetric preconditioner by - // setting the first parameter in - // AdditionalData to true. + // Ask for a symmetric preconditioner by + // setting the first parameter in + // AdditionalData to true. PETScWrappers::PreconditionBoomerAMG preconditioner(system_matrix, - PETScWrappers::PreconditionBoomerAMG::AdditionalData(true)); + PETScWrappers::PreconditionBoomerAMG::AdditionalData(true)); solver.solve (system_matrix, completely_distributed_solution, system_rhs, - preconditioner); + preconditioner); pcout << " Solved in " << solver_control.last_step() - << " iterations." << std::endl; + << " iterations." << std::endl; constraints.distribute (completely_distributed_solution); @@ -621,118 +621,118 @@ namespace Step40 - // @sect4{LaplaceProblem::refine_grid} - - // The function that estimates the - // error and refines the grid is - // again almost exactly like the one - // in step-6. 
The only difference is - // that the function that flags cells - // to be refined is now in namespace - // parallel::distributed::GridRefinement - // -- a namespace that has functions - // that can communicate between all - // involved processors and determine - // global thresholds to use in - // deciding which cells to refine and - // which to coarsen. - // - // Note that we didn't have to do - // anything special about the - // KellyErrorEstimator class: we just - // give it a vector with as many - // elements as the local - // triangulation has cells (locally - // owned cells, ghost cells, and - // artificial ones), but it only - // fills those entries that - // correspond to cells that are - // locally owned. + // @sect4{LaplaceProblem::refine_grid} + + // The function that estimates the + // error and refines the grid is + // again almost exactly like the one + // in step-6. The only difference is + // that the function that flags cells + // to be refined is now in namespace + // parallel::distributed::GridRefinement + // -- a namespace that has functions + // that can communicate between all + // involved processors and determine + // global thresholds to use in + // deciding which cells to refine and + // which to coarsen. + // + // Note that we didn't have to do + // anything special about the + // KellyErrorEstimator class: we just + // give it a vector with as many + // elements as the local + // triangulation has cells (locally + // owned cells, ghost cells, and + // artificial ones), but it only + // fills those entries that + // correspond to cells that are + // locally owned. template void LaplaceProblem::refine_grid () { Vector estimated_error_per_cell (triangulation.n_active_cells()); KellyErrorEstimator::estimate (dof_handler, - QGauss(3), - typename FunctionMap::type(), - locally_relevant_solution, - estimated_error_per_cell); + QGauss(3), + typename FunctionMap::type(), + locally_relevant_solution, + estimated_error_per_cell); parallel::distributed::GridRefinement:: refine_and_coarsen_fixed_number (triangulation, - estimated_error_per_cell, - 0.3, 0.03); + estimated_error_per_cell, + 0.3, 0.03); triangulation.execute_coarsening_and_refinement (); } - // @sect4{LaplaceProblem::output_results} - - // Compared to the corresponding - // function in step-6, the one here - // is a tad more complicated. There - // are two reasons: the first one is - // that we do not just want to output - // the solution but also for each - // cell which processor owns it - // (i.e. which "subdomain" it is - // in). Secondly, as discussed at - // length in step-17 and step-18, - // generating graphical data can be a - // bottleneck in parallelizing. In - // step-18, we have moved this step - // out of the actual computation but - // shifted it into a separate program - // that later combined the output - // from various processors into a - // single file. But this doesn't - // scale: if the number of processors - // is large, this may mean that the - // step of combining data on a single - // processor later becomes the - // longest running part of the - // program, or it may produce a file - // that's so large that it can't be - // visualized any more. We here - // follow a more sensible approach, - // namely creating individual files - // for each MPI process and leaving - // it to the visualization program to - // make sense of that. - // - // To start, the top of the function - // looks like always. 
In addition to - // attaching the solution vector (the - // one that has entries for all - // locally relevant, not only the - // locally owned, elements), we - // attach a data vector that stores, - // for each cell, the subdomain the - // cell belongs to. This is slightly - // tricky, because of course not - // every processor knows about every - // cell. The vector we attach - // therefore has an entry for every - // cell that the current processor - // has in its mesh (locally owned - // onces, ghost cells, and artificial - // cells), but the DataOut class will - // ignore all entries that correspond - // to cells that are not owned by the - // current processor. As a - // consequence, it doesn't actually - // matter what values we write into - // these vector entries: we simply - // fill the entire vector with the - // number of the current MPI process - // (i.e. the subdomain_id of the - // current process); this correctly - // sets the values we care for, - // i.e. the entries that correspond - // to locally owned cells, while - // providing the wrong value for all - // other elements -- but these are - // then ignored anyway. + // @sect4{LaplaceProblem::output_results} + + // Compared to the corresponding + // function in step-6, the one here + // is a tad more complicated. There + // are two reasons: the first one is + // that we do not just want to output + // the solution but also for each + // cell which processor owns it + // (i.e. which "subdomain" it is + // in). Secondly, as discussed at + // length in step-17 and step-18, + // generating graphical data can be a + // bottleneck in parallelizing. In + // step-18, we have moved this step + // out of the actual computation but + // shifted it into a separate program + // that later combined the output + // from various processors into a + // single file. But this doesn't + // scale: if the number of processors + // is large, this may mean that the + // step of combining data on a single + // processor later becomes the + // longest running part of the + // program, or it may produce a file + // that's so large that it can't be + // visualized any more. We here + // follow a more sensible approach, + // namely creating individual files + // for each MPI process and leaving + // it to the visualization program to + // make sense of that. + // + // To start, the top of the function + // looks like always. In addition to + // attaching the solution vector (the + // one that has entries for all + // locally relevant, not only the + // locally owned, elements), we + // attach a data vector that stores, + // for each cell, the subdomain the + // cell belongs to. This is slightly + // tricky, because of course not + // every processor knows about every + // cell. The vector we attach + // therefore has an entry for every + // cell that the current processor + // has in its mesh (locally owned + // onces, ghost cells, and artificial + // cells), but the DataOut class will + // ignore all entries that correspond + // to cells that are not owned by the + // current processor. As a + // consequence, it doesn't actually + // matter what values we write into + // these vector entries: we simply + // fill the entire vector with the + // number of the current MPI process + // (i.e. the subdomain_id of the + // current process); this correctly + // sets the values we care for, + // i.e. 
the entries that correspond + // to locally owned cells, while + // providing the wrong value for all + // other elements -- but these are + // then ignored anyway. template void LaplaceProblem::output_results (const unsigned int cycle) const { @@ -747,125 +747,125 @@ namespace Step40 data_out.build_patches (); - // The next step is to write this - // data to disk. We choose file - // names of the form - // solution-XX-PPPP.vtu - // where XX indicates - // the refinement cycle, - // PPPP refers to the - // processor number (enough for up - // to 10,000 processors, though we - // hope that nobody ever tries to - // generate this much data -- you - // would likely overflow all file - // system quotas), and - // .vtu indicates the - // XML-based Visualization Toolkit - // (VTK) file format. + // The next step is to write this + // data to disk. We choose file + // names of the form + // solution-XX-PPPP.vtu + // where XX indicates + // the refinement cycle, + // PPPP refers to the + // processor number (enough for up + // to 10,000 processors, though we + // hope that nobody ever tries to + // generate this much data -- you + // would likely overflow all file + // system quotas), and + // .vtu indicates the + // XML-based Visualization Toolkit + // (VTK) file format. const std::string filename = ("solution-" + - Utilities::int_to_string (cycle, 2) + - "." + - Utilities::int_to_string - (triangulation.locally_owned_subdomain(), 4)); + Utilities::int_to_string (cycle, 2) + + "." + + Utilities::int_to_string + (triangulation.locally_owned_subdomain(), 4)); std::ofstream output ((filename + ".vtu").c_str()); data_out.write_vtu (output); - // The last step is to write a - // "master record" that lists for - // the visualization program the - // names of the various files that - // combined represents the - // graphical data for the entire - // domain. The - // DataOutBase::write_pvtu_record - // does this, and it needs a list - // of filenames that we create - // first. Note that only one - // processor needs to generate this - // file; we arbitrarily choose - // processor zero to take over this - // job. + // The last step is to write a + // "master record" that lists for + // the visualization program the + // names of the various files that + // combined represents the + // graphical data for the entire + // domain. The + // DataOutBase::write_pvtu_record + // does this, and it needs a list + // of filenames that we create + // first. Note that only one + // processor needs to generate this + // file; we arbitrarily choose + // processor zero to take over this + // job. if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0) { - std::vector filenames; - for (unsigned int i=0; - i filenames; + for (unsigned int i=0; + ipcout instead of - // std::cout for output - // to the console (see also step-17) - // and that we only generate - // graphical output if at most 32 - // processors are involved. Without - // this limit, it would be just too - // easy for people carelessly running - // this program without reading it - // first to bring down the cluster - // interconnect and fill any file - // system available :-) - // - // A functional difference to step-6 - // is the use of a square domain and - // that we start with a slightly - // finer mesh (5 global refinement - // cycles) -- there just isn't much - // of a point showing a massively - // %parallel program starting on 4 - // cells (although admittedly the - // point is only slightly stronger - // starting on 1024). 
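[Editor's note] The per-process output and the master record of output_results() are easier to follow in condensed form. The fragment below reconstructs the part the hunk elides (filling the subdomain vector) and restates the .vtu/.pvtu logic; names follow the surrounding code, and the reconstruction is a sketch based on the description rather than a verbatim copy.

    DataOut<dim> data_out;
    data_out.attach_dof_handler (dof_handler);
    data_out.add_data_vector (locally_relevant_solution, "u");

    // One entry per cell known to this process; DataOut only uses the
    // entries belonging to locally owned cells.
    Vector<float> subdomain (triangulation.n_active_cells());
    for (unsigned int i=0; i<subdomain.size(); ++i)
      subdomain(i) = triangulation.locally_owned_subdomain();
    data_out.add_data_vector (subdomain, "subdomain");

    data_out.build_patches ();

    // Each process writes solution-XX-PPPP.vtu ...
    const std::string filename
      = ("solution-"
         + Utilities::int_to_string (cycle, 2)
         + "."
         + Utilities::int_to_string (triangulation.locally_owned_subdomain(), 4));
    std::ofstream output ((filename + ".vtu").c_str());
    data_out.write_vtu (output);

    // ... and process 0 additionally writes the master record listing
    // all of these files.
    if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
      {
        std::vector<std::string> filenames;
        for (unsigned int i=0;
             i<Utilities::MPI::n_mpi_processes(mpi_communicator);
             ++i)
          filenames.push_back ("solution-"
                               + Utilities::int_to_string (cycle, 2)
                               + "."
                               + Utilities::int_to_string (i, 4)
                               + ".vtu");

        std::ofstream master_output (("solution-"
                                      + Utilities::int_to_string (cycle, 2)
                                      + ".pvtu").c_str());
        data_out.write_pvtu_record (master_output, filenames);
      }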
+ // @sect4{LaplaceProblem::run} + + // The function that controls the + // overall behavior of the program is + // again like the one in step-6. The + // minor difference are the use of + // pcout instead of + // std::cout for output + // to the console (see also step-17) + // and that we only generate + // graphical output if at most 32 + // processors are involved. Without + // this limit, it would be just too + // easy for people carelessly running + // this program without reading it + // first to bring down the cluster + // interconnect and fill any file + // system available :-) + // + // A functional difference to step-6 + // is the use of a square domain and + // that we start with a slightly + // finer mesh (5 global refinement + // cycles) -- there just isn't much + // of a point showing a massively + // %parallel program starting on 4 + // cells (although admittedly the + // point is only slightly stronger + // starting on 1024). template void LaplaceProblem::run () { const unsigned int n_cycles = 8; for (unsigned int cycle=0; cyclemain(), again has the - // same structure as in all other - // programs, in particular - // step-6. Like in the other programs - // that use PETSc, we have to - // inialize and finalize PETSc, which - // also initializes and finalizes the - // MPI subsystem. - // - // Note how we enclose the use the - // use of the LaplaceProblem class in - // a pair of braces. This makes sure - // that all member variables of the - // object are destroyed by the time - // we hit the - // PetscFinalize - // call. Not doing this will lead to - // strange and hard to debug errors - // when PetscFinalize - // first deletes all PETSc vectors - // that are still around, and the - // destructor of the LaplaceProblem - // class then tries to delete them - // again. + // The final function, + // main(), again has the + // same structure as in all other + // programs, in particular + // step-6. Like in the other programs + // that use PETSc, we have to + // inialize and finalize PETSc, which + // also initializes and finalizes the + // MPI subsystem. + // + // Note how we enclose the use the + // use of the LaplaceProblem class in + // a pair of braces. This makes sure + // that all member variables of the + // object are destroyed by the time + // we hit the + // PetscFinalize + // call. Not doing this will lead to + // strange and hard to debug errors + // when PetscFinalize + // first deletes all PETSc vectors + // that are still around, and the + // destructor of the LaplaceProblem + // class then tries to delete them + // again. int main(int argc, char *argv[]) { try @@ -910,8 +910,8 @@ int main(int argc, char *argv[]) deallog.depth_console (0); { - LaplaceProblem<2> laplace_problem_2d; - laplace_problem_2d.run (); + LaplaceProblem<2> laplace_problem_2d; + laplace_problem_2d.run (); } PetscFinalize(); @@ -919,25 +919,25 @@ int main(int argc, char *argv[]) catch (std::exception &exc) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Exception on processing: " << std::endl - << exc.what() << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << exc.what() << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } catch (...) 
{ std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Unknown exception!" << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } diff --git a/deal.II/examples/step-41/step-41.cc b/deal.II/examples/step-41/step-41.cc index 53ca503baa..c0b8db33dd 100644 --- a/deal.II/examples/step-41/step-41.cc +++ b/deal.II/examples/step-41/step-41.cc @@ -13,12 +13,12 @@ // @sect3{Include files} - // As usual, at the beginning we - // include all the header files we - // need in here. With the exception - // of the various files that provide - // interfaces to the Trilinos - // library, there are no surprises: + // As usual, at the beginning we + // include all the header files we + // need in here. With the exception + // of the various files that provide + // interfaces to the Trilinos + // library, there are no surprises: #include #include #include @@ -55,31 +55,31 @@ namespace Step41 { using namespace dealii; - // @sect3{The ObstacleProblem class template} - - // This class supplies all function - // and variables needed to describe - // the obstacle problem. It is - // close to what we had to do in - // step-4, and so relatively - // simple. The only real new - // components are the - // update_solution_and_constraints - // function that computes the - // active set and a number of - // variables that are necessary to - // describe the original - // (unconstrained) form of the - // linear system - // (complete_system_matrix - // and - // complete_system_rhs) - // as well as the active set itself - // and the diagonal of the mass - // matrix $B$ used in scaling - // Lagrange multipliers in the - // active set formulation. The rest - // is as in step-4: + // @sect3{The ObstacleProblem class template} + + // This class supplies all function + // and variables needed to describe + // the obstacle problem. It is + // close to what we had to do in + // step-4, and so relatively + // simple. The only real new + // components are the + // update_solution_and_constraints + // function that computes the + // active set and a number of + // variables that are necessary to + // describe the original + // (unconstrained) form of the + // linear system + // (complete_system_matrix + // and + // complete_system_rhs) + // as well as the active set itself + // and the diagonal of the mass + // matrix $B$ used in scaling + // Lagrange multipliers in the + // active set formulation. The rest + // is as in step-4: template class ObstacleProblem { @@ -113,27 +113,27 @@ namespace Step41 }; - // @sect3{Right hand side, boundary values, and the obstacle} - - // In the following, we define - // classes that describe the right - // hand side function, the - // Dirichlet boundary values, and - // the height of the obstacle as a - // function of $\mathbf x$. In all - // three cases, we derive these - // classes from Function@, - // although in the case of - // RightHandSide and - // Obstacle this is - // more out of convention than - // necessity since we never pass - // such objects to the library. 
In - // any case, the definition of the - // right hand side and boundary - // values classes is obvious given - // our choice of $f=-10$, - // $u|_{\partial\Omega}=0$: + // @sect3{Right hand side, boundary values, and the obstacle} + + // In the following, we define + // classes that describe the right + // hand side function, the + // Dirichlet boundary values, and + // the height of the obstacle as a + // function of $\mathbf x$. In all + // three cases, we derive these + // classes from Function@, + // although in the case of + // RightHandSide and + // Obstacle this is + // more out of convention than + // necessity since we never pass + // such objects to the library. In + // any case, the definition of the + // right hand side and boundary + // values classes is obvious given + // our choice of $f=-10$, + // $u|_{\partial\Omega}=0$: template class RightHandSide : public Function { @@ -141,12 +141,12 @@ namespace Step41 RightHandSide () : Function() {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; }; template double RightHandSide::value (const Point &p, - const unsigned int component) const + const unsigned int component) const { Assert (component == 0, ExcNotImplemented()); @@ -162,12 +162,12 @@ namespace Step41 BoundaryValues () : Function() {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; }; template double BoundaryValues::value (const Point &p, - const unsigned int component) const + const unsigned int component) const { Assert (component == 0, ExcNotImplemented()); @@ -176,8 +176,8 @@ namespace Step41 - // We describe the obstacle function by a cascaded - // barrier (think: stair steps): + // We describe the obstacle function by a cascaded + // barrier (think: stair steps): template class Obstacle : public Function { @@ -185,12 +185,12 @@ namespace Step41 Obstacle () : Function() {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; }; template double Obstacle::value (const Point &p, - const unsigned int component) const + const unsigned int component) const { Assert (component == 0, ExcNotImplemented()); @@ -206,30 +206,30 @@ namespace Step41 - // @sect3{Implementation of the ObstacleProblem class} + // @sect3{Implementation of the ObstacleProblem class} - // @sect4{ObstacleProblem::ObstacleProblem} + // @sect4{ObstacleProblem::ObstacleProblem} - // To everyone who has taken a look - // at the first few tutorial - // programs, the constructor is - // completely obvious: + // To everyone who has taken a look + // at the first few tutorial + // programs, the constructor is + // completely obvious: template ObstacleProblem::ObstacleProblem () - : - fe (1), - dof_handler (triangulation) + : + fe (1), + dof_handler (triangulation) {} - // @sect4{ObstacleProblem::make_grid} + // @sect4{ObstacleProblem::make_grid} - // We solve our obstacle problem on - // the square $[-1,1]\times [-1,1]$ - // in 2D. This function therefore - // just sets up one of the simplest - // possible meshes. + // We solve our obstacle problem on + // the square $[-1,1]\times [-1,1]$ + // in 2D. This function therefore + // just sets up one of the simplest + // possible meshes. 
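[Editor's note] The body of Obstacle::value() falls outside the hunk above. One plausible stair-step barrier of the kind the comment describes is sketched below; the particular break points and heights are illustrative and not necessarily those of the actual program.

    template <int dim>
    double Obstacle<dim>::value (const Point<dim> &p,
                                 const unsigned int component) const
    {
      Assert (component == 0, ExcNotImplemented());

      // Three descending steps in the x-direction.
      if (p(0) < -1./3.)
        return -0.2;
      else if (p(0) < 1./3.)
        return -0.4;
      else
        return -0.6;
    }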
template void ObstacleProblem::make_grid () { @@ -237,25 +237,25 @@ namespace Step41 triangulation.refine_global (7); std::cout << "Number of active cells: " - << triangulation.n_active_cells() - << std::endl - << "Total number of cells: " - << triangulation.n_cells() - << std::endl; + << triangulation.n_active_cells() + << std::endl + << "Total number of cells: " + << triangulation.n_cells() + << std::endl; } - // @sect4{ObstacleProblem::setup_system} + // @sect4{ObstacleProblem::setup_system} - // In this first function of note, - // we set up the degrees of freedom - // handler, resize vectors and - // matrices, and deal with the - // constraints. Initially, the - // constraints are, of course, only - // given by boundary values, so we - // interpolate them towards the top - // of the function. + // In this first function of note, + // we set up the degrees of freedom + // handler, resize vectors and + // matrices, and deal with the + // constraints. Initially, the + // constraints are, of course, only + // given by boundary values, so we + // interpolate them towards the top + // of the function. template void ObstacleProblem::setup_system () { @@ -263,21 +263,21 @@ namespace Step41 active_set.set_size (dof_handler.n_dofs()); std::cout << "Number of degrees of freedom: " - << dof_handler.n_dofs() - << std::endl - << std::endl; + << dof_handler.n_dofs() + << std::endl + << std::endl; VectorTools::interpolate_boundary_values (dof_handler, - 0, - BoundaryValues(), - constraints); + 0, + BoundaryValues(), + constraints); constraints.close (); CompressedSparsityPattern c_sparsity(dof_handler.n_dofs()); DoFTools::make_sparsity_pattern (dof_handler, - c_sparsity, - constraints, - false); + c_sparsity, + constraints, + false); system_matrix.reinit (c_sparsity); complete_system_matrix.reinit (c_sparsity); @@ -287,18 +287,18 @@ namespace Step41 complete_system_rhs.reinit (dof_handler.n_dofs()); contact_force.reinit (dof_handler.n_dofs()); - // The only other thing to do - // here is to compute the factors - // in the $B$ matrix which is - // used to scale the residual. As - // discussed in the introduction, - // we'll use a little trick to - // make this mass matrix - // diagonal, and in the following - // then first compute all of this - // as a matrix and then extract - // the diagonal elements for - // later use: + // The only other thing to do + // here is to compute the factors + // in the $B$ matrix which is + // used to scale the residual. As + // discussed in the introduction, + // we'll use a little trick to + // make this mass matrix + // diagonal, and in the following + // then first compute all of this + // as a matrix and then extract + // the diagonal elements for + // later use: TrilinosWrappers::SparseMatrix mass_matrix; mass_matrix.reinit (c_sparsity); assemble_mass_matrix_diagonal (mass_matrix); @@ -308,18 +308,18 @@ namespace Step41 } - // @sect4{ObstacleProblem::assemble_system} + // @sect4{ObstacleProblem::assemble_system} - // This function at once assembles - // the system matrix and - // right-hand-side and applied the - // constraints (both due to the - // active set as well as from - // boundary values) to our - // system. Otherwise, it is - // functionally equivalent to the - // corresponding function in, for - // example, step-4. + // This function at once assembles + // the system matrix and + // right-hand-side and applied the + // constraints (both due to the + // active set as well as from + // boundary values) to our + // system. 
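[Editor's note] Returning to setup_system() above: the extraction of the diagonal entries that the comment refers to lies just outside the hunk. A sketch of what it amounts to, using the member names of the surrounding class and TrilinosWrappers::SparseMatrix::diag_element (the loop itself is reconstructed, not quoted):

    // ... after assemble_mass_matrix_diagonal (mass_matrix):
    diagonal_of_mass_matrix.reinit (dof_handler.n_dofs());
    for (unsigned int j=0; j<dof_handler.n_dofs(); ++j)
      diagonal_of_mass_matrix (j) = mass_matrix.diag_element (j);
    // The full lumped mass matrix is no longer needed afterwards; only
    // the entries B_i are kept for scaling the residual.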
Otherwise, it is + // functionally equivalent to the + // corresponding function in, for + // example, step-4. template void ObstacleProblem::assemble_system () { @@ -332,9 +332,9 @@ namespace Step41 const RightHandSide right_hand_side; FEValues fe_values (fe, quadrature_formula, - update_values | update_gradients | - update_quadrature_points | - update_JxW_values); + update_values | update_gradients | + update_quadrature_points | + update_JxW_values); const unsigned int dofs_per_cell = fe.dofs_per_cell; const unsigned int n_q_points = quadrature_formula.size(); @@ -350,83 +350,83 @@ namespace Step41 for (; cell!=endc; ++cell) { - fe_values.reinit (cell); - cell_matrix = 0; - cell_rhs = 0; - - for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); - - constraints.distribute_local_to_global (cell_matrix, - cell_rhs, - local_dof_indices, - system_matrix, - system_rhs, - true); + fe_values.reinit (cell); + cell_matrix = 0; + cell_rhs = 0; + + for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); + + constraints.distribute_local_to_global (cell_matrix, + cell_rhs, + local_dof_indices, + system_matrix, + system_rhs, + true); } } - // @sect4{ObstacleProblem::assemble_mass_matrix_diagonal} - - // The next function is used in the - // computation of the diagonal mass - // matrix $B$ used to scale - // variables in the active set - // method. As discussed in the - // introduction, we get the mass - // matrix to be diagonal by - // choosing the trapezoidal rule - // for quadrature. Doing so we - // don't really need the triple - // loop over quadrature points, - // indices $i$ and indices $j$ any - // more and can, instead, just use - // a double loop. The rest of the - // function is obvious given what - // we have discussed in many of the - // previous tutorial programs. - // - // Note that at the time this - // function is called, the - // constraints object only contains - // boundary value constraints; we - // therefore do not have to pay - // attention in the last - // copy-local-to-global step to - // preserve the values of matrix - // entries that may later on be - // constrained by the active set. - // - // Note also that the trick with - // the trapezoidal rule only works - // if we have in fact $Q_1$ - // elements. For higher order - // elements, one would need to use - // a quadrature formula that has - // quadrature points at all the - // support points of the finite - // element. Constructing such a - // quadrature formula isn't really - // difficult, but not the point - // here, and so we simply assert at - // the top of the function that our - // implicit assumption about the - // finite element is in fact - // satisfied. + // @sect4{ObstacleProblem::assemble_mass_matrix_diagonal} + + // The next function is used in the + // computation of the diagonal mass + // matrix $B$ used to scale + // variables in the active set + // method. As discussed in the + // introduction, we get the mass + // matrix to be diagonal by + // choosing the trapezoidal rule + // for quadrature. Doing so we + // don't really need the triple + // loop over quadrature points, + // indices $i$ and indices $j$ any + // more and can, instead, just use + // a double loop. The rest of the + // function is obvious given what + // we have discussed in many of the + // previous tutorial programs. 
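[Editor's note] Spelling out the argument behind the trick (an editor's remark, not text from the program): the trapezoidal rule places its quadrature points $x_q$ at the support points of the $Q_1$ Lagrange basis, for which $\varphi_i(x_q)=\delta_{iq}$. The cell contribution to the mass matrix therefore collapses to

    M_{ij} = \sum_q \varphi_i(x_q)\, \varphi_j(x_q)\, w_q\, |J(x_q)|
           = \delta_{ij}\, w_i\, |J(x_i)|,

so only diagonal entries are nonzero on every cell, hence also in the assembled matrix, and a double loop over cells and local DoFs is all that is needed.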
+ // + // Note that at the time this + // function is called, the + // constraints object only contains + // boundary value constraints; we + // therefore do not have to pay + // attention in the last + // copy-local-to-global step to + // preserve the values of matrix + // entries that may later on be + // constrained by the active set. + // + // Note also that the trick with + // the trapezoidal rule only works + // if we have in fact $Q_1$ + // elements. For higher order + // elements, one would need to use + // a quadrature formula that has + // quadrature points at all the + // support points of the finite + // element. Constructing such a + // quadrature formula isn't really + // difficult, but not the point + // here, and so we simply assert at + // the top of the function that our + // implicit assumption about the + // finite element is in fact + // satisfied. template void ObstacleProblem:: @@ -436,9 +436,9 @@ namespace Step41 const QTrapez quadrature_formula; FEValues fe_values (fe, - quadrature_formula, - update_values | - update_JxW_values); + quadrature_formula, + update_values | + update_JxW_values); const unsigned int dofs_per_cell = fe.dofs_per_cell; const unsigned int n_q_points = quadrature_formula.size(); @@ -452,63 +452,63 @@ namespace Step41 for (; cell!=endc; ++cell) { - fe_values.reinit (cell); - cell_matrix = 0; + fe_values.reinit (cell); + cell_matrix = 0; - for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); + cell->get_dof_indices (local_dof_indices); - constraints.distribute_local_to_global (cell_matrix, - local_dof_indices, - mass_matrix); + constraints.distribute_local_to_global (cell_matrix, + local_dof_indices, + mass_matrix); } } - // @sect4{ObstacleProblem::update_solution_and_constraints} - - // In a sense, this is the central - // function of this program. It - // updates the active set of - // constrained degrees of freedom - // as discussed in the introduction - // and computes a ConstraintMatrix - // object from it that can then be - // used to eliminate constrained - // degrees of freedom from the - // solution of the next - // iteration. At the same time we - // set the constrained degrees of - // freedom of the solution to the - // correct value, namely the height - // of the obstacle. - // - // Fundamentally, the function is - // rather simple: We have to loop - // over all degrees of freedom and - // check the sign of the function - // $\Lambda^k_i + c([BU^k]_i - - // G_i) = \Lambda^k_i + cB_i(U^k_i - - // [g_h]_i)$ because in our case + // @sect4{ObstacleProblem::update_solution_and_constraints} + + // In a sense, this is the central + // function of this program. It + // updates the active set of + // constrained degrees of freedom + // as discussed in the introduction + // and computes a ConstraintMatrix + // object from it that can then be + // used to eliminate constrained + // degrees of freedom from the + // solution of the next + // iteration. At the same time we + // set the constrained degrees of + // freedom of the solution to the + // correct value, namely the height + // of the obstacle. + // + // Fundamentally, the function is + // rather simple: We have to loop + // over all degrees of freedom and + // check the sign of the function + // $\Lambda^k_i + c([BU^k]_i - + // G_i) = \Lambda^k_i + cB_i(U^k_i - + // [g_h]_i)$ because in our case // $G_i = B_i[g_h]_i$. 
To this end, - // we use the formula given in the - // introduction by which we can - // compute the Lagrange multiplier - // as the residual of the original - // linear system (given via the - // variables - // complete_system_matrix - // and - // complete_system_rhs. - // At the top of this function, we - // compute this residual using a - // function that is part of the - // matrix classes. + // we use the formula given in the + // introduction by which we can + // compute the Lagrange multiplier + // as the residual of the original + // linear system (given via the + // variables + // complete_system_matrix + // and + // complete_system_rhs. + // At the top of this function, we + // compute this residual using a + // function that is part of the + // matrix classes. template void ObstacleProblem::update_solution_and_constraints () @@ -519,67 +519,67 @@ namespace Step41 TrilinosWrappers::Vector lambda (dof_handler.n_dofs()); complete_system_matrix.residual (lambda, - solution, complete_system_rhs); + solution, complete_system_rhs); contact_force.ratio (lambda, diagonal_of_mass_matrix); contact_force *= -1; - // The next step is to reset the - // active set and constraints - // objects and to start the loop - // over all degrees of - // freedom. This is made slightly - // more complicated by the fact - // that we can't just loop over - // all elements of the solution - // vector since there is no way - // for us then to find out what - // location a DoF is associated - // with; however, we need this - // location to test whether the - // displacement of a DoF is - // larger or smaller than the - // height of the obstacle at this - // location. - // - // We work around this by looping - // over all cells and DoFs - // defined on each of these - // cells. We use here that the - // displacement is described - // using a $Q_1$ function for - // which degrees of freedom are - // always located on the vertices - // of the cell; thus, we can get - // the index of each degree of - // freedom and its location by - // asking the vertex for this - // information. On the other - // hand, this clearly wouldn't - // work for higher order - // elements, and so we add an - // assertion that makes sure that - // we only deal with elements for - // which all degrees of freedom - // are located in vertices to - // avoid tripping ourselves with - // non-functional code in case - // someone wants to play with - // increasing the polynomial - // degree of the solution. - // - // The price to pay for having to - // loop over cells rather than - // DoFs is that we may encounter - // some degrees of freedom more - // than once, namely each time we - // visit one of the cells - // adjacent to a given vertex. We - // will therefore have to keep - // track which vertices we have - // already touched and which we - // haven't so far. We do so by - // using an array of flags - // dof_touched: + // The next step is to reset the + // active set and constraints + // objects and to start the loop + // over all degrees of + // freedom. This is made slightly + // more complicated by the fact + // that we can't just loop over + // all elements of the solution + // vector since there is no way + // for us then to find out what + // location a DoF is associated + // with; however, we need this + // location to test whether the + // displacement of a DoF is + // larger or smaller than the + // height of the obstacle at this + // location. 
+ // + // We work around this by looping + // over all cells and DoFs + // defined on each of these + // cells. We use here that the + // displacement is described + // using a $Q_1$ function for + // which degrees of freedom are + // always located on the vertices + // of the cell; thus, we can get + // the index of each degree of + // freedom and its location by + // asking the vertex for this + // information. On the other + // hand, this clearly wouldn't + // work for higher order + // elements, and so we add an + // assertion that makes sure that + // we only deal with elements for + // which all degrees of freedom + // are located in vertices to + // avoid tripping ourselves with + // non-functional code in case + // someone wants to play with + // increasing the polynomial + // degree of the solution. + // + // The price to pay for having to + // loop over cells rather than + // DoFs is that we may encounter + // some degrees of freedom more + // than once, namely each time we + // visit one of the cells + // adjacent to a given vertex. We + // will therefore have to keep + // track which vertices we have + // already touched and which we + // haven't so far. We do so by + // using an array of flags + // dof_touched: constraints.clear(); active_set.clear (); @@ -591,126 +591,126 @@ namespace Step41 endc = dof_handler.end(); for (; cell!=endc; ++cell) for (unsigned int v=0; v::vertices_per_cell; ++v) - { - Assert (dof_handler.get_fe().dofs_per_cell == - GeometryInfo::vertices_per_cell, - ExcNotImplemented()); - - const unsigned int dof_index = cell->vertex_dof_index (v,0); - - if (dof_touched[dof_index] == false) - dof_touched[dof_index] = true; - else - continue; - - // Now that we know that we - // haven't touched this DoF - // yet, let's get the value - // of the displacement - // function there as well - // as the value of the - // obstacle function and - // use this to decide - // whether the current DoF - // belongs to the active - // set. For that we use the - // function given above and - // in the introduction. - // - // If we decide that the - // DoF should be part of - // the active set, we add - // its index to the active - // set, introduce a - // nonhomogeneous equality - // constraint in the - // ConstraintMatrix object, - // and reset the solution - // value to the height of - // the obstacle. Finally, - // the residual of the - // non-contact part of the - // system serves as an - // additional control (the - // residual equals the - // remaining, unaccounted - // forces, and should be - // zero outside the contact - // zone), so we zero out - // the components of the - // residual vector (i.e., - // the Lagrange multiplier - // lambda) that correspond - // to the area where the - // body is in contact; at - // the end of the loop over - // all cells, the residual - // will therefore only - // consist of the residual - // in the non-contact - // zone. We output the norm - // of this residual along - // with the size of the - // active set after the - // loop. 
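[Editor's note] Reduced to a single vertex DoF, the decision that the code below implements reads as follows; this is an annotated condensation of the loop body shown next in the patch (the dof_touched bookkeeping and the surrounding cell/vertex loops are omitted here).

    // For a vertex DoF with index dof_index located at cell->vertex(v):
    const double obstacle_value = obstacle.value (cell->vertex(v));
    const double solution_value = solution (dof_index);

    // Active if  Lambda_i + c * B_i * (U_i - g_i) < 0.
    if (lambda (dof_index)
        + penalty_parameter
          * diagonal_of_mass_matrix (dof_index)
          * (solution_value - obstacle_value)
        < 0)
      {
        active_set.add_index (dof_index);                 // bookkeeping
        constraints.add_line (dof_index);                 // U_i is fixed ...
        constraints.set_inhomogeneity (dof_index, obstacle_value); // ... to g_i
        solution (dof_index) = obstacle_value;
        lambda (dof_index) = 0;   // only the non-contact residual remains
      }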
- const double obstacle_value = obstacle.value (cell->vertex(v)); - const double solution_value = solution (dof_index); - - if (lambda (dof_index) + - penalty_parameter * - diagonal_of_mass_matrix(dof_index) * - (solution_value - obstacle_value) - < - 0) - { - active_set.add_index (dof_index); - constraints.add_line (dof_index); - constraints.set_inhomogeneity (dof_index, obstacle_value); - - solution (dof_index) = obstacle_value; - - lambda (dof_index) = 0; - } - } + { + Assert (dof_handler.get_fe().dofs_per_cell == + GeometryInfo::vertices_per_cell, + ExcNotImplemented()); + + const unsigned int dof_index = cell->vertex_dof_index (v,0); + + if (dof_touched[dof_index] == false) + dof_touched[dof_index] = true; + else + continue; + + // Now that we know that we + // haven't touched this DoF + // yet, let's get the value + // of the displacement + // function there as well + // as the value of the + // obstacle function and + // use this to decide + // whether the current DoF + // belongs to the active + // set. For that we use the + // function given above and + // in the introduction. + // + // If we decide that the + // DoF should be part of + // the active set, we add + // its index to the active + // set, introduce a + // nonhomogeneous equality + // constraint in the + // ConstraintMatrix object, + // and reset the solution + // value to the height of + // the obstacle. Finally, + // the residual of the + // non-contact part of the + // system serves as an + // additional control (the + // residual equals the + // remaining, unaccounted + // forces, and should be + // zero outside the contact + // zone), so we zero out + // the components of the + // residual vector (i.e., + // the Lagrange multiplier + // lambda) that correspond + // to the area where the + // body is in contact; at + // the end of the loop over + // all cells, the residual + // will therefore only + // consist of the residual + // in the non-contact + // zone. We output the norm + // of this residual along + // with the size of the + // active set after the + // loop. + const double obstacle_value = obstacle.value (cell->vertex(v)); + const double solution_value = solution (dof_index); + + if (lambda (dof_index) + + penalty_parameter * + diagonal_of_mass_matrix(dof_index) * + (solution_value - obstacle_value) + < + 0) + { + active_set.add_index (dof_index); + constraints.add_line (dof_index); + constraints.set_inhomogeneity (dof_index, obstacle_value); + + solution (dof_index) = obstacle_value; + + lambda (dof_index) = 0; + } + } std::cout << " Size of active set: " << active_set.n_elements() - << std::endl; + << std::endl; std::cout << " Residual of the non-contact part of the system: " - << lambda.l2_norm() - << std::endl; - - // In a final step, we add to the - // set of constraints on DoFs we - // have so far from the active - // set those that result from - // Dirichlet boundary values, and - // close the constraints object: + << lambda.l2_norm() + << std::endl; + + // In a final step, we add to the + // set of constraints on DoFs we + // have so far from the active + // set those that result from + // Dirichlet boundary values, and + // close the constraints object: VectorTools::interpolate_boundary_values (dof_handler, - 0, - BoundaryValues(), - constraints); + 0, + BoundaryValues(), + constraints); constraints.close (); } - // @sect4{ObstacleProblem::solve} - - // There is nothing to say really - // about the solve function. 
In the - // context of a Newton method, we - // are not typically interested in - // very high accuracy (why ask for - // a highly accurate solution of a - // linear problem that we know only - // gives us an approximation of the - // solution of the nonlinear - // problem), and so we use the - // ReductionControl class that - // stops iterations when either an - // absolute tolerance is reached - // (for which we choose $10^{-12}$) - // or when the residual is reduced - // by a certain factor (here, - // $10^{-3}$). + // @sect4{ObstacleProblem::solve} + + // There is nothing to say really + // about the solve function. In the + // context of a Newton method, we + // are not typically interested in + // very high accuracy (why ask for + // a highly accurate solution of a + // linear problem that we know only + // gives us an approximation of the + // solution of the nonlinear + // problem), and so we use the + // ReductionControl class that + // stops iterations when either an + // absolute tolerance is reached + // (for which we choose $10^{-12}$) + // or when the residual is reduced + // by a certain factor (here, + // $10^{-3}$). template void ObstacleProblem::solve () { @@ -725,30 +725,30 @@ namespace Step41 constraints.distribute (solution); std::cout << " Error: " << reduction_control.initial_value() - << " -> " << reduction_control.last_value() - << " in " - << reduction_control.last_step() - << " CG iterations." - << std::endl; + << " -> " << reduction_control.last_value() + << " in " + << reduction_control.last_step() + << " CG iterations." + << std::endl; } - // @sect4{ObstacleProblem::output_results} - - // We use the vtk-format for the - // output. The file contains the - // displacement and a numerical - // represenation of the active - // set. The function looks standard - // but note that we can add an - // IndexSet object to the DataOut - // object in exactly the same way - // as a regular solution vector: it - // is simply interpreted as a - // function that is either zero - // (when a degree of freedom is not - // part of the IndexSet) or one (if - // it is). + // @sect4{ObstacleProblem::output_results} + + // We use the vtk-format for the + // output. The file contains the + // displacement and a numerical + // represenation of the active + // set. The function looks standard + // but note that we can add an + // IndexSet object to the DataOut + // object in exactly the same way + // as a regular solution vector: it + // is simply interpreted as a + // function that is either zero + // (when a degree of freedom is not + // part of the IndexSet) or one (if + // it is). template void ObstacleProblem::output_results (const unsigned int iteration) const { @@ -764,51 +764,51 @@ namespace Step41 data_out.build_patches (); std::ofstream output_vtk ((std::string("output_") + - Utilities::int_to_string (iteration, 3) + - ".vtk").c_str ()); + Utilities::int_to_string (iteration, 3) + + ".vtk").c_str ()); data_out.write_vtk (output_vtk); } - // @sect4{ObstacleProblem::run} - - // This is the function which has - // the top-level control over - // everything. It is not very - // long, and in fact rather - // straightforward: in every - // iteration of the active set - // method, we assemble the linear - // system, solve it, update the - // active set and project the - // solution back to the feasible - // set, and then output the - // results. The iteration is - // terminated whenever the active - // set has not changed in the - // previous iteration. 
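[Editor's note] The solver setup of solve() that the hunk above skips over (everything before constraints.distribute()) follows the pattern just described: a ReductionControl stopping at either an absolute tolerance or a relative reduction, driving CG with an algebraic multigrid preconditioner from the Trilinos wrappers. A sketch under those assumptions; the iteration limit and preconditioner settings are illustrative.

    // Stop when the residual falls below 1e-12 absolutely or has been
    // reduced by a factor of 1e3, whichever happens first.
    ReductionControl reduction_control (100, 1e-12, 1e-3);
    SolverCG<TrilinosWrappers::Vector> solver (reduction_control);

    TrilinosWrappers::PreconditionAMG precondition;
    precondition.initialize (system_matrix);

    solver.solve (system_matrix, solution, system_rhs, precondition);
    constraints.distribute (solution);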
- // - // The only trickier part is that - // we have to save the linear - // system (i.e., the matrix and - // right hand side) after - // assembling it in the first - // iteration. The reason is that - // this is the only step where we - // can access the linear system as - // built without any of the contact - // constraints active. We need this - // to compute the residual of the - // solution at other iterations, - // but in other iterations that - // linear system we form has the - // rows and columns that correspond - // to constrained degrees of - // freedom eliminated, and so we - // can no longer access the full - // residual of the original - // equation. + // @sect4{ObstacleProblem::run} + + // This is the function which has + // the top-level control over + // everything. It is not very + // long, and in fact rather + // straightforward: in every + // iteration of the active set + // method, we assemble the linear + // system, solve it, update the + // active set and project the + // solution back to the feasible + // set, and then output the + // results. The iteration is + // terminated whenever the active + // set has not changed in the + // previous iteration. + // + // The only trickier part is that + // we have to save the linear + // system (i.e., the matrix and + // right hand side) after + // assembling it in the first + // iteration. The reason is that + // this is the only step where we + // can access the linear system as + // built without any of the contact + // constraints active. We need this + // to compute the residual of the + // solution at other iterations, + // but in other iterations that + // linear system we form has the + // rows and columns that correspond + // to constrained degrees of + // freedom eliminated, and so we + // can no longer access the full + // residual of the original + // equation. template void ObstacleProblem::run () { @@ -818,26 +818,26 @@ namespace Step41 IndexSet active_set_old (active_set); for (unsigned int iteration=0; iteration<=solution.size (); ++iteration) { - std::cout << "Newton iteration " << iteration << std::endl; + std::cout << "Newton iteration " << iteration << std::endl; - assemble_system (); + assemble_system (); - if (iteration == 0) - { - complete_system_matrix.copy_from (system_matrix); - complete_system_rhs = system_rhs; - } + if (iteration == 0) + { + complete_system_matrix.copy_from (system_matrix); + complete_system_rhs = system_rhs; + } - solve (); - update_solution_and_constraints (); - output_results (iteration); + solve (); + update_solution_and_constraints (); + output_results (iteration); - if (active_set == active_set_old) - break; + if (active_set == active_set_old) + break; - active_set_old = active_set; + active_set_old = active_set; - std::cout << std::endl; + std::cout << std::endl; } } } @@ -845,13 +845,13 @@ namespace Step41 // @sect3{The main function} - // And this is the main function. It - // follows the pattern of all other - // main functions. The call to - // initialize MPI exists because the - // Trilinos library upon which we - // build our linear solvers in this - // program requires it. + // And this is the main function. It + // follows the pattern of all other + // main functions. The call to + // initialize MPI exists because the + // Trilinos library upon which we + // build our linear solvers in this + // program requires it. 
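[Editor's note] The body of main() is largely outside the hunks shown here. The MPI initialization the comment refers to is typically done with deal.II's RAII helper in namespace Utilities::MPI, roughly as in the sketch below (error handling via try/catch, which the actual program includes, is omitted):

    int main (int argc, char *argv[])
    {
      using namespace dealii;

      // Initializes MPI (and, through it, what Trilinos needs) and
      // finalizes it again when the object goes out of scope at the
      // end of main().
      Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv);

      Step41::ObstacleProblem<2> obstacle_problem;
      obstacle_problem.run ();

      return 0;
    }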
int main (int argc, char *argv[]) { try diff --git a/deal.II/examples/step-42/step-42.cc b/deal.II/examples/step-42/step-42.cc index cba58e4740..0c5fab4170 100644 --- a/deal.II/examples/step-42/step-42.cc +++ b/deal.II/examples/step-42/step-42.cc @@ -10,10 +10,10 @@ /* further information on this license. */ - // @sect3{Include files} + // @sect3{Include files} - // As usual, we start by including - // some well-known files: + // As usual, we start by including + // some well-known files: #include #include #include @@ -94,7 +94,7 @@ namespace Step42 template void copy(const MATRIX &matrix, - FullMatrix &full_matrix) + FullMatrix &full_matrix) { const unsigned int m = matrix.m(); const unsigned int n = matrix.n(); @@ -104,13 +104,13 @@ namespace Step42 Vector result (m); for(unsigned int i=0; i > &lower_dofs, - std::vector > &boundary_dofs); + std::vector > &boundary_dofs); void output_results (const unsigned int refinement_cycle) const; void refine_mesh (); @@ -168,20 +168,20 @@ namespace Step42 BoundaryValues () : Function(dim+1) {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; virtual void vector_value (const Point &p, - Vector &value) const; + Vector &value) const; }; template double BoundaryValues::value (const Point &p, - const unsigned int component) const + const unsigned int component) const { Assert (component < this->n_components, - ExcIndexRange (component, 0, this->n_components)); + ExcIndexRange (component, 0, this->n_components)); if (component == 0 && p[0] == 0) return (dim == 2 ? - p[1]*(p[1]-1.) : p[1]*(p[1]-1.) * p[2]*(p[2]-1.)); @@ -192,7 +192,7 @@ namespace Step42 template void BoundaryValues::vector_value (const Point &p, - Vector &values) const + Vector &values) const { for (unsigned int c=0; cn_components; ++c) values(c) = BoundaryValues::value (p, c); @@ -208,10 +208,10 @@ namespace Step42 RightHandSide () : Function(dim+1) {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; virtual void vector_value (const Point &p, - Vector &value) const; + Vector &value) const; }; @@ -219,7 +219,7 @@ namespace Step42 template double RightHandSide::value (const Point &/*p*/, - const unsigned int component) const + const unsigned int component) const { return (component == 1 ? 1 : 0); } @@ -228,7 +228,7 @@ namespace Step42 template void RightHandSide::vector_value (const Point &p, - Vector &values) const + Vector &values) const { for (unsigned int c=0; cn_components; ++c) values(c) = RightHandSide::value (p, c); @@ -242,10 +242,10 @@ namespace Step42 { public: InverseMatrix (const Matrix &m, - const Preconditioner &preconditioner); + const Preconditioner &preconditioner); void vmult (Vector &dst, - const Vector &src) const; + const Vector &src) const; mutable std::string name; private: @@ -256,16 +256,16 @@ namespace Step42 template InverseMatrix::InverseMatrix (const Matrix &m, - const Preconditioner &preconditioner) - : - matrix (&m), - preconditioner (&preconditioner) + const Preconditioner &preconditioner) + : + matrix (&m), + preconditioner (&preconditioner) {} template void InverseMatrix::vmult (Vector &dst, - const Vector &src) const + const Vector &src) const { SolverControl solver_control (src.size(), 1.0e-12*src.l2_norm()); SolverCG<> cg (solver_control); @@ -274,29 +274,29 @@ namespace Step42 try { - cg.solve (*matrix, dst, src, *preconditioner); + cg.solve (*matrix, dst, src, *preconditioner); } catch (...) 
{ - std::cout << "Failure in " << __PRETTY_FUNCTION__ << std::endl; - abort (); + std::cout << "Failure in " << __PRETTY_FUNCTION__ << std::endl; + abort (); } #ifdef STEP_42_TEST if (name == "in schur") std::cout << " " << solver_control.last_step() - << " inner CG steps inside the Schur complement "; + << " inner CG steps inside the Schur complement "; else if (name == "top left") std::cout << " " << solver_control.last_step() - << " CG steps on the top left block "; + << " CG steps on the top left block "; else if (name == "rhs") std::cout << " " << solver_control.last_step() - << " CG steps for computing the r.h.s. "; + << " CG steps for computing the r.h.s. "; else abort (); std::cout << solver_control.initial_value() << "->" << solver_control.last_value() - << std::endl; + << std::endl; #endif } @@ -306,16 +306,16 @@ namespace Step42 { public: BlockSchurPreconditioner (const BlockSparseMatrix &S, - const InverseMatrix,PreconditionerMp> &Mpinv, - const PreconditionerA &Apreconditioner); + const InverseMatrix,PreconditionerMp> &Mpinv, + const PreconditionerA &Apreconditioner); void vmult (BlockVector &dst, - const BlockVector &src) const; + const BlockVector &src) const; private: const SmartPointer > system_matrix; const SmartPointer, - PreconditionerMp > > m_inverse; + PreconditionerMp > > m_inverse; const PreconditionerA &a_preconditioner; mutable Vector tmp; @@ -328,30 +328,30 @@ namespace Step42 const InverseMatrix,PreconditionerMp> &Mpinv, const PreconditionerA &Apreconditioner ) - : - system_matrix (&S), - m_inverse (&Mpinv), - a_preconditioner (Apreconditioner), - tmp (S.block(1,1).m()) + : + system_matrix (&S), + m_inverse (&Mpinv), + a_preconditioner (Apreconditioner), + tmp (S.block(1,1).m()) {} - // Now the interesting function, the multiplication of - // the preconditioner with a BlockVector. + // Now the interesting function, the multiplication of + // the preconditioner with a BlockVector. template void BlockSchurPreconditioner::vmult ( BlockVector &dst, const BlockVector &src) const { - // Form u_new = A^{-1} u + // Form u_new = A^{-1} u a_preconditioner.vmult (dst.block(0), src.block(0)); - // Form tmp = - B u_new + p - // (SparseMatrix::residual - // does precisely this) + // Form tmp = - B u_new + p + // (SparseMatrix::residual + // does precisely this) system_matrix->block(1,0).residual(tmp, dst.block(0), src.block(1)); - // Change sign in tmp + // Change sign in tmp tmp *= -1; - // Multiply by approximate Schur complement - // (i.e. a pressure mass matrix) + // Multiply by approximate Schur complement + // (i.e. 
a pressure mass matrix) m_inverse->vmult (dst.block(1), tmp); } @@ -360,20 +360,20 @@ namespace Step42 { public: SchurComplement (const BlockSparseMatrix &system_matrix, - const InverseMatrix, Preconditioner> &A_inverse); + const InverseMatrix, Preconditioner> &A_inverse); void vmult (Vector &dst, - const Vector &src) const; + const Vector &src) const; unsigned int m() const - { - return system_matrix->block(1,1).m(); - } + { + return system_matrix->block(1,1).m(); + } unsigned int n() const - { - return system_matrix->block(1,1).n(); - } + { + return system_matrix->block(1,1).n(); + } private: const SmartPointer > system_matrix; @@ -387,18 +387,18 @@ namespace Step42 template SchurComplement:: SchurComplement (const BlockSparseMatrix &system_matrix, - const InverseMatrix,Preconditioner> &A_inverse) - : - system_matrix (&system_matrix), - A_inverse (&A_inverse), - tmp1 (system_matrix.block(0,0).m()), - tmp2 (system_matrix.block(0,0).m()) + const InverseMatrix,Preconditioner> &A_inverse) + : + system_matrix (&system_matrix), + A_inverse (&A_inverse), + tmp1 (system_matrix.block(0,0).m()), + tmp2 (system_matrix.block(0,0).m()) {} template void SchurComplement::vmult (Vector &dst, - const Vector &src) const + const Vector &src) const { system_matrix->block(0,1).vmult (tmp1, src); A_inverse->name = "in schur"; @@ -413,12 +413,12 @@ namespace Step42 template StokesProblem::StokesProblem (const unsigned int degree) - : - degree (degree), - triangulation (Triangulation::limit_level_difference_at_vertices), - fe (FE_Q(degree+1), dim, - FE_Q(degree), 1), - dof_handler (triangulation) + : + degree (degree), + triangulation (Triangulation::limit_level_difference_at_vertices), + fe (FE_Q(degree+1), dim, + FE_Q(degree), 1), + dof_handler (triangulation) {} @@ -451,13 +451,13 @@ namespace Step42 std::vector component_mask (dim+1, true); component_mask[dim] = false; VectorTools::interpolate_boundary_values (mapping, - dof_handler, - dirichlet_boundary, - constraints, - component_mask); + dof_handler, + dirichlet_boundary, + constraints, + component_mask); DoFTools::make_hanging_node_constraints (dof_handler, - constraints); + constraints); mg_constrained_dofs.clear(); mg_constrained_dofs.initialize(dof_handler, dirichlet_boundary); @@ -468,17 +468,17 @@ namespace Step42 std::vector dofs_per_block (2); DoFTools::count_dofs_per_block (dof_handler, dofs_per_block, - block_component); + block_component); const unsigned int n_u = dofs_per_block[0], - n_p = dofs_per_block[1]; + n_p = dofs_per_block[1]; std::cout << " Number of active cells: " - << triangulation.n_active_cells() - << std::endl - << " Number of degrees of freedom: " - << dof_handler.n_dofs() - << " (" << n_u << '+' << n_p << ')' - << std::endl; + << triangulation.n_active_cells() + << std::endl + << " Number of degrees of freedom: " + << dof_handler.n_dofs() + << " (" << n_u << '+' << n_p << ')' + << std::endl; { BlockCompressedSimpleSparsityPattern csp (2,2); @@ -509,7 +509,7 @@ namespace Step42 system_rhs.block(1).reinit (n_p); system_rhs.collect_sizes (); - //now setup stuff for mg + //now setup stuff for mg const unsigned int nlevels = triangulation.n_levels(); mg_matrices.resize(0, nlevels-1); @@ -523,24 +523,24 @@ namespace Step42 mg_dofs_per_component[level].resize (2); MGTools::count_dofs_per_block (dof_handler, mg_dofs_per_component, - block_component); + block_component); for (unsigned int level=0; level quadrature_formula(degree+2); FEValues fe_values (fe, quadrature_formula, - update_values | - update_quadrature_points | - 
update_JxW_values | - update_gradients); + update_values | + update_quadrature_points | + update_JxW_values | + update_gradients); const unsigned int dofs_per_cell = fe.dofs_per_cell; @@ -571,7 +571,7 @@ namespace Step42 const RightHandSide right_hand_side; std::vector > rhs_values (n_q_points, - Vector(dim+1)); + Vector(dim+1)); const FEValuesExtractors::Vector velocities (0); @@ -588,49 +588,49 @@ namespace Step42 endc = dof_handler.end(); for (; cell!=endc; ++cell) { - fe_values.reinit (cell); - local_matrix = 0; - local_rhs = 0; - - right_hand_side.vector_value_list(fe_values.get_quadrature_points(), - rhs_values); - - for (unsigned int q=0; qget_dof_indices (local_dof_indices); - constraints.distribute_local_to_global (local_matrix, local_rhs, - local_dof_indices, - system_matrix, system_rhs); + fe_values.reinit (cell); + local_matrix = 0; + local_rhs = 0; + + right_hand_side.vector_value_list(fe_values.get_quadrature_points(), + rhs_values); + + for (unsigned int q=0; qget_dof_indices (local_dof_indices); + constraints.distribute_local_to_global (local_matrix, local_rhs, + local_dof_indices, + system_matrix, system_rhs); } } @@ -640,10 +640,10 @@ namespace Step42 { QGauss quadrature_formula(degree+2); FEValues fe_values (fe, quadrature_formula, - update_values | - update_quadrature_points | - update_JxW_values | - update_gradients); + update_values | + update_quadrature_points | + update_JxW_values | + update_gradients); const unsigned int dofs_per_cell = fe.dofs_per_cell; const unsigned int n_q_points = quadrature_formula.size(); @@ -669,13 +669,13 @@ namespace Step42 std::vector boundary_interface_constraints (triangulation.n_levels()); for (unsigned int level=0; level::cell_iterator @@ -683,60 +683,60 @@ namespace Step42 endc = dof_handler.end(); for (; cell!=endc; ++cell) { - // Remember the level of the - // current cell. - const unsigned int level = cell->level(); - // Compute the values specified - // by update flags above. - fe_values.reinit (cell); - local_matrix = 0; - - for (unsigned int q=0; qlevel(); + // Compute the values specified + // by update flags above. 
+ fe_values.reinit (cell); + local_matrix = 0; + + for (unsigned int q=0; qget_mg_dof_indices (local_dof_indices); - boundary_constraints[level] - .distribute_local_to_global (local_matrix, - local_dof_indices, - mg_matrices[level]); - - for (unsigned int i=0; iget_mg_dof_indices (local_dof_indices); + boundary_constraints[level] + .distribute_local_to_global (local_matrix, + local_dof_indices, + mg_matrices[level]); + + for (unsigned int i=0; i::type>(new typename InnerPreconditioner::type()); - mg_A_preconditioner[level] - ->initialize (mg_matrices[level].block(0,0), - typename InnerPreconditioner::type::AdditionalData()); + mg_A_preconditioner[level] + = std_cxx1x::shared_ptr::type>(new typename InnerPreconditioner::type()); + mg_A_preconditioner[level] + ->initialize (mg_matrices[level].block(0,0), + typename InnerPreconditioner::type::AdditionalData()); } } @@ -747,17 +747,17 @@ namespace Step42 public: struct AdditionalData { - const InnerPreconditioner *A_preconditioner; + const InnerPreconditioner *A_preconditioner; }; void initialize (const BlockSparseMatrix &system_matrix, - const AdditionalData &data); + const AdditionalData &data); void vmult (BlockVector &dst, - const BlockVector &src) const; + const BlockVector &src) const; void Tvmult (BlockVector &dst, - const BlockVector &src) const; + const BlockVector &src) const; void clear (); @@ -771,7 +771,7 @@ namespace Step42 void SchurComplementSmoother:: initialize (const BlockSparseMatrix &system_matrix, - const AdditionalData &data) + const AdditionalData &data) { this->system_matrix = &system_matrix; this->A_preconditioner = data.A_preconditioner; @@ -784,7 +784,7 @@ namespace Step42 void SchurComplementSmoother:: vmult (BlockVector &dst, - const BlockVector &src) const + const BlockVector &src) const { #ifdef STEP_42_TEST std::cout << "Entering smoother with " << dst.size() << " unknowns" << std::endl; @@ -816,28 +816,28 @@ schur_rhs -= src.block(1); SchurComplement schur_complement (*system_matrix, A_inverse); - // The usual control structures for - // the solver call are created... - SolverControl solver_control (dst.block(1).size(), - 1e-1*schur_rhs.l2_norm()); - SolverGMRES<> cg (solver_control); - - #ifdef STEP_42_TEST - std::cout << " Starting Schur complement solver -- " - << schur_complement.m() << " unknowns" - << std::endl; - #endif - try - { - cg.solve (schur_complement, dst.block(1), schur_rhs, - PreconditionIdentity()); - } - catch (...) - { - std::cout << "Failure in " << __PRETTY_FUNCTION__ << std::endl; - std::cout << schur_rhs.l2_norm () << std::endl; - abort (); - } + // The usual control structures for + // the solver call are created... + SolverControl solver_control (dst.block(1).size(), + 1e-1*schur_rhs.l2_norm()); + SolverGMRES<> cg (solver_control); + + #ifdef STEP_42_TEST + std::cout << " Starting Schur complement solver -- " + << schur_complement.m() << " unknowns" + << std::endl; + #endif + try + { + cg.solve (schur_complement, dst.block(1), schur_rhs, + PreconditionIdentity()); + } + catch (...) 
+ { + std::cout << "Failure in " << __PRETTY_FUNCTION__ << std::endl; + std::cout << schur_rhs.l2_norm () << std::endl; + abort (); + } // no constraints to be taken care of here #ifdef STEP_42_TEST @@ -878,7 +878,7 @@ std::cout << "Exiting smoother with " << dst.size() << " unknowns" << std::endl; void SchurComplementSmoother:: Tvmult (BlockVector &, - const BlockVector &) const + const BlockVector &) const { Assert (false, ExcNotImplemented()); } @@ -932,24 +932,24 @@ std::cout << "Exiting smoother with " << dst.size() << " unknowns" << std::endl; mg_smoother.set_steps(2); Multigrid > mg(dof_handler, - mg_matrix, - mg_coarse, - mg_transfer, - mg_smoother, - mg_smoother); + mg_matrix, + mg_coarse, + mg_transfer, + mg_smoother, + mg_smoother); mg.set_debug(3); mg.set_edge_matrices(mg_interface_down, mg_interface_up); MGPREC preconditioner(dof_handler, mg, mg_transfer); SolverControl solver_control (system_matrix.m(), - 1e-6*system_rhs.l2_norm()); + 1e-6*system_rhs.l2_norm()); GrowingVectorMemory > vector_memory; SolverGMRES >::AdditionalData gmres_data; gmres_data.max_n_tmp_vectors = 100; SolverGMRES > gmres(solver_control, vector_memory, - gmres_data); + gmres_data); // PreconditionIdentity precondition_identity; #ifdef STEP_42_TEST @@ -957,19 +957,19 @@ std::cout << "Exiting smoother with " << dst.size() << " unknowns" << std::endl; #endif try { - gmres.solve(system_matrix, solution, system_rhs, - preconditioner); + gmres.solve(system_matrix, solution, system_rhs, + preconditioner); } catch (...) { - std::cout << "Failure in " << __PRETTY_FUNCTION__ << std::endl; - abort (); + std::cout << "Failure in " << __PRETTY_FUNCTION__ << std::endl; + abort (); } constraints.distribute (solution); std::cout << solver_control.last_step() - << " outer GMRES iterations "; + << " outer GMRES iterations "; } @@ -981,7 +981,7 @@ std::cout << "Exiting smoother with " << dst.size() << " unknowns" << std::endl; A_preconditioner = std_cxx1x::shared_ptr::type>(new typename InnerPreconditioner::type()); A_preconditioner->initialize (system_matrix.block(0,0), - typename InnerPreconditioner::type::AdditionalData()); + typename InnerPreconditioner::type::AdditionalData()); SparseMatrix pressure_mass_matrix; pressure_mass_matrix.reinit(sparsity_pattern.block(1,1)); @@ -990,7 +990,7 @@ std::cout << "Exiting smoother with " << dst.size() << " unknowns" << std::endl; SparseILU pmass_preconditioner; pmass_preconditioner.initialize (pressure_mass_matrix, - SparseILU::AdditionalData()); + SparseILU::AdditionalData()); InverseMatrix,SparseILU > m_inverse (pressure_mass_matrix, pmass_preconditioner); @@ -1000,22 +1000,22 @@ std::cout << "Exiting smoother with " << dst.size() << " unknowns" << std::endl; preconditioner (system_matrix, m_inverse, *A_preconditioner); SolverControl solver_control (system_matrix.m(), - 1e-6*system_rhs.l2_norm()); + 1e-6*system_rhs.l2_norm()); GrowingVectorMemory > vector_memory; SolverGMRES >::AdditionalData gmres_data; gmres_data.max_n_tmp_vectors = 100; SolverGMRES > gmres(solver_control, vector_memory, - gmres_data); + gmres_data); gmres.solve(system_matrix, solution, system_rhs, - preconditioner); + preconditioner); constraints.distribute (solution); std::cout << " " - << solver_control.last_step() - << " block GMRES iterations "; + << solver_control.last_step() + << " block GMRES iterations "; } @@ -1035,14 +1035,14 @@ std::cout << "Exiting smoother with " << dst.size() << " unknowns" << std::endl; DataOut data_out; data_out.attach_dof_handler (dof_handler); data_out.add_data_vector 
(solution, solution_names, - DataOut::type_dof_data, - data_component_interpretation); + DataOut::type_dof_data, + data_component_interpretation); data_out.build_patches (); std::ostringstream filename; filename << "solution-" - << Utilities::int_to_string (refinement_cycle, 2) - << ".vtk"; + << Utilities::int_to_string (refinement_cycle, 2) + << ".vtk"; std::ofstream output (filename.str().c_str()); data_out.write_vtk (output); @@ -1059,15 +1059,15 @@ std::cout << "Exiting smoother with " << dst.size() << " unknowns" << std::endl; std::vector component_mask (dim+1, false); component_mask[dim] = true; KellyErrorEstimator::estimate (static_cast&>(dof_handler), - QGauss(degree+1), - typename FunctionMap::type(), - solution, - estimated_error_per_cell, - component_mask); + QGauss(degree+1), + typename FunctionMap::type(), + solution, + estimated_error_per_cell, + component_mask); GridRefinement::refine_and_coarsen_fixed_number (triangulation, - estimated_error_per_cell, - 0.3, 0.0); + estimated_error_per_cell, + 0.3, 0.0); triangulation.execute_coarsening_and_refinement (); } @@ -1081,25 +1081,25 @@ std::cout << "Exiting smoother with " << dst.size() << " unknowns" << std::endl; subdivisions[0] = 1; const Point bottom_left = (dim == 2 ? - Point(0,0) : - Point(0,0,0)); + Point(0,0) : + Point(0,0,0)); const Point top_right = (dim == 2 ? - Point(1,1) : - Point(1,1,1)); + Point(1,1) : + Point(1,1,1)); GridGenerator::subdivided_hyper_rectangle (triangulation, - subdivisions, - bottom_left, - top_right); + subdivisions, + bottom_left, + top_right); } for (typename Triangulation::active_cell_iterator - cell = triangulation.begin_active(); - cell != triangulation.end(); ++cell) + cell = triangulation.begin_active(); + cell != triangulation.end(); ++cell) for (unsigned int f=0; f::faces_per_cell; ++f) - if (cell->face(f)->center()[0] == 1) - cell->face(f)->set_all_boundary_indicators(1); + if (cell->face(f)->center()[0] == 1) + cell->face(f)->set_all_boundary_indicators(1); @@ -1107,40 +1107,40 @@ std::cout << "Exiting smoother with " << dst.size() << " unknowns" << std::endl; for (unsigned int refinement_cycle = 0; refinement_cycle<10; - ++refinement_cycle) + ++refinement_cycle) { - std::cout << "Refinement cycle " << refinement_cycle << std::endl; + std::cout << "Refinement cycle " << refinement_cycle << std::endl; - if (refinement_cycle > 0) - refine_mesh (); + if (refinement_cycle > 0) + refine_mesh (); - std::ostringstream out_filename; - out_filename << "gitter" - << refinement_cycle - << ".eps"; + std::ostringstream out_filename; + out_filename << "gitter" + << refinement_cycle + << ".eps"; - std::ofstream grid_output (out_filename.str().c_str()); - GridOut grid_out; - grid_out.write_eps (triangulation, grid_output); + std::ofstream grid_output (out_filename.str().c_str()); + GridOut grid_out; + grid_out.write_eps (triangulation, grid_output); - setup_dofs (); + setup_dofs (); - std::cout << " Assembling..." << std::endl << std::flush; - assemble_system (); + std::cout << " Assembling..." << std::endl << std::flush; + assemble_system (); - std::cout << " Solving..." << std::flush; + std::cout << " Solving..." 
<< std::flush; - solve_block (); - output_results (refinement_cycle); - system ("mv solution-* block"); + solve_block (); + output_results (refinement_cycle); + system ("mv solution-* block"); - solution = 0; + solution = 0; - solve (); - output_results (refinement_cycle); - system ("mv solution-* mg"); + solve (); + output_results (refinement_cycle); + system ("mv solution-* mg"); - std::cout << std::endl; + std::cout << std::endl; } } } diff --git a/deal.II/examples/step-43/step-43.cc b/deal.II/examples/step-43/step-43.cc index 207258096a..46bc040e5c 100644 --- a/deal.II/examples/step-43/step-43.cc +++ b/deal.II/examples/step-43/step-43.cc @@ -11,20 +11,20 @@ /* further information on this license. */ - // @sect3{Include files} - - // The first step, as always, is to - // include the functionality of a - // number of deal.II and C++ header - // files. - // - // The list includes some header - // files that provide vector, matrix, - // and preconditioner classes that - // implement interfaces to the - // respective Trilinos classes; some - // more information on these may be - // found in step-31. + // @sect3{Include files} + + // The first step, as always, is to + // include the functionality of a + // number of deal.II and C++ header + // files. + // + // The list includes some header + // files that provide vector, matrix, + // and preconditioner classes that + // implement interfaces to the + // respective Trilinos classes; some + // more information on these may be + // found in step-31. #include #include #include @@ -66,23 +66,23 @@ #include - // At the end of this top-matter, we - // open a namespace for the current - // project into which all the - // following material will go, and - // then import all deal.II names into - // this namespace: + // At the end of this top-matter, we + // open a namespace for the current + // project into which all the + // following material will go, and + // then import all deal.II names into + // this namespace: namespace Step43 { using namespace dealii; - // @sect3{Pressure right hand side, pressure boundary values and saturation initial value classes} + // @sect3{Pressure right hand side, pressure boundary values and saturation initial value classes} - // The following part is taken - // directly from step-21 so there is - // no need to repeat the - // descriptions found there. + // The following part is taken + // directly from step-21 so there is + // no need to repeat the + // descriptions found there. 
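The Function classes declared next are used in the standard deal.II way, namely by evaluating them at individual points. A purely illustrative snippet (the evaluation point and the space dimension are made up for this example):

  const Point<2>              p (0.25, 0.5);
  PressureBoundaryValues<2>   pressure_boundary;
  SaturationBoundaryValues<2> saturation_boundary;

  // PressureBoundaryValues returns 1-x; SaturationBoundaryValues returns
  // 1 on the inflow boundary x==0 and 0 elsewhere:
  const double p_value = pressure_boundary.value (p);    // 0.75
  const double s_value = saturation_boundary.value (p);  // 0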
template class PressureRightHandSide : public Function { @@ -90,7 +90,7 @@ namespace Step43 PressureRightHandSide () : Function(1) {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; }; @@ -98,7 +98,7 @@ namespace Step43 template double PressureRightHandSide::value (const Point &/*p*/, - const unsigned int /*component*/) const + const unsigned int /*component*/) const { return 0; } @@ -111,14 +111,14 @@ namespace Step43 PressureBoundaryValues () : Function(1) {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; }; template double PressureBoundaryValues::value (const Point &p, - const unsigned int /*component*/) const + const unsigned int /*component*/) const { return 1-p[0]; } @@ -131,7 +131,7 @@ namespace Step43 SaturationBoundaryValues () : Function(1) {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; }; @@ -139,7 +139,7 @@ namespace Step43 template double SaturationBoundaryValues::value (const Point &p, - const unsigned int /*component*/) const + const unsigned int /*component*/) const { if (p[0] == 0) return 1; @@ -155,17 +155,17 @@ namespace Step43 SaturationInitialValues () : Function(1) {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; virtual void vector_value (const Point &p, - Vector &value) const; + Vector &value) const; }; template double SaturationInitialValues::value (const Point &/*p*/, - const unsigned int /*component*/) const + const unsigned int /*component*/) const { return 0.2; } @@ -174,59 +174,59 @@ namespace Step43 template void SaturationInitialValues::vector_value (const Point &p, - Vector &values) const + Vector &values) const { for (unsigned int c=0; cn_components; ++c) values(c) = SaturationInitialValues::value (p,c); } - // @sect3{Permeability models} + // @sect3{Permeability models} - // In this tutorial, we still use - // the two permeability models - // previously used in step-21 so we - // again refrain from commenting in - // detail about them. + // In this tutorial, we still use + // the two permeability models + // previously used in step-21 so we + // again refrain from commenting in + // detail about them. 
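Both permeability models that follow are accessed through the TensorFunction<2,dim> interface, that is, by asking for the inverse permeability tensor at all quadrature points of a cell in a single call. Schematically, and with variable names borrowed from the assembly routines further down (so this is a fragment, not stand-alone code):

  // Query K^{-1} at the quadrature points of the current cell ...
  std::vector<Tensor<2,dim> > k_inverse_values (n_q_points);
  k_inverse.value_list (darcy_fe_values.get_quadrature_points (),
                        k_inverse_values);

  // ... and invert where the permeability itself is needed:
  const Tensor<2,dim> permeability = invert (k_inverse_values[q]);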
namespace SingleCurvingCrack { template class KInverse : public TensorFunction<2,dim> { public: - KInverse () - : - TensorFunction<2,dim> () - {} + KInverse () + : + TensorFunction<2,dim> () + {} - virtual void value_list (const std::vector > &points, - std::vector > &values) const; + virtual void value_list (const std::vector > &points, + std::vector > &values) const; }; template void KInverse::value_list (const std::vector > &points, - std::vector > &values) const + std::vector > &values) const { Assert (points.size() == values.size(), - ExcDimensionMismatch (points.size(), values.size())); + ExcDimensionMismatch (points.size(), values.size())); for (unsigned int p=0; p { public: - KInverse () - : - TensorFunction<2,dim> () - {} + KInverse () + : + TensorFunction<2,dim> () + {} - virtual void value_list (const std::vector > &points, - std::vector > &values) const; + virtual void value_list (const std::vector > &points, + std::vector > &values) const; private: - static std::vector > centers; + static std::vector > centers; - static std::vector > get_centers (); + static std::vector > get_centers (); }; @@ -263,15 +263,15 @@ namespace Step43 KInverse::get_centers () { const unsigned int N = (dim == 2 ? - 40 : - (dim == 3 ? - 100 : - throw ExcNotImplemented())); + 40 : + (dim == 3 ? + 100 : + throw ExcNotImplemented())); std::vector > centers_list (N); for (unsigned int i=0; i(rand())/RAND_MAX; + for (unsigned int d=0; d(rand())/RAND_MAX; return centers_list; } @@ -281,81 +281,81 @@ namespace Step43 template void KInverse::value_list (const std::vector > &points, - std::vector > &values) const + std::vector > &values) const { Assert (points.size() == values.size(), - ExcDimensionMismatch (points.size(), values.size())); + ExcDimensionMismatch (points.size(), values.size())); for (unsigned int p=0; p= 0) && (S<=1), - ExcMessage ("Saturation is outside its physically valid range.")); + ExcMessage ("Saturation is outside its physically valid range.")); return S*S / ( S * S + viscosity * (1-S) * (1-S)); } double fractional_flow_derivative (const double S, - const double viscosity) + const double viscosity) { Assert ((S >= 0) && (S<=1), - ExcMessage ("Saturation is outside its physically valid range.")); + ExcMessage ("Saturation is outside its physically valid range.")); const double temp = ( S * S + viscosity * (1-S) * (1-S) ); const double numerator = 2.0 * S * temp - - - S * S * - ( 2.0 * S - 2.0 * viscosity * (1-S) ); + - + S * S * + ( 2.0 * S - 2.0 * viscosity * (1-S) ); const double denominator = std::pow(temp, 2.0); const double F_prime = numerator / denominator; @@ -366,47 +366,47 @@ namespace Step43 } - // @sect3{Helper classes for solvers and preconditioners} - - // In this first part we define a - // number of classes that we need - // in the construction of linear - // solvers and - // preconditioners. This part is - // essentially the same as that - // used in step-31. The only - // difference is that the original - // variable name stokes_matrix is - // replaced by another name - // darcy_matrix to match our - // problem. + // @sect3{Helper classes for solvers and preconditioners} + + // In this first part we define a + // number of classes that we need + // in the construction of linear + // solvers and + // preconditioners. This part is + // essentially the same as that + // used in step-31. The only + // difference is that the original + // variable name stokes_matrix is + // replaced by another name + // darcy_matrix to match our + // problem. 
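One remark before turning to the solver helper classes: the fractional_flow and fractional_flow_derivative functions defined a few lines up are nothing but the quotient rule written out. With the abbreviation $T(S) = S^2 + \mu (1-S)^2$, where $\mu$ is the viscosity parameter passed as second argument, they compute $F(S) = S^2 / T(S)$ and $F'(S) = \bigl(2 S\, T(S) - S^2\, T'(S)\bigr) / T(S)^2$ with $T'(S) = 2S - 2\mu(1-S)$; the numerator and denominator variables in the code are exactly these two expressions.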
namespace LinearSolvers { template class InverseMatrix : public Subscriptor { public: - InverseMatrix (const Matrix &m, - const Preconditioner &preconditioner); + InverseMatrix (const Matrix &m, + const Preconditioner &preconditioner); - template - void vmult (VectorType &dst, - const VectorType &src) const; + template + void vmult (VectorType &dst, + const VectorType &src) const; private: - const SmartPointer matrix; - const Preconditioner &preconditioner; + const SmartPointer matrix; + const Preconditioner &preconditioner; }; template InverseMatrix:: InverseMatrix (const Matrix &m, - const Preconditioner &preconditioner) - : - matrix (&m), - preconditioner (preconditioner) + const Preconditioner &preconditioner) + : + matrix (&m), + preconditioner (preconditioner) {} @@ -416,7 +416,7 @@ namespace Step43 void InverseMatrix:: vmult (VectorType &dst, - const VectorType &src) const + const VectorType &src) const { SolverControl solver_control (src.size(), 1e-7*src.l2_norm()); SolverCG cg (solver_control); @@ -424,35 +424,35 @@ namespace Step43 dst = 0; try - { - cg.solve (*matrix, dst, src, preconditioner); - } + { + cg.solve (*matrix, dst, src, preconditioner); + } catch (std::exception &e) - { - Assert (false, ExcMessage(e.what())); - } + { + Assert (false, ExcMessage(e.what())); + } } template class BlockSchurPreconditioner : public Subscriptor { public: - BlockSchurPreconditioner ( - const TrilinosWrappers::BlockSparseMatrix &S, - const InverseMatrix &Mpinv, - const PreconditionerA &Apreconditioner); + BlockSchurPreconditioner ( + const TrilinosWrappers::BlockSparseMatrix &S, + const InverseMatrix &Mpinv, + const PreconditionerA &Apreconditioner); - void vmult (TrilinosWrappers::BlockVector &dst, - const TrilinosWrappers::BlockVector &src) const; + void vmult (TrilinosWrappers::BlockVector &dst, + const TrilinosWrappers::BlockVector &src) const; private: - const SmartPointer darcy_matrix; - const SmartPointer > m_inverse; - const PreconditionerA &a_preconditioner; + const SmartPointer darcy_matrix; + const SmartPointer > m_inverse; + const PreconditionerA &a_preconditioner; - mutable TrilinosWrappers::Vector tmp; + mutable TrilinosWrappers::Vector tmp; }; @@ -460,14 +460,14 @@ namespace Step43 template BlockSchurPreconditioner:: BlockSchurPreconditioner(const TrilinosWrappers::BlockSparseMatrix &S, - const InverseMatrix &Mpinv, - const PreconditionerA &Apreconditioner) - : - darcy_matrix (&S), - m_inverse (&Mpinv), - a_preconditioner (Apreconditioner), - tmp (darcy_matrix->block(1,1).m()) + const InverseMatrix &Mpinv, + const PreconditionerA &Apreconditioner) + : + darcy_matrix (&S), + m_inverse (&Mpinv), + a_preconditioner (Apreconditioner), + tmp (darcy_matrix->block(1,1).m()) {} @@ -484,66 +484,66 @@ namespace Step43 } - // @sect3{The TwoPhaseFlowProblem class} - - // The definition of the class that - // defines the top-level logic of - // solving the time-dependent - // advection-dominated two-phase - // flow problem (or - // Buckley-Leverett problem - // [Buckley 1942]) is mainly based - // on tutorial programs step-21 and - // step-33, and in particular on - // step-31 where we have used - // basically the same general - // structure as done here. As in - // step-31, the key routines to - // look for in the implementation - // below are the run() - // and solve() - // functions. 
- // - // The main difference to step-31 - // is that, since adaptive operator - // splitting is considered, we need - // a couple more member variables - // to hold the last two computed - // Darcy (velocity/pressure) - // solutions in addition to the - // current one (which is either - // computed directly, or - // extrapolated from the previous - // two), and we need to remember - // the last two times we computed - // the Darcy solution. We also need - // a helper function that figures - // out whether we do indeed need to - // recompute the Darcy solution. - // - // Unlike step-31, this step uses - // one more ConstraintMatrix object - // called - // darcy_preconditioner_constraints. This - // constraint object is used only - // for assembling the matrix for - // the Darcy preconditioner and - // includes hanging node constrants - // as well as Dirichlet boundary - // value constraints for the - // pressure variable. We need this - // because we are building a - // Laplace matrix for the pressure - // as an approximation of the Schur - // complement) which is only - // positive definite if boundary - // conditions are applied. - // - // The collection of member - // functions and variables thus - // declared in this class is then - // rather similar to those in - // step-31: + // @sect3{The TwoPhaseFlowProblem class} + + // The definition of the class that + // defines the top-level logic of + // solving the time-dependent + // advection-dominated two-phase + // flow problem (or + // Buckley-Leverett problem + // [Buckley 1942]) is mainly based + // on tutorial programs step-21 and + // step-33, and in particular on + // step-31 where we have used + // basically the same general + // structure as done here. As in + // step-31, the key routines to + // look for in the implementation + // below are the run() + // and solve() + // functions. + // + // The main difference to step-31 + // is that, since adaptive operator + // splitting is considered, we need + // a couple more member variables + // to hold the last two computed + // Darcy (velocity/pressure) + // solutions in addition to the + // current one (which is either + // computed directly, or + // extrapolated from the previous + // two), and we need to remember + // the last two times we computed + // the Darcy solution. We also need + // a helper function that figures + // out whether we do indeed need to + // recompute the Darcy solution. + // + // Unlike step-31, this step uses + // one more ConstraintMatrix object + // called + // darcy_preconditioner_constraints. This + // constraint object is used only + // for assembling the matrix for + // the Darcy preconditioner and + // includes hanging node constrants + // as well as Dirichlet boundary + // value constraints for the + // pressure variable. We need this + // because we are building a + // Laplace matrix for the pressure + // as an approximation of the Schur + // complement) which is only + // positive definite if boundary + // conditions are applied. 
+ // + // The collection of member + // functions and variables thus + // declared in this class is then + // rather similar to those in + // step-31: template class TwoPhaseFlowProblem { @@ -560,44 +560,44 @@ namespace Step43 void assemble_saturation_matrix (); void assemble_saturation_rhs (); void assemble_saturation_rhs_cell_term (const FEValues &saturation_fe_values, - const FEValues &darcy_fe_values, - const double global_max_u_F_prime, - const double global_S_variation, - const std::vector &local_dof_indices); + const FEValues &darcy_fe_values, + const double global_max_u_F_prime, + const double global_S_variation, + const std::vector &local_dof_indices); void assemble_saturation_rhs_boundary_term (const FEFaceValues &saturation_fe_face_values, - const FEFaceValues &darcy_fe_face_values, - const std::vector &local_dof_indices); + const FEFaceValues &darcy_fe_face_values, + const std::vector &local_dof_indices); void solve (); void refine_mesh (const unsigned int min_grid_level, - const unsigned int max_grid_level); + const unsigned int max_grid_level); void output_results () const; - // We follow with a number of - // helper functions that are - // used in a variety of places - // throughout the program: + // We follow with a number of + // helper functions that are + // used in a variety of places + // throughout the program: double get_max_u_F_prime () const; std::pair get_extrapolated_saturation_range () const; bool determine_whether_to_solve_for_pressure_and_velocity () const; void project_back_saturation (); double compute_viscosity (const std::vector &old_saturation, - const std::vector &old_old_saturation, - const std::vector > &old_saturation_grads, - const std::vector > &old_old_saturation_grads, - const std::vector > &present_darcy_values, - const double global_max_u_F_prime, - const double global_S_variation, - const double cell_diameter) const; - - - // This all is followed by the - // member variables, most of - // which are similar to the - // ones in step-31, with the - // exception of the ones that - // pertain to the macro time - // stepping for the - // velocity/pressure system: + const std::vector &old_old_saturation, + const std::vector > &old_saturation_grads, + const std::vector > &old_old_saturation_grads, + const std::vector > &present_darcy_values, + const double global_max_u_F_prime, + const double global_S_variation, + const double cell_diameter) const; + + + // This all is followed by the + // member variables, most of + // which are similar to the + // ones in step-31, with the + // exception of the ones that + // pertain to the macro time + // stepping for the + // velocity/pressure system: Triangulation triangulation; double global_Omega_diameter; @@ -656,146 +656,146 @@ namespace Step43 bool rebuild_saturation_matrix; - // At the very end we declare a - // variable that denotes the - // material model. Compared to - // step-21, we do this here as - // a member variable since we - // will want to use it in a - // variety of places and so - // having a central place where - // such a variable is declared - // will make it simpler to - // replace one class by another - // (e.g. replace - // RandomMedium::KInverse by - // SingleCurvingCrack::KInverse). + // At the very end we declare a + // variable that denotes the + // material model. 
Compared to + // step-21, we do this here as + // a member variable since we + // will want to use it in a + // variety of places and so + // having a central place where + // such a variable is declared + // will make it simpler to + // replace one class by another + // (e.g. replace + // RandomMedium::KInverse by + // SingleCurvingCrack::KInverse). const RandomMedium::KInverse k_inverse; }; - // @sect3{TwoPhaseFlowProblem::TwoPhaseFlowProblem} - - // The constructor of this class is an - // extension of the constructors in step-21 - // and step-31. We need to add the various - // variables that concern the saturation. As - // discussed in the introduction, we are - // going to use $Q_2 \times Q_1$ - // (Taylor-Hood) elements again for the Darcy - // system, an element combination that fulfills - // the Ladyzhenskaya-Babuska-Brezzi (LBB) - // conditions - // [Brezzi and Fortin 1991, Chen 2005], and $Q_1$ - // elements for the saturation. However, by - // using variables that store the polynomial - // degree of the Darcy and temperature finite - // elements, it is easy to consistently - // modify the degree of the elements as well - // as all quadrature formulas used on them - // downstream. Moreover, we initialize the - // time stepping variables related to - // operator splitting as well as the option - // for matrix assembly and preconditioning: + // @sect3{TwoPhaseFlowProblem::TwoPhaseFlowProblem} + + // The constructor of this class is an + // extension of the constructors in step-21 + // and step-31. We need to add the various + // variables that concern the saturation. As + // discussed in the introduction, we are + // going to use $Q_2 \times Q_1$ + // (Taylor-Hood) elements again for the Darcy + // system, an element combination that fulfills + // the Ladyzhenskaya-Babuska-Brezzi (LBB) + // conditions + // [Brezzi and Fortin 1991, Chen 2005], and $Q_1$ + // elements for the saturation. However, by + // using variables that store the polynomial + // degree of the Darcy and temperature finite + // elements, it is easy to consistently + // modify the degree of the elements as well + // as all quadrature formulas used on them + // downstream. 
Moreover, we initialize the + // time stepping variables related to + // operator splitting as well as the option + // for matrix assembly and preconditioning: template TwoPhaseFlowProblem::TwoPhaseFlowProblem (const unsigned int degree) - : - triangulation (Triangulation::maximum_smoothing), + : + triangulation (Triangulation::maximum_smoothing), - degree (degree), - darcy_degree (degree), - darcy_fe (FE_Q(darcy_degree+1), dim, - FE_Q(darcy_degree), 1), - darcy_dof_handler (triangulation), + degree (degree), + darcy_degree (degree), + darcy_fe (FE_Q(darcy_degree+1), dim, + FE_Q(darcy_degree), 1), + darcy_dof_handler (triangulation), - saturation_degree (degree+1), - saturation_fe (saturation_degree), - saturation_dof_handler (triangulation), + saturation_degree (degree+1), + saturation_fe (saturation_degree), + saturation_dof_handler (triangulation), - saturation_refinement_threshold (0.5), + saturation_refinement_threshold (0.5), - time (0), - end_time (10), + time (0), + end_time (10), - current_macro_time_step (0), - old_macro_time_step (0), + current_macro_time_step (0), + old_macro_time_step (0), - time_step (0), - old_time_step (0), - viscosity (0.2), + time_step (0), + old_time_step (0), + viscosity (0.2), porosity (1.0), AOS_threshold (3.0), - rebuild_saturation_matrix (true) + rebuild_saturation_matrix (true) {} - // @sect3{TwoPhaseFlowProblem::setup_dofs} - - // This is the function that sets up the - // DoFHandler objects we have here (one for - // the Darcy part and one for the saturation - // part) as well as set to the right sizes - // the various objects required for the - // linear algebra in this program. Its basic - // operations are similar to what - // step-31 did. - // - // The body of the function first enumerates - // all degrees of freedom for the Darcy and - // saturation systems. For the Darcy part, - // degrees of freedom are then sorted to - // ensure that velocities precede pressure - // DoFs so that we can partition the Darcy - // matrix into a $2 \times 2$ matrix. - // - // Then, we need to incorporate - // hanging node constraints and - // Dirichlet boundary value - // constraints into - // darcy_preconditioner_constraints. - // The boundary condition - // constraints are only set on the - // pressure component since the - // Schur complement preconditioner - // that corresponds to the porous - // media flow operator in non-mixed - // form, $-\nabla \cdot [\mathbf K - // \lambda_t(S)]\nabla$, acts only - // on the pressure - // variable. Therefore, we use a - // component_mask that filters out - // the velocity component, so that - // the condensation is performed on - // pressure degrees of freedom - // only. - // - // After having done so, we count - // the number of degrees of freedom - // in the various blocks. This - // information is then used to - // create the sparsity pattern for - // the Darcy and saturation system - // matrices as well as the - // preconditioner matrix from which - // we build the Darcy - // preconditioner. As in step-31, - // we choose to create the pattern - // not as in the first few tutorial - // programs, but by using the - // blocked version of - // CompressedSimpleSparsityPattern. The - // reason for doing this is mainly - // memory, that is, the - // SparsityPattern class would - // consume too much memory when - // used in three spatial dimensions - // as we intend to do for this - // program. 
So, for this, we follow - // the same way as step-31 did and - // we don't have to repeat - // descriptions again for the rest - // of the member function. + // @sect3{TwoPhaseFlowProblem::setup_dofs} + + // This is the function that sets up the + // DoFHandler objects we have here (one for + // the Darcy part and one for the saturation + // part) as well as set to the right sizes + // the various objects required for the + // linear algebra in this program. Its basic + // operations are similar to what + // step-31 did. + // + // The body of the function first enumerates + // all degrees of freedom for the Darcy and + // saturation systems. For the Darcy part, + // degrees of freedom are then sorted to + // ensure that velocities precede pressure + // DoFs so that we can partition the Darcy + // matrix into a $2 \times 2$ matrix. + // + // Then, we need to incorporate + // hanging node constraints and + // Dirichlet boundary value + // constraints into + // darcy_preconditioner_constraints. + // The boundary condition + // constraints are only set on the + // pressure component since the + // Schur complement preconditioner + // that corresponds to the porous + // media flow operator in non-mixed + // form, $-\nabla \cdot [\mathbf K + // \lambda_t(S)]\nabla$, acts only + // on the pressure + // variable. Therefore, we use a + // component_mask that filters out + // the velocity component, so that + // the condensation is performed on + // pressure degrees of freedom + // only. + // + // After having done so, we count + // the number of degrees of freedom + // in the various blocks. This + // information is then used to + // create the sparsity pattern for + // the Darcy and saturation system + // matrices as well as the + // preconditioner matrix from which + // we build the Darcy + // preconditioner. As in step-31, + // we choose to create the pattern + // not as in the first few tutorial + // programs, but by using the + // blocked version of + // CompressedSimpleSparsityPattern. The + // reason for doing this is mainly + // memory, that is, the + // SparsityPattern class would + // consume too much memory when + // used in three spatial dimensions + // as we intend to do for this + // program. So, for this, we follow + // the same way as step-31 did and + // we don't have to repeat + // descriptions again for the rest + // of the member function. 
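The sparsity pattern construction alluded to in the last paragraph therefore follows the step-31 recipe. In condensed form it amounts to something like the following sketch (block sizes n_u, n_p and the coupling table are the ones computed in the function body below; the name of the constraints object is an assumption of this sketch):

  BlockCompressedSimpleSparsityPattern csp (2,2);
  csp.block(0,0).reinit (n_u, n_u);
  csp.block(0,1).reinit (n_u, n_p);
  csp.block(1,0).reinit (n_p, n_u);
  csp.block(1,1).reinit (n_p, n_p);
  csp.collect_sizes ();

  // The coupling table masks out blocks that are structurally zero, and
  // passing 'false' keeps constrained entries out of the pattern:
  DoFTools::make_sparsity_pattern (darcy_dof_handler, coupling,
                                   csp, darcy_constraints, false);
  darcy_matrix.reinit (csp);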
template void TwoPhaseFlowProblem::setup_dofs () { @@ -834,20 +834,20 @@ namespace Step43 std::vector darcy_dofs_per_block (2); DoFTools::count_dofs_per_block (darcy_dof_handler, darcy_dofs_per_block, darcy_block_component); const unsigned int n_u = darcy_dofs_per_block[0], - n_p = darcy_dofs_per_block[1], - n_s = saturation_dof_handler.n_dofs(); + n_p = darcy_dofs_per_block[1], + n_s = saturation_dof_handler.n_dofs(); std::cout << "Number of active cells: " - << triangulation.n_active_cells() - << " (on " - << triangulation.n_levels() - << " levels)" - << std::endl - << "Number of degrees of freedom: " - << n_u + n_p + n_s - << " (" << n_u << '+' << n_p << '+'<< n_s <<')' - << std::endl - << std::endl; + << triangulation.n_active_cells() + << " (on " + << triangulation.n_levels() + << " levels)" + << std::endl + << "Number of degrees of freedom: " + << n_u + n_p + n_s + << " (" << n_u << '+' << n_p << '+'<< n_s <<')' + << std::endl + << std::endl; { darcy_matrix.clear (); @@ -864,15 +864,15 @@ namespace Step43 Table<2,DoFTools::Coupling> coupling (dim+1, dim+1); for (unsigned int c=0; c coupling (dim+1, dim+1); for (unsigned int c=0; c::assemble_darcy_preconditioner} - - // This function assembles the matrix we use - // for preconditioning the Darcy system. What - // we need are a vector mass matrix weighted by - // $\left(\mathbf{K} \lambda_t\right)^{-1}$ - // on the velocity components and a mass - // matrix weighted by $\left(\mathbf{K} - // \lambda_t\right)$ on the pressure - // component. We start by generating a - // quadrature object of appropriate order, - // the FEValues object that can give values - // and gradients at the quadrature points - // (together with quadrature weights). Next - // we create data structures for the cell - // matrix and the relation between local and - // global DoFs. The vectors phi_u and - // grad_phi_p are going to hold the values of - // the basis functions in order to faster - // build up the local matrices, as was - // already done in step-22. Before we start - // the loop over all active cells, we have to - // specify which components are pressure and - // which are velocity. - // - // The creation of the local matrix - // is rather simple. There are only - // a term weighted by - // $\left(\mathbf{K} - // \lambda_t\right)^{-1}$ (on the - // velocity) and a Laplace matrix - // weighted by $\left(\mathbf{K} - // \lambda_t\right)$ to be - // generated, so the creation of - // the local matrix is done in - // essentially two lines. Since the - // material model functions at the - // top of this file only provide - // the inverses of the permeability - // and mobility, we have to compute - // $\mathbf K$ and $\lambda_t$ by - // hand from the given values, once - // per quadrature point. - // - // Once the - // local matrix is ready (loop over - // rows and columns in the local - // matrix on each quadrature - // point), we get the local DoF - // indices and write the local - // information into the global - // matrix. We do this by directly - // applying the constraints - // (i.e. darcy_preconditioner_constraints) - // that takes care of hanging node - // and zero Dirichlet boundary - // condition constraints. 
By doing - // so, we don't have to do that - // afterwards, and we later don't - // have to use - // ConstraintMatrix::condense and - // MatrixTools::apply_boundary_values, - // both functions that would need - // to modify matrix and vector - // entries and so are difficult to - // write for the Trilinos classes - // where we don't immediately have - // access to individual memory - // locations. + // @sect3{Assembling matrices and preconditioners} + + // The next few functions are + // devoted to setting up the + // various system and + // preconditioner matrices and + // right hand sides that we have to + // deal with in this program. + + // @sect4{TwoPhaseFlowProblem::assemble_darcy_preconditioner} + + // This function assembles the matrix we use + // for preconditioning the Darcy system. What + // we need are a vector mass matrix weighted by + // $\left(\mathbf{K} \lambda_t\right)^{-1}$ + // on the velocity components and a mass + // matrix weighted by $\left(\mathbf{K} + // \lambda_t\right)$ on the pressure + // component. We start by generating a + // quadrature object of appropriate order, + // the FEValues object that can give values + // and gradients at the quadrature points + // (together with quadrature weights). Next + // we create data structures for the cell + // matrix and the relation between local and + // global DoFs. The vectors phi_u and + // grad_phi_p are going to hold the values of + // the basis functions in order to faster + // build up the local matrices, as was + // already done in step-22. Before we start + // the loop over all active cells, we have to + // specify which components are pressure and + // which are velocity. + // + // The creation of the local matrix + // is rather simple. There are only + // a term weighted by + // $\left(\mathbf{K} + // \lambda_t\right)^{-1}$ (on the + // velocity) and a Laplace matrix + // weighted by $\left(\mathbf{K} + // \lambda_t\right)$ to be + // generated, so the creation of + // the local matrix is done in + // essentially two lines. Since the + // material model functions at the + // top of this file only provide + // the inverses of the permeability + // and mobility, we have to compute + // $\mathbf K$ and $\lambda_t$ by + // hand from the given values, once + // per quadrature point. + // + // Once the + // local matrix is ready (loop over + // rows and columns in the local + // matrix on each quadrature + // point), we get the local DoF + // indices and write the local + // information into the global + // matrix. We do this by directly + // applying the constraints + // (i.e. darcy_preconditioner_constraints) + // that takes care of hanging node + // and zero Dirichlet boundary + // condition constraints. By doing + // so, we don't have to do that + // afterwards, and we later don't + // have to use + // ConstraintMatrix::condense and + // MatrixTools::apply_boundary_values, + // both functions that would need + // to modify matrix and vector + // entries and so are difficult to + // write for the Trilinos classes + // where we don't immediately have + // access to individual memory + // locations. 
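In formulas, and up to the quadrature approximation, the local matrix built in the following function therefore contains the cell integrals $\int \left(\mathbf{K}\lambda_t(S)\right)^{-1} \varphi_{\mathbf{u},i} \cdot \varphi_{\mathbf{u},j}\, dx$ between two velocity shape functions and $\int \left(\mathbf{K}\lambda_t(S)\right) \nabla\varphi_{p,i} \cdot \nabla\varphi_{p,j}\, dx$ between two pressure shape functions, with the saturation $S$ taken from the previous time step; the mixed velocity-pressure entries contribute nothing because one of the two shape function factors vanishes there.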
template void TwoPhaseFlowProblem::assemble_darcy_preconditioner () @@ -1036,12 +1036,12 @@ namespace Step43 const QGauss quadrature_formula(darcy_degree+2); FEValues darcy_fe_values (darcy_fe, quadrature_formula, - update_JxW_values | - update_values | - update_gradients | - update_quadrature_points); + update_JxW_values | + update_values | + update_gradients | + update_quadrature_points); FEValues saturation_fe_values (saturation_fe, quadrature_formula, - update_values); + update_values); const unsigned int dofs_per_cell = darcy_fe.dofs_per_cell; const unsigned int n_q_points = quadrature_formula.size(); @@ -1067,89 +1067,89 @@ namespace Step43 for (; cell!=endc; ++cell, ++saturation_cell) { - darcy_fe_values.reinit (cell); - saturation_fe_values.reinit (saturation_cell); - - local_matrix = 0; - - saturation_fe_values.get_function_values (old_saturation_solution, old_saturation_values); - - k_inverse.value_list (darcy_fe_values.get_quadrature_points(), - k_inverse_values); - - for (unsigned int q=0; q permeability = invert(k_inverse_values[q]); - - for (unsigned int k=0; kget_dof_indices (local_dof_indices); - darcy_preconditioner_constraints.distribute_local_to_global (local_matrix, - local_dof_indices, - darcy_preconditioner_matrix); + darcy_fe_values.reinit (cell); + saturation_fe_values.reinit (saturation_cell); + + local_matrix = 0; + + saturation_fe_values.get_function_values (old_saturation_solution, old_saturation_values); + + k_inverse.value_list (darcy_fe_values.get_quadrature_points(), + k_inverse_values); + + for (unsigned int q=0; q permeability = invert(k_inverse_values[q]); + + for (unsigned int k=0; kget_dof_indices (local_dof_indices); + darcy_preconditioner_constraints.distribute_local_to_global (local_matrix, + local_dof_indices, + darcy_preconditioner_matrix); } } - // @sect4{TwoPhaseFlowProblem::build_darcy_preconditioner} - - // After calling the above - // functions to assemble the - // preconditioner matrix, this - // function generates the inner - // preconditioners that are going - // to be used for the Schur - // complement block - // preconditioner. The - // preconditioners need to be - // regenerated at every saturation - // time step since they depend on - // the saturation $S$ that varies - // with time. - // - // In here, we set up the - // preconditioner for the - // velocity-velocity matrix - // $\mathbf{M}^{\mathbf{u}}$ and - // the Schur complement - // $\mathbf{S}$. As explained in - // the introduction, we are going - // to use an IC preconditioner - // based on the vector matrix - // $\mathbf{M}^{\mathbf{u}}$ and - // another based on the scalar - // Laplace matrix - // $\tilde{\mathbf{S}}^p$ (which is - // spectrally close to the Schur - // complement of the Darcy - // matrix). Usually, the - // TrilinosWrappers::PreconditionIC - // class can be seen as a good - // black-box preconditioner which - // does not need any special - // knowledge of the matrix - // structure and/or the operator - // that's behind it. + // @sect4{TwoPhaseFlowProblem::build_darcy_preconditioner} + + // After calling the above + // functions to assemble the + // preconditioner matrix, this + // function generates the inner + // preconditioners that are going + // to be used for the Schur + // complement block + // preconditioner. The + // preconditioners need to be + // regenerated at every saturation + // time step since they depend on + // the saturation $S$ that varies + // with time. 
+ // + // In here, we set up the + // preconditioner for the + // velocity-velocity matrix + // $\mathbf{M}^{\mathbf{u}}$ and + // the Schur complement + // $\mathbf{S}$. As explained in + // the introduction, we are going + // to use an IC preconditioner + // based on the vector matrix + // $\mathbf{M}^{\mathbf{u}}$ and + // another based on the scalar + // Laplace matrix + // $\tilde{\mathbf{S}}^p$ (which is + // spectrally close to the Schur + // complement of the Darcy + // matrix). Usually, the + // TrilinosWrappers::PreconditionIC + // class can be seen as a good + // black-box preconditioner which + // does not need any special + // knowledge of the matrix + // structure and/or the operator + // that's behind it. template void TwoPhaseFlowProblem::build_darcy_preconditioner () @@ -1157,50 +1157,50 @@ namespace Step43 assemble_darcy_preconditioner (); Amg_preconditioner = std_cxx1x::shared_ptr - (new TrilinosWrappers::PreconditionIC()); + (new TrilinosWrappers::PreconditionIC()); Amg_preconditioner->initialize(darcy_preconditioner_matrix.block(0,0)); Mp_preconditioner = std_cxx1x::shared_ptr - (new TrilinosWrappers::PreconditionIC()); + (new TrilinosWrappers::PreconditionIC()); Mp_preconditioner->initialize(darcy_preconditioner_matrix.block(1,1)); } - // @sect4{TwoPhaseFlowProblem::assemble_darcy_system} - - // This is the function that assembles the - // linear system for the Darcy system. - // - // Regarding the technical details of - // implementation, the procedures are similar - // to those in step-22 and step-31. We reset - // matrix and vector, create a quadrature - // formula on the cells, and then create the - // respective FEValues object. - // - // There is one thing that needs to be - // commented: since we have a separate - // finite element and DoFHandler for the - // saturation, we need to generate a second - // FEValues object for the proper evaluation - // of the saturation solution. This isn't too - // complicated to realize here: just use the - // saturation structures and set an update - // flag for the basis function values which - // we need for evaluation of the saturation - // solution. The only important part to - // remember here is that the same quadrature - // formula is used for both FEValues objects - // to ensure that we get matching information - // when we loop over the quadrature points of - // the two objects. - // - // The declarations proceed with some - // shortcuts for array sizes, the creation of - // the local matrix, right hand side as well - // as the vector for the indices of the local - // dofs compared to the global system. + // @sect4{TwoPhaseFlowProblem::assemble_darcy_system} + + // This is the function that assembles the + // linear system for the Darcy system. + // + // Regarding the technical details of + // implementation, the procedures are similar + // to those in step-22 and step-31. We reset + // matrix and vector, create a quadrature + // formula on the cells, and then create the + // respective FEValues object. + // + // There is one thing that needs to be + // commented: since we have a separate + // finite element and DoFHandler for the + // saturation, we need to generate a second + // FEValues object for the proper evaluation + // of the saturation solution. This isn't too + // complicated to realize here: just use the + // saturation structures and set an update + // flag for the basis function values which + // we need for evaluation of the saturation + // solution. 
The only important part to + // remember here is that the same quadrature + // formula is used for both FEValues objects + // to ensure that we get matching information + // when we loop over the quadrature points of + // the two objects. + // + // The declarations proceed with some + // shortcuts for array sizes, the creation of + // the local matrix, right hand side as well + // as the vector for the indices of the local + // dofs compared to the global system. template void TwoPhaseFlowProblem::assemble_darcy_system () { @@ -1211,15 +1211,15 @@ namespace Step43 QGauss face_quadrature_formula(darcy_degree+2); FEValues darcy_fe_values (darcy_fe, quadrature_formula, - update_values | update_gradients | - update_quadrature_points | update_JxW_values); + update_values | update_gradients | + update_quadrature_points | update_JxW_values); FEValues saturation_fe_values (saturation_fe, quadrature_formula, - update_values); + update_values); FEFaceValues darcy_fe_face_values (darcy_fe, face_quadrature_formula, - update_values | update_normal_vectors | - update_quadrature_points | update_JxW_values); + update_values | update_normal_vectors | + update_quadrature_points | update_JxW_values); const unsigned int dofs_per_cell = darcy_fe.dofs_per_cell; @@ -1238,33 +1238,33 @@ namespace Step43 std::vector boundary_values (n_face_q_points); std::vector > k_inverse_values (n_q_points); - // Next we need a vector that - // will contain the values of the - // saturation solution at the - // previous time level at the - // quadrature points to assemble - // the saturation dependent - // coefficients in the Darcy - // equations. - // - // The set of vectors we create - // next hold the evaluations of - // the basis functions as well as - // their gradients that will be - // used for creating the - // matrices. Putting these into - // their own arrays rather than - // asking the FEValues object for - // this information each time it - // is needed is an optimization - // to accelerate the assembly - // process, see step-22 for - // details. - // - // The last two declarations are used to - // extract the individual blocks (velocity, - // pressure, saturation) from the total FE - // system. + // Next we need a vector that + // will contain the values of the + // saturation solution at the + // previous time level at the + // quadrature points to assemble + // the saturation dependent + // coefficients in the Darcy + // equations. + // + // The set of vectors we create + // next hold the evaluations of + // the basis functions as well as + // their gradients that will be + // used for creating the + // matrices. Putting these into + // their own arrays rather than + // asking the FEValues object for + // this information each time it + // is needed is an optimization + // to accelerate the assembly + // process, see step-22 for + // details. + // + // The last two declarations are used to + // extract the individual blocks (velocity, + // pressure, saturation) from the total FE + // system. std::vector old_saturation_values (n_q_points); std::vector > phi_u (dofs_per_cell); @@ -1274,72 +1274,72 @@ namespace Step43 const FEValuesExtractors::Vector velocities (0); const FEValuesExtractors::Scalar pressure (dim); - // Now start the loop over all - // cells in the problem. We are - // working on two different - // DoFHandlers for this assembly - // routine, so we must have two - // different cell iterators for - // the two objects in use. 
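For reference, this is how the two extractors are typically used inside the quadrature loop further down; k and q index the shape function and the quadrature point, and the fragment follows the usual deal.II idiom rather than quoting this program:

  const FEValuesExtractors::Vector velocities (0);
  const FEValuesExtractors::Scalar pressure (dim);

  // value/divergence of the velocity part and value of the pressure part of
  // shape function k at quadrature point q:
  const Tensor<1,dim> phi_u     = darcy_fe_values[velocities].value (k, q);
  const double        div_phi_u = darcy_fe_values[velocities].divergence (k, q);
  const double        phi_p     = darcy_fe_values[pressure].value (k, q);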
This - // might seem a bit peculiar, but - // since both the Darcy system - // and the saturation system use - // the same grid we can assume - // that the two iterators run in - // sync over the cells of the two - // DoFHandler objects. - // - // The first statements within - // the loop are again all very - // familiar, doing the update of - // the finite element data as - // specified by the update flags, - // zeroing out the local arrays - // and getting the values of the - // old solution at the quadrature - // points. At this point we also - // have to get the values of the - // saturation function of the - // previous time step at the - // quadrature points. To this - // end, we can use the - // FEValues::get_function_values - // (previously already used in - // step-9, step-14 and step-15), - // a function that takes a - // solution vector and returns a - // list of function values at the - // quadrature points of the - // present cell. In fact, it - // returns the complete - // vector-valued solution at each - // quadrature point, i.e. not - // only the saturation but also - // the velocities and pressure. - // - // Then we are ready to loop over - // the quadrature points on the - // cell to do the - // integration. The formula for - // this follows in a - // straightforward way from what - // has been discussed in the - // introduction. - // - // Once this is done, we start the loop over - // the rows and columns of the local matrix - // and feed the matrix with the relevant - // products. - // - // The last step in the loop over all cells - // is to enter the local contributions into - // the global matrix and vector structures to - // the positions specified in - // local_dof_indices. Again, we let the - // ConstraintMatrix class do the insertion of - // the cell matrix elements to the global - // matrix, which already condenses the - // hanging node constraints. + // Now start the loop over all + // cells in the problem. We are + // working on two different + // DoFHandlers for this assembly + // routine, so we must have two + // different cell iterators for + // the two objects in use. This + // might seem a bit peculiar, but + // since both the Darcy system + // and the saturation system use + // the same grid we can assume + // that the two iterators run in + // sync over the cells of the two + // DoFHandler objects. + // + // The first statements within + // the loop are again all very + // familiar, doing the update of + // the finite element data as + // specified by the update flags, + // zeroing out the local arrays + // and getting the values of the + // old solution at the quadrature + // points. At this point we also + // have to get the values of the + // saturation function of the + // previous time step at the + // quadrature points. To this + // end, we can use the + // FEValues::get_function_values + // (previously already used in + // step-9, step-14 and step-15), + // a function that takes a + // solution vector and returns a + // list of function values at the + // quadrature points of the + // present cell. In fact, it + // returns the complete + // vector-valued solution at each + // quadrature point, i.e. not + // only the saturation but also + // the velocities and pressure. + // + // Then we are ready to loop over + // the quadrature points on the + // cell to do the + // integration. The formula for + // this follows in a + // straightforward way from what + // has been discussed in the + // introduction. 
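A compact sketch of the FEValues::get_function_values call described above for a vector-valued field, here assumed to have dim velocity components followed by one pressure component; fe_values, solution and n_q_points stand for whatever the surrounding code provides:

  std::vector<Vector<double> > solution_values (n_q_points,
                                                Vector<double>(dim+1));
  fe_values.get_function_values (solution, solution_values);

  // solution_values[q](0) ... solution_values[q](dim-1) are the velocity
  // components at quadrature point q, solution_values[q](dim) the pressure.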
+ // + // Once this is done, we start the loop over + // the rows and columns of the local matrix + // and feed the matrix with the relevant + // products. + // + // The last step in the loop over all cells + // is to enter the local contributions into + // the global matrix and vector structures to + // the positions specified in + // local_dof_indices. Again, we let the + // ConstraintMatrix class do the insertion of + // the cell matrix elements to the global + // matrix, which already condenses the + // hanging node constraints. typename DoFHandler::active_cell_iterator cell = darcy_dof_handler.begin_active(), endc = darcy_dof_handler.end(); @@ -1348,105 +1348,105 @@ namespace Step43 for (; cell!=endc; ++cell, ++saturation_cell) { - darcy_fe_values.reinit (cell); - saturation_fe_values.reinit (saturation_cell); - - local_matrix = 0; - local_rhs = 0; - - saturation_fe_values.get_function_values (old_saturation_solution, old_saturation_values); - - pressure_right_hand_side.value_list (darcy_fe_values.get_quadrature_points(), - pressure_rhs_values); - k_inverse.value_list (darcy_fe_values.get_quadrature_points(), - k_inverse_values); - - for (unsigned int q=0; q::faces_per_cell; - ++face_no) - if (cell->at_boundary(face_no)) - { - darcy_fe_face_values.reinit (cell, face_no); - - pressure_boundary_values - .value_list (darcy_fe_face_values.get_quadrature_points(), - boundary_values); - - for (unsigned int q=0; q - phi_i_u = darcy_fe_face_values[velocities].value (i, q); - - local_rhs(i) += -(phi_i_u * - darcy_fe_face_values.normal_vector(q) * - boundary_values[q] * - darcy_fe_face_values.JxW(q)); - } - } - - for (unsigned int i=0; iget_dof_indices (local_dof_indices); - - darcy_constraints.distribute_local_to_global (local_matrix, - local_rhs, - local_dof_indices, - darcy_matrix, - darcy_rhs); + darcy_fe_values.reinit (cell); + saturation_fe_values.reinit (saturation_cell); + + local_matrix = 0; + local_rhs = 0; + + saturation_fe_values.get_function_values (old_saturation_solution, old_saturation_values); + + pressure_right_hand_side.value_list (darcy_fe_values.get_quadrature_points(), + pressure_rhs_values); + k_inverse.value_list (darcy_fe_values.get_quadrature_points(), + k_inverse_values); + + for (unsigned int q=0; q::faces_per_cell; + ++face_no) + if (cell->at_boundary(face_no)) + { + darcy_fe_face_values.reinit (cell, face_no); + + pressure_boundary_values + .value_list (darcy_fe_face_values.get_quadrature_points(), + boundary_values); + + for (unsigned int q=0; q + phi_i_u = darcy_fe_face_values[velocities].value (i, q); + + local_rhs(i) += -(phi_i_u * + darcy_fe_face_values.normal_vector(q) * + boundary_values[q] * + darcy_fe_face_values.JxW(q)); + } + } + + for (unsigned int i=0; iget_dof_indices (local_dof_indices); + + darcy_constraints.distribute_local_to_global (local_matrix, + local_rhs, + local_dof_indices, + darcy_matrix, + darcy_rhs); } } - // @sect4{TwoPhaseFlowProblem::assemble_saturation_system} - - // This function is to assemble the linear - // system for the saturation transport - // equation. It calls, if necessary, two - // other member functions: - // assemble_saturation_matrix() and - // assemble_saturation_rhs(). The former - // function then assembles the saturation - // matrix that only needs to be changed - // occasionally. On the other hand, the - // latter function that assembles the right - // hand side must be called at every - // saturation time step. 
+ // @sect4{TwoPhaseFlowProblem::assemble_saturation_system} + + // This function is to assemble the linear + // system for the saturation transport + // equation. It calls, if necessary, two + // other member functions: + // assemble_saturation_matrix() and + // assemble_saturation_rhs(). The former + // function then assembles the saturation + // matrix that only needs to be changed + // occasionally. On the other hand, the + // latter function that assembles the right + // hand side must be called at every + // saturation time step. template void TwoPhaseFlowProblem::assemble_saturation_system () { if (rebuild_saturation_matrix == true) { - saturation_matrix = 0; - assemble_saturation_matrix (); + saturation_matrix = 0; + assemble_saturation_matrix (); } saturation_rhs = 0; @@ -1455,27 +1455,27 @@ namespace Step43 - // @sect4{TwoPhaseFlowProblem::assemble_saturation_matrix} + // @sect4{TwoPhaseFlowProblem::assemble_saturation_matrix} - // This function is easily understood since - // it only forms a simple mass matrix for the - // left hand side of the saturation linear - // system by basis functions phi_i_s and - // phi_j_s only. Finally, as usual, we enter - // the local contribution into the global - // matrix by specifying the position in - // local_dof_indices. This is done by letting - // the ConstraintMatrix class do the - // insertion of the cell matrix elements to - // the global matrix, which already condenses - // the hanging node constraints. + // This function is easily understood since + // it only forms a simple mass matrix for the + // left hand side of the saturation linear + // system by basis functions phi_i_s and + // phi_j_s only. Finally, as usual, we enter + // the local contribution into the global + // matrix by specifying the position in + // local_dof_indices. This is done by letting + // the ConstraintMatrix class do the + // insertion of the cell matrix elements to + // the global matrix, which already condenses + // the hanging node constraints. template void TwoPhaseFlowProblem::assemble_saturation_matrix () { QGauss quadrature_formula(saturation_degree+2); FEValues saturation_fe_values (saturation_fe, quadrature_formula, - update_values | update_JxW_values); + update_values | update_JxW_values); const unsigned int dofs_per_cell = saturation_fe.dofs_per_cell; @@ -1491,67 +1491,67 @@ namespace Step43 endc = saturation_dof_handler.end(); for (; cell!=endc; ++cell) { - saturation_fe_values.reinit (cell); - local_matrix = 0; - local_rhs = 0; - - for (unsigned int q=0; qget_dof_indices (local_dof_indices); - - saturation_constraints.distribute_local_to_global (local_matrix, - local_dof_indices, - saturation_matrix); + saturation_fe_values.reinit (cell); + local_matrix = 0; + local_rhs = 0; + + for (unsigned int q=0; qget_dof_indices (local_dof_indices); + + saturation_constraints.distribute_local_to_global (local_matrix, + local_dof_indices, + saturation_matrix); } } - // @sect4{TwoPhaseFlowProblem::assemble_saturation_rhs} - - // This function is to assemble the right - // hand side of the saturation transport - // equation. Before going about it, we have to - // create two FEValues objects for the Darcy - // and saturation systems respectively and, - // in addition, two FEFaceValues objects for - // the two systems because we have a - // boundary integral term in the weak form of - // saturation equation. For the FEFaceValues - // object of the saturation system, we also - // require normal vectors, which we request - // using the update_normal_vectors flag. 
- // - // Next, before looping over all the cells, - // we have to compute some parameters - // (e.g. global_u_infty, global_S_variation, - // and global_Omega_diameter) that the - // artificial viscosity $\nu$ needs. This is - // largely the same as was done in - // step-31, so you may see there for more - // information. - // - // The real works starts with the loop over all the - // saturation and Darcy cells to put the - // local contributions into the global - // vector. In this loop, in order to simplify - // the implementation, we split some of the - // work into two helper functions: - // assemble_saturation_rhs_cell_term and - // assemble_saturation_rhs_boundary_term. - // We note that we insert cell or boundary - // contributions into the global vector in - // the two functions rather than in this - // present function. + // @sect4{TwoPhaseFlowProblem::assemble_saturation_rhs} + + // This function is to assemble the right + // hand side of the saturation transport + // equation. Before going about it, we have to + // create two FEValues objects for the Darcy + // and saturation systems respectively and, + // in addition, two FEFaceValues objects for + // the two systems because we have a + // boundary integral term in the weak form of + // saturation equation. For the FEFaceValues + // object of the saturation system, we also + // require normal vectors, which we request + // using the update_normal_vectors flag. + // + // Next, before looping over all the cells, + // we have to compute some parameters + // (e.g. global_u_infty, global_S_variation, + // and global_Omega_diameter) that the + // artificial viscosity $\nu$ needs. This is + // largely the same as was done in + // step-31, so you may see there for more + // information. + // + // The real works starts with the loop over all the + // saturation and Darcy cells to put the + // local contributions into the global + // vector. In this loop, in order to simplify + // the implementation, we split some of the + // work into two helper functions: + // assemble_saturation_rhs_cell_term and + // assemble_saturation_rhs_boundary_term. + // We note that we insert cell or boundary + // contributions into the global vector in + // the two functions rather than in this + // present function. 
template void TwoPhaseFlowProblem::assemble_saturation_rhs () { @@ -1559,17 +1559,17 @@ namespace Step43 QGauss face_quadrature_formula(saturation_degree+2); FEValues saturation_fe_values (saturation_fe, quadrature_formula, - update_values | update_gradients | - update_quadrature_points | update_JxW_values); + update_values | update_gradients | + update_quadrature_points | update_JxW_values); FEValues darcy_fe_values (darcy_fe, quadrature_formula, - update_values); + update_values); FEFaceValues saturation_fe_face_values (saturation_fe, face_quadrature_formula, - update_values | update_normal_vectors | - update_quadrature_points | update_JxW_values); + update_values | update_normal_vectors | + update_quadrature_points | update_JxW_values); FEFaceValues darcy_fe_face_values (darcy_fe, face_quadrature_formula, - update_values); + update_values); FEFaceValues saturation_fe_face_values_neighbor (saturation_fe, face_quadrature_formula, - update_values); + update_values); const unsigned int dofs_per_cell = saturation_dof_handler.get_fe().dofs_per_cell; std::vector local_dof_indices (dofs_per_cell); @@ -1585,58 +1585,58 @@ namespace Step43 darcy_cell = darcy_dof_handler.begin_active(); for (; cell!=endc; ++cell, ++darcy_cell) { - saturation_fe_values.reinit (cell); - darcy_fe_values.reinit (darcy_cell); - - cell->get_dof_indices (local_dof_indices); - - assemble_saturation_rhs_cell_term (saturation_fe_values, - darcy_fe_values, - global_max_u_F_prime, - global_S_variation, - local_dof_indices); - - for (unsigned int face_no=0; face_no::faces_per_cell; - ++face_no) - if (cell->at_boundary(face_no)) - { - darcy_fe_face_values.reinit (darcy_cell, face_no); - saturation_fe_face_values.reinit (cell, face_no); - assemble_saturation_rhs_boundary_term (saturation_fe_face_values, - darcy_fe_face_values, - local_dof_indices); - } + saturation_fe_values.reinit (cell); + darcy_fe_values.reinit (darcy_cell); + + cell->get_dof_indices (local_dof_indices); + + assemble_saturation_rhs_cell_term (saturation_fe_values, + darcy_fe_values, + global_max_u_F_prime, + global_S_variation, + local_dof_indices); + + for (unsigned int face_no=0; face_no::faces_per_cell; + ++face_no) + if (cell->at_boundary(face_no)) + { + darcy_fe_face_values.reinit (darcy_cell, face_no); + saturation_fe_face_values.reinit (cell, face_no); + assemble_saturation_rhs_boundary_term (saturation_fe_face_values, + darcy_fe_face_values, + local_dof_indices); + } } } - // @sect4{TwoPhaseFlowProblem::assemble_saturation_rhs_cell_term} - - // This function takes care of integrating - // the cell terms of the right hand side of - // the saturation equation, and then - // assembling it into the global right hand - // side vector. Given the discussion in the - // introduction, the form of these - // contributions is clear. The only tricky - // part is getting the artificial viscosity - // and all that is necessary to compute - // it. The first half of the function is - // devoted to this task. - // - // The last part of the function is copying - // the local contributions into the global - // vector with position specified in - // local_dof_indices. + // @sect4{TwoPhaseFlowProblem::assemble_saturation_rhs_cell_term} + + // This function takes care of integrating + // the cell terms of the right hand side of + // the saturation equation, and then + // assembling it into the global right hand + // side vector. Given the discussion in the + // introduction, the form of these + // contributions is clear. 
The only tricky + // part is getting the artificial viscosity + // and all that is necessary to compute + // it. The first half of the function is + // devoted to this task. + // + // The last part of the function is copying + // the local contributions into the global + // vector with position specified in + // local_dof_indices. template void TwoPhaseFlowProblem:: assemble_saturation_rhs_cell_term (const FEValues &saturation_fe_values, - const FEValues &darcy_fe_values, - const double global_max_u_F_prime, - const double global_S_variation, - const std::vector &local_dof_indices) + const FEValues &darcy_fe_values, + const double global_max_u_F_prime, + const double global_S_variation, + const std::vector &local_dof_indices) { const unsigned int dofs_per_cell = saturation_fe_values.dofs_per_cell; const unsigned int n_q_points = saturation_fe_values.n_quadrature_points; @@ -1655,66 +1655,66 @@ namespace Step43 const double nu = compute_viscosity (old_saturation_solution_values, - old_old_saturation_solution_values, - old_grad_saturation_solution_values, - old_old_grad_saturation_solution_values, - present_darcy_solution_values, - global_max_u_F_prime, - global_S_variation, - saturation_fe_values.get_cell()->diameter()); + old_old_saturation_solution_values, + old_grad_saturation_solution_values, + old_old_grad_saturation_solution_values, + present_darcy_solution_values, + global_max_u_F_prime, + global_S_variation, + saturation_fe_values.get_cell()->diameter()); Vector local_rhs (dofs_per_cell); for (unsigned int q=0; q present_u; - for (unsigned int d=0; d grad_phi_i_s = saturation_fe_values.shape_grad (i, q); - - local_rhs(i) += (time_step * - fractional_flow(old_s,viscosity) * - present_u * - grad_phi_i_s - - - time_step * - nu * - old_grad_saturation_solution_values[q] * grad_phi_i_s - + - porosity * old_s * phi_i_s) - * - saturation_fe_values.JxW(q); - } + { + const double old_s = old_saturation_solution_values[q]; + Tensor<1,dim> present_u; + for (unsigned int d=0; d grad_phi_i_s = saturation_fe_values.shape_grad (i, q); + + local_rhs(i) += (time_step * + fractional_flow(old_s,viscosity) * + present_u * + grad_phi_i_s + - + time_step * + nu * + old_grad_saturation_solution_values[q] * grad_phi_i_s + + + porosity * old_s * phi_i_s) + * + saturation_fe_values.JxW(q); + } saturation_constraints.distribute_local_to_global (local_rhs, - local_dof_indices, - saturation_rhs); + local_dof_indices, + saturation_rhs); } - // @sect4{TwoPhaseFlowProblem::assemble_saturation_rhs_boundary_term} + // @sect4{TwoPhaseFlowProblem::assemble_saturation_rhs_boundary_term} - // The next function is responsible for the - // boundary integral terms in the right - // hand side form of the saturation - // equation. For these, we have to compute - // the upwinding flux on the global - // boundary faces, i.e. we impose Dirichlet - // boundary conditions weakly only on - // inflow parts of the global boundary. As - // before, this has been described in - // step-21 so we refrain from giving more - // descriptions about that. + // The next function is responsible for the + // boundary integral terms in the right + // hand side form of the saturation + // equation. For these, we have to compute + // the upwinding flux on the global + // boundary faces, i.e. we impose Dirichlet + // boundary conditions weakly only on + // inflow parts of the global boundary. As + // before, this has been described in + // step-21 so we refrain from giving more + // descriptions about that. 
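Stripped of all assembly details, the upwind decision at a single boundary quadrature point amounts to the following; u_dot_n, s_interior and s_boundary are names used here for the normal velocity component and the two candidate saturation values, not the program's variables:

  const bool   is_outflow = (u_dot_n >= 0.);
  const double s_upwind   = (is_outflow ? s_interior : s_boundary);
  // Only where u.n < 0 (inflow) does the prescribed boundary saturation
  // enter the flux, which is how the Dirichlet data is imposed weakly.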
template void TwoPhaseFlowProblem:: assemble_saturation_rhs_boundary_term (const FEFaceValues &saturation_fe_face_values, - const FEFaceValues &darcy_fe_face_values, - const std::vector &local_dof_indices) + const FEFaceValues &darcy_fe_face_values, + const std::vector &local_dof_indices) { const unsigned int dofs_per_cell = saturation_fe_face_values.dofs_per_cell; const unsigned int n_face_q_points = saturation_fe_face_values.n_quadrature_points; @@ -1723,72 +1723,72 @@ namespace Step43 std::vector old_saturation_solution_values_face(n_face_q_points); std::vector > present_darcy_solution_values_face(n_face_q_points, - Vector(dim+1)); + Vector(dim+1)); std::vector neighbor_saturation (n_face_q_points); saturation_fe_face_values.get_function_values (old_saturation_solution, - old_saturation_solution_values_face); + old_saturation_solution_values_face); darcy_fe_face_values.get_function_values (darcy_solution, - present_darcy_solution_values_face); + present_darcy_solution_values_face); SaturationBoundaryValues saturation_boundary_values; saturation_boundary_values .value_list (saturation_fe_face_values.get_quadrature_points(), - neighbor_saturation); + neighbor_saturation); for (unsigned int q=0; q present_u_face; - for (unsigned int d=0; d= 0); - - for (unsigned int i=0; i present_u_face; + for (unsigned int d=0; d= 0); + + for (unsigned int i=0; i::solve} - - // This function implements the operator - // splitting algorithm, i.e. in each time - // step it either re-computes the solution - // of the Darcy system or extrapolates - // velocity/pressure from previous time - // steps, then determines the size of the - // time step, and then updates the - // saturation variable. The implementation - // largely follows similar code in - // step-31. It is, next to the run() - // function, the central one in this - // program. - // - // At the beginning of the function, we ask - // whether to solve the pressure-velocity - // part by evaluating the posteriori - // criterion (see the following - // function). If necessary, we will solve - // the pressure-velocity part using the - // GMRES solver with the Schur complement - // block preconditioner as is described in - // the introduction. + // @sect3{TwoPhaseFlowProblem::solve} + + // This function implements the operator + // splitting algorithm, i.e. in each time + // step it either re-computes the solution + // of the Darcy system or extrapolates + // velocity/pressure from previous time + // steps, then determines the size of the + // time step, and then updates the + // saturation variable. The implementation + // largely follows similar code in + // step-31. It is, next to the run() + // function, the central one in this + // program. + // + // At the beginning of the function, we ask + // whether to solve the pressure-velocity + // part by evaluating the posteriori + // criterion (see the following + // function). If necessary, we will solve + // the pressure-velocity part using the + // GMRES solver with the Schur complement + // block preconditioner as is described in + // the introduction. template void TwoPhaseFlowProblem::solve () { @@ -1797,201 +1797,201 @@ namespace Step43 if (solve_for_pressure_and_velocity == true) { - std::cout << " Solving Darcy (pressure-velocity) system..." << std::endl; + std::cout << " Solving Darcy (pressure-velocity) system..." 
<< std::endl; - assemble_darcy_system (); - build_darcy_preconditioner (); + assemble_darcy_system (); + build_darcy_preconditioner (); - { - const LinearSolvers::InverseMatrix - mp_inverse (darcy_preconditioner_matrix.block(1,1), *Mp_preconditioner); + { + const LinearSolvers::InverseMatrix + mp_inverse (darcy_preconditioner_matrix.block(1,1), *Mp_preconditioner); - const LinearSolvers::BlockSchurPreconditioner - preconditioner (darcy_matrix, mp_inverse, *Amg_preconditioner); + const LinearSolvers::BlockSchurPreconditioner + preconditioner (darcy_matrix, mp_inverse, *Amg_preconditioner); - SolverControl solver_control (darcy_matrix.m(), - 1e-16*darcy_rhs.l2_norm()); + SolverControl solver_control (darcy_matrix.m(), + 1e-16*darcy_rhs.l2_norm()); - SolverGMRES - gmres (solver_control, - SolverGMRES::AdditionalData(100)); + SolverGMRES + gmres (solver_control, + SolverGMRES::AdditionalData(100)); - for (unsigned int i=0; icurrent_macro_time_step), - // and $DT$ the length of the last macro - // time step (given by - // old_macro_time_step), - // then we get - // $u^\ast = u_p + dt \frac{u_p-u_{pp}}{DT} - // = (1+dt/DT)u_p - dt/DT u_{pp}$, where - // $u_p$ and $u_{pp}$ are the last two - // computed Darcy solutions. We can - // implement this formula using just - // two lines of code. - // - // Note that the algorithm here only - // works if we have at least two - // previously computed Darcy solutions - // from which we can extrapolate to the - // current time, and this is ensured by - // requiring re-computation of the Darcy - // solution for the first 2 time steps. + // On the other hand, if we have decided + // that we don't want to compute the + // solution of the Darcy system for the + // current time step, then we need to + // simply extrapolate the previous two + // Darcy solutions to the same time as we + // would have computed the + // velocity/pressure at. We do a simple + // linear extrapolation, i.e. given the + // current length $dt$ of the macro time + // step from the time when we last + // computed the Darcy solution to now + // (given by + // current_macro_time_step), + // and $DT$ the length of the last macro + // time step (given by + // old_macro_time_step), + // then we get + // $u^\ast = u_p + dt \frac{u_p-u_{pp}}{DT} + // = (1+dt/DT)u_p - dt/DT u_{pp}$, where + // $u_p$ and $u_{pp}$ are the last two + // computed Darcy solutions. We can + // implement this formula using just + // two lines of code. + // + // Note that the algorithm here only + // works if we have at least two + // previously computed Darcy solutions + // from which we can extrapolate to the + // current time, and this is ensured by + // requiring re-computation of the Darcy + // solution for the first 2 time steps. else { - darcy_solution = last_computed_darcy_solution; - darcy_solution.sadd (1 + current_macro_time_step / old_macro_time_step, - -current_macro_time_step / old_macro_time_step, - second_last_computed_darcy_solution); + darcy_solution = last_computed_darcy_solution; + darcy_solution.sadd (1 + current_macro_time_step / old_macro_time_step, + -current_macro_time_step / old_macro_time_step, + second_last_computed_darcy_solution); } - // With the so computed velocity - // vector, compute the optimal - // time step based on the CFL - // criterion discussed in the - // introduction... + // With the so computed velocity + // vector, compute the optimal + // time step based on the CFL + // criterion discussed in the + // introduction... 
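Both the extrapolation a few lines above and the CFL-style step size computed just below can be checked with made-up numbers. The following stand-alone snippet only illustrates the arithmetic and the semantics of Vector::sadd(s, a, v), which computes *this = s*(*this) + a*v; none of the values are taken from the program:

  #include <deal.II/lac/vector.h>
  using namespace dealii;

  int main ()
  {
    // Extrapolation: u_pp = 1 at the second-to-last Darcy solve, u_p = 2 at
    // the last one, dt = 0.5, DT = 1.  Then
    //   u* = (1 + dt/DT) u_p - (dt/DT) u_pp = 1.5*2 - 0.5*1 = 2.5,
    // i.e. the straight line through the two old values evaluated half a
    // macro step further.
    Vector<double> u_p (1), u_pp (1);
    u_p(0)  = 2.;
    u_pp(0) = 1.;
    const double dt = 0.5, DT = 1.;

    Vector<double> u_star (u_p);
    u_star.sadd (1. + dt/DT, -dt/DT, u_pp);   // u_star(0) is now 2.5

    // CFL-style step size with equally made-up values: porosity 0.2,
    // smallest cell diameter 1/64, saturation_degree 2, max |u F'(S)| = 1:
    //   0.2 * (1/64) / 2 / 1 / 50 = 3.125e-5.
    const double       porosity = 0.2, h_min = 1./64., max_u_F_prime = 1.;
    const unsigned int saturation_degree = 2;
    const double time_step = porosity * h_min / saturation_degree
                             / max_u_F_prime / 50;

    (void)u_star;
    (void)time_step;
  }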
{ old_time_step = time_step; const double max_u_F_prime = get_max_u_F_prime(); if (max_u_F_prime > 0) - time_step = porosity * - GridTools::minimal_cell_diameter(triangulation) / - saturation_degree / - max_u_F_prime / 50; + time_step = porosity * + GridTools::minimal_cell_diameter(triangulation) / + saturation_degree / + max_u_F_prime / 50; else - time_step = end_time - time; + time_step = end_time - time; } - // ...and then also update the - // length of the macro time steps - // we use while we're dealing - // with time step sizes. In - // particular, this involves: (i) - // If we have just recomputed the - // Darcy solution, then the - // length of the previous macro - // time step is now fixed and the - // length of the current macro - // time step is, up to now, - // simply the length of the - // current (micro) time - // step. (ii) If we have not - // recomputed the Darcy solution, - // then the length of the current - // macro time step has just grown - // by time_step. + // ...and then also update the + // length of the macro time steps + // we use while we're dealing + // with time step sizes. In + // particular, this involves: (i) + // If we have just recomputed the + // Darcy solution, then the + // length of the previous macro + // time step is now fixed and the + // length of the current macro + // time step is, up to now, + // simply the length of the + // current (micro) time + // step. (ii) If we have not + // recomputed the Darcy solution, + // then the length of the current + // macro time step has just grown + // by time_step. if (solve_for_pressure_and_velocity == true) { - old_macro_time_step = current_macro_time_step; - current_macro_time_step = time_step; + old_macro_time_step = current_macro_time_step; + current_macro_time_step = time_step; } else current_macro_time_step += time_step; - // The last step in this function - // is to recompute the saturation - // solution based on the velocity - // field we've just - // obtained. This naturally - // happens in every time step, - // and we don't skip any of these - // computations. At the end of - // computing the saturation, we - // project back into the allowed - // interval $[0,1]$ to make sure - // our solution remains physical. + // The last step in this function + // is to recompute the saturation + // solution based on the velocity + // field we've just + // obtained. This naturally + // happens in every time step, + // and we don't skip any of these + // computations. At the end of + // computing the saturation, we + // project back into the allowed + // interval $[0,1]$ to make sure + // our solution remains physical. { std::cout << " Solving saturation transport equation..." << std::endl; assemble_saturation_system (); SolverControl solver_control (saturation_matrix.m(), - 1e-16*saturation_rhs.l2_norm()); + 1e-16*saturation_rhs.l2_norm()); SolverCG cg (solver_control); TrilinosWrappers::PreconditionIC preconditioner; preconditioner.initialize (saturation_matrix); cg.solve (saturation_matrix, saturation_solution, - saturation_rhs, preconditioner); + saturation_rhs, preconditioner); saturation_constraints.distribute (saturation_solution); project_back_saturation (); std::cout << " ..." - << solver_control.last_step() - << " CG iterations." - << std::endl; + << solver_control.last_step() + << " CG iterations." + << std::endl; } } - // @sect3{TwoPhaseFlowProblem::refine_mesh} - - // The next function does the - // refinement and coarsening of the - // mesh. 
It does its work in three - // blocks: (i) Compute refinement - // indicators by looking at the - // gradient of a solution vector - // extrapolated linearly from the - // previous two using the - // respective sizes of the time - // step (or taking the only - // solution we have if this is the - // first time step). (ii) Flagging - // those cells for refinement and - // coarsening where the gradient is - // larger or smaller than a certain - // threshold, preserving minimal - // and maximal levels of mesh - // refinement. (iii) Transfering - // the solution from the old to the - // new mesh. None of this is - // particularly difficult. + // @sect3{TwoPhaseFlowProblem::refine_mesh} + + // The next function does the + // refinement and coarsening of the + // mesh. It does its work in three + // blocks: (i) Compute refinement + // indicators by looking at the + // gradient of a solution vector + // extrapolated linearly from the + // previous two using the + // respective sizes of the time + // step (or taking the only + // solution we have if this is the + // first time step). (ii) Flagging + // those cells for refinement and + // coarsening where the gradient is + // larger or smaller than a certain + // threshold, preserving minimal + // and maximal levels of mesh + // refinement. (iii) Transfering + // the solution from the old to the + // new mesh. None of this is + // particularly difficult. template void TwoPhaseFlowProblem:: refine_mesh (const unsigned int min_grid_level, - const unsigned int max_grid_level) + const unsigned int max_grid_level) { Vector refinement_indicators (triangulation.n_active_cells()); { @@ -2001,40 +2001,40 @@ namespace Step43 TrilinosWrappers::Vector extrapolated_saturation_solution (saturation_solution); if (timestep_number != 0) - extrapolated_saturation_solution.sadd ((1. + time_step/old_time_step), - time_step/old_time_step, old_saturation_solution); + extrapolated_saturation_solution.sadd ((1. 
+ time_step/old_time_step), + time_step/old_time_step, old_saturation_solution); typename DoFHandler::active_cell_iterator - cell = saturation_dof_handler.begin_active(), - endc = saturation_dof_handler.end(); + cell = saturation_dof_handler.begin_active(), + endc = saturation_dof_handler.end(); for (unsigned int cell_no=0; cell!=endc; ++cell, ++cell_no) - { - fe_values.reinit(cell); - fe_values.get_function_grads (extrapolated_saturation_solution, - grad_saturation); + { + fe_values.reinit(cell); + fe_values.get_function_grads (extrapolated_saturation_solution, + grad_saturation); - refinement_indicators(cell_no) = grad_saturation[0].norm(); - } + refinement_indicators(cell_no) = grad_saturation[0].norm(); + } } { typename DoFHandler::active_cell_iterator - cell = saturation_dof_handler.begin_active(), - endc = saturation_dof_handler.end(); + cell = saturation_dof_handler.begin_active(), + endc = saturation_dof_handler.end(); for (unsigned int cell_no=0; cell!=endc; ++cell, ++cell_no) - { - cell->clear_coarsen_flag(); - cell->clear_refine_flag(); - - if ((static_cast(cell->level()) < max_grid_level) && - (std::fabs(refinement_indicators(cell_no)) > saturation_refinement_threshold)) - cell->set_refine_flag(); - else - if ((static_cast(cell->level()) > min_grid_level) && - (std::fabs(refinement_indicators(cell_no)) < 0.5 * saturation_refinement_threshold)) - cell->set_coarsen_flag(); - } + { + cell->clear_coarsen_flag(); + cell->clear_refine_flag(); + + if ((static_cast(cell->level()) < max_grid_level) && + (std::fabs(refinement_indicators(cell_no)) > saturation_refinement_threshold)) + cell->set_refine_flag(); + else + if ((static_cast(cell->level()) > min_grid_level) && + (std::fabs(refinement_indicators(cell_no)) < 0.5 * saturation_refinement_threshold)) + cell->set_coarsen_flag(); + } } triangulation.prepare_coarsening_and_refinement (); @@ -2086,22 +2086,22 @@ namespace Step43 - // @sect3{TwoPhaseFlowProblem::output_results} + // @sect3{TwoPhaseFlowProblem::output_results} - // This function generates - // graphical output. It is in - // essence a copy of the - // implementation in step-31. + // This function generates + // graphical output. It is in + // essence a copy of the + // implementation in step-31. 
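Step (iii) of the refine_mesh description above, transferring the solution to the new mesh, is not visible in this excerpt; it typically follows the standard SolutionTransfer pattern sketched below with generic names (dof_handler, fe, old_solution, new_solution are placeholders, and the program itself works with Trilinos vectors rather than Vector<double>):

  #include <deal.II/numerics/solution_transfer.h>

  SolutionTransfer<dim, Vector<double> > solution_transfer (dof_handler);

  triangulation.prepare_coarsening_and_refinement ();
  solution_transfer.prepare_for_coarsening_and_refinement (old_solution);
  triangulation.execute_coarsening_and_refinement ();

  dof_handler.distribute_dofs (fe);
  new_solution.reinit (dof_handler.n_dofs());
  solution_transfer.interpolate (old_solution, new_solution);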
template void TwoPhaseFlowProblem::output_results () const { const FESystem joint_fe (darcy_fe, 1, - saturation_fe, 1); + saturation_fe, 1); DoFHandler joint_dof_handler (triangulation); joint_dof_handler.distribute_dofs (joint_fe); Assert (joint_dof_handler.n_dofs() == - darcy_dof_handler.n_dofs() + saturation_dof_handler.n_dofs(), - ExcInternalError()); + darcy_dof_handler.n_dofs() + saturation_dof_handler.n_dofs(), + ExcInternalError()); Vector joint_solution (joint_dof_handler.n_dofs()); @@ -2111,40 +2111,40 @@ namespace Step43 std::vector local_saturation_dof_indices (saturation_fe.dofs_per_cell); typename DoFHandler::active_cell_iterator - joint_cell = joint_dof_handler.begin_active(), - joint_endc = joint_dof_handler.end(), - darcy_cell = darcy_dof_handler.begin_active(), - saturation_cell = saturation_dof_handler.begin_active(); + joint_cell = joint_dof_handler.begin_active(), + joint_endc = joint_dof_handler.end(), + darcy_cell = darcy_dof_handler.begin_active(), + saturation_cell = saturation_dof_handler.begin_active(); for (; joint_cell!=joint_endc; ++joint_cell, ++darcy_cell, ++saturation_cell) - { - joint_cell->get_dof_indices (local_joint_dof_indices); - darcy_cell->get_dof_indices (local_darcy_dof_indices); - saturation_cell->get_dof_indices (local_saturation_dof_indices); - - for (unsigned int i=0; iget_dof_indices (local_joint_dof_indices); + darcy_cell->get_dof_indices (local_darcy_dof_indices); + saturation_cell->get_dof_indices (local_saturation_dof_indices); + + for (unsigned int i=0; i joint_solution_names (dim, "velocity"); joint_solution_names.push_back ("pressure"); @@ -2162,54 +2162,54 @@ namespace Step43 data_out.attach_dof_handler (joint_dof_handler); data_out.add_data_vector (joint_solution, joint_solution_names, - DataOut::type_dof_data, - data_component_interpretation); + DataOut::type_dof_data, + data_component_interpretation); data_out.build_patches (); std::string filename = "solution-" + - Utilities::int_to_string (timestep_number, 5) + ".vtu"; + Utilities::int_to_string (timestep_number, 5) + ".vtu"; std::ofstream output (filename.c_str()); data_out.write_vtu (output); } - // @sect3{Tool functions} - - // @sect4{TwoPhaseFlowProblem::determine_whether_to_solve_for_pressure_and_velocity} - - // This function implements the a - // posteriori criterion for - // adaptive operator splitting. The - // function is relatively - // straightforward given the way we - // have implemented other functions - // above and given the formula for - // the criterion derived in the - // paper. - // - // If one decides that one wants - // the original IMPES method in - // which the Darcy equation is - // solved in every time step, then - // this can be achieved by setting - // the threshold value - // AOS_threshold (with - // a default of $5.0$) to zero, - // thereby forcing the function to - // always return true. - // - // Finally, note that the function - // returns true unconditionally for - // the first two time steps to - // ensure that we have always - // solved the Darcy system at least - // twice when skipping its - // solution, thereby allowing us to - // extrapolate the velocity from - // the last two solutions in - // solve(). + // @sect3{Tool functions} + + // @sect4{TwoPhaseFlowProblem::determine_whether_to_solve_for_pressure_and_velocity} + + // This function implements the a + // posteriori criterion for + // adaptive operator splitting. 
The + // function is relatively + // straightforward given the way we + // have implemented other functions + // above and given the formula for + // the criterion derived in the + // paper. + // + // If one decides that one wants + // the original IMPES method in + // which the Darcy equation is + // solved in every time step, then + // this can be achieved by setting + // the threshold value + // AOS_threshold (with + // a default of $5.0$) to zero, + // thereby forcing the function to + // always return true. + // + // Finally, note that the function + // returns true unconditionally for + // the first two time steps to + // ensure that we have always + // solved the Darcy system at least + // twice when skipping its + // solution, thereby allowing us to + // extrapolate the velocity from + // the last two solutions in + // solve(). template bool TwoPhaseFlowProblem::determine_whether_to_solve_for_pressure_and_velocity () const @@ -2221,7 +2221,7 @@ namespace Step43 const unsigned int n_q_points = quadrature_formula.size(); FEValues fe_values (saturation_fe, quadrature_formula, - update_values | update_quadrature_points); + update_values | update_quadrature_points); std::vector old_saturation_after_solving_pressure (n_q_points); std::vector present_saturation (n_q_points); @@ -2235,35 +2235,35 @@ namespace Step43 endc = saturation_dof_handler.end(); for (; cell!=endc; ++cell) { - double max_local_mobility_reciprocal_difference = 0.0; - double max_local_permeability_inverse_l1_norm = 0.0; - - fe_values.reinit(cell); - fe_values.get_function_values (saturation_matching_last_computed_darcy_solution, - old_saturation_after_solving_pressure); - fe_values.get_function_values (saturation_solution, - present_saturation); - - k_inverse.value_list (fe_values.get_quadrature_points(), - k_inverse_values); - - for (unsigned int q=0; q AOS_threshold); @@ -2271,57 +2271,57 @@ namespace Step43 - // @sect4{TwoPhaseFlowProblem::project_back_saturation} - - // The next function simply makes - // sure that the saturation values - // always remain within the - // physically reasonable range of - // $[0,1]$. While the continuous - // equations guarantee that this is - // so, the discrete equations - // don't. However, if we allow the - // discrete solution to escape this - // range we get into trouble - // because terms like $F(S)$ and - // $F'(S)$ will produce - // unreasonable results - // (e.g. $F'(S)<0$ for $S<0$, which - // would imply that the wetting - // fluid phase flows against - // the direction of the bulk fluid - // velocity)). Consequently, at the - // end of each time step, we simply - // project the saturation field - // back into the physically - // reasonable region. + // @sect4{TwoPhaseFlowProblem::project_back_saturation} + + // The next function simply makes + // sure that the saturation values + // always remain within the + // physically reasonable range of + // $[0,1]$. While the continuous + // equations guarantee that this is + // so, the discrete equations + // don't. However, if we allow the + // discrete solution to escape this + // range we get into trouble + // because terms like $F(S)$ and + // $F'(S)$ will produce + // unreasonable results + // (e.g. $F'(S)<0$ for $S<0$, which + // would imply that the wetting + // fluid phase flows against + // the direction of the bulk fluid + // velocity)). Consequently, at the + // end of each time step, we simply + // project the saturation field + // back into the physically + // reasonable region. 
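The projection performed by the function that follows is just a componentwise clamp to $[0,1]$; as a stand-alone illustration (not the program's code, which spells out the two if-statements over the Trilinos vector):

  #include <algorithm>

  // pull a single saturation value back into the physically meaningful range
  double project_back (const double s)
  {
    return std::max (0., std::min (1., s));
  }
  // project_back(-0.02) == 0.0,  project_back(0.4) == 0.4,  project_back(1.3) == 1.0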
template void TwoPhaseFlowProblem::project_back_saturation () { for (unsigned int i=0; i 1) - saturation_solution(i) = 1; + if (saturation_solution(i) > 1) + saturation_solution(i) = 1; } - // @sect4{TwoPhaseFlowProblem::get_max_u_F_prime} - // - // Another simpler helper function: - // Compute the maximum of the total - // velocity times the derivative of - // the fraction flow function, - // i.e., compute $\|\mathbf{u} - // F'(S)\|_{L_\infty(\Omega)}$. This - // term is used in both the - // computation of the time step as - // well as in normalizing the - // entropy-residual term in the - // artificial viscosity. + // @sect4{TwoPhaseFlowProblem::get_max_u_F_prime} + // + // Another simpler helper function: + // Compute the maximum of the total + // velocity times the derivative of + // the fraction flow function, + // i.e., compute $\|\mathbf{u} + // F'(S)\|_{L_\infty(\Omega)}$. This + // term is used in both the + // computation of the time step as + // well as in normalizing the + // entropy-residual term in the + // artificial viscosity. template double TwoPhaseFlowProblem::get_max_u_F_prime () const @@ -2330,12 +2330,12 @@ namespace Step43 const unsigned int n_q_points = quadrature_formula.size(); FEValues darcy_fe_values (darcy_fe, quadrature_formula, - update_values); + update_values); FEValues saturation_fe_values (saturation_fe, quadrature_formula, - update_values); + update_values); std::vector > darcy_solution_values(n_q_points, - Vector(dim+1)); + Vector(dim+1)); std::vector saturation_values (n_q_points); double max_velocity_times_dF_dS = 0; @@ -2347,49 +2347,49 @@ namespace Step43 saturation_cell = saturation_dof_handler.begin_active(); for (; cell!=endc; ++cell, ++saturation_cell) { - darcy_fe_values.reinit (cell); - saturation_fe_values.reinit (saturation_cell); + darcy_fe_values.reinit (cell); + saturation_fe_values.reinit (saturation_cell); - darcy_fe_values.get_function_values (darcy_solution, darcy_solution_values); - saturation_fe_values.get_function_values (old_saturation_solution, saturation_values); + darcy_fe_values.get_function_values (darcy_solution, darcy_solution_values); + saturation_fe_values.get_function_values (old_saturation_solution, saturation_values); - for (unsigned int q=0; q velocity; - for (unsigned int i=0; i velocity; + for (unsigned int i=0; i::get_extrapolated_saturation_range} - // - // For computing the stabilization - // term, we need to know the range - // of the saturation - // variable. Unlike in step-31, - // this range is trivially bounded - // by the interval $[0,1]$ but we - // can do a bit better by looping - // over a collection of quadrature - // points and seeing what the - // values are there. If we can, - // i.e., if there are at least two - // timesteps around, we can even - // take the values extrapolated to - // the next time step. - // - // As before, the function is taken - // with minimal modifications from - // step-31. + // @sect4{TwoPhaseFlowProblem::get_extrapolated_saturation_range} + // + // For computing the stabilization + // term, we need to know the range + // of the saturation + // variable. Unlike in step-31, + // this range is trivially bounded + // by the interval $[0,1]$ but we + // can do a bit better by looping + // over a collection of quadrature + // points and seeing what the + // values are there. If we can, + // i.e., if there are at least two + // timesteps around, we can even + // take the values extrapolated to + // the next time step. 
+ // + // As before, the function is taken + // with minimal modifications from + // step-31. template std::pair TwoPhaseFlowProblem::get_extrapolated_saturation_range () const @@ -2398,94 +2398,94 @@ namespace Step43 const unsigned int n_q_points = quadrature_formula.size(); FEValues fe_values (saturation_fe, quadrature_formula, - update_values); + update_values); std::vector old_saturation_values(n_q_points); std::vector old_old_saturation_values(n_q_points); if (timestep_number != 0) { - double min_saturation = std::numeric_limits::max(), - max_saturation = -std::numeric_limits::max(); - - typename DoFHandler::active_cell_iterator - cell = saturation_dof_handler.begin_active(), - endc = saturation_dof_handler.end(); - for (; cell!=endc; ++cell) - { - fe_values.reinit (cell); - fe_values.get_function_values (old_saturation_solution, - old_saturation_values); - fe_values.get_function_values (old_old_saturation_solution, - old_old_saturation_values); - - for (unsigned int q=0; q::max(), + max_saturation = -std::numeric_limits::max(); + + typename DoFHandler::active_cell_iterator + cell = saturation_dof_handler.begin_active(), + endc = saturation_dof_handler.end(); + for (; cell!=endc; ++cell) + { + fe_values.reinit (cell); + fe_values.get_function_values (old_saturation_solution, + old_saturation_values); + fe_values.get_function_values (old_old_saturation_solution, + old_old_saturation_values); + + for (unsigned int q=0; q::max(), - max_saturation = -std::numeric_limits::max(); - - typename DoFHandler::active_cell_iterator - cell = saturation_dof_handler.begin_active(), - endc = saturation_dof_handler.end(); - for (; cell!=endc; ++cell) - { - fe_values.reinit (cell); - fe_values.get_function_values (old_saturation_solution, - old_saturation_values); - - for (unsigned int q=0; q::max(), + max_saturation = -std::numeric_limits::max(); + + typename DoFHandler::active_cell_iterator + cell = saturation_dof_handler.begin_active(), + endc = saturation_dof_handler.end(); + for (; cell!=endc; ++cell) + { + fe_values.reinit (cell); + fe_values.get_function_values (old_saturation_solution, + old_saturation_values); + + for (unsigned int q=0; q::compute_viscosity} - // - // The final tool function is used - // to compute the artificial - // viscosity on a given cell. This - // isn't particularly complicated - // if you have the formula for it - // in front of you, and looking at - // the implementation in - // step-31. The major difference to - // that tutorial program is that - // the velocity here is not simply - // $\mathbf u$ but $\mathbf u - // F'(S)$ and some of the formulas - // need to be adjusted accordingly. + // @sect4{TwoPhaseFlowProblem::compute_viscosity} + // + // The final tool function is used + // to compute the artificial + // viscosity on a given cell. This + // isn't particularly complicated + // if you have the formula for it + // in front of you, and looking at + // the implementation in + // step-31. The major difference to + // that tutorial program is that + // the velocity here is not simply + // $\mathbf u$ but $\mathbf u + // F'(S)$ and some of the formulas + // need to be adjusted accordingly. 
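Schematically, and with the constants suppressed, the cell-wise viscosity computed below has the same structure as the step-31 entropy viscosity with every occurrence of the velocity replaced by $\mathbf u F'(S)$; this is a paraphrase of the comment above, not the program's exact formula:

$\nu_K \;\sim\; \beta \,\|\mathbf u F'(S)\|_{L^\infty(K)}\, \min\!\left( h_K,\; h_K^\alpha \,\frac{\|R(S)\|_{L^\infty(K)}}{c_R\,\|\mathbf u F'(S)\|_{L^\infty(\Omega)}\,\mathrm{var}(S)} \right),$

where $R(S)$ is the discrete residual of the saturation equation evaluated from the last two time levels, $h_K$ is the cell diameter passed in as cell_diameter, and the global quantities in the denominator correspond to the global_max_u_F_prime and global_S_variation arguments.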
template double TwoPhaseFlowProblem:: compute_viscosity (const std::vector &old_saturation, - const std::vector &old_old_saturation, - const std::vector > &old_saturation_grads, - const std::vector > &old_old_saturation_grads, - const std::vector > &present_darcy_values, - const double global_max_u_F_prime, - const double global_S_variation, - const double cell_diameter) const + const std::vector &old_old_saturation, + const std::vector > &old_saturation_grads, + const std::vector > &old_old_saturation_grads, + const std::vector > &present_darcy_values, + const double global_max_u_F_prime, + const double global_S_variation, + const double cell_diameter) const { const double beta = .4 * dim; const double alpha = 1; @@ -2502,70 +2502,70 @@ namespace Step43 for (unsigned int q=0; q < n_q_points; ++q) { - Tensor<1,dim> u; - for (unsigned int d=0; d u; + for (unsigned int d=0; d::run} - - // This function is, besides - // solve(), the - // primary function of this program - // as it controls the time - // iteration as well as when the - // solution is written into output - // files and when to do mesh - // refinement. - // - // With the exception of the - // startup code that loops back to - // the beginning of the function - // through the goto - // start_time_iteration - // label, everything should be - // relatively straightforward. In - // any case, it mimicks the - // corresponding function in - // step-31. + // @sect3{TwoPhaseFlowProblem::run} + + // This function is, besides + // solve(), the + // primary function of this program + // as it controls the time + // iteration as well as when the + // solution is written into output + // files and when to do mesh + // refinement. + // + // With the exception of the + // startup code that loops back to + // the beginning of the function + // through the goto + // start_time_iteration + // label, everything should be + // relatively straightforward. In + // any case, it mimicks the + // corresponding function in + // step-31. 
template void TwoPhaseFlowProblem::run () { @@ -2584,10 +2584,10 @@ namespace Step43 start_time_iteration: VectorTools::project (saturation_dof_handler, - saturation_constraints, - QGauss(saturation_degree+2), - SaturationInitialValues(), - old_saturation_solution); + saturation_constraints, + QGauss(saturation_degree+2), + SaturationInitialValues(), + old_saturation_solution); timestep_number = 0; time_step = old_time_step = 0; @@ -2597,34 +2597,34 @@ namespace Step43 do { - std::cout << "Timestep " << timestep_number - << ": t=" << time - << ", dt=" << time_step - << std::endl; + std::cout << "Timestep " << timestep_number + << ": t=" << time + << ", dt=" << time_step + << std::endl; - solve (); + solve (); - std::cout << std::endl; + std::cout << std::endl; - if (timestep_number % 200 == 0) - output_results (); + if (timestep_number % 200 == 0) + output_results (); - if (timestep_number % 25 == 0) - refine_mesh (initial_refinement, - initial_refinement + n_pre_refinement_steps); + if (timestep_number % 25 == 0) + refine_mesh (initial_refinement, + initial_refinement + n_pre_refinement_steps); - if ((timestep_number == 0) && - (pre_refinement_step < n_pre_refinement_steps)) - { - ++pre_refinement_step; - goto start_time_iteration; - } + if ((timestep_number == 0) && + (pre_refinement_step < n_pre_refinement_steps)) + { + ++pre_refinement_step; + goto start_time_iteration; + } - time += time_step; - ++timestep_number; + time += time_step; + ++timestep_number; - old_old_saturation_solution = old_saturation_solution; - old_saturation_solution = saturation_solution; + old_old_saturation_solution = old_saturation_solution; + old_saturation_solution = saturation_solution; } while (time <= end_time); } @@ -2632,14 +2632,14 @@ namespace Step43 - // @sect3{The main() function} - // - // The main function looks almost the - // same as in all other programs. In - // particular, it is essentially the - // same as in step-31 where we also - // explain the need to initialize the - // MPI subsystem. + // @sect3{The main() function} + // + // The main function looks almost the + // same as in all other programs. In + // particular, it is essentially the + // same as in step-31 where we also + // explain the need to initialize the + // MPI subsystem. int main (int argc, char *argv[]) { try diff --git a/deal.II/examples/step-44/step-44.cc b/deal.II/examples/step-44/step-44.cc index 4832daaa7b..4d3b44ebfb 100644 --- a/deal.II/examples/step-44/step-44.cc +++ b/deal.II/examples/step-44/step-44.cc @@ -9,11 +9,11 @@ /* to the file deal.II/doc/license.html for the text and */ /* further information on this license. */ - // We start by including all the necessary - // deal.II header files and some C++ related - // ones. They have been discussed in detail - // in previous tutorial programs, so you need - // only refer to past tutorials for details. + // We start by including all the necessary + // deal.II header files and some C++ related + // ones. They have been discussed in detail + // in previous tutorial programs, so you need + // only refer to past tutorials for details. 
#include #include #include @@ -55,10 +55,10 @@ #include - // We then stick everything that relates to - // this tutorial program into a namespace of - // its own, and import all the deal.II - // function and class names into it: + // We then stick everything that relates to + // this tutorial program into a namespace of + // its own, and import all the deal.II + // function and class names into it: namespace Step44 { using namespace dealii; @@ -85,14 +85,14 @@ namespace Step44 // The quadrature order should be adjusted accordingly. struct FESystem { - unsigned int poly_degree; - unsigned int quad_order; + unsigned int poly_degree; + unsigned int quad_order; - static void - declare_parameters(ParameterHandler &prm); + static void + declare_parameters(ParameterHandler &prm); - void - parse_parameters(ParameterHandler &prm); + void + parse_parameters(ParameterHandler &prm); }; @@ -100,13 +100,13 @@ namespace Step44 { prm.enter_subsection("Finite element system"); { - prm.declare_entry("Polynomial degree", "2", - Patterns::Integer(0), - "Displacement system polynomial order"); + prm.declare_entry("Polynomial degree", "2", + Patterns::Integer(0), + "Displacement system polynomial order"); - prm.declare_entry("Quadrature order", "3", - Patterns::Integer(0), - "Gauss quadrature order"); + prm.declare_entry("Quadrature order", "3", + Patterns::Integer(0), + "Gauss quadrature order"); } prm.leave_subsection(); } @@ -115,8 +115,8 @@ namespace Step44 { prm.enter_subsection("Finite element system"); { - poly_degree = prm.get_integer("Polynomial degree"); - quad_order = prm.get_integer("Quadrature order"); + poly_degree = prm.get_integer("Polynomial degree"); + quad_order = prm.get_integer("Quadrature order"); } prm.leave_subsection(); } @@ -128,32 +128,32 @@ namespace Step44 // results given in the literature. struct Geometry { - unsigned int global_refinement; - double scale; - double p_p0; + unsigned int global_refinement; + double scale; + double p_p0; - static void - declare_parameters(ParameterHandler &prm); + static void + declare_parameters(ParameterHandler &prm); - void - parse_parameters(ParameterHandler &prm); + void + parse_parameters(ParameterHandler &prm); }; void Geometry::declare_parameters(ParameterHandler &prm) { prm.enter_subsection("Geometry"); { - prm.declare_entry("Global refinement", "2", - Patterns::Integer(0), - "Global refinement level"); + prm.declare_entry("Global refinement", "2", + Patterns::Integer(0), + "Global refinement level"); - prm.declare_entry("Grid scale", "1e-3", - Patterns::Double(0.0), - "Global grid scaling factor"); + prm.declare_entry("Grid scale", "1e-3", + Patterns::Double(0.0), + "Global grid scaling factor"); - prm.declare_entry("Pressure ratio p/p0", "100", - Patterns::Selection("20|40|60|80|100"), - "Ratio of applied pressure to reference pressure"); + prm.declare_entry("Pressure ratio p/p0", "100", + Patterns::Selection("20|40|60|80|100"), + "Ratio of applied pressure to reference pressure"); } prm.leave_subsection(); } @@ -162,9 +162,9 @@ namespace Step44 { prm.enter_subsection("Geometry"); { - global_refinement = prm.get_integer("Global refinement"); - scale = prm.get_double("Grid scale"); - p_p0 = prm.get_double("Pressure ratio p/p0"); + global_refinement = prm.get_integer("Global refinement"); + scale = prm.get_double("Grid scale"); + p_p0 = prm.get_double("Pressure ratio p/p0"); } prm.leave_subsection(); } @@ -175,27 +175,27 @@ namespace Step44 // for the neo-Hookean material. 
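Each of the parameter structs above follows the same two-step ParameterHandler idiom: a static declare_parameters() that registers entries with defaults and patterns, and a parse_parameters() that copies the values back out once a file has been read. A minimal sketch of how such a pair is driven, using the Parameters namespace that the solver class refers to later (Parameters::AllParameters); the file name "parameters.prm" is illustrative only:

ParameterHandler prm;
Parameters::FESystem fe_parameters;

Parameters::FESystem::declare_parameters(prm);   // register entries and defaults
prm.read_input("parameters.prm");                // parse the input file
fe_parameters.parse_parameters(prm);             // copy values into the struct

std::cout << "Polynomial degree: " << fe_parameters.poly_degree << std::endl;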
struct Materials { - double nu; - double mu; + double nu; + double mu; - static void - declare_parameters(ParameterHandler &prm); + static void + declare_parameters(ParameterHandler &prm); - void - parse_parameters(ParameterHandler &prm); + void + parse_parameters(ParameterHandler &prm); }; void Materials::declare_parameters(ParameterHandler &prm) { prm.enter_subsection("Material properties"); { - prm.declare_entry("Poisson's ratio", "0.4999", - Patterns::Double(-1.0,0.5), - "Poisson's ratio"); + prm.declare_entry("Poisson's ratio", "0.4999", + Patterns::Double(-1.0,0.5), + "Poisson's ratio"); - prm.declare_entry("Shear modulus", "80.194e6", - Patterns::Double(), - "Shear modulus"); + prm.declare_entry("Shear modulus", "80.194e6", + Patterns::Double(), + "Shear modulus"); } prm.leave_subsection(); } @@ -204,8 +204,8 @@ namespace Step44 { prm.enter_subsection("Material properties"); { - nu = prm.get_double("Poisson's ratio"); - mu = prm.get_double("Shear modulus"); + nu = prm.get_double("Poisson's ratio"); + mu = prm.get_double("Shear modulus"); } prm.leave_subsection(); } @@ -217,42 +217,42 @@ namespace Step44 // within a Newton increment. struct LinearSolver { - std::string type_lin; - double tol_lin; - double max_iterations_lin; - std::string preconditioner_type; - double preconditioner_relaxation; + std::string type_lin; + double tol_lin; + double max_iterations_lin; + std::string preconditioner_type; + double preconditioner_relaxation; - static void - declare_parameters(ParameterHandler &prm); + static void + declare_parameters(ParameterHandler &prm); - void - parse_parameters(ParameterHandler &prm); + void + parse_parameters(ParameterHandler &prm); }; void LinearSolver::declare_parameters(ParameterHandler &prm) { prm.enter_subsection("Linear solver"); { - prm.declare_entry("Solver type", "CG", - Patterns::Selection("CG|Direct"), - "Type of solver used to solve the linear system"); + prm.declare_entry("Solver type", "CG", + Patterns::Selection("CG|Direct"), + "Type of solver used to solve the linear system"); - prm.declare_entry("Residual", "1e-6", - Patterns::Double(0.0), - "Linear solver residual (scaled by residual norm)"); + prm.declare_entry("Residual", "1e-6", + Patterns::Double(0.0), + "Linear solver residual (scaled by residual norm)"); - prm.declare_entry("Max iteration multiplier", "1", - Patterns::Double(0.0), - "Linear solver iterations (multiples of the system matrix size)"); + prm.declare_entry("Max iteration multiplier", "1", + Patterns::Double(0.0), + "Linear solver iterations (multiples of the system matrix size)"); - prm.declare_entry("Preconditioner type", "ssor", - Patterns::Selection("jacobi|ssor"), - "Type of preconditioner"); + prm.declare_entry("Preconditioner type", "ssor", + Patterns::Selection("jacobi|ssor"), + "Type of preconditioner"); - prm.declare_entry("Preconditioner relaxation", "0.65", - Patterns::Double(0.0), - "Preconditioner relaxation value"); + prm.declare_entry("Preconditioner relaxation", "0.65", + Patterns::Double(0.0), + "Preconditioner relaxation value"); } prm.leave_subsection(); } @@ -261,11 +261,11 @@ namespace Step44 { prm.enter_subsection("Linear solver"); { - type_lin = prm.get("Solver type"); - tol_lin = prm.get_double("Residual"); - max_iterations_lin = prm.get_double("Max iteration multiplier"); - preconditioner_type = prm.get("Preconditioner type"); - preconditioner_relaxation = prm.get_double("Preconditioner relaxation"); + type_lin = prm.get("Solver type"); + tol_lin = prm.get_double("Residual"); + max_iterations_lin = 
prm.get_double("Max iteration multiplier"); + preconditioner_type = prm.get("Preconditioner type"); + preconditioner_relaxation = prm.get_double("Preconditioner relaxation"); } prm.leave_subsection(); } @@ -277,32 +277,32 @@ namespace Step44 // iterations for the Newton-Raphson nonlinear solver. struct NonlinearSolver { - unsigned int max_iterations_NR; - double tol_f; - double tol_u; + unsigned int max_iterations_NR; + double tol_f; + double tol_u; - static void - declare_parameters(ParameterHandler &prm); + static void + declare_parameters(ParameterHandler &prm); - void - parse_parameters(ParameterHandler &prm); + void + parse_parameters(ParameterHandler &prm); }; void NonlinearSolver::declare_parameters(ParameterHandler &prm) { prm.enter_subsection("Nonlinear solver"); { - prm.declare_entry("Max iterations Newton-Raphson", "10", - Patterns::Integer(0), - "Number of Newton-Raphson iterations allowed"); + prm.declare_entry("Max iterations Newton-Raphson", "10", + Patterns::Integer(0), + "Number of Newton-Raphson iterations allowed"); - prm.declare_entry("Tolerance force", "1.0e-9", - Patterns::Double(0.0), - "Force residual tolerance"); + prm.declare_entry("Tolerance force", "1.0e-9", + Patterns::Double(0.0), + "Force residual tolerance"); - prm.declare_entry("Tolerance displacement", "1.0e-6", - Patterns::Double(0.0), - "Displacement error tolerance"); + prm.declare_entry("Tolerance displacement", "1.0e-6", + Patterns::Double(0.0), + "Displacement error tolerance"); } prm.leave_subsection(); } @@ -311,9 +311,9 @@ namespace Step44 { prm.enter_subsection("Nonlinear solver"); { - max_iterations_NR = prm.get_integer("Max iterations Newton-Raphson"); - tol_f = prm.get_double("Tolerance force"); - tol_u = prm.get_double("Tolerance displacement"); + max_iterations_NR = prm.get_integer("Max iterations Newton-Raphson"); + tol_f = prm.get_double("Tolerance force"); + tol_u = prm.get_double("Tolerance displacement"); } prm.leave_subsection(); } @@ -323,27 +323,27 @@ namespace Step44 // and the simulation end-time. struct Time { - double delta_t; - double end_time; + double delta_t; + double end_time; - static void - declare_parameters(ParameterHandler &prm); + static void + declare_parameters(ParameterHandler &prm); - void - parse_parameters(ParameterHandler &prm); + void + parse_parameters(ParameterHandler &prm); }; void Time::declare_parameters(ParameterHandler &prm) { prm.enter_subsection("Time"); { - prm.declare_entry("End time", "1", - Patterns::Double(), - "End time"); + prm.declare_entry("End time", "1", + Patterns::Double(), + "End time"); - prm.declare_entry("Time step size", "0.1", - Patterns::Double(), - "Time step size"); + prm.declare_entry("Time step size", "0.1", + Patterns::Double(), + "Time step size"); } prm.leave_subsection(); } @@ -352,8 +352,8 @@ namespace Step44 { prm.enter_subsection("Time"); { - end_time = prm.get_double("End time"); - delta_t = prm.get_double("Time step size"); + end_time = prm.get_double("End time"); + delta_t = prm.get_double("Time step size"); } prm.leave_subsection(); } @@ -362,20 +362,20 @@ namespace Step44 // Finally we consolidate all of the above structures into // a single container that holds all of our run-time selections. 
struct AllParameters : public FESystem, - public Geometry, - public Materials, - public LinearSolver, - public NonlinearSolver, - public Time + public Geometry, + public Materials, + public LinearSolver, + public NonlinearSolver, + public Time { - AllParameters(const std::string & input_file); + AllParameters(const std::string & input_file); - static void - declare_parameters(ParameterHandler &prm); + static void + declare_parameters(ParameterHandler &prm); - void - parse_parameters(ParameterHandler &prm); + void + parse_parameters(ParameterHandler &prm); }; AllParameters::AllParameters(const std::string & input_file) @@ -427,9 +427,9 @@ namespace Step44 // check the size of the input vectors template void extract_submatrix (const std::vector &row_index_set, - const std::vector &column_index_set, - const MatrixType &matrix, - FullMatrix &sub_matrix) + const std::vector &column_index_set, + const MatrixType &matrix, + FullMatrix &sub_matrix) { const unsigned int n_rows_submatrix = row_index_set.size(); @@ -440,18 +440,18 @@ namespace Step44 sub_matrix.reinit(n_rows_submatrix, n_cols_submatrix); for (unsigned int sub_row = 0; sub_row < n_rows_submatrix; ++sub_row) - { - const unsigned int row = row_index_set[sub_row]; - Assert(row<=matrix.m(), ExcInternalError()); - - for (unsigned int sub_col = 0; sub_col < n_cols_submatrix; ++sub_col) - { - const unsigned int col = column_index_set[sub_col]; - Assert(col<=matrix.n(), ExcInternalError()); - - sub_matrix(sub_row, sub_col) = matrix(row, col); - } - } + { + const unsigned int row = row_index_set[sub_row]; + Assert(row<=matrix.m(), ExcInternalError()); + + for (unsigned int sub_col = 0; sub_col < n_cols_submatrix; ++sub_col) + { + const unsigned int col = column_index_set[sub_col]; + Assert(col<=matrix.n(), ExcInternalError()); + + sub_matrix(sub_row, sub_col) = matrix(row, col); + } + } } // As above, but to extract entries from @@ -474,20 +474,20 @@ namespace Step44 sub_matrix.reinit(n_rows_submatrix, n_cols_submatrix); for (unsigned int sub_row = 0; sub_row < n_rows_submatrix; ++sub_row) - { - const unsigned int row = row_index_set[sub_row]; - Assert(row<=matrix.m(), ExcInternalError()); - - for (unsigned int sub_col = 0; sub_col < n_cols_submatrix; ++sub_col) - { - const unsigned int col = column_index_set[sub_col]; - Assert(col<=matrix.n(), ExcInternalError()); - if (matrix.get_sparsity_pattern().exists(row, col) == false) - continue; - - sub_matrix(sub_row, sub_col) = matrix(row, col); - } - } + { + const unsigned int row = row_index_set[sub_row]; + Assert(row<=matrix.m(), ExcInternalError()); + + for (unsigned int sub_col = 0; sub_col < n_cols_submatrix; ++sub_col) + { + const unsigned int col = column_index_set[sub_col]; + Assert(col<=matrix.n(), ExcInternalError()); + if (matrix.get_sparsity_pattern().exists(row, col) == false) + continue; + + sub_matrix(sub_row, sub_col) = matrix(row, col); + } + } } // The replace_submatrix function takes @@ -500,9 +500,9 @@ namespace Step44 template void replace_submatrix(const std::vector &row_index_set, - const std::vector &column_index_set, - const MatrixType &sub_matrix, - FullMatrix &matrix) + const std::vector &column_index_set, + const MatrixType &sub_matrix, + FullMatrix &matrix) { const unsigned int n_rows_submatrix = row_index_set.size(); Assert(n_rows_submatrix<=sub_matrix.m(), ExcInternalError()); @@ -510,19 +510,19 @@ namespace Step44 Assert(n_cols_submatrix<=sub_matrix.n(), ExcInternalError()); for (unsigned int sub_row = 0; sub_row < n_rows_submatrix; ++sub_row) - { - const unsigned 
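The extract_submatrix() helper above copies the entries of `matrix` lying at the intersection of the given row and column index sets into `sub_matrix`, resizing the latter first. A small usage sketch with made-up data; it assumes the helper is visible in the current namespace and that the index sets are std::vector<unsigned int>, as the assertions suggest:

FullMatrix<double> K(4, 4);
for (unsigned int i = 0; i < 4; ++i)
  for (unsigned int j = 0; j < 4; ++j)
    K(i, j) = 10.0 * i + j;                 // arbitrary example entries

std::vector<unsigned int> rows(2), cols(2);
rows[0] = 0;  rows[1] = 2;
cols[0] = 1;  cols[1] = 3;

FullMatrix<double> K_sub;                   // reinit()-ed inside the helper
extract_submatrix(rows, cols, K, K_sub);    // K_sub(0,0)==K(0,1), K_sub(1,1)==K(2,3)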
int row = row_index_set[sub_row]; - Assert(row<=matrix.m(), ExcInternalError()); + { + const unsigned int row = row_index_set[sub_row]; + Assert(row<=matrix.m(), ExcInternalError()); - for (unsigned int sub_col = 0; sub_col < n_cols_submatrix; ++sub_col) - { - const unsigned int col = column_index_set[sub_col]; - Assert(col<=matrix.n(), ExcInternalError()); + for (unsigned int sub_col = 0; sub_col < n_cols_submatrix; ++sub_col) + { + const unsigned int col = column_index_set[sub_col]; + Assert(col<=matrix.n(), ExcInternalError()); - matrix(row, col) = sub_matrix(sub_row, sub_col); + matrix(row, col) = sub_matrix(sub_row, sub_col); - } - } + } + } } // Now we define some frequently used @@ -532,19 +532,19 @@ namespace Step44 { public: - // $\mathbf{I}$ - static const SymmetricTensor<2, dim> I; - // $\mathbf{I} \otimes \mathbf{I}$ - static const SymmetricTensor<4, dim> IxI; - // $\mathcal{S}$, note that as we only use - // this fourth-order unit tensor to operate - // on symmetric second-order tensors. - // To maintain notation consistent with Holzapfel (2001) - // we name the tensor $\mathcal{I}$ - static const SymmetricTensor<4, dim> II; - // Fourth-order deviatoric such that - // $\textrm{dev} \{ \bullet \} = \{ \bullet \} - [1/\textrm{dim}][ \{ \bullet\} :\mathbf{I}]\mathbf{I}$ - static const SymmetricTensor<4, dim> dev_P; + // $\mathbf{I}$ + static const SymmetricTensor<2, dim> I; + // $\mathbf{I} \otimes \mathbf{I}$ + static const SymmetricTensor<4, dim> IxI; + // $\mathcal{S}$, note that as we only use + // this fourth-order unit tensor to operate + // on symmetric second-order tensors. + // To maintain notation consistent with Holzapfel (2001) + // we name the tensor $\mathcal{I}$ + static const SymmetricTensor<4, dim> II; + // Fourth-order deviatoric such that + // $\textrm{dev} \{ \bullet \} = \{ \bullet \} - [1/\textrm{dim}][ \{ \bullet\} :\mathbf{I}]\mathbf{I}$ + static const SymmetricTensor<4, dim> dev_P; }; template @@ -573,37 +573,37 @@ namespace Step44 { public: Time (const double time_end, - const double delta_t) - : - timestep(0), - time_current(0.0), - time_end(time_end), - delta_t(delta_t) { + const double delta_t) + : + timestep(0), + time_current(0.0), + time_end(time_end), + delta_t(delta_t) { } virtual ~Time() - {} + {} double current() const - { - return time_current; - } + { + return time_current; + } double end() const - { - return time_end; - } + { + return time_end; + } double get_delta_t() const - { - return delta_t; - } + { + return delta_t; + } unsigned int get_timestep() const - { - return timestep; - } + { + return timestep; + } void increment() - { - time_current += delta_t; - ++timestep; - } + { + time_current += delta_t; + ++timestep; + } private: unsigned int timestep; @@ -647,191 +647,191 @@ namespace Step44 { public: Material_Compressible_Neo_Hook_Three_Field(const double mu, - const double nu) - : - kappa((2.0 * mu * (1.0 + nu)) / (3.0 * (1.0 - 2.0 * nu))), - c_1(mu / 2.0), - det_F(1.0), - p_tilde(0.0), - J_tilde(1.0), - b_bar(AdditionalTools::StandardTensors::I) - { - Assert(kappa > 0, ExcInternalError()); - } + const double nu) + : + kappa((2.0 * mu * (1.0 + nu)) / (3.0 * (1.0 - 2.0 * nu))), + c_1(mu / 2.0), + det_F(1.0), + p_tilde(0.0), + J_tilde(1.0), + b_bar(AdditionalTools::StandardTensors::I) + { + Assert(kappa > 0, ExcInternalError()); + } ~Material_Compressible_Neo_Hook_Three_Field() - {} - - // We update the material model with - // various deformation dependent data - // based on $F$ and the pressure $\widetilde{p}$ - // and dilatation 
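The StandardTensors members declared above correspond to tensors that deal.II also provides as free functions, which makes it easy to check the identity quoted in the comment on dev_P: applying the fourth-order deviatoric projector to any symmetric tensor reproduces its deviator, $\textrm{dev}\,\mathbf{t} = \mathbf{t} - \frac{1}{\textrm{dim}}(\mathbf{t}:\mathbf{I})\mathbf{I}$. A small sketch for dim = 3 with arbitrary example entries:

SymmetricTensor<2, 3> t;
t[0][0] = 2.0;  t[1][1] = -1.0;  t[2][2] = 0.5;  t[0][1] = 0.25;

const SymmetricTensor<2, 3> dev_direct = deviator(t);               // t - (1/3) tr(t) I
const SymmetricTensor<2, 3> dev_via_P  = deviator_tensor<3>() * t;  // same, via the projector

// dev_direct and dev_via_P agree up to round-off; in the same spirit,
// unit_symmetric_tensor<3>(), outer_product(I, I) and identity_tensor<3>()
// supply the I, IxI and symmetric identity tensors named above.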
$\widetilde{J}$, - // and at the end of the - // function include a physical check for - // internal consistency: + {} + + // We update the material model with + // various deformation dependent data + // based on $F$ and the pressure $\widetilde{p}$ + // and dilatation $\widetilde{J}$, + // and at the end of the + // function include a physical check for + // internal consistency: void update_material_data(const Tensor<2, dim> & F, - const double p_tilde_in, - const double J_tilde_in) - { - det_F = determinant(F); - b_bar = std::pow(det_F, -2.0 / 3.0) * symmetrize(F * transpose(F)); - p_tilde = p_tilde_in; - J_tilde = J_tilde_in; - - Assert(det_F > 0, ExcInternalError()); - } - - // The second function determines the - // Kirchhoff stress $\boldsymbol{\tau} - // = \boldsymbol{\tau}_{\textrm{iso}} + - // \boldsymbol{\tau}_{\textrm{vol}}$ + const double p_tilde_in, + const double J_tilde_in) + { + det_F = determinant(F); + b_bar = std::pow(det_F, -2.0 / 3.0) * symmetrize(F * transpose(F)); + p_tilde = p_tilde_in; + J_tilde = J_tilde_in; + + Assert(det_F > 0, ExcInternalError()); + } + + // The second function determines the + // Kirchhoff stress $\boldsymbol{\tau} + // = \boldsymbol{\tau}_{\textrm{iso}} + + // \boldsymbol{\tau}_{\textrm{vol}}$ SymmetricTensor<2, dim> get_tau() - { - return get_tau_iso() + get_tau_vol(); - } - - // The fourth-order elasticity tensor - // in the spatial setting - // $\mathfrak{c}$ is calculated from - // the SEF $\Psi$ as $ J - // \mathfrak{c}_{ijkl} = F_{iA} F_{jB} - // \mathfrak{C}_{ABCD} F_{kC} F_{lD}$ - // where $ \mathfrak{C} = 4 - // \frac{\partial^2 - // \Psi(\mathbf{C})}{\partial - // \mathbf{C} \partial \mathbf{C}}$ + { + return get_tau_iso() + get_tau_vol(); + } + + // The fourth-order elasticity tensor + // in the spatial setting + // $\mathfrak{c}$ is calculated from + // the SEF $\Psi$ as $ J + // \mathfrak{c}_{ijkl} = F_{iA} F_{jB} + // \mathfrak{C}_{ABCD} F_{kC} F_{lD}$ + // where $ \mathfrak{C} = 4 + // \frac{\partial^2 + // \Psi(\mathbf{C})}{\partial + // \mathbf{C} \partial \mathbf{C}}$ SymmetricTensor<4, dim> get_Jc() const - { - return get_Jc_vol() + get_Jc_iso(); - } - - // Derivative of the volumetric free - // energy with respect to $\widetilde{J}$ return - // $\frac{\partial - // \Psi_{\text{vol}}(\widetilde{J})}{\partial - // \widetilde{J}}$ + { + return get_Jc_vol() + get_Jc_iso(); + } + + // Derivative of the volumetric free + // energy with respect to $\widetilde{J}$ return + // $\frac{\partial + // \Psi_{\text{vol}}(\widetilde{J})}{\partial + // \widetilde{J}}$ double get_dPsi_vol_dJ() const - { - return (kappa / 2.0) * (J_tilde - 1.0 / J_tilde); - } - - // Second derivative of the volumetric - // free energy wrt $\widetilde{J}$. We - // need the following computation - // explicitly in the tangent so we make - // it public. We calculate - // $\frac{\partial^2 - // \Psi_{\textrm{vol}}(\widetilde{J})}{\partial - // \widetilde{J} \partial - // \widetilde{J}}$ + { + return (kappa / 2.0) * (J_tilde - 1.0 / J_tilde); + } + + // Second derivative of the volumetric + // free energy wrt $\widetilde{J}$. We + // need the following computation + // explicitly in the tangent so we make + // it public. 
We calculate + // $\frac{\partial^2 + // \Psi_{\textrm{vol}}(\widetilde{J})}{\partial + // \widetilde{J} \partial + // \widetilde{J}}$ double get_d2Psi_vol_dJ2() const - { - return ( (kappa / 2.0) * (1.0 + 1.0 / (J_tilde * J_tilde))); - } + { + return ( (kappa / 2.0) * (1.0 + 1.0 / (J_tilde * J_tilde))); + } - // The next few functions return - // various data that we choose to store - // with the material: + // The next few functions return + // various data that we choose to store + // with the material: double get_det_F() const - { - return det_F; - } + { + return det_F; + } double get_p_tilde() const - { - return p_tilde; - } + { + return p_tilde; + } double get_J_tilde() const - { - return J_tilde; - } + { + return J_tilde; + } protected: - // Define constitutive model paramaters - // $\kappa$ (bulk modulus) - // and the neo-Hookean model - // parameter $c_1$: + // Define constitutive model paramaters + // $\kappa$ (bulk modulus) + // and the neo-Hookean model + // parameter $c_1$: const double kappa; const double c_1; - // Model specific data that is - // convenient to store with the - // material: + // Model specific data that is + // convenient to store with the + // material: double det_F; double p_tilde; double J_tilde; SymmetricTensor<2, dim> b_bar; - // The following functions are used - // internally in determining the result - // of some of the public functions - // above. The first one determines the - // volumetric Kirchhoff stress - // $\boldsymbol{\tau}_{\textrm{vol}}$: + // The following functions are used + // internally in determining the result + // of some of the public functions + // above. The first one determines the + // volumetric Kirchhoff stress + // $\boldsymbol{\tau}_{\textrm{vol}}$: SymmetricTensor<2, dim> get_tau_vol() const - { - return p_tilde * det_F * AdditionalTools::StandardTensors::I; - } - - // Next, determine the isochoric - // Kirchhoff stress - // $\boldsymbol{\tau}_{\textrm{iso}} = - // \mathcal{P}:\overline{\boldsymbol{\tau}}$: + { + return p_tilde * det_F * AdditionalTools::StandardTensors::I; + } + + // Next, determine the isochoric + // Kirchhoff stress + // $\boldsymbol{\tau}_{\textrm{iso}} = + // \mathcal{P}:\overline{\boldsymbol{\tau}}$: SymmetricTensor<2, dim> get_tau_iso() const - { - return AdditionalTools::StandardTensors::dev_P * get_tau_bar(); - } + { + return AdditionalTools::StandardTensors::dev_P * get_tau_bar(); + } - // Then, determine the fictitious - // Kirchhoff stress - // $\overline{\boldsymbol{\tau}}$: + // Then, determine the fictitious + // Kirchhoff stress + // $\overline{\boldsymbol{\tau}}$: SymmetricTensor<2, dim> get_tau_bar() const - { - return 2.0 * c_1 * b_bar; - } + { + return 2.0 * c_1 * b_bar; + } - // Calculate the volumetric part of the - // tangent $J - // \mathfrak{c}_\textrm{vol}$: + // Calculate the volumetric part of the + // tangent $J + // \mathfrak{c}_\textrm{vol}$: SymmetricTensor<4, dim> get_Jc_vol() const - { + { - return p_tilde * det_F - * ( AdditionalTools::StandardTensors::IxI - - (2.0 * AdditionalTools::StandardTensors::II) ); - } + return p_tilde * det_F + * ( AdditionalTools::StandardTensors::IxI + - (2.0 * AdditionalTools::StandardTensors::II) ); + } - // Calculate the isochoric part of the - // tangent $J - // \mathfrak{c}_\textrm{iso}$: + // Calculate the isochoric part of the + // tangent $J + // \mathfrak{c}_\textrm{iso}$: SymmetricTensor<4, dim> get_Jc_iso() const - { - const SymmetricTensor<2, dim> tau_bar = get_tau_bar(); - const SymmetricTensor<2, dim> tau_iso = get_tau_iso(); - 
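For reference, the two volumetric derivatives above, $\frac{\partial \Psi_{\text{vol}}}{\partial \widetilde{J}} = \frac{\kappa}{2}\bigl(\widetilde{J} - \frac{1}{\widetilde{J}}\bigr)$ and $\frac{\partial^2 \Psi_{\text{vol}}}{\partial \widetilde{J}^2} = \frac{\kappa}{2}\bigl(1 + \frac{1}{\widetilde{J}^2}\bigr)$, are exactly what one obtains by differentiating a volumetric strain energy of the form $\Psi_{\text{vol}}(\widetilde{J}) = \frac{\kappa}{4}\bigl(\widetilde{J}^2 - 1 - 2\ln\widetilde{J}\bigr)$ once and twice, so the two member functions are mutually consistent.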
const SymmetricTensor<4, dim> tau_iso_x_I - = outer_product(tau_iso, - AdditionalTools::StandardTensors::I); - const SymmetricTensor<4, dim> I_x_tau_iso - = outer_product(AdditionalTools::StandardTensors::I, - tau_iso); - const SymmetricTensor<4, dim> c_bar = get_c_bar(); - - return (2.0 / 3.0) * trace(tau_bar) - * AdditionalTools::StandardTensors::dev_P - - (2.0 / 3.0) * (tau_iso_x_I + I_x_tau_iso) - + AdditionalTools::StandardTensors::dev_P * c_bar - * AdditionalTools::StandardTensors::dev_P; - } - - // Calculate the fictitious elasticity - // tensor $\overline{\mathfrak{c}}$. - // For the material model chosen this - // is simply zero: + { + const SymmetricTensor<2, dim> tau_bar = get_tau_bar(); + const SymmetricTensor<2, dim> tau_iso = get_tau_iso(); + const SymmetricTensor<4, dim> tau_iso_x_I + = outer_product(tau_iso, + AdditionalTools::StandardTensors::I); + const SymmetricTensor<4, dim> I_x_tau_iso + = outer_product(AdditionalTools::StandardTensors::I, + tau_iso); + const SymmetricTensor<4, dim> c_bar = get_c_bar(); + + return (2.0 / 3.0) * trace(tau_bar) + * AdditionalTools::StandardTensors::dev_P + - (2.0 / 3.0) * (tau_iso_x_I + I_x_tau_iso) + + AdditionalTools::StandardTensors::dev_P * c_bar + * AdditionalTools::StandardTensors::dev_P; + } + + // Calculate the fictitious elasticity + // tensor $\overline{\mathfrak{c}}$. + // For the material model chosen this + // is simply zero: SymmetricTensor<4, dim> get_c_bar() const - { - return SymmetricTensor<4, dim>(); - } + { + return SymmetricTensor<4, dim>(); + } }; // @sect3{Quadrature point history} @@ -848,157 +848,157 @@ namespace Step44 { public: PointHistory() - : - material(NULL), - F_inv(AdditionalTools::StandardTensors::I), - tau(SymmetricTensor<2, dim>()), - d2Psi_vol_dJ2(0.0), - dPsi_vol_dJ(0.0), - Jc(SymmetricTensor<4, dim>()) - {} + : + material(NULL), + F_inv(AdditionalTools::StandardTensors::I), + tau(SymmetricTensor<2, dim>()), + d2Psi_vol_dJ2(0.0), + dPsi_vol_dJ(0.0), + Jc(SymmetricTensor<4, dim>()) + {} virtual ~PointHistory() - { - delete material; - material = NULL; - } - - // The first function is used to create - // a material object and to initialize - // all tensors correctly: - // The second one updates the stored - // values and stresses based on the + { + delete material; + material = NULL; + } + + // The first function is used to create + // a material object and to initialize + // all tensors correctly: + // The second one updates the stored + // values and stresses based on the // current deformation measure // $\textrm{Grad}\mathbf{u}_{\textrm{n}}$, // pressure $\widetilde{p}$ and // dilation $\widetilde{J}$ field // values. void setup_lqp (const Parameters::AllParameters & parameters) - { - material = new Material_Compressible_Neo_Hook_Three_Field(parameters.mu, - parameters.nu); - update_values(Tensor<2, dim>(), 0.0, 1.0); - } - - // To this end, we calculate the - // deformation gradient $\mathbf{F}$ - // from the displacement gradient - // $\textrm{Grad}\ \mathbf{u}$, i.e. - // $\mathbf{F}(\mathbf{u}) = \mathbf{I} - // + \textrm{Grad}\ \mathbf{u}$ and - // then let the material model - // associated with this quadrature - // point update itself. 
When computing - // the deformation gradient, we have to - // take care with which data types we - // compare the sum $\mathbf{I} + - // \textrm{Grad}\ \mathbf{u}$: Since - // $I$ has data type SymmetricTensor, - // just writing I + - // Grad_u_n would convert the - // second argument to a symmetric - // tensor, perform the sum, and then - // cast the result to a Tensor (i.e., - // the type of a possibly non-symmetric - // tensor). However, since - // Grad_u_n is - // nonsymmetric in general, the - // conversion to SymmetricTensor will - // fail. We can avoid this back and - // forth by converting $I$ to Tensor - // first, and then performing the - // addition as between non-symmetric - // tensors: + { + material = new Material_Compressible_Neo_Hook_Three_Field(parameters.mu, + parameters.nu); + update_values(Tensor<2, dim>(), 0.0, 1.0); + } + + // To this end, we calculate the + // deformation gradient $\mathbf{F}$ + // from the displacement gradient + // $\textrm{Grad}\ \mathbf{u}$, i.e. + // $\mathbf{F}(\mathbf{u}) = \mathbf{I} + // + \textrm{Grad}\ \mathbf{u}$ and + // then let the material model + // associated with this quadrature + // point update itself. When computing + // the deformation gradient, we have to + // take care with which data types we + // compare the sum $\mathbf{I} + + // \textrm{Grad}\ \mathbf{u}$: Since + // $I$ has data type SymmetricTensor, + // just writing I + + // Grad_u_n would convert the + // second argument to a symmetric + // tensor, perform the sum, and then + // cast the result to a Tensor (i.e., + // the type of a possibly non-symmetric + // tensor). However, since + // Grad_u_n is + // nonsymmetric in general, the + // conversion to SymmetricTensor will + // fail. We can avoid this back and + // forth by converting $I$ to Tensor + // first, and then performing the + // addition as between non-symmetric + // tensors: void update_values (const Tensor<2, dim> & Grad_u_n, - const double p_tilde, - const double J_tilde) - { - const Tensor<2, dim> F - = (Tensor<2, dim>(AdditionalTools::StandardTensors::I) + - Grad_u_n); - material->update_material_data(F, p_tilde, J_tilde); - - // The material has been updated so - // we now calculate the Kirchhoff - // stress $\mathbf{\tau}$, the - // tangent $J\mathfrak{c}$ - // and the first and second derivatives - // of the volumetric free energy. - // - // We also store the inverse of - // the deformation gradient since - // we frequently use it: - F_inv = invert(F); - tau = material->get_tau(); - Jc = material->get_Jc(); - dPsi_vol_dJ = material->get_dPsi_vol_dJ(); - d2Psi_vol_dJ2 = material->get_d2Psi_vol_dJ2(); - - } - - // We offer an interface to retrieve - // certain data. Here are the - // kinematic variables: + const double p_tilde, + const double J_tilde) + { + const Tensor<2, dim> F + = (Tensor<2, dim>(AdditionalTools::StandardTensors::I) + + Grad_u_n); + material->update_material_data(F, p_tilde, J_tilde); + + // The material has been updated so + // we now calculate the Kirchhoff + // stress $\mathbf{\tau}$, the + // tangent $J\mathfrak{c}$ + // and the first and second derivatives + // of the volumetric free energy. + // + // We also store the inverse of + // the deformation gradient since + // we frequently use it: + F_inv = invert(F); + tau = material->get_tau(); + Jc = material->get_Jc(); + dPsi_vol_dJ = material->get_dPsi_vol_dJ(); + d2Psi_vol_dJ2 = material->get_d2Psi_vol_dJ2(); + + } + + // We offer an interface to retrieve + // certain data. 
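The type pitfall described above can be reproduced in a few lines. A compact sketch for dim = 3, where `grad_u` is a generic, generally non-symmetric displacement gradient; the commented-out line is the variant the comment warns against:

const SymmetricTensor<2, 3> I = unit_symmetric_tensor<3>();
Tensor<2, 3> grad_u;                 // generally non-symmetric
grad_u[0][1] = 0.1;                  // e.g. a simple shear contribution

// Tensor<2, 3> F = I + grad_u;      // would attempt the SymmetricTensor
//                                   // conversion of grad_u described above
const Tensor<2, 3> F =
  Tensor<2, 3>(I) + grad_u;          // convert I first, then add full tensors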
Here are the + // kinematic variables: double get_J_tilde() const - { - return material->get_J_tilde(); - } + { + return material->get_J_tilde(); + } double get_det_F() const - { - return material->get_det_F(); - } + { + return material->get_det_F(); + } const Tensor<2, dim>& get_F_inv() const - { - return F_inv; - } - - // ...and the kinetic variables. These - // are used in the material and global - // tangent matrix and residual assembly - // operations: + { + return F_inv; + } + + // ...and the kinetic variables. These + // are used in the material and global + // tangent matrix and residual assembly + // operations: double get_p_tilde() const - { - return material->get_p_tilde(); - } + { + return material->get_p_tilde(); + } const SymmetricTensor<2, dim>& get_tau() const - { - return tau; - } + { + return tau; + } double get_dPsi_vol_dJ() const - { - return dPsi_vol_dJ; - } + { + return dPsi_vol_dJ; + } double get_d2Psi_vol_dJ2() const - { - return d2Psi_vol_dJ2; - } + { + return d2Psi_vol_dJ2; + } - // and finally the tangent + // and finally the tangent const SymmetricTensor<4, dim>& get_Jc() const - { - return Jc; - } - - // In terms of member functions, this - // class stores for the quadrature - // point it represents a copy of a - // material type in case different - // materials are used in different - // regions of the domain, as well as - // the inverse of the deformation - // gradient... + { + return Jc; + } + + // In terms of member functions, this + // class stores for the quadrature + // point it represents a copy of a + // material type in case different + // materials are used in different + // regions of the domain, as well as + // the inverse of the deformation + // gradient... private: Material_Compressible_Neo_Hook_Three_Field* material; Tensor<2, dim> F_inv; - // ... and stress-type variables along - // with the tangent $J\mathfrak{c}$: + // ... and stress-type variables along + // with the tangent $J\mathfrak{c}$: SymmetricTensor<2, dim> tau; double d2Psi_vol_dJ2; double dPsi_vol_dJ; @@ -1027,19 +1027,19 @@ namespace Step44 private: - // In the private section of this - // class, we first forward declare a - // number of objects that are used in - // parallelizing work using the - // WorkStream object (see the @ref - // threads module for more information - // on this). - // - // We declare such structures for the - // computation of tangent (stiffness) - // matrix, right hand side, static - // condensation, and for updating - // quadrature points: + // In the private section of this + // class, we first forward declare a + // number of objects that are used in + // parallelizing work using the + // WorkStream object (see the @ref + // threads module for more information + // on this). 
+ // + // We declare such structures for the + // computation of tangent (stiffness) + // matrix, right hand side, static + // condensation, and for updating + // quadrature points: struct PerTaskData_K; struct ScratchData_K; @@ -1052,36 +1052,36 @@ namespace Step44 struct PerTaskData_UQPH; struct ScratchData_UQPH; - // We start the collection of member - // functions with one that builds the - // grid: + // We start the collection of member + // functions with one that builds the + // grid: void make_grid(); - // Set up the finite element system to - // be solved: + // Set up the finite element system to + // be solved: void system_setup(); void determine_component_extractors(); - // Several functions to assemble the - // system and right hand side matrices - // using multi-threading. Each of them - // comes as a wrapper function, one - // that is executed to do the work in - // the WorkStream model on one cell, - // and one that copies the work done on - // this one cell into the global object - // that represents it: + // Several functions to assemble the + // system and right hand side matrices + // using multi-threading. Each of them + // comes as a wrapper function, one + // that is executed to do the work in + // the WorkStream model on one cell, + // and one that copies the work done on + // this one cell into the global object + // that represents it: void assemble_system_tangent(); void assemble_system_tangent_one_cell(const typename DoFHandler::active_cell_iterator & cell, - ScratchData_K & scratch, - PerTaskData_K & data); + ScratchData_K & scratch, + PerTaskData_K & data); void copy_local_to_global_K(const PerTaskData_K & data); @@ -1091,8 +1091,8 @@ namespace Step44 void assemble_system_rhs_one_cell(const typename DoFHandler::active_cell_iterator & cell, - ScratchData_RHS & scratch, - PerTaskData_RHS & data); + ScratchData_RHS & scratch, + PerTaskData_RHS & data); void copy_local_to_global_rhs(const PerTaskData_RHS & data); @@ -1102,22 +1102,22 @@ namespace Step44 void assemble_sc_one_cell(const typename DoFHandler::active_cell_iterator & cell, - ScratchData_SC & scratch, - PerTaskData_SC & data); + ScratchData_SC & scratch, + PerTaskData_SC & data); void copy_local_to_global_sc(const PerTaskData_SC & data); - // Apply Dirichlet boundary conditions on - // the displacement field + // Apply Dirichlet boundary conditions on + // the displacement field void make_constraints(const int & it_nr); - // Create and update the quadrature - // points. Here, no data needs to be - // copied into a global object, so the - // copy_local_to_global function is - // empty: + // Create and update the quadrature + // points. Here, no data needs to be + // copied into a global object, so the + // copy_local_to_global function is + // empty: void setup_qph(); @@ -1126,66 +1126,66 @@ namespace Step44 void update_qph_incremental_one_cell(const typename DoFHandler::active_cell_iterator & cell, - ScratchData_UQPH & scratch, - PerTaskData_UQPH & data); + ScratchData_UQPH & scratch, + PerTaskData_UQPH & data); void copy_local_to_global_UQPH(const PerTaskData_UQPH & data) - {} + {} - // Solve for the displacement using a - // Newton-Raphson method. We break this - // function into the nonlinear loop and - // the function that solves the - // linearized Newton-Raphson step: + // Solve for the displacement using a + // Newton-Raphson method. 
We break this + // function into the nonlinear loop and + // the function that solves the + // linearized Newton-Raphson step: void solve_nonlinear_timestep(BlockVector & solution_delta); std::pair solve_linear_system(BlockVector & newton_update); - // Solution retrieval as well as - // post-processing and writing data to - // file: + // Solution retrieval as well as + // post-processing and writing data to + // file: BlockVector get_total_solution(const BlockVector & solution_delta) const; void output_results() const; - // Finally, some member variables that - // describe the current state: A - // collection of the parameters used to - // describe the problem setup... + // Finally, some member variables that + // describe the current state: A + // collection of the parameters used to + // describe the problem setup... Parameters::AllParameters parameters; - // ...the volume of the reference and - // current configurations... + // ...the volume of the reference and + // current configurations... double vol_reference; double vol_current; - // ...and description of the geometry on which - // the problem is solved: + // ...and description of the geometry on which + // the problem is solved: Triangulation triangulation; - // Also, keep track of the current time and the - // time spent evaluating certain - // functions + // Also, keep track of the current time and the + // time spent evaluating certain + // functions Time time; TimerOutput timer; - // A storage object for quadrature point - // information. See step-18 for more on - // this: + // A storage object for quadrature point + // information. See step-18 for more on + // this: std::vector > quadrature_point_history; - // A description of the finite-element - // system including the displacement - // polynomial degree, the - // degree-of-freedom handler, number of - // dof's per cell and the extractor - // objects used to retrieve information - // from the solution vectors: + // A description of the finite-element + // system including the displacement + // polynomial degree, the + // degree-of-freedom handler, number of + // dof's per cell and the extractor + // objects used to retrieve information + // from the solution vectors: const unsigned int degree; const FESystem fe; DoFHandler dof_handler_ref; @@ -1194,12 +1194,12 @@ namespace Step44 const FEValuesExtractors::Scalar p_fe; const FEValuesExtractors::Scalar J_fe; - // Description of how the block-system is - // arranged. There are 3 blocks, the first - // contains a vector DOF $\mathbf{u}$ - // while the other two describe scalar - // DOFs, $\widetilde{p}$ and - // $\widetilde{J}$. + // Description of how the block-system is + // arranged. There are 3 blocks, the first + // contains a vector DOF $\mathbf{u}$ + // while the other two describe scalar + // DOFs, $\widetilde{p}$ and + // $\widetilde{J}$. static const unsigned int n_blocks = 3; static const unsigned int n_components = dim + 2; static const unsigned int first_u_component = 0; @@ -1208,9 +1208,9 @@ namespace Step44 enum { - u_dof = 0, - p_dof = 1, - J_dof = 2 + u_dof = 0, + p_dof = 1, + J_dof = 2 }; std::vector dofs_per_block; @@ -1218,76 +1218,76 @@ namespace Step44 std::vector element_indices_p; std::vector element_indices_J; - // Rules for Gauss-quadrature on both the - // cell and faces. The number of - // quadrature points on both cells and - // faces is recorded. + // Rules for Gauss-quadrature on both the + // cell and faces. The number of + // quadrature points on both cells and + // faces is recorded. 
const QGauss qf_cell; const QGauss qf_face; const unsigned int n_q_points; const unsigned int n_q_points_f; - // Objects that store the converged - // solution and right-hand side vectors, - // as well as the tangent matrix. There - // is a ConstraintMatrix object used to - // keep track of constraints. We make - // use of a sparsity pattern designed for - // a block system. + // Objects that store the converged + // solution and right-hand side vectors, + // as well as the tangent matrix. There + // is a ConstraintMatrix object used to + // keep track of constraints. We make + // use of a sparsity pattern designed for + // a block system. ConstraintMatrix constraints; BlockSparsityPattern sparsity_pattern; BlockSparseMatrix tangent_matrix; BlockVector system_rhs; BlockVector solution_n; - // Then define a number of variables to - // store norms and update norms and - // normalisation factors. + // Then define a number of variables to + // store norms and update norms and + // normalisation factors. struct Errors { - Errors() - : - norm(1.0), u(1.0), p(1.0), J(1.0) - {} - - void reset() - { - norm = 1.0; - u = 1.0; - p = 1.0; - J = 1.0; - } - void normalise(const Errors & rhs) - { - if (rhs.norm != 0.0) - norm /= rhs.norm; - if (rhs.u != 0.0) - u /= rhs.u; - if (rhs.p != 0.0) - p /= rhs.p; - if (rhs.J != 0.0) - J /= rhs.J; - } - - double norm, u, p, J; + Errors() + : + norm(1.0), u(1.0), p(1.0), J(1.0) + {} + + void reset() + { + norm = 1.0; + u = 1.0; + p = 1.0; + J = 1.0; + } + void normalise(const Errors & rhs) + { + if (rhs.norm != 0.0) + norm /= rhs.norm; + if (rhs.u != 0.0) + u /= rhs.u; + if (rhs.p != 0.0) + p /= rhs.p; + if (rhs.J != 0.0) + J /= rhs.J; + } + + double norm, u, p, J; }; Errors error_residual, error_residual_0, error_residual_norm, error_update, error_update_0, error_update_norm; - // Methods to calculate error measures + // Methods to calculate error measures void get_error_residual(Errors & error_residual); void get_error_update(const BlockVector & newton_update, - Errors & error_update); + Errors & error_update); std::pair get_error_dilation(); - // Print information to screen - // in a pleasing way... + // Print information to screen + // in a pleasing way... static void print_conv_header(); @@ -1303,49 +1303,49 @@ namespace Step44 // from the parameter file. template Solid::Solid(const std::string & input_file) - : - parameters(input_file), - triangulation(Triangulation::maximum_smoothing), - time(parameters.end_time, parameters.delta_t), - timer(std::cout, - TimerOutput::summary, - TimerOutput::wall_times), - degree(parameters.poly_degree), - // The Finite Element - // System is composed of - // dim continuous - // displacement DOFs, and - // discontinuous pressure - // and dilatation DOFs. In - // an attempt to satisfy - // the Babuska-Brezzi or LBB stability - // conditions (see Hughes (2000)), we - // setup a $Q_n \times - // DGPM_{n-1} \times DGPM_{n-1}$ - // system. $Q_2 \times DGPM_1 - // \times DGPM_1$ elements - // satisfy this condition, - // while $Q_1 \times DGPM_0 - // \times DGPM_0$ elements do - // not. However, it has - // been shown that the - // latter demonstrate good - // convergence - // characteristics - // nonetheless. 
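The normalise() member above divides each error measure by its counterpart in a reference Errors object, guarding against division by zero. A plausible way such a helper is used inside the Newton loop is to record the residual errors of the first iteration and report all later residuals relative to them; the sketch below is illustrative only (the actual loop appears further down, and `newton_iteration` names its counter):

get_error_residual(error_residual);

if (newton_iteration == 0)              // first iteration: remember the reference
  error_residual_0 = error_residual;

error_residual_norm = error_residual;   // report residuals relative to iteration 0
error_residual_norm.normalise(error_residual_0);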
- fe(FE_Q(parameters.poly_degree), dim, // displacement - FE_DGPMonomial(parameters.poly_degree - 1), 1, // pressure - FE_DGPMonomial(parameters.poly_degree - 1), 1), // dilatation - dof_handler_ref(triangulation), - dofs_per_cell (fe.dofs_per_cell), - u_fe(first_u_component), - p_fe(p_component), - J_fe(J_component), - dofs_per_block(n_blocks), - qf_cell(parameters.quad_order), - qf_face(parameters.quad_order), - n_q_points (qf_cell.size()), - n_q_points_f (qf_face.size()) + : + parameters(input_file), + triangulation(Triangulation::maximum_smoothing), + time(parameters.end_time, parameters.delta_t), + timer(std::cout, + TimerOutput::summary, + TimerOutput::wall_times), + degree(parameters.poly_degree), + // The Finite Element + // System is composed of + // dim continuous + // displacement DOFs, and + // discontinuous pressure + // and dilatation DOFs. In + // an attempt to satisfy + // the Babuska-Brezzi or LBB stability + // conditions (see Hughes (2000)), we + // setup a $Q_n \times + // DGPM_{n-1} \times DGPM_{n-1}$ + // system. $Q_2 \times DGPM_1 + // \times DGPM_1$ elements + // satisfy this condition, + // while $Q_1 \times DGPM_0 + // \times DGPM_0$ elements do + // not. However, it has + // been shown that the + // latter demonstrate good + // convergence + // characteristics + // nonetheless. + fe(FE_Q(parameters.poly_degree), dim, // displacement + FE_DGPMonomial(parameters.poly_degree - 1), 1, // pressure + FE_DGPMonomial(parameters.poly_degree - 1), 1), // dilatation + dof_handler_ref(triangulation), + dofs_per_cell (fe.dofs_per_cell), + u_fe(first_u_component), + p_fe(p_component), + J_fe(J_component), + dofs_per_block(n_blocks), + qf_cell(parameters.quad_order), + qf_face(parameters.quad_order), + n_q_points (qf_cell.size()), + n_q_points_f (qf_face.size()) { determine_component_extractors(); } @@ -1405,33 +1405,33 @@ namespace Step44 output_results(); time.increment(); - // We then declare the incremental - // solution update $\varDelta - // \mathbf{\Xi}:= \{\varDelta - // \mathbf{u},\varDelta \widetilde{p}, - // \varDelta \widetilde{J} \}$ and start - // the loop over the time domain. - // - // At the beginning, we reset the solution update - // for this time step... + // We then declare the incremental + // solution update $\varDelta + // \mathbf{\Xi}:= \{\varDelta + // \mathbf{u},\varDelta \widetilde{p}, + // \varDelta \widetilde{J} \}$ and start + // the loop over the time domain. + // + // At the beginning, we reset the solution update + // for this time step... BlockVector solution_delta(dofs_per_block); while (time.current() < time.end()) { - solution_delta = 0.0; - - // ...solve the current time step and - // update total solution vector - // $\mathbf{\Xi}_{\textrm{n}} = - // \mathbf{\Xi}_{\textrm{n-1}} + - // \varDelta \mathbf{\Xi}$... - solve_nonlinear_timestep(solution_delta); - solution_n += solution_delta; - - // ...and plot the results before - // moving on happily to the next time - // step: - output_results(); - time.increment(); + solution_delta = 0.0; + + // ...solve the current time step and + // update total solution vector + // $\mathbf{\Xi}_{\textrm{n}} = + // \mathbf{\Xi}_{\textrm{n-1}} + + // \varDelta \mathbf{\Xi}$... 
+ solve_nonlinear_timestep(solution_delta); + solution_n += solution_delta; + + // ...and plot the results before + // moving on happily to the next time + // step: + output_results(); + time.increment(); } } @@ -1457,15 +1457,15 @@ namespace Step44 std::vector local_dof_indices; PerTaskData_K(const unsigned int dofs_per_cell) - : - cell_matrix(dofs_per_cell, dofs_per_cell), - local_dof_indices(dofs_per_cell) - {} + : + cell_matrix(dofs_per_cell, dofs_per_cell), + local_dof_indices(dofs_per_cell) + {} void reset() - { - cell_matrix = 0.0; - } + { + cell_matrix = 0.0; + } }; @@ -1483,48 +1483,48 @@ namespace Step44 std::vector > > symm_grad_Nx; ScratchData_K(const FiniteElement & fe_cell, - const QGauss & qf_cell, - const UpdateFlags uf_cell) - : - fe_values_ref(fe_cell, qf_cell, uf_cell), - Nx(qf_cell.size(), - std::vector(fe_cell.dofs_per_cell)), - grad_Nx(qf_cell.size(), - std::vector >(fe_cell.dofs_per_cell)), - symm_grad_Nx(qf_cell.size(), - std::vector > - (fe_cell.dofs_per_cell)) - {} + const QGauss & qf_cell, + const UpdateFlags uf_cell) + : + fe_values_ref(fe_cell, qf_cell, uf_cell), + Nx(qf_cell.size(), + std::vector(fe_cell.dofs_per_cell)), + grad_Nx(qf_cell.size(), + std::vector >(fe_cell.dofs_per_cell)), + symm_grad_Nx(qf_cell.size(), + std::vector > + (fe_cell.dofs_per_cell)) + {} ScratchData_K(const ScratchData_K & rhs) - : - fe_values_ref(rhs.fe_values_ref.get_fe(), - rhs.fe_values_ref.get_quadrature(), - rhs.fe_values_ref.get_update_flags()), - Nx(rhs.Nx), - grad_Nx(rhs.grad_Nx), - symm_grad_Nx(rhs.symm_grad_Nx) - {} + : + fe_values_ref(rhs.fe_values_ref.get_fe(), + rhs.fe_values_ref.get_quadrature(), + rhs.fe_values_ref.get_update_flags()), + Nx(rhs.Nx), + grad_Nx(rhs.grad_Nx), + symm_grad_Nx(rhs.symm_grad_Nx) + {} void reset() - { - const unsigned int n_q_points = Nx.size(); - const unsigned int n_dofs_per_cell = Nx[0].size(); - for (unsigned int q_point = 0; q_point < n_q_points; ++q_point) - { - Assert( Nx[q_point].size() == n_dofs_per_cell, ExcInternalError()); - Assert( grad_Nx[q_point].size() == n_dofs_per_cell, - ExcInternalError()); - Assert( symm_grad_Nx[q_point].size() == n_dofs_per_cell, - ExcInternalError()); - for (unsigned int k = 0; k < n_dofs_per_cell; ++k) - { - Nx[q_point][k] = 0.0; - grad_Nx[q_point][k] = 0.0; - symm_grad_Nx[q_point][k] = 0.0; - } - } - } + { + const unsigned int n_q_points = Nx.size(); + const unsigned int n_dofs_per_cell = Nx[0].size(); + for (unsigned int q_point = 0; q_point < n_q_points; ++q_point) + { + Assert( Nx[q_point].size() == n_dofs_per_cell, ExcInternalError()); + Assert( grad_Nx[q_point].size() == n_dofs_per_cell, + ExcInternalError()); + Assert( symm_grad_Nx[q_point].size() == n_dofs_per_cell, + ExcInternalError()); + for (unsigned int k = 0; k < n_dofs_per_cell; ++k) + { + Nx[q_point][k] = 0.0; + grad_Nx[q_point][k] = 0.0; + symm_grad_Nx[q_point][k] = 0.0; + } + } + } }; @@ -1540,15 +1540,15 @@ namespace Step44 std::vector local_dof_indices; PerTaskData_RHS(const unsigned int dofs_per_cell) - : - cell_rhs(dofs_per_cell), - local_dof_indices(dofs_per_cell) - {} + : + cell_rhs(dofs_per_cell), + local_dof_indices(dofs_per_cell) + {} void reset() - { - cell_rhs = 0.0; - } + { + cell_rhs = 0.0; + } }; @@ -1562,46 +1562,46 @@ namespace Step44 std::vector > > symm_grad_Nx; ScratchData_RHS(const FiniteElement & fe_cell, - const QGauss & qf_cell, const UpdateFlags uf_cell, - const QGauss & qf_face, const UpdateFlags uf_face) - : - fe_values_ref(fe_cell, qf_cell, uf_cell), - fe_face_values_ref(fe_cell, qf_face, uf_face), - 
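The PerTaskData_K/ScratchData_K pair above is exactly the kind of object pairing WorkStream expects: one reusable scratch object per thread, and one small per-cell result object that the copier serializes into the global matrix. A sketch of how the tangent assembly declared earlier is typically launched with these types; the update flags shown are illustrative, since the actual flags are chosen where the assembly routine is defined:

const UpdateFlags uf_cell = update_values | update_gradients | update_JxW_values;

PerTaskData_K per_task_data(dofs_per_cell);
ScratchData_K scratch_data(fe, qf_cell, uf_cell);

WorkStream::run(dof_handler_ref.begin_active(),
                dof_handler_ref.end(),
                *this,
                &Solid<dim>::assemble_system_tangent_one_cell,
                &Solid<dim>::copy_local_to_global_K,
                scratch_data,
                per_task_data);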
Nx(qf_cell.size(), - std::vector(fe_cell.dofs_per_cell)), - symm_grad_Nx(qf_cell.size(), - std::vector > - (fe_cell.dofs_per_cell)) - {} + const QGauss & qf_cell, const UpdateFlags uf_cell, + const QGauss & qf_face, const UpdateFlags uf_face) + : + fe_values_ref(fe_cell, qf_cell, uf_cell), + fe_face_values_ref(fe_cell, qf_face, uf_face), + Nx(qf_cell.size(), + std::vector(fe_cell.dofs_per_cell)), + symm_grad_Nx(qf_cell.size(), + std::vector > + (fe_cell.dofs_per_cell)) + {} ScratchData_RHS(const ScratchData_RHS & rhs) - : - fe_values_ref(rhs.fe_values_ref.get_fe(), - rhs.fe_values_ref.get_quadrature(), - rhs.fe_values_ref.get_update_flags()), - fe_face_values_ref(rhs.fe_face_values_ref.get_fe(), - rhs.fe_face_values_ref.get_quadrature(), - rhs.fe_face_values_ref.get_update_flags()), - Nx(rhs.Nx), - symm_grad_Nx(rhs.symm_grad_Nx) - {} + : + fe_values_ref(rhs.fe_values_ref.get_fe(), + rhs.fe_values_ref.get_quadrature(), + rhs.fe_values_ref.get_update_flags()), + fe_face_values_ref(rhs.fe_face_values_ref.get_fe(), + rhs.fe_face_values_ref.get_quadrature(), + rhs.fe_face_values_ref.get_update_flags()), + Nx(rhs.Nx), + symm_grad_Nx(rhs.symm_grad_Nx) + {} void reset() - { - const unsigned int n_q_points = Nx.size(); - const unsigned int n_dofs_per_cell = Nx[0].size(); - for (unsigned int q_point = 0; q_point < n_q_points; ++q_point) - { - Assert( Nx[q_point].size() == n_dofs_per_cell, ExcInternalError()); - Assert( symm_grad_Nx[q_point].size() == n_dofs_per_cell, - ExcInternalError()); - for (unsigned int k = 0; k < n_dofs_per_cell; ++k) - { - Nx[q_point][k] = 0.0; - symm_grad_Nx[q_point][k] = 0.0; - } - } - } + { + const unsigned int n_q_points = Nx.size(); + const unsigned int n_dofs_per_cell = Nx[0].size(); + for (unsigned int q_point = 0; q_point < n_q_points; ++q_point) + { + Assert( Nx[q_point].size() == n_dofs_per_cell, ExcInternalError()); + Assert( symm_grad_Nx[q_point].size() == n_dofs_per_cell, + ExcInternalError()); + for (unsigned int k = 0; k < n_dofs_per_cell; ++k) + { + Nx[q_point][k] = 0.0; + symm_grad_Nx[q_point][k] = 0.0; + } + } + } }; @@ -1632,25 +1632,25 @@ namespace Step44 FullMatrix C; PerTaskData_SC(const unsigned int dofs_per_cell, - const unsigned int n_u, - const unsigned int n_p, - const unsigned int n_J) - : - cell_matrix(dofs_per_cell, dofs_per_cell), - local_dof_indices(dofs_per_cell), - k_orig(dofs_per_cell, dofs_per_cell), - k_pu(n_p, n_u), - k_pJ(n_p, n_J), - k_JJ(n_J, n_J), - k_pJ_inv(n_p, n_J), - k_bbar(n_u, n_u), - A(n_J,n_u), - B(n_J, n_u), - C(n_p, n_u) - {} + const unsigned int n_u, + const unsigned int n_p, + const unsigned int n_J) + : + cell_matrix(dofs_per_cell, dofs_per_cell), + local_dof_indices(dofs_per_cell), + k_orig(dofs_per_cell, dofs_per_cell), + k_pu(n_p, n_u), + k_pJ(n_p, n_J), + k_JJ(n_J, n_J), + k_pJ_inv(n_p, n_J), + k_bbar(n_u, n_u), + A(n_J,n_u), + B(n_J, n_u), + C(n_p, n_u) + {} void reset() - {} + {} }; @@ -1662,7 +1662,7 @@ namespace Step44 struct Solid::ScratchData_SC { void reset() - {} + {} }; @@ -1685,7 +1685,7 @@ namespace Step44 struct Solid::PerTaskData_UQPH { void reset() - {} + {} }; @@ -1705,38 +1705,38 @@ namespace Step44 FEValues fe_values_ref; ScratchData_UQPH(const FiniteElement & fe_cell, - const QGauss & qf_cell, - const UpdateFlags uf_cell, - const BlockVector & solution_total) - : - solution_total(solution_total), - solution_grads_u_total(qf_cell.size()), - solution_values_p_total(qf_cell.size()), - solution_values_J_total(qf_cell.size()), - fe_values_ref(fe_cell, qf_cell, uf_cell) - {} + const QGauss & qf_cell, + 
const UpdateFlags uf_cell, + const BlockVector & solution_total) + : + solution_total(solution_total), + solution_grads_u_total(qf_cell.size()), + solution_values_p_total(qf_cell.size()), + solution_values_J_total(qf_cell.size()), + fe_values_ref(fe_cell, qf_cell, uf_cell) + {} ScratchData_UQPH(const ScratchData_UQPH & rhs) - : - solution_total(rhs.solution_total), - solution_grads_u_total(rhs.solution_grads_u_total), - solution_values_p_total(rhs.solution_values_p_total), - solution_values_J_total(rhs.solution_values_J_total), - fe_values_ref(rhs.fe_values_ref.get_fe(), - rhs.fe_values_ref.get_quadrature(), - rhs.fe_values_ref.get_update_flags()) - {} + : + solution_total(rhs.solution_total), + solution_grads_u_total(rhs.solution_grads_u_total), + solution_values_p_total(rhs.solution_values_p_total), + solution_values_J_total(rhs.solution_values_J_total), + fe_values_ref(rhs.fe_values_ref.get_fe(), + rhs.fe_values_ref.get_quadrature(), + rhs.fe_values_ref.get_update_flags()) + {} void reset() - { - const unsigned int n_q_points = solution_grads_u_total.size(); - for (unsigned int q = 0; q < n_q_points; ++q) - { - solution_grads_u_total[q] = 0.0; - solution_values_p_total[q] = 0.0; - solution_values_J_total[q] = 0.0; - } - } + { + const unsigned int n_q_points = solution_grads_u_total.size(); + for (unsigned int q = 0; q < n_q_points; ++q) + { + solution_grads_u_total[q] = 0.0; + solution_values_p_total[q] = 0.0; + solution_values_J_total[q] = 0.0; + } + } }; @@ -1753,9 +1753,9 @@ namespace Step44 void Solid::make_grid() { GridGenerator::hyper_rectangle(triangulation, - Point(0.0, 0.0, 0.0), - Point(1.0, 1.0, 1.0), - true); + Point(0.0, 0.0, 0.0), + Point(1.0, 1.0, 1.0), + true); GridTools::scale(parameters.scale, triangulation); triangulation.refine_global(std::max (1U, parameters.global_refinement)); @@ -1763,28 +1763,28 @@ namespace Step44 vol_current = vol_reference; std::cout << "Grid:\n\t Reference volume: " << vol_reference << std::endl; - // Since we wish to apply a Neumann BC to - // a patch on the top surface, we must - // find the cell faces in this part of - // the domain and mark them with a - // distinct boundary ID number. The - // faces we are looking for are on the +y - // surface and will get boundary ID 6 - // (zero through five are already used - // when creating the six faces of the - // cube domain): + // Since we wish to apply a Neumann BC to + // a patch on the top surface, we must + // find the cell faces in this part of + // the domain and mark them with a + // distinct boundary ID number. 
The + // faces we are looking for are on the +y + // surface and will get boundary ID 6 + // (zero through five are already used + // when creating the six faces of the + // cube domain): typename Triangulation::active_cell_iterator cell = triangulation.begin_active(), endc = triangulation.end(); for (; cell != endc; ++cell) for (unsigned int face = 0; - face < GeometryInfo::faces_per_cell; ++face) - if (cell->face(face)->at_boundary() == true - && - cell->face(face)->center()[2] == 1.0 * parameters.scale) - if (cell->face(face)->center()[0] < 0.5 * parameters.scale - && - cell->face(face)->center()[1] < 0.5 * parameters.scale) - cell->face(face)->set_boundary_indicator(6); + face < GeometryInfo::faces_per_cell; ++face) + if (cell->face(face)->at_boundary() == true + && + cell->face(face)->center()[2] == 1.0 * parameters.scale) + if (cell->face(face)->center()[0] < 0.5 * parameters.scale + && + cell->face(face)->center()[1] < 0.5 * parameters.scale) + cell->face(face)->set_boundary_indicator(6); } @@ -1803,22 +1803,22 @@ namespace Step44 block_component[p_component] = p_dof; // Pressure block_component[J_component] = J_dof; // Dilatation - // The DOF handler is then initialised and we - // renumber the grid in an efficient - // manner. We also record the number of - // DOF's per block. + // The DOF handler is then initialised and we + // renumber the grid in an efficient + // manner. We also record the number of + // DOF's per block. dof_handler_ref.distribute_dofs(fe); DoFRenumbering::Cuthill_McKee(dof_handler_ref); DoFRenumbering::component_wise(dof_handler_ref, block_component); DoFTools::count_dofs_per_block(dof_handler_ref, dofs_per_block, - block_component); + block_component); std::cout << "Triangulation:" - << "\n\t Number of active cells: " << triangulation.n_active_cells() - << "\n\t Number of degrees of freedom: " << dof_handler_ref.n_dofs() - << std::endl; + << "\n\t Number of active cells: " << triangulation.n_active_cells() + << "\n\t Number of degrees of freedom: " << dof_handler_ref.n_dofs() + << std::endl; - // Setup the sparsity pattern and tangent matrix + // Setup the sparsity pattern and tangent matrix tangent_matrix.clear(); { const unsigned int n_dofs_u = dofs_per_block[u_dof]; @@ -1840,56 +1840,56 @@ namespace Step44 csp.block(J_dof, J_dof).reinit(n_dofs_J, n_dofs_J); csp.collect_sizes(); - // The global system matrix initially has the following structure - // @f{align*} - // \underbrace{\begin{bmatrix} - // \mathbf{\mathsf{K}}_{uu} & \mathbf{\mathsf{K}}_{u\widetilde{p}} & \mathbf{0} \\ - // \mathbf{\mathsf{K}}_{\widetilde{p}u} & \mathbf{0} & \mathbf{\mathsf{K}}_{\widetilde{p}\widetilde{J}} \\ - // \mathbf{0} & \mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{p}} & \mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{J}} - // \end{bmatrix}}_{\mathbf{\mathsf{K}}(\mathbf{\Xi}_{\textrm{i}})} - // \underbrace{\begin{bmatrix} - // d \mathbf{\mathsf{u}} \\ - // d \widetilde{\mathbf{\mathsf{p}}} \\ - // d \widetilde{\mathbf{\mathsf{J}}} - // \end{bmatrix}}_{d \mathbf{\Xi}} - // = - // \underbrace{\begin{bmatrix} - // \mathbf{\mathsf{F}}_{u}(\mathbf{u}_{\textrm{i}}) \\ - // \mathbf{\mathsf{F}}_{\widetilde{p}}(\widetilde{p}_{\textrm{i}}) \\ - // \mathbf{\mathsf{F}}_{\widetilde{J}}(\widetilde{J}_{\textrm{i}}) - //\end{bmatrix}}_{ \mathbf{\mathsf{F}}(\mathbf{\Xi}_{\textrm{i}}) } \, . - // @f} - // We optimise the sparsity pattern to reflect this structure - // and prevent unnecessary data creation for the right-diagonal - // block components. 
+ // The global system matrix initially has the following structure + // @f{align*} + // \underbrace{\begin{bmatrix} + // \mathbf{\mathsf{K}}_{uu} & \mathbf{\mathsf{K}}_{u\widetilde{p}} & \mathbf{0} \\ + // \mathbf{\mathsf{K}}_{\widetilde{p}u} & \mathbf{0} & \mathbf{\mathsf{K}}_{\widetilde{p}\widetilde{J}} \\ + // \mathbf{0} & \mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{p}} & \mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{J}} + // \end{bmatrix}}_{\mathbf{\mathsf{K}}(\mathbf{\Xi}_{\textrm{i}})} + // \underbrace{\begin{bmatrix} + // d \mathbf{\mathsf{u}} \\ + // d \widetilde{\mathbf{\mathsf{p}}} \\ + // d \widetilde{\mathbf{\mathsf{J}}} + // \end{bmatrix}}_{d \mathbf{\Xi}} + // = + // \underbrace{\begin{bmatrix} + // \mathbf{\mathsf{F}}_{u}(\mathbf{u}_{\textrm{i}}) \\ + // \mathbf{\mathsf{F}}_{\widetilde{p}}(\widetilde{p}_{\textrm{i}}) \\ + // \mathbf{\mathsf{F}}_{\widetilde{J}}(\widetilde{J}_{\textrm{i}}) + //\end{bmatrix}}_{ \mathbf{\mathsf{F}}(\mathbf{\Xi}_{\textrm{i}}) } \, . + // @f} + // We optimise the sparsity pattern to reflect this structure + // and prevent unnecessary data creation for the right-diagonal + // block components. Table<2, DoFTools::Coupling> coupling(n_components, n_components); for (unsigned int ii = 0; ii < n_components; ++ii) - for (unsigned int jj = 0; jj < n_components; ++jj) - if (((ii < p_component) && (jj == J_component)) - || ((ii == J_component) && (jj < p_component)) - || ((ii == p_component) && (jj == p_component))) - coupling[ii][jj] = DoFTools::none; - else - coupling[ii][jj] = DoFTools::always; + for (unsigned int jj = 0; jj < n_components; ++jj) + if (((ii < p_component) && (jj == J_component)) + || ((ii == J_component) && (jj < p_component)) + || ((ii == p_component) && (jj == p_component))) + coupling[ii][jj] = DoFTools::none; + else + coupling[ii][jj] = DoFTools::always; DoFTools::make_sparsity_pattern(dof_handler_ref, - coupling, - csp, - constraints, - false); + coupling, + csp, + constraints, + false); sparsity_pattern.copy_from(csp); } tangent_matrix.reinit(sparsity_pattern); - // We then set up storage vectors + // We then set up storage vectors system_rhs.reinit(dofs_per_block); system_rhs.collect_sizes(); solution_n.reinit(dofs_per_block); solution_n.collect_sizes(); - // ...and finally set up the quadrature - // point history: + // ...and finally set up the quadrature + // point history: setup_qph(); timer.leave_subsection(); @@ -1915,17 +1915,17 @@ namespace Step44 for (unsigned int k = 0; k < fe.dofs_per_cell; ++k) { - const unsigned int k_group = fe.system_to_base_index(k).first.first; - if (k_group == u_dof) - element_indices_u.push_back(k); - else if (k_group == p_dof) - element_indices_p.push_back(k); - else if (k_group == J_dof) - element_indices_J.push_back(k); - else - { - Assert(k_group <= J_dof, ExcInternalError()); - } + const unsigned int k_group = fe.system_to_base_index(k).first.first; + if (k_group == u_dof) + element_indices_u.push_back(k); + else if (k_group == p_dof) + element_indices_p.push_back(k); + else if (k_group == J_dof) + element_indices_J.push_back(k); + else + { + Assert(k_group <= J_dof, ExcInternalError()); + } } } @@ -1943,39 +1943,39 @@ namespace Step44 { triangulation.clear_user_data(); { - std::vector > tmp; - tmp.swap(quadrature_point_history); + std::vector > tmp; + tmp.swap(quadrature_point_history); } quadrature_point_history - .resize(triangulation.n_active_cells() * n_q_points); + .resize(triangulation.n_active_cells() * n_q_points); unsigned int history_index = 0; for (typename 
Triangulation::active_cell_iterator cell = - triangulation.begin_active(); cell != triangulation.end(); - ++cell) - { - cell->set_user_pointer(&quadrature_point_history[history_index]); - history_index += n_q_points; - } + triangulation.begin_active(); cell != triangulation.end(); + ++cell) + { + cell->set_user_pointer(&quadrature_point_history[history_index]); + history_index += n_q_points; + } Assert(history_index == quadrature_point_history.size(), - ExcInternalError()); + ExcInternalError()); } - // Next we setup the initial quadrature - // point data: + // Next we setup the initial quadrature + // point data: for (typename Triangulation::active_cell_iterator cell = - triangulation.begin_active(); cell != triangulation.end(); ++cell) + triangulation.begin_active(); cell != triangulation.end(); ++cell) { - PointHistory* lqph = - reinterpret_cast*>(cell->user_pointer()); + PointHistory* lqph = + reinterpret_cast*>(cell->user_pointer()); - Assert(lqph >= &quadrature_point_history.front(), ExcInternalError()); - Assert(lqph <= &quadrature_point_history.back(), ExcInternalError()); + Assert(lqph >= &quadrature_point_history.front(), ExcInternalError()); + Assert(lqph <= &quadrature_point_history.back(), ExcInternalError()); - for (unsigned int q_point = 0; q_point < n_q_points; ++q_point) - lqph[q_point].setup_lqp(parameters); + for (unsigned int q_point = 0; q_point < n_q_points; ++q_point) + lqph[q_point].setup_lqp(parameters); } } @@ -1999,16 +1999,16 @@ namespace Step44 PerTaskData_UQPH per_task_data_UQPH; ScratchData_UQPH scratch_data_UQPH(fe, qf_cell, uf_UQPH, solution_total); - // We then pass them and the one-cell update - // function to the WorkStream to be - // processed: + // We then pass them and the one-cell update + // function to the WorkStream to be + // processed: WorkStream::run(dof_handler_ref.begin_active(), - dof_handler_ref.end(), - *this, - &Solid::update_qph_incremental_one_cell, - &Solid::copy_local_to_global_UQPH, - scratch_data_UQPH, - per_task_data_UQPH); + dof_handler_ref.end(), + *this, + &Solid::update_qph_incremental_one_cell, + &Solid::copy_local_to_global_UQPH, + scratch_data_UQPH, + per_task_data_UQPH); timer.leave_subsection(); } @@ -2019,8 +2019,8 @@ namespace Step44 template void Solid::update_qph_incremental_one_cell(const typename DoFHandler::active_cell_iterator & cell, - ScratchData_UQPH & scratch, - PerTaskData_UQPH & data) + ScratchData_UQPH & scratch, + PerTaskData_UQPH & data) { PointHistory* lqph = reinterpret_cast*>(cell->user_pointer()); @@ -2029,32 +2029,32 @@ namespace Step44 Assert(lqph <= &quadrature_point_history.back(), ExcInternalError()); Assert(scratch.solution_grads_u_total.size() == n_q_points, - ExcInternalError()); + ExcInternalError()); Assert(scratch.solution_values_p_total.size() == n_q_points, - ExcInternalError()); + ExcInternalError()); Assert(scratch.solution_values_J_total.size() == n_q_points, - ExcInternalError()); + ExcInternalError()); scratch.reset(); - // We first need to find the values and - // gradients at quadrature points inside - // the current cell and then we update - // each local QP using the displacement - // gradient and total pressure and - // dilatation solution values: + // We first need to find the values and + // gradients at quadrature points inside + // the current cell and then we update + // each local QP using the displacement + // gradient and total pressure and + // dilatation solution values: scratch.fe_values_ref.reinit(cell); 
scratch.fe_values_ref[u_fe].get_function_gradients(scratch.solution_total, - scratch.solution_grads_u_total); + scratch.solution_grads_u_total); scratch.fe_values_ref[p_fe].get_function_values(scratch.solution_total, - scratch.solution_values_p_total); + scratch.solution_values_p_total); scratch.fe_values_ref[J_fe].get_function_values(scratch.solution_total, - scratch.solution_values_J_total); + scratch.solution_values_J_total); for (unsigned int q_point = 0; q_point < n_q_points; ++q_point) lqph[q_point].update_values(scratch.solution_grads_u_total[q_point], - scratch.solution_values_p_total[q_point], - scratch.solution_values_J_total[q_point]); + scratch.solution_values_p_total[q_point], + scratch.solution_values_J_total[q_point]); } @@ -2068,7 +2068,7 @@ namespace Step44 Solid::solve_nonlinear_timestep(BlockVector & solution_delta) { std::cout << std::endl << "Timestep " << time.get_timestep() << " @ " - << time.current() << "s" << std::endl; + << time.current() << "s" << std::endl; BlockVector newton_update(dofs_per_block); @@ -2081,115 +2081,115 @@ namespace Step44 print_conv_header(); - // We now perform a number of Newton - // iterations to iteratively solve the - // nonlinear problem. Since the problem - // is fully nonlinear and we are using a - // full Newton method, the data stored in - // the tangent matrix and right-hand side - // vector is not reusable and must be - // cleared at each Newton step. We then - // initially build the right-hand side - // vector to check for convergence (and - // store this value in the first - // iteration). The unconstrained DOFs - // of the rhs vector hold the - // out-of-balance forces. The building is - // done before assembling the system - // matrix as the latter is an expensive - // operation and we can potentially avoid - // an extra assembly process by not - // assembling the tangent matrix when - // convergence is attained. + // We now perform a number of Newton + // iterations to iteratively solve the + // nonlinear problem. Since the problem + // is fully nonlinear and we are using a + // full Newton method, the data stored in + // the tangent matrix and right-hand side + // vector is not reusable and must be + // cleared at each Newton step. We then + // initially build the right-hand side + // vector to check for convergence (and + // store this value in the first + // iteration). The unconstrained DOFs + // of the rhs vector hold the + // out-of-balance forces. The building is + // done before assembling the system + // matrix as the latter is an expensive + // operation and we can potentially avoid + // an extra assembly process by not + // assembling the tangent matrix when + // convergence is attained. unsigned int newton_iteration = 0; for (; newton_iteration < parameters.max_iterations_NR; - ++newton_iteration) + ++newton_iteration) { - std::cout << " " << std::setw(2) << newton_iteration << " " << std::flush; - - tangent_matrix = 0.0; - system_rhs = 0.0; - - assemble_system_rhs(); - get_error_residual(error_residual); - - if (newton_iteration == 0) - error_residual_0 = error_residual; - - // We can now determine the - // normalised residual error and - // check for solution convergence: - error_residual_norm = error_residual; - error_residual_norm.normalise(error_residual_0); - - if (newton_iteration > 0 && error_update_norm.u <= parameters.tol_u - && error_residual_norm.u <= parameters.tol_f) - { - std::cout << " CONVERGED! 
" << std::endl; - print_conv_footer(); - - break; - } - - // If we have decided that we want to - // continue with the iteration, we - // assemble the tangent, make and - // impose the Dirichlet constraints, - // and do the solve of the linearised - // system: - assemble_system_tangent(); - make_constraints(newton_iteration); - constraints.condense(tangent_matrix, system_rhs); - - const std::pair - lin_solver_output = solve_linear_system(newton_update); - - get_error_update(newton_update, error_update); - if (newton_iteration == 0) - error_update_0 = error_update; - - // We can now determine the - // normalised Newton update error, - // and perform the actual update of - // the solution increment for the - // current time step, update all - // quadrature point information - // pertaining to this new - // displacement and stress state and - // continue iterating: - error_update_norm = error_update; - error_update_norm.normalise(error_update_0); - - solution_delta += newton_update; - update_qph_incremental(solution_delta); - - std::cout << " | " << std::fixed << std::setprecision(3) << std::setw(7) - << std::scientific << lin_solver_output.first << " " - << lin_solver_output.second << " " << error_residual_norm.norm - << " " << error_residual_norm.u << " " - << error_residual_norm.p << " " << error_residual_norm.J - << " " << error_update_norm.norm << " " << error_update_norm.u - << " " << error_update_norm.p << " " << error_update_norm.J - << " " << std::endl; + std::cout << " " << std::setw(2) << newton_iteration << " " << std::flush; + + tangent_matrix = 0.0; + system_rhs = 0.0; + + assemble_system_rhs(); + get_error_residual(error_residual); + + if (newton_iteration == 0) + error_residual_0 = error_residual; + + // We can now determine the + // normalised residual error and + // check for solution convergence: + error_residual_norm = error_residual; + error_residual_norm.normalise(error_residual_0); + + if (newton_iteration > 0 && error_update_norm.u <= parameters.tol_u + && error_residual_norm.u <= parameters.tol_f) + { + std::cout << " CONVERGED! 
" << std::endl; + print_conv_footer(); + + break; + } + + // If we have decided that we want to + // continue with the iteration, we + // assemble the tangent, make and + // impose the Dirichlet constraints, + // and do the solve of the linearised + // system: + assemble_system_tangent(); + make_constraints(newton_iteration); + constraints.condense(tangent_matrix, system_rhs); + + const std::pair + lin_solver_output = solve_linear_system(newton_update); + + get_error_update(newton_update, error_update); + if (newton_iteration == 0) + error_update_0 = error_update; + + // We can now determine the + // normalised Newton update error, + // and perform the actual update of + // the solution increment for the + // current time step, update all + // quadrature point information + // pertaining to this new + // displacement and stress state and + // continue iterating: + error_update_norm = error_update; + error_update_norm.normalise(error_update_0); + + solution_delta += newton_update; + update_qph_incremental(solution_delta); + + std::cout << " | " << std::fixed << std::setprecision(3) << std::setw(7) + << std::scientific << lin_solver_output.first << " " + << lin_solver_output.second << " " << error_residual_norm.norm + << " " << error_residual_norm.u << " " + << error_residual_norm.p << " " << error_residual_norm.J + << " " << error_update_norm.norm << " " << error_update_norm.u + << " " << error_update_norm.p << " " << error_update_norm.J + << " " << std::endl; } - // At the end, if it turns out that we - // have in fact done more iterations than - // the parameter file allowed, we raise - // an exception that can be caught in the - // main() function. The call - // AssertThrow(condition, - // exc_object) is in essence - // equivalent to if (!cond) throw - // exc_object; but the former form - // fills certain fields in the exception - // object that identify the location - // (filename and line number) where the - // exception was raised to make it - // simpler to identify where the problem - // happened. + // At the end, if it turns out that we + // have in fact done more iterations than + // the parameter file allowed, we raise + // an exception that can be caught in the + // main() function. The call + // AssertThrow(condition, + // exc_object) is in essence + // equivalent to if (!cond) throw + // exc_object; but the former form + // fills certain fields in the exception + // object that identify the location + // (filename and line number) where the + // exception was raised to make it + // simpler to identify where the problem + // happened. 
AssertThrow (newton_iteration <= parameters.max_iterations_NR, - ExcMessage("No convergence in nonlinear solver!")); + ExcMessage("No convergence in nonlinear solver!")); } @@ -2208,9 +2208,9 @@ namespace Step44 std::cout << std::endl; std::cout << " SOLVER STEP " - << " | LIN_IT LIN_RES RES_NORM " - << " RES_U RES_P RES_J NU_NORM " - << " NU_U NU_P NU_J " << std::endl; + << " | LIN_IT LIN_RES RES_NORM " + << " RES_U RES_P RES_J NU_NORM " + << " NU_U NU_P NU_J " << std::endl; for (unsigned int i = 0; i < l_width; ++i) std::cout << "_"; @@ -2231,11 +2231,11 @@ namespace Step44 const std::pair error_dil = get_error_dilation(); std::cout << "Relative errors:" << std::endl - << "Displacement:\t" << error_update.u / error_update_0.u << std::endl - << "Force: \t\t" << error_residual.u / error_residual_0.u << std::endl - << "Dilatation:\t" << error_dil.first << std::endl - << "v / V_0:\t" << vol_current << " / " << vol_reference - << " = " << error_dil.second << std::endl; + << "Displacement:\t" << error_update.u / error_update_0.u << std::endl + << "Force: \t\t" << error_residual.u / error_residual_0.u << std::endl + << "Dilatation:\t" << error_dil.first << std::endl + << "v / V_0:\t" << vol_current << " / " << vol_reference + << " = " << error_dil.second << std::endl; } @@ -2258,29 +2258,29 @@ namespace Step44 FEValues fe_values_ref(fe, qf_cell, update_JxW_values); for (typename Triangulation::active_cell_iterator - cell = triangulation.begin_active(); - cell != triangulation.end(); ++cell) + cell = triangulation.begin_active(); + cell != triangulation.end(); ++cell) { - fe_values_ref.reinit(cell); - - PointHistory* lqph = - reinterpret_cast*>(cell->user_pointer()); - - Assert(lqph >= &quadrature_point_history.front(), ExcInternalError()); - Assert(lqph <= &quadrature_point_history.back(), ExcInternalError()); - - for (unsigned int q_point = 0; q_point < n_q_points; ++q_point) - { - const double det_F_qp = lqph[q_point].get_det_F(); - const double J_tilde_qp = lqph[q_point].get_J_tilde(); - const double the_error_qp_squared = std::pow((det_F_qp - J_tilde_qp), - 2); - const double JxW = fe_values_ref.JxW(q_point); - - dil_L2_error += the_error_qp_squared * JxW; - vol_current += det_F_qp * JxW; - } - Assert(vol_current > 0, ExcInternalError()); + fe_values_ref.reinit(cell); + + PointHistory* lqph = + reinterpret_cast*>(cell->user_pointer()); + + Assert(lqph >= &quadrature_point_history.front(), ExcInternalError()); + Assert(lqph <= &quadrature_point_history.back(), ExcInternalError()); + + for (unsigned int q_point = 0; q_point < n_q_points; ++q_point) + { + const double det_F_qp = lqph[q_point].get_det_F(); + const double J_tilde_qp = lqph[q_point].get_J_tilde(); + const double the_error_qp_squared = std::pow((det_F_qp - J_tilde_qp), + 2); + const double JxW = fe_values_ref.JxW(q_point); + + dil_L2_error += the_error_qp_squared * JxW; + vol_current += det_F_qp * JxW; + } + Assert(vol_current > 0, ExcInternalError()); } std::pair error_dil; @@ -2304,7 +2304,7 @@ namespace Step44 for (unsigned int i = 0; i < dof_handler_ref.n_dofs(); ++i) if (!constraints.is_constrained(i)) - error_res(i) = system_rhs(i); + error_res(i) = system_rhs(i); error_residual.norm = error_res.l2_norm(); error_residual.u = error_res.block(u_dof).l2_norm(); @@ -2318,12 +2318,12 @@ namespace Step44 // Determine the true Newton update error for the problem template void Solid::get_error_update(const BlockVector & newton_update, - Errors & error_update) + Errors & error_update) { BlockVector error_ud(dofs_per_block); for 
(unsigned int i = 0; i < dof_handler_ref.n_dofs(); ++i) if (!constraints.is_constrained(i)) - error_ud(i) = newton_update(i); + error_ud(i) = newton_update(i); error_update.norm = error_ud.l2_norm(); error_update.u = error_ud.block(u_dof).l2_norm(); @@ -2364,19 +2364,19 @@ namespace Step44 tangent_matrix = 0.0; const UpdateFlags uf_cell(update_values | - update_gradients | - update_JxW_values); + update_gradients | + update_JxW_values); PerTaskData_K per_task_data(dofs_per_cell); ScratchData_K scratch_data(fe, qf_cell, uf_cell); WorkStream::run(dof_handler_ref.begin_active(), - dof_handler_ref.end(), - *this, - &Solid::assemble_system_tangent_one_cell, - &Solid::copy_local_to_global_K, - scratch_data, - per_task_data); + dof_handler_ref.end(), + *this, + &Solid::assemble_system_tangent_one_cell, + &Solid::copy_local_to_global_K, + scratch_data, + per_task_data); timer.leave_subsection(); } @@ -2390,9 +2390,9 @@ namespace Step44 { for (unsigned int i = 0; i < dofs_per_cell; ++i) for (unsigned int j = 0; j < dofs_per_cell; ++j) - tangent_matrix.add(data.local_dof_indices[i], - data.local_dof_indices[j], - data.cell_matrix(i, j)); + tangent_matrix.add(data.local_dof_indices[i], + data.local_dof_indices[j], + data.cell_matrix(i, j)); } // Of course, we still have to define how we assemble the tangent matrix @@ -2406,8 +2406,8 @@ namespace Step44 template void Solid::assemble_system_tangent_one_cell(const typename DoFHandler::active_cell_iterator & cell, - ScratchData_K & scratch, - PerTaskData_K & data) + ScratchData_K & scratch, + PerTaskData_K & data) { data.reset(); scratch.reset(); @@ -2419,116 +2419,116 @@ namespace Step44 static const SymmetricTensor<2, dim> I = AdditionalTools::StandardTensors::I; for (unsigned int q_point = 0; q_point < n_q_points; ++q_point) { - const Tensor<2, dim> F_inv = lqph[q_point].get_F_inv(); - for (unsigned int k = 0; k < dofs_per_cell; ++k) - { - const unsigned int k_group = fe.system_to_base_index(k).first.first; - - if (k_group == u_dof) - { - scratch.grad_Nx[q_point][k] = scratch.fe_values_ref[u_fe].gradient(k, q_point) - * F_inv; - scratch.symm_grad_Nx[q_point][k] = symmetrize(scratch.grad_Nx[q_point][k]); - } - else if (k_group == p_dof) - scratch.Nx[q_point][k] = scratch.fe_values_ref[p_fe].value(k, - q_point); - else if (k_group == J_dof) - scratch.Nx[q_point][k] = scratch.fe_values_ref[J_fe].value(k, - q_point); - else - Assert(k_group <= J_dof, ExcInternalError()); - } + const Tensor<2, dim> F_inv = lqph[q_point].get_F_inv(); + for (unsigned int k = 0; k < dofs_per_cell; ++k) + { + const unsigned int k_group = fe.system_to_base_index(k).first.first; + + if (k_group == u_dof) + { + scratch.grad_Nx[q_point][k] = scratch.fe_values_ref[u_fe].gradient(k, q_point) + * F_inv; + scratch.symm_grad_Nx[q_point][k] = symmetrize(scratch.grad_Nx[q_point][k]); + } + else if (k_group == p_dof) + scratch.Nx[q_point][k] = scratch.fe_values_ref[p_fe].value(k, + q_point); + else if (k_group == J_dof) + scratch.Nx[q_point][k] = scratch.fe_values_ref[J_fe].value(k, + q_point); + else + Assert(k_group <= J_dof, ExcInternalError()); + } } - // Now we build the local cell stiffness - // matrix. Since the global and local - // system matrices are symmetric, we can - // exploit this property by building only - // the lower half of the local matrix and - // copying the values to the upper half. 
- // So we only assemble half of the - // $\mathsf{\mathbf{k}}_{uu}$, - // $\mathsf{\mathbf{k}}_{\widetilde{p} \widetilde{p}} = \mathbf{0}$, - // $\mathsf{\mathbf{k}}_{\widetilde{J} \widetilde{J}}$ - // blocks, while the whole $\mathsf{\mathbf{k}}_{\widetilde{p} \widetilde{J}}$, - // $\mathsf{\mathbf{k}}_{\mathbf{u} \widetilde{J}} = \mathbf{0}$, - // $\mathsf{\mathbf{k}}_{\mathbf{u} \widetilde{p}}$ - // blocks are built. - // - // In doing so, we first extract some - // configuration dependent variables from - // our QPH history objects for the - // current quadrature point. + // Now we build the local cell stiffness + // matrix. Since the global and local + // system matrices are symmetric, we can + // exploit this property by building only + // the lower half of the local matrix and + // copying the values to the upper half. + // So we only assemble half of the + // $\mathsf{\mathbf{k}}_{uu}$, + // $\mathsf{\mathbf{k}}_{\widetilde{p} \widetilde{p}} = \mathbf{0}$, + // $\mathsf{\mathbf{k}}_{\widetilde{J} \widetilde{J}}$ + // blocks, while the whole $\mathsf{\mathbf{k}}_{\widetilde{p} \widetilde{J}}$, + // $\mathsf{\mathbf{k}}_{\mathbf{u} \widetilde{J}} = \mathbf{0}$, + // $\mathsf{\mathbf{k}}_{\mathbf{u} \widetilde{p}}$ + // blocks are built. + // + // In doing so, we first extract some + // configuration dependent variables from + // our QPH history objects for the + // current quadrature point. for (unsigned int q_point = 0; q_point < n_q_points; ++q_point) { - const Tensor<2, dim> tau = lqph[q_point].get_tau(); - const SymmetricTensor<4, dim> Jc = lqph[q_point].get_Jc(); - const double d2Psi_vol_dJ2 = lqph[q_point].get_d2Psi_vol_dJ2(); - const double det_F = lqph[q_point].get_det_F(); - - // Next we define some aliases to make - // the assembly process easier to follow - const std::vector - & N = scratch.Nx[q_point]; - const std::vector > - & symm_grad_Nx = scratch.symm_grad_Nx[q_point]; - const std::vector > - & grad_Nx = scratch.grad_Nx[q_point]; - const double JxW = scratch.fe_values_ref.JxW(q_point); - - for (unsigned int i = 0; i < dofs_per_cell; ++i) - { - const unsigned int component_i = fe.system_to_component_index(i).first; - const unsigned int i_group = fe.system_to_base_index(i).first.first; - - for (unsigned int j = 0; j <= i; ++j) - { - const unsigned int component_j = fe.system_to_component_index(j).first; - const unsigned int j_group = fe.system_to_base_index(j).first.first; - - // This is the $\mathsf{\mathbf{k}}_{\mathbf{u} \mathbf{u}}$ - // contribution. 
It comprises a - // material contribution, and a - // geometrical stress contribution - // which is only added along the - // local matrix diagonals: - if ((i_group == j_group) && (i_group == u_dof)) - { - data.cell_matrix(i, j) += symm_grad_Nx[i] * Jc // The material contribution: - * symm_grad_Nx[j] * JxW; - if (component_i == component_j) // geometrical stress contribution - data.cell_matrix(i, j) += grad_Nx[i][component_i] * tau - * grad_Nx[j][component_j] * JxW; - } - // Next is the $\mathsf{\mathbf{k}}_{ \widetilde{p} \mathbf{u}}$ contribution - else if ((i_group == p_dof) && (j_group == u_dof)) - { - data.cell_matrix(i, j) += N[i] * det_F - * (symm_grad_Nx[j] - * AdditionalTools::StandardTensors::I) - * JxW; - } - // and lastly the $\mathsf{\mathbf{k}}_{ \widetilde{J} \widetilde{p}}$ - // and $\mathsf{\mathbf{k}}_{ \widetilde{J} \widetilde{J}}$ - // contributions: - else if ((i_group == J_dof) && (j_group == p_dof)) - data.cell_matrix(i, j) -= N[i] * N[j] * JxW; - else if ((i_group == j_group) && (i_group == J_dof)) - data.cell_matrix(i, j) += N[i] * d2Psi_vol_dJ2 * N[j] * JxW; - else - Assert((i_group <= J_dof) && (j_group <= J_dof), - ExcInternalError()); - } - } + const Tensor<2, dim> tau = lqph[q_point].get_tau(); + const SymmetricTensor<4, dim> Jc = lqph[q_point].get_Jc(); + const double d2Psi_vol_dJ2 = lqph[q_point].get_d2Psi_vol_dJ2(); + const double det_F = lqph[q_point].get_det_F(); + + // Next we define some aliases to make + // the assembly process easier to follow + const std::vector + & N = scratch.Nx[q_point]; + const std::vector > + & symm_grad_Nx = scratch.symm_grad_Nx[q_point]; + const std::vector > + & grad_Nx = scratch.grad_Nx[q_point]; + const double JxW = scratch.fe_values_ref.JxW(q_point); + + for (unsigned int i = 0; i < dofs_per_cell; ++i) + { + const unsigned int component_i = fe.system_to_component_index(i).first; + const unsigned int i_group = fe.system_to_base_index(i).first.first; + + for (unsigned int j = 0; j <= i; ++j) + { + const unsigned int component_j = fe.system_to_component_index(j).first; + const unsigned int j_group = fe.system_to_base_index(j).first.first; + + // This is the $\mathsf{\mathbf{k}}_{\mathbf{u} \mathbf{u}}$ + // contribution. 
It comprises a + // material contribution, and a + // geometrical stress contribution + // which is only added along the + // local matrix diagonals: + if ((i_group == j_group) && (i_group == u_dof)) + { + data.cell_matrix(i, j) += symm_grad_Nx[i] * Jc // The material contribution: + * symm_grad_Nx[j] * JxW; + if (component_i == component_j) // geometrical stress contribution + data.cell_matrix(i, j) += grad_Nx[i][component_i] * tau + * grad_Nx[j][component_j] * JxW; + } + // Next is the $\mathsf{\mathbf{k}}_{ \widetilde{p} \mathbf{u}}$ contribution + else if ((i_group == p_dof) && (j_group == u_dof)) + { + data.cell_matrix(i, j) += N[i] * det_F + * (symm_grad_Nx[j] + * AdditionalTools::StandardTensors::I) + * JxW; + } + // and lastly the $\mathsf{\mathbf{k}}_{ \widetilde{J} \widetilde{p}}$ + // and $\mathsf{\mathbf{k}}_{ \widetilde{J} \widetilde{J}}$ + // contributions: + else if ((i_group == J_dof) && (j_group == p_dof)) + data.cell_matrix(i, j) -= N[i] * N[j] * JxW; + else if ((i_group == j_group) && (i_group == J_dof)) + data.cell_matrix(i, j) += N[i] * d2Psi_vol_dJ2 * N[j] * JxW; + else + Assert((i_group <= J_dof) && (j_group <= J_dof), + ExcInternalError()); + } + } } - // Finally, we need to copy the lower - // half of the local matrix into the - // upper half: + // Finally, we need to copy the lower + // half of the local matrix into the + // upper half: for (unsigned int i = 0; i < dofs_per_cell; ++i) for (unsigned int j = i + 1; j < dofs_per_cell; ++j) - data.cell_matrix(i, j) = data.cell_matrix(j, i); + data.cell_matrix(i, j) = data.cell_matrix(j, i); } // @sect4{Solid::assemble_system_rhs} @@ -2546,22 +2546,22 @@ namespace Step44 system_rhs = 0.0; const UpdateFlags uf_cell(update_values | - update_gradients | - update_JxW_values); + update_gradients | + update_JxW_values); const UpdateFlags uf_face(update_values | - update_normal_vectors | - update_JxW_values); + update_normal_vectors | + update_JxW_values); PerTaskData_RHS per_task_data(dofs_per_cell); ScratchData_RHS scratch_data(fe, qf_cell, uf_cell, qf_face, uf_face); WorkStream::run(dof_handler_ref.begin_active(), - dof_handler_ref.end(), - *this, - &Solid::assemble_system_rhs_one_cell, - &Solid::copy_local_to_global_rhs, - scratch_data, - per_task_data); + dof_handler_ref.end(), + *this, + &Solid::assemble_system_rhs_one_cell, + &Solid::copy_local_to_global_rhs, + scratch_data, + per_task_data); timer.leave_subsection(); } @@ -2580,8 +2580,8 @@ namespace Step44 template void Solid::assemble_system_rhs_one_cell(const typename DoFHandler::active_cell_iterator & cell, - ScratchData_RHS & scratch, - PerTaskData_RHS & data) + ScratchData_RHS & scratch, + PerTaskData_RHS & data) { data.reset(); scratch.reset(); @@ -2592,132 +2592,132 @@ namespace Step44 for (unsigned int q_point = 0; q_point < n_q_points; ++q_point) { - const Tensor<2, dim> F_inv = lqph[q_point].get_F_inv(); - - for (unsigned int k = 0; k < dofs_per_cell; ++k) { - const unsigned int k_group = fe.system_to_base_index(k).first.first; - - if (k_group == u_dof) - scratch.symm_grad_Nx[q_point][k] - = symmetrize(scratch.fe_values_ref[u_fe].gradient(k, q_point) - * F_inv); - else if (k_group == p_dof) - scratch.Nx[q_point][k] = scratch.fe_values_ref[p_fe].value(k, - q_point); - else if (k_group == J_dof) - scratch.Nx[q_point][k] = scratch.fe_values_ref[J_fe].value(k, - q_point); - else - Assert(k_group <= J_dof, ExcInternalError()); - } + const Tensor<2, dim> F_inv = lqph[q_point].get_F_inv(); + + for (unsigned int k = 0; k < dofs_per_cell; ++k) { + const unsigned int 
k_group = fe.system_to_base_index(k).first.first; + + if (k_group == u_dof) + scratch.symm_grad_Nx[q_point][k] + = symmetrize(scratch.fe_values_ref[u_fe].gradient(k, q_point) + * F_inv); + else if (k_group == p_dof) + scratch.Nx[q_point][k] = scratch.fe_values_ref[p_fe].value(k, + q_point); + else if (k_group == J_dof) + scratch.Nx[q_point][k] = scratch.fe_values_ref[J_fe].value(k, + q_point); + else + Assert(k_group <= J_dof, ExcInternalError()); + } } for (unsigned int q_point = 0; q_point < n_q_points; ++q_point) { - const SymmetricTensor<2, dim> tau = lqph[q_point].get_tau(); - const double det_F = lqph[q_point].get_det_F(); - const double J_tilde = lqph[q_point].get_J_tilde(); - const double p_tilde = lqph[q_point].get_p_tilde(); - const double dPsi_vol_dJ = lqph[q_point].get_dPsi_vol_dJ(); - - const std::vector - & N = scratch.Nx[q_point]; - const std::vector > - & symm_grad_Nx = scratch.symm_grad_Nx[q_point]; - const double JxW = scratch.fe_values_ref.JxW(q_point); - - // We first compute the contributions - // from the internal forces. Note, by - // definition of the rhs as the negative - // of the residual, these contributions - // are subtracted. - for (unsigned int i = 0; i < dofs_per_cell; ++i) - { - const unsigned int i_group = fe.system_to_base_index(i).first.first; - - if (i_group == u_dof) - data.cell_rhs(i) -= (symm_grad_Nx[i] * tau) * JxW; - else if (i_group == p_dof) - data.cell_rhs(i) -= N[i] * (det_F - J_tilde) * JxW; - else if (i_group == J_dof) - data.cell_rhs(i) -= N[i] * (dPsi_vol_dJ - p_tilde) * JxW; - else - Assert(i_group <= J_dof, ExcInternalError()); - } + const SymmetricTensor<2, dim> tau = lqph[q_point].get_tau(); + const double det_F = lqph[q_point].get_det_F(); + const double J_tilde = lqph[q_point].get_J_tilde(); + const double p_tilde = lqph[q_point].get_p_tilde(); + const double dPsi_vol_dJ = lqph[q_point].get_dPsi_vol_dJ(); + + const std::vector + & N = scratch.Nx[q_point]; + const std::vector > + & symm_grad_Nx = scratch.symm_grad_Nx[q_point]; + const double JxW = scratch.fe_values_ref.JxW(q_point); + + // We first compute the contributions + // from the internal forces. Note, by + // definition of the rhs as the negative + // of the residual, these contributions + // are subtracted. + for (unsigned int i = 0; i < dofs_per_cell; ++i) + { + const unsigned int i_group = fe.system_to_base_index(i).first.first; + + if (i_group == u_dof) + data.cell_rhs(i) -= (symm_grad_Nx[i] * tau) * JxW; + else if (i_group == p_dof) + data.cell_rhs(i) -= N[i] * (det_F - J_tilde) * JxW; + else if (i_group == J_dof) + data.cell_rhs(i) -= N[i] * (dPsi_vol_dJ - p_tilde) * JxW; + else + Assert(i_group <= J_dof, ExcInternalError()); + } } - // Next we assemble the Neumann - // contribution. We first check to see it - // the cell face exists on a boundary on - // which a traction is applied and add the - // contribution if this is the case. + // Next we assemble the Neumann + // contribution. We first check to see it + // the cell face exists on a boundary on + // which a traction is applied and add the + // contribution if this is the case. 
for (unsigned int face = 0; face < GeometryInfo::faces_per_cell; - ++face) + ++face) if (cell->face(face)->at_boundary() == true - && cell->face(face)->boundary_indicator() == 6) - { - scratch.fe_face_values_ref.reinit(cell, face); - - for (unsigned int f_q_point = 0; f_q_point < n_q_points_f; - ++f_q_point) - { - const Tensor<1, dim> & N = - scratch.fe_face_values_ref.normal_vector(f_q_point); - - // Using the face normal at - // this quadrature point - // we specify - // the traction in reference - // configuration. For this - // problem, a defined pressure - // is applied in the reference - // configuration. The - // direction of the applied - // traction is assumed not to - // evolve with the deformation - // of the domain. The traction - // is defined using the first - // Piola-Kirchhoff stress is - // simply - // $\mathbf{t} = \mathbf{P}\mathbf{N} - // = [p_0 \mathbf{I}] \mathbf{N} = p_0 \mathbf{N}$ - // We use the - // time variable to linearly - // ramp up the pressure load. - // - // Note that the contributions - // to the right hand side - // vector we compute here only - // exist in the displacement - // components of the vector. - static const double p0 = -4.0 - / - (parameters.scale * parameters.scale); - const double time_ramp = (time.current() / time.end()); - const double pressure = p0 * parameters.p_p0 * time_ramp; - const Tensor<1, dim> traction = pressure * N; - - for (unsigned int i = 0; i < dofs_per_cell; ++i) - { - const unsigned int i_group = - fe.system_to_base_index(i).first.first; - - if (i_group == u_dof) - { - const unsigned int component_i = - fe.system_to_component_index(i).first; - const double Ni = - scratch.fe_face_values_ref.shape_value(i, - f_q_point); - const double JxW = scratch.fe_face_values_ref.JxW( - f_q_point); - - data.cell_rhs(i) += (Ni * traction[component_i]) - * JxW; - } - } - } - } + && cell->face(face)->boundary_indicator() == 6) + { + scratch.fe_face_values_ref.reinit(cell, face); + + for (unsigned int f_q_point = 0; f_q_point < n_q_points_f; + ++f_q_point) + { + const Tensor<1, dim> & N = + scratch.fe_face_values_ref.normal_vector(f_q_point); + + // Using the face normal at + // this quadrature point + // we specify + // the traction in reference + // configuration. For this + // problem, a defined pressure + // is applied in the reference + // configuration. The + // direction of the applied + // traction is assumed not to + // evolve with the deformation + // of the domain. The traction + // is defined using the first + // Piola-Kirchhoff stress is + // simply + // $\mathbf{t} = \mathbf{P}\mathbf{N} + // = [p_0 \mathbf{I}] \mathbf{N} = p_0 \mathbf{N}$ + // We use the + // time variable to linearly + // ramp up the pressure load. + // + // Note that the contributions + // to the right hand side + // vector we compute here only + // exist in the displacement + // components of the vector. 
+ static const double p0 = -4.0 + / + (parameters.scale * parameters.scale); + const double time_ramp = (time.current() / time.end()); + const double pressure = p0 * parameters.p_p0 * time_ramp; + const Tensor<1, dim> traction = pressure * N; + + for (unsigned int i = 0; i < dofs_per_cell; ++i) + { + const unsigned int i_group = + fe.system_to_base_index(i).first.first; + + if (i_group == u_dof) + { + const unsigned int component_i = + fe.system_to_component_index(i).first; + const double Ni = + scratch.fe_face_values_ref.shape_value(i, + f_q_point); + const double JxW = scratch.fe_face_values_ref.JxW( + f_q_point); + + data.cell_rhs(i) += (Ni * traction[component_i]) + * JxW; + } + } + } + } } // @sect4{Solid::make_constraints} @@ -2732,32 +2732,32 @@ namespace Step44 { std::cout << " CST " << std::flush; - // Since the constraints are different at - // different Newton iterations, we need - // to clear the constraints matrix and - // completely rebuild it. However, after - // the first iteration, the constraints - // remain the same and we can simply skip - // the rebuilding step if we do not clear - // it. + // Since the constraints are different at + // different Newton iterations, we need + // to clear the constraints matrix and + // completely rebuild it. However, after + // the first iteration, the constraints + // remain the same and we can simply skip + // the rebuilding step if we do not clear + // it. if (it_nr > 1) return; constraints.clear(); const bool apply_dirichlet_bc = (it_nr == 0); - // The boundary conditions for the - // indentation problem are as follows: On - // the -x, -y and -z faces (ID's 0,2,4) we - // set up a symmetry condition to allow - // only planar movement while the +x and +y - // faces (ID's 1,3) are traction free. In - // this contrived problem, part of the +z - // face (ID 5) is set to have no motion in - // the x- and y-component. Finally, as - // described earlier, the other part of the - // +z face has an the applied pressure but - // is also constrained in the x- and - // y-directions. + // The boundary conditions for the + // indentation problem are as follows: On + // the -x, -y and -z faces (ID's 0,2,4) we + // set up a symmetry condition to allow + // only planar movement while the +x and +y + // faces (ID's 1,3) are traction free. In + // this contrived problem, part of the +z + // face (ID 5) is set to have no motion in + // the x- and y-component. Finally, as + // described earlier, the other part of the + // +z face has an the applied pressure but + // is also constrained in the x- and + // y-directions. 
{ const int boundary_id = 0; @@ -2765,17 +2765,17 @@ namespace Step44 components[0] = true; if (apply_dirichlet_bc == true) - VectorTools::interpolate_boundary_values(dof_handler_ref, - boundary_id, - ZeroFunction(n_components), - constraints, - components); + VectorTools::interpolate_boundary_values(dof_handler_ref, + boundary_id, + ZeroFunction(n_components), + constraints, + components); else - VectorTools::interpolate_boundary_values(dof_handler_ref, - boundary_id, - ZeroFunction(n_components), - constraints, - components); + VectorTools::interpolate_boundary_values(dof_handler_ref, + boundary_id, + ZeroFunction(n_components), + constraints, + components); } { const int boundary_id = 2; @@ -2784,17 +2784,17 @@ namespace Step44 components[1] = true; if (apply_dirichlet_bc == true) - VectorTools::interpolate_boundary_values(dof_handler_ref, - boundary_id, - ZeroFunction(n_components), - constraints, - components); + VectorTools::interpolate_boundary_values(dof_handler_ref, + boundary_id, + ZeroFunction(n_components), + constraints, + components); else - VectorTools::interpolate_boundary_values(dof_handler_ref, - boundary_id, - ZeroFunction(n_components), - constraints, - components); + VectorTools::interpolate_boundary_values(dof_handler_ref, + boundary_id, + ZeroFunction(n_components), + constraints, + components); } { const int boundary_id = 4; @@ -2802,17 +2802,17 @@ namespace Step44 components[2] = true; if (apply_dirichlet_bc == true) - VectorTools::interpolate_boundary_values(dof_handler_ref, - boundary_id, - ZeroFunction(n_components), - constraints, - components); + VectorTools::interpolate_boundary_values(dof_handler_ref, + boundary_id, + ZeroFunction(n_components), + constraints, + components); else - VectorTools::interpolate_boundary_values(dof_handler_ref, - boundary_id, - ZeroFunction(n_components), - constraints, - components); + VectorTools::interpolate_boundary_values(dof_handler_ref, + boundary_id, + ZeroFunction(n_components), + constraints, + components); } { const int boundary_id = 5; @@ -2820,17 +2820,17 @@ namespace Step44 components[2] = false; if (apply_dirichlet_bc == true) - VectorTools::interpolate_boundary_values(dof_handler_ref, - boundary_id, - ZeroFunction(n_components), - constraints, - components); + VectorTools::interpolate_boundary_values(dof_handler_ref, + boundary_id, + ZeroFunction(n_components), + constraints, + components); else - VectorTools::interpolate_boundary_values(dof_handler_ref, - boundary_id, - ZeroFunction(n_components), - constraints, - components); + VectorTools::interpolate_boundary_values(dof_handler_ref, + boundary_id, + ZeroFunction(n_components), + constraints, + components); } { const int boundary_id = 6; @@ -2838,17 +2838,17 @@ namespace Step44 components[2] = false; if (apply_dirichlet_bc == true) - VectorTools::interpolate_boundary_values(dof_handler_ref, - boundary_id, - ZeroFunction(n_components), - constraints, - components); + VectorTools::interpolate_boundary_values(dof_handler_ref, + boundary_id, + ZeroFunction(n_components), + constraints, + components); else - VectorTools::interpolate_boundary_values(dof_handler_ref, - boundary_id, - ZeroFunction(n_components), - constraints, - components); + VectorTools::interpolate_boundary_values(dof_handler_ref, + boundary_id, + ZeroFunction(n_components), + constraints, + components); } constraints.close(); @@ -2872,44 +2872,44 @@ namespace Step44 // \mathbf{\mathsf{K}}_{\textrm{store}} //:= // \begin{bmatrix} -// \mathbf{\mathsf{K}}_{\textrm{con}} & 
\mathbf{\mathsf{K}}_{u\widetilde{p}} & \mathbf{0} \\ -// \mathbf{\mathsf{K}}_{\widetilde{p}u} & \mathbf{0} & \mathbf{\mathsf{K}}_{\widetilde{p}\widetilde{J}}^{-1} \\ -// \mathbf{0} & \mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{p}} & \mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{J}} -// \end{bmatrix} \, . +// \mathbf{\mathsf{K}}_{\textrm{con}} & \mathbf{\mathsf{K}}_{u\widetilde{p}} & \mathbf{0} \\ +// \mathbf{\mathsf{K}}_{\widetilde{p}u} & \mathbf{0} & \mathbf{\mathsf{K}}_{\widetilde{p}\widetilde{J}}^{-1} \\ +// \mathbf{0} & \mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{p}} & \mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{J}} +// \end{bmatrix} \, . // @f} // and // @f{align*} -// d \widetilde{\mathbf{\mathsf{p}}} -// & = \mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{p}}^{-1} \bigl[ -// \mathbf{\mathsf{F}}_{\widetilde{J}} -// - \mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{J}} d \widetilde{\mathbf{\mathsf{J}}} \bigr] \\ -// d \widetilde{\mathbf{\mathsf{J}}} -// & = \mathbf{\mathsf{K}}_{\widetilde{p}\widetilde{J}}^{-1} \bigl[ -// \mathbf{\mathsf{F}}_{\widetilde{p}} -// - \mathbf{\mathsf{K}}_{\widetilde{p}u} d \mathbf{\mathsf{u}} -// \bigr] \\ -// \Rightarrow d \widetilde{\mathbf{\mathsf{p}}} -// &= \mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{p}}^{-1} \mathbf{\mathsf{F}}_{\widetilde{J}} -// - \underbrace{\bigl[\mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{p}}^{-1} \mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{J}} -// \mathbf{\mathsf{K}}_{\widetilde{p}\widetilde{J}}^{-1}\bigr]}_{\overline{\mathbf{\mathsf{K}}}}\bigl[ \mathbf{\mathsf{F}}_{\widetilde{p}} -// - \mathbf{\mathsf{K}}_{\widetilde{p}u} d \mathbf{\mathsf{u}} \bigr] +// d \widetilde{\mathbf{\mathsf{p}}} +// & = \mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{p}}^{-1} \bigl[ +// \mathbf{\mathsf{F}}_{\widetilde{J}} +// - \mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{J}} d \widetilde{\mathbf{\mathsf{J}}} \bigr] \\ +// d \widetilde{\mathbf{\mathsf{J}}} +// & = \mathbf{\mathsf{K}}_{\widetilde{p}\widetilde{J}}^{-1} \bigl[ +// \mathbf{\mathsf{F}}_{\widetilde{p}} +// - \mathbf{\mathsf{K}}_{\widetilde{p}u} d \mathbf{\mathsf{u}} +// \bigr] \\ +// \Rightarrow d \widetilde{\mathbf{\mathsf{p}}} +// &= \mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{p}}^{-1} \mathbf{\mathsf{F}}_{\widetilde{J}} +// - \underbrace{\bigl[\mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{p}}^{-1} \mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{J}} +// \mathbf{\mathsf{K}}_{\widetilde{p}\widetilde{J}}^{-1}\bigr]}_{\overline{\mathbf{\mathsf{K}}}}\bigl[ \mathbf{\mathsf{F}}_{\widetilde{p}} +// - \mathbf{\mathsf{K}}_{\widetilde{p}u} d \mathbf{\mathsf{u}} \bigr] // @f} // and thus // @f[ -// \underbrace{\bigl[ \mathbf{\mathsf{K}}_{uu} + \overline{\overline{\mathbf{\mathsf{K}}}}~ \bigr] -// }_{\mathbf{\mathsf{K}}_{\textrm{con}}} d \mathbf{\mathsf{u}} -// = +// \underbrace{\bigl[ \mathbf{\mathsf{K}}_{uu} + \overline{\overline{\mathbf{\mathsf{K}}}}~ \bigr] +// }_{\mathbf{\mathsf{K}}_{\textrm{con}}} d \mathbf{\mathsf{u}} +// = // \underbrace{ -// \Bigl[ -// \mathbf{\mathsf{F}}_{u} -// - \mathbf{\mathsf{K}}_{u\widetilde{p}} \bigl[ \mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{p}}^{-1} \mathbf{\mathsf{F}}_{\widetilde{J}} -// - \overline{\mathbf{\mathsf{K}}}\mathbf{\mathsf{F}}_{\widetilde{p}} \bigr] -// \Bigr]}_{\mathbf{\mathsf{F}}_{\textrm{con}}} +// \Bigl[ +// \mathbf{\mathsf{F}}_{u} +// - \mathbf{\mathsf{K}}_{u\widetilde{p}} \bigl[ \mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{p}}^{-1} \mathbf{\mathsf{F}}_{\widetilde{J}} +// - \overline{\mathbf{\mathsf{K}}}\mathbf{\mathsf{F}}_{\widetilde{p}} \bigr] +// 
\Bigr]}_{\mathbf{\mathsf{F}}_{\textrm{con}}} // @f] // where // @f[ -// \overline{\overline{\mathbf{\mathsf{K}}}} := -// \mathbf{\mathsf{K}}_{u\widetilde{p}} \overline{\mathbf{\mathsf{K}}} \mathbf{\mathsf{K}}_{\widetilde{p}u} \, . +// \overline{\overline{\mathbf{\mathsf{K}}}} := +// \mathbf{\mathsf{K}}_{u\widetilde{p}} \overline{\mathbf{\mathsf{K}}} \mathbf{\mathsf{K}}_{\widetilde{p}u} \, . // @f] template std::pair @@ -2921,256 +2921,256 @@ namespace Step44 unsigned int lin_it = 0; double lin_res = 0.0; - // In the first step of this function, we solve for the incremental displacement $d\mathbf{u}$. - // To this end, we perform static condensation to make - // $\mathbf{\mathsf{K}}_{\textrm{con}} - // = \bigl[ \mathbf{\mathsf{K}}_{uu} + \overline{\overline{\mathbf{\mathsf{K}}}}~ \bigr]$ - // and put - // $\mathsf{\mathbf{k}}^{-1}_{\widetilde{p} \widetilde{J}}$ - // in the original $\mathsf{\mathbf{k}}_{\widetilde{p} \widetilde{J}}$ block. - // That is, we make $\mathbf{\mathsf{K}}_{\textrm{store}}$. + // In the first step of this function, we solve for the incremental displacement $d\mathbf{u}$. + // To this end, we perform static condensation to make + // $\mathbf{\mathsf{K}}_{\textrm{con}} + // = \bigl[ \mathbf{\mathsf{K}}_{uu} + \overline{\overline{\mathbf{\mathsf{K}}}}~ \bigr]$ + // and put + // $\mathsf{\mathbf{k}}^{-1}_{\widetilde{p} \widetilde{J}}$ + // in the original $\mathsf{\mathbf{k}}_{\widetilde{p} \widetilde{J}}$ block. + // That is, we make $\mathbf{\mathsf{K}}_{\textrm{store}}$. { assemble_sc(); - // $ - // \mathsf{\mathbf{A}}_{\widetilde{J}} - // = - // \mathsf{\mathbf{K}}^{-1}_{\widetilde{p} \widetilde{J}} - // \mathsf{\mathbf{F}}_{\widetilde{p}} - // $ + // $ + // \mathsf{\mathbf{A}}_{\widetilde{J}} + // = + // \mathsf{\mathbf{K}}^{-1}_{\widetilde{p} \widetilde{J}} + // \mathsf{\mathbf{F}}_{\widetilde{p}} + // $ tangent_matrix.block(p_dof, J_dof).vmult(A.block(J_dof), - system_rhs.block(p_dof)); - // $ - // \mathsf{\mathbf{B}}_{\widetilde{J}} - // = - // \mathsf{\mathbf{K}}_{\widetilde{J} \widetilde{J}} - // \mathsf{\mathbf{K}}^{-1}_{\widetilde{p} \widetilde{J}} - // \mathsf{\mathbf{F}}_{\widetilde{p}} - // $ + system_rhs.block(p_dof)); + // $ + // \mathsf{\mathbf{B}}_{\widetilde{J}} + // = + // \mathsf{\mathbf{K}}_{\widetilde{J} \widetilde{J}} + // \mathsf{\mathbf{K}}^{-1}_{\widetilde{p} \widetilde{J}} + // \mathsf{\mathbf{F}}_{\widetilde{p}} + // $ tangent_matrix.block(J_dof, J_dof).vmult(B.block(J_dof), - A.block(J_dof)); - // $ - // \mathsf{\mathbf{A}}_{\widetilde{J}} - // = - // \mathsf{\mathbf{F}}_{\widetilde{J}} - // - - // \mathsf{\mathbf{K}}_{\widetilde{J} \widetilde{J}} - // \mathsf{\mathbf{K}}^{-1}_{\widetilde{p} \widetilde{J}} - // \mathsf{\mathbf{F}}_{\widetilde{p}} - // $ + A.block(J_dof)); + // $ + // \mathsf{\mathbf{A}}_{\widetilde{J}} + // = + // \mathsf{\mathbf{F}}_{\widetilde{J}} + // - + // \mathsf{\mathbf{K}}_{\widetilde{J} \widetilde{J}} + // \mathsf{\mathbf{K}}^{-1}_{\widetilde{p} \widetilde{J}} + // \mathsf{\mathbf{F}}_{\widetilde{p}} + // $ A.block(J_dof).equ(1.0, system_rhs.block(J_dof), -1.0, B.block(J_dof)); - // $ - // \mathsf{\mathbf{A}}_{\widetilde{J}} - // = - // \mathsf{\mathbf{K}}^{-1}_{\widetilde{J} \widetilde{p}} - // [ - // \mathsf{\mathbf{F}}_{\widetilde{J}} - // - - // \mathsf{\mathbf{K}}_{\widetilde{J} \widetilde{J}} - // \mathsf{\mathbf{K}}^{-1}_{\widetilde{p} \widetilde{J}} - // \mathsf{\mathbf{F}}_{\widetilde{p}} - // ] - // $ + // $ + // \mathsf{\mathbf{A}}_{\widetilde{J}} + // = + // \mathsf{\mathbf{K}}^{-1}_{\widetilde{J} 
\widetilde{p}} + // [ + // \mathsf{\mathbf{F}}_{\widetilde{J}} + // - + // \mathsf{\mathbf{K}}_{\widetilde{J} \widetilde{J}} + // \mathsf{\mathbf{K}}^{-1}_{\widetilde{p} \widetilde{J}} + // \mathsf{\mathbf{F}}_{\widetilde{p}} + // ] + // $ tangent_matrix.block(p_dof, J_dof).Tvmult(A.block(p_dof), - A.block(J_dof)); - // $ - // \mathsf{\mathbf{A}}_{\mathbf{u}} - // = - // \mathsf{\mathbf{K}}_{\mathbf{u} \widetilde{p}} - // \mathsf{\mathbf{K}}^{-1}_{\widetilde{J} \widetilde{p}} - // [ - // \mathsf{\mathbf{F}}_{\widetilde{J}} - // - - // \mathsf{\mathbf{K}}_{\widetilde{J} \widetilde{J}} - // \mathsf{\mathbf{K}}^{-1}_{\widetilde{p} \widetilde{J}} - // \mathsf{\mathbf{F}}_{\widetilde{p}} - // ] - // $ + A.block(J_dof)); + // $ + // \mathsf{\mathbf{A}}_{\mathbf{u}} + // = + // \mathsf{\mathbf{K}}_{\mathbf{u} \widetilde{p}} + // \mathsf{\mathbf{K}}^{-1}_{\widetilde{J} \widetilde{p}} + // [ + // \mathsf{\mathbf{F}}_{\widetilde{J}} + // - + // \mathsf{\mathbf{K}}_{\widetilde{J} \widetilde{J}} + // \mathsf{\mathbf{K}}^{-1}_{\widetilde{p} \widetilde{J}} + // \mathsf{\mathbf{F}}_{\widetilde{p}} + // ] + // $ tangent_matrix.block(u_dof, p_dof).vmult(A.block(u_dof), - A.block(p_dof)); - // $ - // \mathsf{\mathbf{F}}_{\text{con}} - // = - // \mathsf{\mathbf{F}}_{\mathbf{u}} - // - - // \mathsf{\mathbf{K}}_{\mathbf{u} \widetilde{p}} - // \mathsf{\mathbf{K}}^{-1}_{\widetilde{J} \widetilde{p}} - // [ - // \mathsf{\mathbf{F}}_{\widetilde{J}} - // - - // \mathsf{\mathbf{K}}_{\widetilde{J} \widetilde{J}} - // \mathsf{\mathbf{K}}^{-1}_{\widetilde{p} \widetilde{J}} - // \mathsf{\mathbf{K}}_{\widetilde{p}} - // ] - // $ + A.block(p_dof)); + // $ + // \mathsf{\mathbf{F}}_{\text{con}} + // = + // \mathsf{\mathbf{F}}_{\mathbf{u}} + // - + // \mathsf{\mathbf{K}}_{\mathbf{u} \widetilde{p}} + // \mathsf{\mathbf{K}}^{-1}_{\widetilde{J} \widetilde{p}} + // [ + // \mathsf{\mathbf{F}}_{\widetilde{J}} + // - + // \mathsf{\mathbf{K}}_{\widetilde{J} \widetilde{J}} + // \mathsf{\mathbf{K}}^{-1}_{\widetilde{p} \widetilde{J}} + // \mathsf{\mathbf{K}}_{\widetilde{p}} + // ] + // $ system_rhs.block(u_dof) -= A.block(u_dof); timer.enter_subsection("Linear solver"); std::cout << " SLV " << std::flush; if (parameters.type_lin == "CG") - { - const int solver_its = tangent_matrix.block(u_dof, u_dof).m() - * parameters.max_iterations_lin; - const double tol_sol = parameters.tol_lin - * system_rhs.block(u_dof).l2_norm(); - - SolverControl solver_control(solver_its, tol_sol); - - GrowingVectorMemory > GVM; - SolverCG > solver_CG(solver_control, GVM); - - // We've chosen by default a SSOR - // preconditioner as it appears to - // provide the fastest solver - // convergence characteristics for this - // problem on a single-thread machine. - // However, for multicore - // computing, the Jacobi preconditioner - // which is multithreaded may converge - // quicker for larger linear systems. 
- PreconditionSelector, Vector > - preconditioner (parameters.preconditioner_type, - parameters.preconditioner_relaxation); - preconditioner.use_matrix(tangent_matrix.block(u_dof, u_dof)); - - solver_CG.solve(tangent_matrix.block(u_dof, u_dof), - newton_update.block(u_dof), - system_rhs.block(u_dof), - preconditioner); - - lin_it = solver_control.last_step(); - lin_res = solver_control.last_value(); - } + { + const int solver_its = tangent_matrix.block(u_dof, u_dof).m() + * parameters.max_iterations_lin; + const double tol_sol = parameters.tol_lin + * system_rhs.block(u_dof).l2_norm(); + + SolverControl solver_control(solver_its, tol_sol); + + GrowingVectorMemory > GVM; + SolverCG > solver_CG(solver_control, GVM); + + // We've chosen by default a SSOR + // preconditioner as it appears to + // provide the fastest solver + // convergence characteristics for this + // problem on a single-thread machine. + // However, for multicore + // computing, the Jacobi preconditioner + // which is multithreaded may converge + // quicker for larger linear systems. + PreconditionSelector, Vector > + preconditioner (parameters.preconditioner_type, + parameters.preconditioner_relaxation); + preconditioner.use_matrix(tangent_matrix.block(u_dof, u_dof)); + + solver_CG.solve(tangent_matrix.block(u_dof, u_dof), + newton_update.block(u_dof), + system_rhs.block(u_dof), + preconditioner); + + lin_it = solver_control.last_step(); + lin_res = solver_control.last_value(); + } else if (parameters.type_lin == "Direct") - { - // Otherwise if the problem is small - // enough, a direct solver can be - // utilised. - SparseDirectUMFPACK A_direct; - A_direct.initialize(tangent_matrix.block(u_dof, u_dof)); - A_direct.vmult(newton_update.block(u_dof), system_rhs.block(u_dof)); - - lin_it = 1; - lin_res = 0.0; - } + { + // Otherwise if the problem is small + // enough, a direct solver can be + // utilised. 
+ SparseDirectUMFPACK A_direct; + A_direct.initialize(tangent_matrix.block(u_dof, u_dof)); + A_direct.vmult(newton_update.block(u_dof), system_rhs.block(u_dof)); + + lin_it = 1; + lin_res = 0.0; + } else - Assert (false, ExcMessage("Linear solver type not implemented")); + Assert (false, ExcMessage("Linear solver type not implemented")); timer.leave_subsection(); } - // Now that we have the displacement - // update, distribute the constraints - // back to the Newton update: + // Now that we have the displacement + // update, distribute the constraints + // back to the Newton update: constraints.distribute(newton_update); timer.enter_subsection("Linear solver postprocessing"); std::cout << " PP " << std::flush; - // The next step after solving the displacement - // problem is to post-process to get the - // dilatation solution from the - // substitution: - // $ - // d \widetilde{\mathbf{\mathsf{J}}} - // = \mathbf{\mathsf{K}}_{\widetilde{p}\widetilde{J}}^{-1} \bigl[ - // \mathbf{\mathsf{F}}_{\widetilde{p}} - // - \mathbf{\mathsf{K}}_{\widetilde{p}u} d \mathbf{\mathsf{u}} - // \bigr] - // $ + // The next step after solving the displacement + // problem is to post-process to get the + // dilatation solution from the + // substitution: + // $ + // d \widetilde{\mathbf{\mathsf{J}}} + // = \mathbf{\mathsf{K}}_{\widetilde{p}\widetilde{J}}^{-1} \bigl[ + // \mathbf{\mathsf{F}}_{\widetilde{p}} + // - \mathbf{\mathsf{K}}_{\widetilde{p}u} d \mathbf{\mathsf{u}} + // \bigr] + // $ { - // $ - // \mathbf{\mathsf{A}}_{\widetilde{p}} - // = - // \mathbf{\mathsf{K}}_{\widetilde{p}u} d \mathbf{\mathsf{u}} - // $ + // $ + // \mathbf{\mathsf{A}}_{\widetilde{p}} + // = + // \mathbf{\mathsf{K}}_{\widetilde{p}u} d \mathbf{\mathsf{u}} + // $ tangent_matrix.block(p_dof, u_dof).vmult(A.block(p_dof), - newton_update.block(u_dof)); - // $ - // \mathbf{\mathsf{A}}_{\widetilde{p}} - // = - // -\mathbf{\mathsf{K}}_{\widetilde{p}u} d \mathbf{\mathsf{u}} - // $ + newton_update.block(u_dof)); + // $ + // \mathbf{\mathsf{A}}_{\widetilde{p}} + // = + // -\mathbf{\mathsf{K}}_{\widetilde{p}u} d \mathbf{\mathsf{u}} + // $ A.block(p_dof) *= -1.0; - // $ - // \mathbf{\mathsf{A}}_{\widetilde{p}} - // = - // \mathbf{\mathsf{F}}_{\widetilde{p}} - // -\mathbf{\mathsf{K}}_{\widetilde{p}u} d \mathbf{\mathsf{u}} - // $ + // $ + // \mathbf{\mathsf{A}}_{\widetilde{p}} + // = + // \mathbf{\mathsf{F}}_{\widetilde{p}} + // -\mathbf{\mathsf{K}}_{\widetilde{p}u} d \mathbf{\mathsf{u}} + // $ A.block(p_dof) += system_rhs.block(p_dof); - // $ - // d\mathbf{\mathsf{\widetilde{J}}} - // = - // \mathbf{\mathsf{K}}^{-1}_{\widetilde{p}\widetilde{J}} - // [ - // \mathbf{\mathsf{F}}_{\widetilde{p}} - // -\mathbf{\mathsf{K}}_{\widetilde{p}u} d \mathbf{\mathsf{u}} - // ] - // $ + // $ + // d\mathbf{\mathsf{\widetilde{J}}} + // = + // \mathbf{\mathsf{K}}^{-1}_{\widetilde{p}\widetilde{J}} + // [ + // \mathbf{\mathsf{F}}_{\widetilde{p}} + // -\mathbf{\mathsf{K}}_{\widetilde{p}u} d \mathbf{\mathsf{u}} + // ] + // $ tangent_matrix.block(p_dof, J_dof).vmult(newton_update.block(J_dof), - A.block(p_dof)); + A.block(p_dof)); } // we insure here that any Dirichlet constraints // are distributed on the updated solution: constraints.distribute(newton_update); - // Finally we solve for the pressure - // update with the substitution: - // $ - // d \widetilde{\mathbf{\mathsf{p}}} - // = - // \mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{p}}^{-1} - // \bigl[ - // \mathbf{\mathsf{F}}_{\widetilde{J}} - // - \mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{J}} - // d 
\widetilde{\mathbf{\mathsf{J}}} - // \bigr] - // $ + // Finally we solve for the pressure + // update with the substitution: + // $ + // d \widetilde{\mathbf{\mathsf{p}}} + // = + // \mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{p}}^{-1} + // \bigl[ + // \mathbf{\mathsf{F}}_{\widetilde{J}} + // - \mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{J}} + // d \widetilde{\mathbf{\mathsf{J}}} + // \bigr] + // $ { - // $ - // \mathsf{\mathbf{A}}_{\widetilde{J}} - // = - // \mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{J}} - // d \widetilde{\mathbf{\mathsf{J}}} - // $ + // $ + // \mathsf{\mathbf{A}}_{\widetilde{J}} + // = + // \mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{J}} + // d \widetilde{\mathbf{\mathsf{J}}} + // $ tangent_matrix.block(J_dof, J_dof).vmult(A.block(J_dof), - newton_update.block(J_dof)); - // $ - // \mathsf{\mathbf{A}}_{\widetilde{J}} - // = - // -\mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{J}} - // d \widetilde{\mathbf{\mathsf{J}}} - // $ + newton_update.block(J_dof)); + // $ + // \mathsf{\mathbf{A}}_{\widetilde{J}} + // = + // -\mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{J}} + // d \widetilde{\mathbf{\mathsf{J}}} + // $ A.block(J_dof) *= -1.0; - // $ - // \mathsf{\mathbf{A}}_{\widetilde{J}} - // = - // \mathsf{\mathbf{F}}_{\widetilde{J}} - // - - // \mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{J}} - // d \widetilde{\mathbf{\mathsf{J}}} - // $ + // $ + // \mathsf{\mathbf{A}}_{\widetilde{J}} + // = + // \mathsf{\mathbf{F}}_{\widetilde{J}} + // - + // \mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{J}} + // d \widetilde{\mathbf{\mathsf{J}}} + // $ A.block(J_dof) += system_rhs.block(J_dof); - // and finally.... - // $ - // d \widetilde{\mathbf{\mathsf{p}}} - // = - // \mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{p}}^{-1} - // \bigl[ - // \mathbf{\mathsf{F}}_{\widetilde{J}} - // - \mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{J}} - // d \widetilde{\mathbf{\mathsf{J}}} - // \bigr] - // $ + // and finally.... 
+ // $ + // d \widetilde{\mathbf{\mathsf{p}}} + // = + // \mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{p}}^{-1} + // \bigl[ + // \mathbf{\mathsf{F}}_{\widetilde{J}} + // - \mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{J}} + // d \widetilde{\mathbf{\mathsf{J}}} + // \bigr] + // $ tangent_matrix.block(p_dof, J_dof).Tvmult(newton_update.block(p_dof), - A.block(J_dof)); + A.block(J_dof)); } - // We are now at the end, so we distribute all - // constrained dofs back to the Newton - // update: + // We are now at the end, so we distribute all + // constrained dofs back to the Newton + // update: constraints.distribute(newton_update); timer.leave_subsection(); @@ -3203,17 +3203,17 @@ namespace Step44 std::cout << " ASM_SC " << std::flush; PerTaskData_SC per_task_data(dofs_per_cell, element_indices_u.size(), - element_indices_p.size(), - element_indices_J.size()); + element_indices_p.size(), + element_indices_J.size()); ScratchData_SC scratch_data; WorkStream::run(dof_handler_ref.begin_active(), - dof_handler_ref.end(), - *this, - &Solid::assemble_sc_one_cell, - &Solid::copy_local_to_global_sc, - scratch_data, - per_task_data); + dof_handler_ref.end(), + *this, + &Solid::assemble_sc_one_cell, + &Solid::copy_local_to_global_sc, + scratch_data, + per_task_data); timer.leave_subsection(); } @@ -3224,9 +3224,9 @@ namespace Step44 { for (unsigned int i = 0; i < dofs_per_cell; ++i) for (unsigned int j = 0; j < dofs_per_cell; ++j) - tangent_matrix.add(data.local_dof_indices[i], - data.local_dof_indices[j], - data.cell_matrix(i, j)); + tangent_matrix.add(data.local_dof_indices[i], + data.local_dof_indices[j], + data.cell_matrix(i, j)); } @@ -3236,208 +3236,208 @@ namespace Step44 template void Solid::assemble_sc_one_cell(const typename DoFHandler::active_cell_iterator & cell, - ScratchData_SC & scratch, - PerTaskData_SC & data) + ScratchData_SC & scratch, + PerTaskData_SC & data) { data.reset(); scratch.reset(); cell->get_dof_indices(data.local_dof_indices); - // We now extract the contribution of - // the dofs associated with the current cell - // to the global stiffness matrix. - // The discontinuous nature of the $\widetilde{p}$ - // and $\widetilde{J}$ - // interpolations mean that their is no - // coupling of the local contributions at the - // global level. This is not the case with the u dof. - // In other words, - // $\mathsf{\mathbf{k}}_{\widetilde{J} \widetilde{p}}$, - // $\mathsf{\mathbf{k}}_{\widetilde{p} \widetilde{p}}$ - // and - // $\mathsf{\mathbf{k}}_{\widetilde{J} \widetilde{p}}$, - // when extracted - // from the global stiffness matrix are the element - // contributions. - // This is not the case for - // $\mathsf{\mathbf{k}}_{\mathbf{u} \mathbf{u}}$ - // - // Note: a lower-case symbol is used to denote - // element stiffness matrices. 
- - // Currently the matrix corresponding to - // the dof associated with the current element - // (denoted somewhat loosely as $\mathsf{\mathbf{k}}$) - // is of the form: - // @f{align*} - // \begin{bmatrix} - // \mathbf{\mathsf{k}}_{uu} & \mathbf{\mathsf{k}}_{u\widetilde{p}} & \mathbf{0} \\ - // \mathbf{\mathsf{k}}_{\widetilde{p}u} & \mathbf{0} & \mathbf{\mathsf{k}}_{\widetilde{p}\widetilde{J}} \\ - // \mathbf{0} & \mathbf{\mathsf{k}}_{\widetilde{J}\widetilde{p}} & \mathbf{\mathsf{k}}_{\widetilde{J}\widetilde{J}} - // \end{bmatrix} - // @f} - // - // We now need to modify it such that it appear as - // @f{align*} - // \begin{bmatrix} - // \mathbf{\mathsf{k}}_{\textrm{con}} & \mathbf{\mathsf{k}}_{u\widetilde{p}} & \mathbf{0} \\ - // \mathbf{\mathsf{k}}_{\widetilde{p}u} & \mathbf{0} & \mathbf{\mathsf{k}}_{\widetilde{p}\widetilde{J}}^{-1} \\ - // \mathbf{0} & \mathbf{\mathsf{k}}_{\widetilde{J}\widetilde{p}} & \mathbf{\mathsf{k}}_{\widetilde{J}\widetilde{J}} - // \end{bmatrix} - // @f} - // with $\mathbf{\mathsf{k}}_{\textrm{con}} = \bigl[ \mathbf{\mathsf{k}}_{uu} +\overline{\overline{\mathbf{\mathsf{k}}}}~ \bigr]$ - // where - // $ \overline{\overline{\mathbf{\mathsf{k}}}} := - // \mathbf{\mathsf{k}}_{u\widetilde{p}} \overline{\mathbf{\mathsf{k}}} \mathbf{\mathsf{k}}_{\widetilde{p}u} - // $ - // and - // $ - // \overline{\mathbf{\mathsf{k}}} = - // \mathbf{\mathsf{k}}_{\widetilde{J}\widetilde{p}}^{-1} \mathbf{\mathsf{k}}_{\widetilde{J}\widetilde{J}} - // \mathbf{\mathsf{k}}_{\widetilde{p}\widetilde{J}}^{-1} - // $. - // - // At this point, we need to take note of - // the fact that global data already exists - // in the $\mathsf{\mathbf{K}}_{uu}$, - // $\mathsf{\mathbf{K}}_{\widetilde{p} \widetilde{J}}$ - // and - // $\mathsf{\mathbf{K}}_{\widetilde{J} \widetilde{p}}$ - // sub-blocks. So - // if we are to modify them, we must - // account for the data that is already - // there (i.e. simply add to it or remove - // it if necessary). Since the - // copy_local_to_global operation is a "+=" - // operation, we need to take this into - // account - // - // For the $\mathsf{\mathbf{K}}_{uu}$ block in particular, this - // means that contributions have been added - // from the surrounding cells, so we need - // to be careful when we manipulate this - // block. We can't just erase the - // sub-blocks. - // - // This is the strategy we will employ to - // get the sub-blocks we want: - // - // - $ {\mathbf{\mathsf{k}}}_{\textrm{store}}$: - // Since we don't have access to $\mathsf{\mathbf{k}}_{uu}$, - // but we know its contribution is added to - // the global $\mathsf{\mathbf{K}}_{uu}$ matrix, we just want - // to add the element wise - // static-condensation $\overline{\overline{\mathbf{\mathsf{k}}}}$. - // - // - $\mathsf{\mathbf{k}}^{-1}_{\widetilde{p} \widetilde{J}}$: - // Similarly, $\mathsf{\mathbf{k}}_{\widetilde{p} \widetilde{J}}$ exists in - // the subblock. Since the copy - // operation is a += operation, we - // need to subtract the existing - // $\mathsf{\mathbf{k}}_{\widetilde{p} \widetilde{J}}$ - // submatrix in addition to - // "adding" that which we wish to - // replace it with. - // - // - $\mathsf{\mathbf{k}}^{-1}_{\widetilde{J} \widetilde{p}}$: - // Since the global matrix - // is symmetric, this block is the - // same as the one above and we - // can simply use - // $\mathsf{\mathbf{k}}^{-1}_{\widetilde{p} \widetilde{J}}$ - // as a substitute for this one. - // - // We first extract element data from the - // system matrix. 
So first we get the - // entire subblock for the cell, then - // extract $\mathsf{\mathbf{k}}$ - // for the dofs associated with - // the current element + // We now extract the contribution of + // the dofs associated with the current cell + // to the global stiffness matrix. + // The discontinuous nature of the $\widetilde{p}$ + // and $\widetilde{J}$ + // interpolations mean that their is no + // coupling of the local contributions at the + // global level. This is not the case with the u dof. + // In other words, + // $\mathsf{\mathbf{k}}_{\widetilde{J} \widetilde{p}}$, + // $\mathsf{\mathbf{k}}_{\widetilde{p} \widetilde{p}}$ + // and + // $\mathsf{\mathbf{k}}_{\widetilde{J} \widetilde{p}}$, + // when extracted + // from the global stiffness matrix are the element + // contributions. + // This is not the case for + // $\mathsf{\mathbf{k}}_{\mathbf{u} \mathbf{u}}$ + // + // Note: a lower-case symbol is used to denote + // element stiffness matrices. + + // Currently the matrix corresponding to + // the dof associated with the current element + // (denoted somewhat loosely as $\mathsf{\mathbf{k}}$) + // is of the form: + // @f{align*} + // \begin{bmatrix} + // \mathbf{\mathsf{k}}_{uu} & \mathbf{\mathsf{k}}_{u\widetilde{p}} & \mathbf{0} \\ + // \mathbf{\mathsf{k}}_{\widetilde{p}u} & \mathbf{0} & \mathbf{\mathsf{k}}_{\widetilde{p}\widetilde{J}} \\ + // \mathbf{0} & \mathbf{\mathsf{k}}_{\widetilde{J}\widetilde{p}} & \mathbf{\mathsf{k}}_{\widetilde{J}\widetilde{J}} + // \end{bmatrix} + // @f} + // + // We now need to modify it such that it appear as + // @f{align*} + // \begin{bmatrix} + // \mathbf{\mathsf{k}}_{\textrm{con}} & \mathbf{\mathsf{k}}_{u\widetilde{p}} & \mathbf{0} \\ + // \mathbf{\mathsf{k}}_{\widetilde{p}u} & \mathbf{0} & \mathbf{\mathsf{k}}_{\widetilde{p}\widetilde{J}}^{-1} \\ + // \mathbf{0} & \mathbf{\mathsf{k}}_{\widetilde{J}\widetilde{p}} & \mathbf{\mathsf{k}}_{\widetilde{J}\widetilde{J}} + // \end{bmatrix} + // @f} + // with $\mathbf{\mathsf{k}}_{\textrm{con}} = \bigl[ \mathbf{\mathsf{k}}_{uu} +\overline{\overline{\mathbf{\mathsf{k}}}}~ \bigr]$ + // where + // $ \overline{\overline{\mathbf{\mathsf{k}}}} := + // \mathbf{\mathsf{k}}_{u\widetilde{p}} \overline{\mathbf{\mathsf{k}}} \mathbf{\mathsf{k}}_{\widetilde{p}u} + // $ + // and + // $ + // \overline{\mathbf{\mathsf{k}}} = + // \mathbf{\mathsf{k}}_{\widetilde{J}\widetilde{p}}^{-1} \mathbf{\mathsf{k}}_{\widetilde{J}\widetilde{J}} + // \mathbf{\mathsf{k}}_{\widetilde{p}\widetilde{J}}^{-1} + // $. + // + // At this point, we need to take note of + // the fact that global data already exists + // in the $\mathsf{\mathbf{K}}_{uu}$, + // $\mathsf{\mathbf{K}}_{\widetilde{p} \widetilde{J}}$ + // and + // $\mathsf{\mathbf{K}}_{\widetilde{J} \widetilde{p}}$ + // sub-blocks. So + // if we are to modify them, we must + // account for the data that is already + // there (i.e. simply add to it or remove + // it if necessary). Since the + // copy_local_to_global operation is a "+=" + // operation, we need to take this into + // account + // + // For the $\mathsf{\mathbf{K}}_{uu}$ block in particular, this + // means that contributions have been added + // from the surrounding cells, so we need + // to be careful when we manipulate this + // block. We can't just erase the + // sub-blocks. 
+ // + // This is the strategy we will employ to + // get the sub-blocks we want: + // + // - $ {\mathbf{\mathsf{k}}}_{\textrm{store}}$: + // Since we don't have access to $\mathsf{\mathbf{k}}_{uu}$, + // but we know its contribution is added to + // the global $\mathsf{\mathbf{K}}_{uu}$ matrix, we just want + // to add the element wise + // static-condensation $\overline{\overline{\mathbf{\mathsf{k}}}}$. + // + // - $\mathsf{\mathbf{k}}^{-1}_{\widetilde{p} \widetilde{J}}$: + // Similarly, $\mathsf{\mathbf{k}}_{\widetilde{p} \widetilde{J}}$ exists in + // the subblock. Since the copy + // operation is a += operation, we + // need to subtract the existing + // $\mathsf{\mathbf{k}}_{\widetilde{p} \widetilde{J}}$ + // submatrix in addition to + // "adding" that which we wish to + // replace it with. + // + // - $\mathsf{\mathbf{k}}^{-1}_{\widetilde{J} \widetilde{p}}$: + // Since the global matrix + // is symmetric, this block is the + // same as the one above and we + // can simply use + // $\mathsf{\mathbf{k}}^{-1}_{\widetilde{p} \widetilde{J}}$ + // as a substitute for this one. + // + // We first extract element data from the + // system matrix. So first we get the + // entire subblock for the cell, then + // extract $\mathsf{\mathbf{k}}$ + // for the dofs associated with + // the current element AdditionalTools::extract_submatrix(data.local_dof_indices, - data.local_dof_indices, - tangent_matrix, - data.k_orig); - // and next the local matrices for - // $\mathsf{\mathbf{k}}_{ \widetilde{p} \mathbf{u}}$ - // $\mathsf{\mathbf{k}}_{ \widetilde{p} \widetilde{J}}$ - // and - // $\mathsf{\mathbf{k}}_{ \widetilde{J} \widetilde{J}}$: + data.local_dof_indices, + tangent_matrix, + data.k_orig); + // and next the local matrices for + // $\mathsf{\mathbf{k}}_{ \widetilde{p} \mathbf{u}}$ + // $\mathsf{\mathbf{k}}_{ \widetilde{p} \widetilde{J}}$ + // and + // $\mathsf{\mathbf{k}}_{ \widetilde{J} \widetilde{J}}$: AdditionalTools::extract_submatrix(element_indices_p, - element_indices_u, - data.k_orig, - data.k_pu); + element_indices_u, + data.k_orig, + data.k_pu); AdditionalTools::extract_submatrix(element_indices_p, - element_indices_J, - data.k_orig, - data.k_pJ); + element_indices_J, + data.k_orig, + data.k_pJ); AdditionalTools::extract_submatrix(element_indices_J, - element_indices_J, - data.k_orig, - data.k_JJ); - - // To get the inverse of - // $\mathsf{\mathbf{k}}_{\widetilde{p} \widetilde{J}}$, - // we invert it - // directly. This operation is relatively - // inexpensive since $\mathsf{\mathbf{k}}_{\widetilde{p} \widetilde{J}}$ - // since block-diagonal. + element_indices_J, + data.k_orig, + data.k_JJ); + + // To get the inverse of + // $\mathsf{\mathbf{k}}_{\widetilde{p} \widetilde{J}}$, + // we invert it + // directly. This operation is relatively + // inexpensive since $\mathsf{\mathbf{k}}_{\widetilde{p} \widetilde{J}}$ + // since block-diagonal. 
data.k_pJ_inv.invert(data.k_pJ); - // Now we can make condensation terms to - // add to the $\mathsf{\mathbf{k}}_{\mathbf{u} \mathbf{u}}$ - // block and put them in - // the cell local matrix - // $ - // \mathsf{\mathbf{A}} - // = - // \mathsf{\mathbf{k}}^{-1}_{\widetilde{p} \widetilde{J}} - // \mathsf{\mathbf{k}}_{\widetilde{p} \mathbf{u}} - // $: - data.k_pJ_inv.mmult(data.A, data.k_pu); - // $ - // \mathsf{\mathbf{B}} - // = - // \mathsf{\mathbf{k}}^{-1}_{\widetilde{J} \widetilde{J}} - // \mathsf{\mathbf{k}}^{-1}_{\widetilde{p} \widetilde{J}} - // \mathsf{\mathbf{k}}_{\widetilde{p} \mathbf{u}} - // $ + // Now we can make condensation terms to + // add to the $\mathsf{\mathbf{k}}_{\mathbf{u} \mathbf{u}}$ + // block and put them in + // the cell local matrix + // $ + // \mathsf{\mathbf{A}} + // = + // \mathsf{\mathbf{k}}^{-1}_{\widetilde{p} \widetilde{J}} + // \mathsf{\mathbf{k}}_{\widetilde{p} \mathbf{u}} + // $: + data.k_pJ_inv.mmult(data.A, data.k_pu); + // $ + // \mathsf{\mathbf{B}} + // = + // \mathsf{\mathbf{k}}^{-1}_{\widetilde{J} \widetilde{J}} + // \mathsf{\mathbf{k}}^{-1}_{\widetilde{p} \widetilde{J}} + // \mathsf{\mathbf{k}}_{\widetilde{p} \mathbf{u}} + // $ data.k_JJ.mmult(data.B, data.A); - // $ - // \mathsf{\mathbf{C}} - // = - // \mathsf{\mathbf{k}}^{-1}_{\widetilde{J} \widetilde{p}} - // \mathsf{\mathbf{k}}^{-1}_{\widetilde{J} \widetilde{J}} - // \mathsf{\mathbf{k}}^{-1}_{\widetilde{p} \widetilde{J}} - // \mathsf{\mathbf{k}}_{\widetilde{p} \mathbf{u}} - // $ + // $ + // \mathsf{\mathbf{C}} + // = + // \mathsf{\mathbf{k}}^{-1}_{\widetilde{J} \widetilde{p}} + // \mathsf{\mathbf{k}}^{-1}_{\widetilde{J} \widetilde{J}} + // \mathsf{\mathbf{k}}^{-1}_{\widetilde{p} \widetilde{J}} + // \mathsf{\mathbf{k}}_{\widetilde{p} \mathbf{u}} + // $ data.k_pJ_inv.Tmmult(data.C, data.B); - // $ - // \overline{\overline{\mathsf{\mathbf{k}}}} - // = - // \mathsf{\mathbf{k}}_{\mathbf{u} \widetilde{p}} - // \mathsf{\mathbf{k}}^{-1}_{\widetilde{J} \widetilde{p}} - // \mathsf{\mathbf{k}}^{-1}_{\widetilde{J} \widetilde{J}} - // \mathsf{\mathbf{k}}^{-1}_{\widetilde{p} \widetilde{J}} - // \mathsf{\mathbf{k}}_{\widetilde{p} \mathbf{u}} - // $ + // $ + // \overline{\overline{\mathsf{\mathbf{k}}}} + // = + // \mathsf{\mathbf{k}}_{\mathbf{u} \widetilde{p}} + // \mathsf{\mathbf{k}}^{-1}_{\widetilde{J} \widetilde{p}} + // \mathsf{\mathbf{k}}^{-1}_{\widetilde{J} \widetilde{J}} + // \mathsf{\mathbf{k}}^{-1}_{\widetilde{p} \widetilde{J}} + // \mathsf{\mathbf{k}}_{\widetilde{p} \mathbf{u}} + // $ data.k_pu.Tmmult(data.k_bbar, data.C); AdditionalTools::replace_submatrix(element_indices_u, - element_indices_u, - data.k_bbar, - data.cell_matrix); - - // Next we place - // $\mathsf{\mathbf{k}}^{-1}_{ \widetilde{p} \widetilde{J}}$ - // in the - // $\mathsf{\mathbf{k}}_{ \widetilde{p} \widetilde{J}}$ - // block for post-processing. Note again - // that we need to remove the - // contribution that already exists there. + element_indices_u, + data.k_bbar, + data.cell_matrix); + + // Next we place + // $\mathsf{\mathbf{k}}^{-1}_{ \widetilde{p} \widetilde{J}}$ + // in the + // $\mathsf{\mathbf{k}}_{ \widetilde{p} \widetilde{J}}$ + // block for post-processing. Note again + // that we need to remove the + // contribution that already exists there. 
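      // In formulas: because the local-to-global copy is a "+=" operation
      // and the discontinuous $\widetilde{p}$ and $\widetilde{J}$ dofs
      // receive no contributions from neighboring cells, adding
      // $\mathsf{\mathbf{k}}^{-1}_{\widetilde{p}\widetilde{J}}
      //  - \mathsf{\mathbf{k}}_{\widetilde{p}\widetilde{J}}$
      // to the cell matrix leaves exactly the inverse in the global block
      // (this only restates the argument above):
      // @f[
      //   \mathsf{\mathbf{K}}_{\widetilde{p}\widetilde{J}}
      //   \;\leftarrow\;
      //   \mathsf{\mathbf{k}}_{\widetilde{p}\widetilde{J}}
      //   +
      //   \bigl[
      //     \mathsf{\mathbf{k}}^{-1}_{\widetilde{p}\widetilde{J}}
      //     -
      //     \mathsf{\mathbf{k}}_{\widetilde{p}\widetilde{J}}
      //   \bigr]
      //   =
      //   \mathsf{\mathbf{k}}^{-1}_{\widetilde{p}\widetilde{J}} \, .
      // @f]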
data.k_pJ_inv.add(-1.0, data.k_pJ); AdditionalTools::replace_submatrix(element_indices_p, - element_indices_J, - data.k_pJ_inv, - data.cell_matrix); + element_indices_J, + data.k_pJ_inv, + data.cell_matrix); } // @sect4{Solid::output_results} @@ -3450,7 +3450,7 @@ namespace Step44 DataOut data_out; std::vector data_component_interpretation(dim, - DataComponentInterpretation::component_is_part_of_vector); + DataComponentInterpretation::component_is_part_of_vector); data_component_interpretation.push_back(DataComponentInterpretation::component_is_scalar); data_component_interpretation.push_back(DataComponentInterpretation::component_is_scalar); @@ -3460,26 +3460,26 @@ namespace Step44 data_out.attach_dof_handler(dof_handler_ref); data_out.add_data_vector(solution_n, - solution_name, - DataOut::type_dof_data, - data_component_interpretation); - - // Since we are dealing with a large - // deformation problem, it would be nice - // to display the result on a displaced - // grid! The MappingQEulerian class - // linked with the DataOut class provides - // an interface through which this can be - // achieved without physically moving the - // grid points in the Triangulation - // object ourselves. We first need to - // copy the solution to a temporary - // vector and then create the Eulerian - // mapping. We also specify the - // polynomial degree to the DataOut - // object in order to produce a more - // refined output data set when higher - // order polynomials are used. + solution_name, + DataOut::type_dof_data, + data_component_interpretation); + + // Since we are dealing with a large + // deformation problem, it would be nice + // to display the result on a displaced + // grid! The MappingQEulerian class + // linked with the DataOut class provides + // an interface through which this can be + // achieved without physically moving the + // grid points in the Triangulation + // object ourselves. We first need to + // copy the solution to a temporary + // vector and then create the Eulerian + // mapping. We also specify the + // polynomial degree to the DataOut + // object in order to produce a more + // refined output data set when higher + // order polynomials are used. Vector soln(solution_n.size()); for (unsigned int i = 0; i < soln.size(); ++i) soln(i) = solution_n(i); @@ -3516,24 +3516,24 @@ int main (int argc, char *argv[]) catch (std::exception &exc) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Exception on processing: " << std::endl << exc.what() - << std::endl << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << std::endl << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } catch (...) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Unknown exception!" << std::endl << "Aborting!" 
- << std::endl - << "----------------------------------------------------" - << std::endl; + << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } diff --git a/deal.II/examples/step-45/step-45.cc b/deal.II/examples/step-45/step-45.cc index c39e62abf8..9a01ff0c5b 100644 --- a/deal.II/examples/step-45/step-45.cc +++ b/deal.II/examples/step-45/step-45.cc @@ -12,10 +12,10 @@ // @sect3{Include files} - // The include files are already known. The - // one critical for the current program is - // the one that contains the ConstraintMatrix - // in the lac/ directory: + // The include files are already known. The + // one critical for the current program is + // the one that contains the ConstraintMatrix + // in the lac/ directory: #include #include @@ -47,22 +47,22 @@ namespace Step45 { using namespace dealii; - // @sect3{The LaplaceProblem class} - - // The class LaplaceProblem is - // the main class of this problem. As - // mentioned in the introduction, it is - // fashioned after the corresponding class in - // step-3. Correspondingly, the documentation - // from that tutorial program applies here as - // well. The only new member variable is the - // constraints variables that - // will hold the constraints from the - // periodic boundary condition. We will - // initialize it in the - // make_periodicity_constraints() - // function which we call from - // make_grid_and_dofs(). + // @sect3{The LaplaceProblem class} + + // The class LaplaceProblem is + // the main class of this problem. As + // mentioned in the introduction, it is + // fashioned after the corresponding class in + // step-3. Correspondingly, the documentation + // from that tutorial program applies here as + // well. The only new member variable is the + // constraints variables that + // will hold the constraints from the + // periodic boundary condition. We will + // initialize it in the + // make_periodicity_constraints() + // function which we call from + // make_grid_and_dofs(). class LaplaceProblem { public: @@ -90,71 +90,71 @@ namespace Step45 }; - // @sect3{The RightHandSide class} + // @sect3{The RightHandSide class} - // The following implements the right hand - // side function discussed in the - // introduction. Its implementation is - // obvious given what has been shown in - // step-4 before: + // The following implements the right hand + // side function discussed in the + // introduction. Its implementation is + // obvious given what has been shown in + // step-4 before: class RightHandSide: public Function<2> { public: RightHandSide (); virtual double value (const Point<2>& p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; }; RightHandSide::RightHandSide () - : - Function<2> () + : + Function<2> () {} double RightHandSide::value (const Point<2>&p, - const unsigned int) const + const unsigned int) const { return (std::cos (2 * numbers::PI * p(0)) * - std::exp (- 2 * p(0)) * - std::cos (2 * numbers::PI * p(1)) * - std::exp (- 2 * p(1))); + std::exp (- 2 * p(0)) * + std::cos (2 * numbers::PI * p(1)) * + std::exp (- 2 * p(1))); } - // @sect3{Implementation of the LaplaceProblem class} + // @sect3{Implementation of the LaplaceProblem class} - // The first part of implementing the main - // class is the constructor. It is unchanged - // from step-3 and step-4: + // The first part of implementing the main + // class is the constructor. 
It is unchanged + // from step-3 and step-4: LaplaceProblem::LaplaceProblem () - : - fe (1), - dof_handler (triangulation) + : + fe (1), + dof_handler (triangulation) {} - // @sect4{LaplaceProblem::make_grid_and_dofs} - - // The following is the first function to be - // called in run(). It sets up - // the mesh and degrees of freedom. - // - // We start by creating the usual square mesh - // and changing the boundary indicator on the - // parts of the boundary where we have - // Dirichlet boundary conditions (top and - // bottom, i.e. faces two and three of the - // reference cell as defined by - // GeometryInfo), so that we can distinguish - // between the parts of the boundary where - // periodic and where Dirichlet boundary - // conditions hold. We then refine the mesh a - // fixed number of times, with child faces - // inheriting the boundary indicators - // previously set on the coarse mesh from - // their parents. + // @sect4{LaplaceProblem::make_grid_and_dofs} + + // The following is the first function to be + // called in run(). It sets up + // the mesh and degrees of freedom. + // + // We start by creating the usual square mesh + // and changing the boundary indicator on the + // parts of the boundary where we have + // Dirichlet boundary conditions (top and + // bottom, i.e. faces two and three of the + // reference cell as defined by + // GeometryInfo), so that we can distinguish + // between the parts of the boundary where + // periodic and where Dirichlet boundary + // conditions hold. We then refine the mesh a + // fixed number of times, with child faces + // inheriting the boundary indicators + // previously set on the coarse mesh from + // their parents. void LaplaceProblem::make_grid_and_dofs () { GridGenerator::hyper_cube (triangulation); @@ -162,47 +162,47 @@ namespace Step45 triangulation.begin_active ()->face (3)->set_boundary_indicator (1); triangulation.refine_global (5); - // The next step is to distribute the - // degrees of freedom and produce a little - // bit of graphical output: + // The next step is to distribute the + // degrees of freedom and produce a little + // bit of graphical output: dof_handler.distribute_dofs (fe); std::cout << "Number of active cells: " - << triangulation.n_active_cells () - << std::endl - << "Degrees of freedom: " << dof_handler.n_dofs () - << std::endl; - - // Now it is the time for the constraints - // that come from the periodicity - // constraints. We do this in the - // following, separate function, after - // clearing any possible prior content from - // the constraints object: + << triangulation.n_active_cells () + << std::endl + << "Degrees of freedom: " << dof_handler.n_dofs () + << std::endl; + + // Now it is the time for the constraints + // that come from the periodicity + // constraints. We do this in the + // following, separate function, after + // clearing any possible prior content from + // the constraints object: constraints.clear (); make_periodicity_constraints (); - // We also incorporate the homogeneous - // Dirichlet boundary conditions on the - // upper and lower parts of the boundary - // (i.e. the ones with boundary indicator - // 1) and close the - // ConstraintMatrix object: + // We also incorporate the homogeneous + // Dirichlet boundary conditions on the + // upper and lower parts of the boundary + // (i.e. 
the ones with boundary indicator + // 1) and close the + // ConstraintMatrix object: VectorTools::interpolate_boundary_values (dof_handler, 1, - ZeroFunction<2> (), - constraints); + ZeroFunction<2> (), + constraints); constraints.close (); - // Then we create the sparsity pattern and - // the system matrix and initialize the - // solution and right-hand side - // vectors. This is again as in step-3 or - // step-6, for example: + // Then we create the sparsity pattern and + // the system matrix and initialize the + // solution and right-hand side + // vectors. This is again as in step-3 or + // step-6, for example: CompressedSparsityPattern c_sparsity_pattern (dof_handler.n_dofs(), - dof_handler.n_dofs()); + dof_handler.n_dofs()); DoFTools::make_sparsity_pattern (dof_handler, - c_sparsity_pattern, - constraints, - false); + c_sparsity_pattern, + constraints, + false); c_sparsity_pattern.compress (); sparsity_pattern.copy_from (c_sparsity_pattern); @@ -213,179 +213,179 @@ namespace Step45 - // @sect4{LaplaceProblem::make_periodicity_constraints} - - // This is the function that provides the new - // material of this tutorial program. The - // general outline of the algorithm is as - // follows: we first loop over all the - // degrees of freedom on the right boundary - // and record their $y$-locations in a map - // together with their global indices. Then - // we go along the left boundary, find - // matching $y$-locations for each degree of - // freedom, and then add constraints that - // identify these matched degrees of freedom. - // - // In this function, we make use of the fact - // that we have a scalar element (i.e. the - // only valid vector component that can be - // passed to DoFAccessor::vertex_dof_index is - // zero) and that we have a $Q_1$ element for - // which all degrees of freedom live in the - // vertices of the cell. Furthermore, we have - // assumed that we are in 2d and that meshes - // were not refined adaptively — the - // latter assumption would imply that there - // may be vertices that aren't matched - // one-to-one and for which we won't be able - // to compute constraints this easily. We - // will discuss in the "outlook" part of the - // results section below other strategies to - // write the current function that can work - // in cases like this as well. + // @sect4{LaplaceProblem::make_periodicity_constraints} + + // This is the function that provides the new + // material of this tutorial program. The + // general outline of the algorithm is as + // follows: we first loop over all the + // degrees of freedom on the right boundary + // and record their $y$-locations in a map + // together with their global indices. Then + // we go along the left boundary, find + // matching $y$-locations for each degree of + // freedom, and then add constraints that + // identify these matched degrees of freedom. + // + // In this function, we make use of the fact + // that we have a scalar element (i.e. the + // only valid vector component that can be + // passed to DoFAccessor::vertex_dof_index is + // zero) and that we have a $Q_1$ element for + // which all degrees of freedom live in the + // vertices of the cell. Furthermore, we have + // assumed that we are in 2d and that meshes + // were not refined adaptively — the + // latter assumption would imply that there + // may be vertices that aren't matched + // one-to-one and for which we won't be able + // to compute constraints this easily. 
We + // will discuss in the "outlook" part of the + // results section below other strategies to + // write the current function that can work + // in cases like this as well. void LaplaceProblem::make_periodicity_constraints () { - // To start with the actual implementation, - // we loop over all active cells and check - // whether the cell is located at the right - // boundary (i.e. face 1 — the one at - // the right end of the cell — is at - // the boundary). If that is so, then we - // use that for the currently used finite - // element, each degree of freedom of the - // face is located on one vertex, and store - // their $y$-coordinate along with the - // global number of this degree of freedom - // in the following map: + // To start with the actual implementation, + // we loop over all active cells and check + // whether the cell is located at the right + // boundary (i.e. face 1 — the one at + // the right end of the cell — is at + // the boundary). If that is so, then we + // use that for the currently used finite + // element, each degree of freedom of the + // face is located on one vertex, and store + // their $y$-coordinate along with the + // global number of this degree of freedom + // in the following map: std::map dof_locations; for (DoFHandler<2>::active_cell_iterator cell = dof_handler.begin_active (); - cell != dof_handler.end (); ++cell) + cell != dof_handler.end (); ++cell) if (cell->at_boundary () - && - cell->face(1)->at_boundary ()) - { - dof_locations[cell->face(1)->vertex_dof_index(0, 0)] - = cell->face(1)->vertex(0)[1]; - dof_locations[cell->face(1)->vertex_dof_index(1, 0)] - = cell->face(1)->vertex(1)[1]; - } - // Note that in the above block, we add - // vertices zero and one of the affected - // face to the map. This means that we will - // add each vertex twice, once from each of - // the two adjacent cells (unless the - // vertex is a corner of the domain). Since - // the coordinates of the vertex are the - // same both times of course, there is no - // harm: we replace one value in the map - // with itself the second time we visit an - // entry. - // - // The same will be true below where we add - // the same constraint twice to the - // ConstraintMatrix — again, we will - // overwrite the constraint with itself, - // and no harm is done. - - // Now we have to find the corresponding - // degrees of freedom on the left part of - // the boundary. Therefore we loop over all - // cells again and choose the ones where - // face 0 is at the boundary: + && + cell->face(1)->at_boundary ()) + { + dof_locations[cell->face(1)->vertex_dof_index(0, 0)] + = cell->face(1)->vertex(0)[1]; + dof_locations[cell->face(1)->vertex_dof_index(1, 0)] + = cell->face(1)->vertex(1)[1]; + } + // Note that in the above block, we add + // vertices zero and one of the affected + // face to the map. This means that we will + // add each vertex twice, once from each of + // the two adjacent cells (unless the + // vertex is a corner of the domain). Since + // the coordinates of the vertex are the + // same both times of course, there is no + // harm: we replace one value in the map + // with itself the second time we visit an + // entry. + // + // The same will be true below where we add + // the same constraint twice to the + // ConstraintMatrix — again, we will + // overwrite the constraint with itself, + // and no harm is done. + + // Now we have to find the corresponding + // degrees of freedom on the left part of + // the boundary. 
Therefore we loop over all + // cells again and choose the ones where + // face 0 is at the boundary: for (DoFHandler<2>::active_cell_iterator cell = dof_handler.begin_active (); - cell != dof_handler.end (); ++cell) + cell != dof_handler.end (); ++cell) if (cell->at_boundary () - && - cell->face (0)->at_boundary ()) - { - // Every degree of freedom on this - // face needs to have a corresponding - // one on the right side of the face, - // and our goal is to add a - // constraint for the one on the left - // in terms of the one on the - // right. To this end we first add a - // new line to the constraint matrix - // for this one degree of - // freedom. Then we identify it with - // the corresponding degree of - // freedom on the right part of the - // boundary by constraining the - // degree of freedom on the left with - // the one on the right times a - // weight of 1.0. - // - // Consequently, we loop over the two - // vertices of each face we find and - // then loop over all the - // $y$-locations we've previously - // recorded to find which degree of - // freedom on the right boundary - // corresponds to the one we - // currently look at. Note that we - // have entered these into a map, and - // when looping over the iterators - // p of this map, - // p-@>first corresponds - // to the "key" of an entry (the - // global number of the degree of - // freedom), whereas - // p-@>second is the - // "value" (the $y$-location we have - // entered above). - // - // We are quite sure here that we - // should be finding such a - // corresponding degree of - // freedom. However, sometimes stuff - // happens and so the bottom of the - // block contains an assertion that - // our assumption was indeed correct - // and that a vertex was found. - for (unsigned int face_vertex = 0; face_vertex<2; ++face_vertex) - { - constraints.add_line (cell->face(0)->vertex_dof_index (face_vertex, 0)); - - std::map::const_iterator p = dof_locations.begin(); - for (; p != dof_locations.end(); ++p) - if (std::fabs(p->second - cell->face(0)->vertex(face_vertex)[1]) < 1e-8) - { - constraints.add_entry (cell->face(0)->vertex_dof_index (face_vertex, 0), - p->first, 1.0); - break; - } - Assert (p != dof_locations.end(), - ExcMessage ("No corresponding degree of freedom was found!")); - } - } + && + cell->face (0)->at_boundary ()) + { + // Every degree of freedom on this + // face needs to have a corresponding + // one on the right side of the face, + // and our goal is to add a + // constraint for the one on the left + // in terms of the one on the + // right. To this end we first add a + // new line to the constraint matrix + // for this one degree of + // freedom. Then we identify it with + // the corresponding degree of + // freedom on the right part of the + // boundary by constraining the + // degree of freedom on the left with + // the one on the right times a + // weight of 1.0. + // + // Consequently, we loop over the two + // vertices of each face we find and + // then loop over all the + // $y$-locations we've previously + // recorded to find which degree of + // freedom on the right boundary + // corresponds to the one we + // currently look at. Note that we + // have entered these into a map, and + // when looping over the iterators + // p of this map, + // p-@>first corresponds + // to the "key" of an entry (the + // global number of the degree of + // freedom), whereas + // p-@>second is the + // "value" (the $y$-location we have + // entered above). 
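          // Put differently: for every matched pair of global dof indices,
          // the two ConstraintMatrix calls used in the loop below encode
          // the algebraic identity
          // $u_{\text{left}} = 1.0 \cdot u_{\text{right}}$. As a minimal,
          // stand-alone illustration (i_left and i_right are placeholder
          // indices, not variables of this program):

          constraints.add_line  (i_left);
          constraints.add_entry (i_left, i_right, 1.0);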
+ // + // We are quite sure here that we + // should be finding such a + // corresponding degree of + // freedom. However, sometimes stuff + // happens and so the bottom of the + // block contains an assertion that + // our assumption was indeed correct + // and that a vertex was found. + for (unsigned int face_vertex = 0; face_vertex<2; ++face_vertex) + { + constraints.add_line (cell->face(0)->vertex_dof_index (face_vertex, 0)); + + std::map::const_iterator p = dof_locations.begin(); + for (; p != dof_locations.end(); ++p) + if (std::fabs(p->second - cell->face(0)->vertex(face_vertex)[1]) < 1e-8) + { + constraints.add_entry (cell->face(0)->vertex_dof_index (face_vertex, 0), + p->first, 1.0); + break; + } + Assert (p != dof_locations.end(), + ExcMessage ("No corresponding degree of freedom was found!")); + } + } } - // @sect4{LaplaceProblem::assemble_system} - - // Assembling the system matrix and the - // right-hand side vector is done as in other - // tutorials before. - // - // The only difference here is that we don't - // copy elements from local contributions - // into the global matrix and later fix up - // constrained degrees of freedom, but that - // we let the ConstraintMatrix do this job in - // one swoop for us using the - // ConstraintMatrix::distribute_local_to_global - // function(). This was previously already - // demonstrated in step-16, step-22, for - // example, along with a discussion in the - // introduction of step-27. + // @sect4{LaplaceProblem::assemble_system} + + // Assembling the system matrix and the + // right-hand side vector is done as in other + // tutorials before. + // + // The only difference here is that we don't + // copy elements from local contributions + // into the global matrix and later fix up + // constrained degrees of freedom, but that + // we let the ConstraintMatrix do this job in + // one swoop for us using the + // ConstraintMatrix::distribute_local_to_global + // function(). This was previously already + // demonstrated in step-16, step-22, for + // example, along with a discussion in the + // introduction of step-27. void LaplaceProblem::assemble_system () { QGauss<2> quadrature_formula(2); FEValues<2> fe_values (fe, quadrature_formula, - update_values | update_gradients | + update_values | update_gradients | update_quadrature_points | update_JxW_values); const unsigned int dofs_per_cell = fe.dofs_per_cell; @@ -399,44 +399,44 @@ namespace Step45 const RightHandSide right_hand_side; DoFHandler<2>::active_cell_iterator cell = dof_handler.begin_active(), - endc = dof_handler.end(); + endc = dof_handler.end(); for (; cell!=endc; ++cell) { - fe_values.reinit (cell); - cell_matrix = 0; - cell_rhs = 0; - - for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); - constraints.distribute_local_to_global (cell_matrix, cell_rhs, - local_dof_indices, - system_matrix, system_rhs); + fe_values.reinit (cell); + cell_matrix = 0; + cell_rhs = 0; + + for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); + constraints.distribute_local_to_global (cell_matrix, cell_rhs, + local_dof_indices, + system_matrix, system_rhs); } } - // @sect4{LaplaceProblem::solve} + // @sect4{LaplaceProblem::solve} - // To solve the linear system of equations - // $Au=b$ we use the CG solver with an - // SSOR-preconditioner. This is, again, - // copied almost verbatim from step-6. 
As in - // step-6, we need to make sure that - // constrained degrees of freedom get their - // correct values after solving by calling - // the ConstraintMatrix::distribute function: + // To solve the linear system of equations + // $Au=b$ we use the CG solver with an + // SSOR-preconditioner. This is, again, + // copied almost verbatim from step-6. As in + // step-6, we need to make sure that + // constrained degrees of freedom get their + // correct values after solving by calling + // the ConstraintMatrix::distribute function: void LaplaceProblem::solve () { SolverControl solver_control (dof_handler.n_dofs (), 1e-12); @@ -451,11 +451,11 @@ namespace Step45 } - // @sect4{LaplaceProblem::output_results} + // @sect4{LaplaceProblem::output_results} - // This is another function copied from - // previous tutorial programs. It generates - // graphical output in VTK format: + // This is another function copied from + // previous tutorial programs. It generates + // graphical output in VTK format: void LaplaceProblem::output_results () { DataOut<2> data_out; @@ -471,10 +471,10 @@ namespace Step45 - // @sect4{LaplaceProblem::run} + // @sect4{LaplaceProblem::run} - // And another function copied from previous - // programs: + // And another function copied from previous + // programs: void LaplaceProblem::run () { make_grid_and_dofs(); @@ -486,8 +486,8 @@ namespace Step45 // @sect3{The main function} - // And at the end we have the main function - // as usual, this time copied from step-6: + // And at the end we have the main function + // as usual, this time copied from step-6: int main () { try @@ -503,25 +503,25 @@ int main () catch (std::exception &exc) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Exception on processing: " << std::endl - << exc.what() << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << exc.what() << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } catch (...) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Unknown exception!" << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } diff --git a/deal.II/examples/step-46/step-46.cc b/deal.II/examples/step-46/step-46.cc index 24aba9d3c0..f6cf493adf 100644 --- a/deal.II/examples/step-46/step-46.cc +++ b/deal.II/examples/step-46/step-46.cc @@ -12,12 +12,12 @@ // @sect3{Include files} - // The include files for this program are the - // same as for many others before. The only - // new one is the one that declares - // FE_Nothing as discussed in the - // introduction. The ones in the hp directory - // have already been discussed in step-27. + // The include files for this program are the + // same as for many others before. The only + // new one is the one that declares + // FE_Nothing as discussed in the + // introduction. The ones in the hp directory + // have already been discussed in step-27. 
#include #include @@ -60,53 +60,53 @@ namespace Step46 { using namespace dealii; - // @sect3{The FluidStructureProblem class template} - - // This is the main class. It is, if you - // want, a combination of step-8 and step-22 - // in that it has member variables that - // either address the global problem (the - // Triangulation and hp::DoFHandler objects, - // as well as the hp::FECollection and - // various linear algebra objects) or that - // pertain to either the elasticity or Stokes - // sub-problems. The general structure of the - // class, however, is like that of most of - // the other programs implementing stationary - // problems. - // - // There are a few helper functions - // (cell_is_in_fluid_domain, - // cell_is_in_solid_domain) of - // self-explanatory nature (operating on the - // symbolic names for the two subdomains that - // will be used as material_ids for cells - // belonging to the subdomains, as explained - // in the introduction) and a few functions - // (make_grid, set_active_fe_indices, - // assemble_interface_terms) that have - // been broken out of other functions that - // can be found in many of the other tutorial - // programs and that will be discussed as we - // get to their implementation. - // - // The final set of variables - // (viscosity, lambda, eta) - // describes the material properties used for - // the two physics models. + // @sect3{The FluidStructureProblem class template} + + // This is the main class. It is, if you + // want, a combination of step-8 and step-22 + // in that it has member variables that + // either address the global problem (the + // Triangulation and hp::DoFHandler objects, + // as well as the hp::FECollection and + // various linear algebra objects) or that + // pertain to either the elasticity or Stokes + // sub-problems. The general structure of the + // class, however, is like that of most of + // the other programs implementing stationary + // problems. + // + // There are a few helper functions + // (cell_is_in_fluid_domain, + // cell_is_in_solid_domain) of + // self-explanatory nature (operating on the + // symbolic names for the two subdomains that + // will be used as material_ids for cells + // belonging to the subdomains, as explained + // in the introduction) and a few functions + // (make_grid, set_active_fe_indices, + // assemble_interface_terms) that have + // been broken out of other functions that + // can be found in many of the other tutorial + // programs and that will be discussed as we + // get to their implementation. + // + // The final set of variables + // (viscosity, lambda, eta) + // describes the material properties used for + // the two physics models. 
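  // To make the helper functions mentioned above concrete: a sketch of
  // what cell_is_in_fluid_domain could look like, testing the cell's
  // material_id against the enum declared in the class below (the actual
  // definitions follow later in the program; this only illustrates the
  // idea):

  template <int dim>
  bool
  FluidStructureProblem<dim>::
  cell_is_in_fluid_domain (const typename hp::DoFHandler<dim>::cell_iterator &cell)
  {
    return (cell->material_id() == fluid_domain_id);
  }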
template class FluidStructureProblem { public: FluidStructureProblem (const unsigned int stokes_degree, - const unsigned int elasticity_degree); + const unsigned int elasticity_degree); void run (); private: enum { - fluid_domain_id, - solid_domain_id + fluid_domain_id, + solid_domain_id }; static bool @@ -121,11 +121,11 @@ namespace Step46 void setup_dofs (); void assemble_system (); void assemble_interface_term (const FEFaceValuesBase &elasticity_fe_face_values, - const FEFaceValuesBase &stokes_fe_face_values, - std::vector > &elasticity_phi, - std::vector > &stokes_phi_grads_u, - std::vector &stokes_phi_p, - FullMatrix &local_interface_matrix) const; + const FEFaceValuesBase &stokes_fe_face_values, + std::vector > &elasticity_phi, + std::vector > &stokes_phi_grads_u, + std::vector &stokes_phi_p, + FullMatrix &local_interface_matrix) const; void solve (); void output_results (const unsigned int refinement_cycle) const; void refine_mesh (); @@ -153,19 +153,19 @@ namespace Step46 }; - // @sect3{Boundary values and right hand side} + // @sect3{Boundary values and right hand side} - // The following classes do as their names - // suggest. The boundary values for the - // velocity are $\mathbf u=(0, \sin(\pi - // x))^T$ in 2d and $\mathbf u=(0, 0, - // \sin(\pi x)\sin(\pi y))^T$ in 3d, - // respectively. The remaining boundary - // conditions for this problem are all - // homogenous and have been discussed in the - // introduction. The right hand side forcing - // term is zero for both the fluid and the - // solid. + // The following classes do as their names + // suggest. The boundary values for the + // velocity are $\mathbf u=(0, \sin(\pi + // x))^T$ in 2d and $\mathbf u=(0, 0, + // \sin(\pi x)\sin(\pi y))^T$ in 3d, + // respectively. The remaining boundary + // conditions for this problem are all + // homogenous and have been discussed in the + // introduction. The right hand side forcing + // term is zero for both the fluid and the + // solid. 
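  // The function class defined next is later handed to
  // VectorTools::interpolate_boundary_values together with a component
  // mask that selects only the dim velocity components out of the
  // dim+1+dim total components. A rough sketch of that pattern (the mask
  // construction shown here is illustrative, not copied verbatim from
  // this program):

  std::vector<bool> velocity_mask (dim+1+dim, false);
  for (unsigned int d=0; d<dim; ++d)
    velocity_mask[d] = true;

  VectorTools::interpolate_boundary_values (dof_handler,
                                            1,
                                            StokesBoundaryValues<dim>(),
                                            constraints,
                                            velocity_mask);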
template class StokesBoundaryValues : public Function { @@ -173,31 +173,31 @@ namespace Step46 StokesBoundaryValues () : Function(dim+1+dim) {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; virtual void vector_value (const Point &p, - Vector &value) const; + Vector &value) const; }; template double StokesBoundaryValues::value (const Point &p, - const unsigned int component) const + const unsigned int component) const { Assert (component < this->n_components, - ExcIndexRange (component, 0, this->n_components)); + ExcIndexRange (component, 0, this->n_components)); if (component == dim-1) switch (dim) - { - case 2: - return std::sin(numbers::PI*p[0]); - case 3: - return std::sin(numbers::PI*p[0]) * std::sin(numbers::PI*p[1]); - default: - Assert (false, ExcNotImplemented()); - } + { + case 2: + return std::sin(numbers::PI*p[0]); + case 3: + return std::sin(numbers::PI*p[0]) * std::sin(numbers::PI*p[1]); + default: + Assert (false, ExcNotImplemented()); + } return 0; } @@ -206,7 +206,7 @@ namespace Step46 template void StokesBoundaryValues::vector_value (const Point &p, - Vector &values) const + Vector &values) const { for (unsigned int c=0; cn_components; ++c) values(c) = StokesBoundaryValues::value (p, c); @@ -221,10 +221,10 @@ namespace Step46 RightHandSide () : Function(dim+1) {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; virtual void vector_value (const Point &p, - Vector &value) const; + Vector &value) const; }; @@ -232,7 +232,7 @@ namespace Step46 template double RightHandSide::value (const Point &/*p*/, - const unsigned int /*component*/) const + const unsigned int /*component*/) const { return 0; } @@ -241,7 +241,7 @@ namespace Step46 template void RightHandSide::vector_value (const Point &p, - Vector &values) const + Vector &values) const { for (unsigned int c=0; cn_components; ++c) values(c) = RightHandSide::value (p, c); @@ -249,45 +249,45 @@ namespace Step46 - // @sect3{The FluidStructureProblem implementation} - - // @sect4{Constructors and helper functions} - - // Let's now get to the implementation of the - // primary class of this program. The first - // few functions are the constructor and the - // helper functions that can be used to - // determine which part of the domain a cell - // is in. Given the discussion of these - // topics in the introduction, their - // implementation is rather obvious. In the - // constructor, note that we have to - // construct the hp::FECollection object from - // the base elements for Stokes and - // elasticity; using the - // hp::FECollection::push_back function - // assigns them spots zero and one in this - // collection, an order that we have to - // remember and use consistently in the rest - // of the program. + // @sect3{The FluidStructureProblem implementation} + + // @sect4{Constructors and helper functions} + + // Let's now get to the implementation of the + // primary class of this program. The first + // few functions are the constructor and the + // helper functions that can be used to + // determine which part of the domain a cell + // is in. Given the discussion of these + // topics in the introduction, their + // implementation is rather obvious. 
In the + // constructor, note that we have to + // construct the hp::FECollection object from + // the base elements for Stokes and + // elasticity; using the + // hp::FECollection::push_back function + // assigns them spots zero and one in this + // collection, an order that we have to + // remember and use consistently in the rest + // of the program. template FluidStructureProblem:: FluidStructureProblem (const unsigned int stokes_degree, - const unsigned int elasticity_degree) - : - stokes_degree (stokes_degree), - elasticity_degree (elasticity_degree), - triangulation (Triangulation::maximum_smoothing), - stokes_fe (FE_Q(stokes_degree+1), dim, - FE_Q(stokes_degree), 1, - FE_Nothing(), dim), - elasticity_fe (FE_Nothing(), dim, - FE_Nothing(), 1, - FE_Q(elasticity_degree), dim), - dof_handler (triangulation), - viscosity (2), - lambda (1), - mu (1) + const unsigned int elasticity_degree) + : + stokes_degree (stokes_degree), + elasticity_degree (elasticity_degree), + triangulation (Triangulation::maximum_smoothing), + stokes_fe (FE_Q(stokes_degree+1), dim, + FE_Q(stokes_degree), 1, + FE_Nothing(), dim), + elasticity_fe (FE_Nothing(), dim, + FE_Nothing(), 1, + FE_Q(elasticity_degree), dim), + dof_handler (triangulation), + viscosity (2), + lambda (1), + mu (1) { fe_collection.push_back (stokes_fe); fe_collection.push_back (elasticity_fe); @@ -314,26 +314,26 @@ namespace Step46 } - // @sect4{Meshes and assigning subdomains} - - // The next pair of functions deals with - // generating a mesh and making sure all - // flags that denote subdomains are - // correct. make_grid, as - // discussed in the introduction, generates - // an $8\times 8$ mesh (or an $8\times - // 8\times 8$ mesh in 3d) to make sure that - // each coarse mesh cell is completely within - // one of the subdomains. After generating - // this mesh, we loop over its boundary and - // set the boundary indicator to one at the - // top boundary, the only place where we set - // nonzero Dirichlet boundary - // conditions. After this, we loop again over - // all cells to set the material indicator - // — used to denote which part of the - // domain we are in, to either the fluid or - // solid indicator. + // @sect4{Meshes and assigning subdomains} + + // The next pair of functions deals with + // generating a mesh and making sure all + // flags that denote subdomains are + // correct. make_grid, as + // discussed in the introduction, generates + // an $8\times 8$ mesh (or an $8\times + // 8\times 8$ mesh in 3d) to make sure that + // each coarse mesh cell is completely within + // one of the subdomains. After generating + // this mesh, we loop over its boundary and + // set the boundary indicator to one at the + // top boundary, the only place where we set + // nonzero Dirichlet boundary + // conditions. After this, we loop again over + // all cells to set the material indicator + // — used to denote which part of the + // domain we are in, to either the fluid or + // solid indicator. 
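To make the geometric rule just described concrete, here is a standalone sketch of the test that make_grid applies to each cell center. The helper and enum names are hypothetical and there is no deal.II dependency; the actual loop below works on Triangulation iterators and calls cell->set_material_id().

#include <cmath>
#include <iostream>

// Fluid cells: everything above height -0.5 except the pillar |x| < 0.25 up
// to height 0.5; the rest of the domain is solid. "height" is the last
// coordinate, i.e. y in 2d and z in 3d.
enum MaterialId { fluid_domain_id = 0, solid_domain_id = 1 };

MaterialId classify_cell_center (const double x, const double height)
{
  if (((std::fabs(x) < 0.25) && (height > 0.5))
      ||
      ((std::fabs(x) >= 0.25) && (height > -0.5)))
    return fluid_domain_id;
  else
    return solid_domain_id;
}

int main ()
{
  // a cell centered at (0,0) sits in the solid pillar below the channel,
  // while a cell centered at (0.5,0) is fluid:
  std::cout << (classify_cell_center(0.0, 0.0) == solid_domain_id) << ' '
            << (classify_cell_center(0.5, 0.0) == fluid_domain_id) << std::endl;
}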
template void FluidStructureProblem::make_grid () @@ -341,81 +341,81 @@ namespace Step46 GridGenerator::subdivided_hyper_cube (triangulation, 8, -1, 1); for (typename Triangulation::active_cell_iterator - cell = triangulation.begin_active(); - cell != triangulation.end(); ++cell) + cell = triangulation.begin_active(); + cell != triangulation.end(); ++cell) for (unsigned int f=0; f::faces_per_cell; ++f) - if (cell->face(f)->at_boundary() - && - (cell->face(f)->center()[dim-1] == 1)) - cell->face(f)->set_all_boundary_indicators(1); + if (cell->face(f)->at_boundary() + && + (cell->face(f)->center()[dim-1] == 1)) + cell->face(f)->set_all_boundary_indicators(1); for (typename Triangulation::active_cell_iterator - cell = dof_handler.begin_active(); - cell != dof_handler.end(); ++cell) + cell = dof_handler.begin_active(); + cell != dof_handler.end(); ++cell) if (((std::fabs(cell->center()[0]) < 0.25) - && - (cell->center()[dim-1] > 0.5)) - || - ((std::fabs(cell->center()[0]) >= 0.25) - && - (cell->center()[dim-1] > -0.5))) - cell->set_material_id (fluid_domain_id); + && + (cell->center()[dim-1] > 0.5)) + || + ((std::fabs(cell->center()[0]) >= 0.25) + && + (cell->center()[dim-1] > -0.5))) + cell->set_material_id (fluid_domain_id); else - cell->set_material_id (solid_domain_id); + cell->set_material_id (solid_domain_id); } - // The second part of this pair of functions - // determines which finite element to use on - // each cell. Above we have set the material - // indicator for each coarse mesh cell, and - // as mentioned in the introduction, this - // information is inherited from mother to - // child cell upon mesh refinement. - // - // In other words, whenever we have refined - // (or created) the mesh, we can rely on the - // material indicators to be a correct - // description of which part of the domain a - // cell is in. We then use this to set the - // active FE index of the cell to the - // corresponding element of the - // hp::FECollection member variable of this - // class: zero for fluid cells, one for solid - // cells. + // The second part of this pair of functions + // determines which finite element to use on + // each cell. Above we have set the material + // indicator for each coarse mesh cell, and + // as mentioned in the introduction, this + // information is inherited from mother to + // child cell upon mesh refinement. + // + // In other words, whenever we have refined + // (or created) the mesh, we can rely on the + // material indicators to be a correct + // description of which part of the domain a + // cell is in. We then use this to set the + // active FE index of the cell to the + // corresponding element of the + // hp::FECollection member variable of this + // class: zero for fluid cells, one for solid + // cells. 
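Since the zero/one ordering of the hp::FECollection matters for everything that follows, here is a condensed restatement of how the constructor builds it. This is a fragment rather than a complete program: it assumes the usual deal.II finite element headers, and dim, stokes_degree and elasticity_degree are the template parameter and constructor arguments of the surrounding class.

// Both elements have dim+1+dim vector components (velocities, pressure,
// displacements) so that they can coexist in one hp::DoFHandler; the unused
// fields are filled with FE_Nothing, which has no degrees of freedom.
FESystem<dim> stokes_fe     (FE_Q<dim>(stokes_degree+1), dim,   // velocities
                             FE_Q<dim>(stokes_degree),   1,     // pressure
                             FE_Nothing<dim>(),          dim);  // (no displacements)
FESystem<dim> elasticity_fe (FE_Nothing<dim>(),            dim,   // (no velocities)
                             FE_Nothing<dim>(),            1,     // (no pressure)
                             FE_Q<dim>(elasticity_degree), dim);  // displacements

hp::FECollection<dim> fe_collection;
fe_collection.push_back (stokes_fe);      // collection index 0: used on fluid cells
fe_collection.push_back (elasticity_fe);  // collection index 1: used on solid cells

set_active_fe_indices below then simply maps the material id to the collection index: fluid cells get index 0, solid cells get index 1.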
template void FluidStructureProblem::set_active_fe_indices () { for (typename hp::DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(); - cell != dof_handler.end(); ++cell) + cell = dof_handler.begin_active(); + cell != dof_handler.end(); ++cell) { - if (cell_is_in_fluid_domain(cell)) - cell->set_active_fe_index (0); - else if (cell_is_in_solid_domain(cell)) - cell->set_active_fe_index (1); - else - Assert (false, ExcNotImplemented()); + if (cell_is_in_fluid_domain(cell)) + cell->set_active_fe_index (0); + else if (cell_is_in_solid_domain(cell)) + cell->set_active_fe_index (1); + else + Assert (false, ExcNotImplemented()); } } - // @sect4{FluidStructureProblem::setup_dofs} - - // The next step is to setup the data - // structures for the linear system. To this - // end, we first have to set the active FE - // indices with the function immediately - // above, then distribute degrees of freedom, - // and then determine constraints on the - // linear system. The latter includes hanging - // node constraints as usual, but also the - // inhomogenous boundary values at the top - // fluid boundary, and zero boundary values - // along the perimeter of the solid - // subdomain. + // @sect4{FluidStructureProblem::setup_dofs} + + // The next step is to setup the data + // structures for the linear system. To this + // end, we first have to set the active FE + // indices with the function immediately + // above, then distribute degrees of freedom, + // and then determine constraints on the + // linear system. The latter includes hanging + // node constraints as usual, but also the + // inhomogenous boundary values at the top + // fluid boundary, and zero boundary values + // along the perimeter of the solid + // subdomain. template void FluidStructureProblem::setup_dofs () @@ -426,112 +426,112 @@ namespace Step46 { constraints.clear (); DoFTools::make_hanging_node_constraints (dof_handler, - constraints); + constraints); std::vector velocity_mask (dim+1+dim, false); for (unsigned int d=0; d(), - constraints, - velocity_mask); + 1, + StokesBoundaryValues(), + constraints, + velocity_mask); std::vector elasticity_mask (dim+1+dim, false); for (unsigned int d=dim+1; d(dim+1+dim), - constraints, - elasticity_mask); + 0, + ZeroFunction(dim+1+dim), + constraints, + elasticity_mask); } - // There are more constraints we have to - // handle, though: we have to make sure - // that the velocity is zero at the - // interface between fluid and solid. The - // following piece of code was already - // presented in the introduction: + // There are more constraints we have to + // handle, though: we have to make sure + // that the velocity is zero at the + // interface between fluid and solid. 
The + // following piece of code was already + // presented in the introduction: { std::vector local_face_dof_indices (stokes_fe.dofs_per_face); for (typename hp::DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(); - cell != dof_handler.end(); ++cell) - if (cell_is_in_fluid_domain (cell)) - for (unsigned int f=0; f::faces_per_cell; ++f) - if (!cell->at_boundary(f)) - { - bool face_is_on_interface = false; - - if ((cell->neighbor(f)->has_children() == false) - && - (cell_is_in_solid_domain (cell->neighbor(f)))) - face_is_on_interface = true; - else if (cell->neighbor(f)->has_children() == true) - { - for (unsigned int sf=0; sfface(f)->n_children(); ++sf) - if (cell_is_in_solid_domain (cell->neighbor_child_on_subface - (f, sf))) - { - face_is_on_interface = true; - break; - } - } - - if (face_is_on_interface) - { - cell->face(f)->get_dof_indices (local_face_dof_indices, 0); - for (unsigned int i=0; i::faces_per_cell; ++f) + if (!cell->at_boundary(f)) + { + bool face_is_on_interface = false; + + if ((cell->neighbor(f)->has_children() == false) + && + (cell_is_in_solid_domain (cell->neighbor(f)))) + face_is_on_interface = true; + else if (cell->neighbor(f)->has_children() == true) + { + for (unsigned int sf=0; sfface(f)->n_children(); ++sf) + if (cell_is_in_solid_domain (cell->neighbor_child_on_subface + (f, sf))) + { + face_is_on_interface = true; + break; + } + } + + if (face_is_on_interface) + { + cell->face(f)->get_dof_indices (local_face_dof_indices, 0); + for (unsigned int i=0; i cell_coupling (fe_collection.n_components(), - fe_collection.n_components()); + fe_collection.n_components()); Table<2,DoFTools::Coupling> face_coupling (fe_collection.n_components(), - fe_collection.n_components()); + fe_collection.n_components()); for (unsigned int c=0; c=dim+1) && (d>=dim+1))) - cell_coupling[c][d] = DoFTools::always; - - if ((c>=dim+1) && (d=dim+1) && (d>=dim+1))) + cell_coupling[c][d] = DoFTools::always; + + if ((c>=dim+1) && (dFluidStructureProblem::assemble_system} + // @sect4{FluidStructureProblem::assemble_system} - // Following is the central function of this - // program: the one that assembles the linear - // system. It has a long section of setting - // up auxiliary functions at the beginning: - // from creating the quadrature formulas and - // setting up the FEValues, FEFaceValues and - // FESubfaceValues objects necessary to - // integrate the cell terms as well as the - // interface terms for the case where cells - // along the interface come together at same - // size or with differing levels of - // refinement... + // Following is the central function of this + // program: the one that assembles the linear + // system. It has a long section of setting + // up auxiliary functions at the beginning: + // from creating the quadrature formulas and + // setting up the FEValues, FEFaceValues and + // FESubfaceValues objects necessary to + // integrate the cell terms as well as the + // interface terms for the case where cells + // along the interface come together at same + // size or with differing levels of + // refinement... 
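Before diving into this long function, it may help to see the case distinction it makes at the fluid-solid interface written out in one place. The sketch below is a standalone summary with made-up helper and enum names and no deal.II dependency; in the real code the booleans come from cell->at_boundary(f), cell->neighbor(f) and related calls, and the fluid check is done per subface when the neighbor is refined.

#include <iostream>

// Which of the three interface configurations are we in, seen from a solid
// cell looking across face f? The pairings in parentheses give the objects
// used in assemble_system: (object on the solid cell / object on the fluid
// neighbor).
//   same_level       : neighbor at same level, no children (FEFaceValues / FEFaceValues)
//   neighbor_finer   : neighbor has children               (FESubfaceValues / FEFaceValues)
//   neighbor_coarser : neighbor is coarser                 (FEFaceValues / FESubfaceValues)
enum InterfaceCase { no_interface, same_level, neighbor_finer, neighbor_coarser };

InterfaceCase classify_interface (const bool at_boundary,
                                  const bool neighbor_is_fluid,
                                  const bool neighbor_same_level_no_children,
                                  const bool neighbor_has_children,
                                  const bool neighbor_is_coarser)
{
  if (at_boundary || !neighbor_is_fluid)
    return no_interface;
  if (neighbor_same_level_no_children)
    return same_level;
  if (neighbor_has_children)
    return neighbor_finer;
  if (neighbor_is_coarser)
    return neighbor_coarser;
  return no_interface;
}

int main ()
{
  // a solid cell whose fluid neighbor is once more refined falls into the
  // second case and needs an FESubfaceValues object on the solid side:
  std::cout << (classify_interface(false, true, false, true, false)
                == neighbor_finer)
            << std::endl;
}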
template void FluidStructureProblem::assemble_system () { @@ -572,40 +572,40 @@ namespace Step46 q_collection.push_back (elasticity_quadrature); hp::FEValues hp_fe_values (fe_collection, q_collection, - update_values | - update_quadrature_points | - update_JxW_values | - update_gradients); + update_values | + update_quadrature_points | + update_JxW_values | + update_gradients); const QGauss common_face_quadrature(std::max (stokes_degree+2, - elasticity_degree+2)); + elasticity_degree+2)); FEFaceValues stokes_fe_face_values (stokes_fe, - common_face_quadrature, - update_JxW_values | - update_normal_vectors | - update_gradients); + common_face_quadrature, + update_JxW_values | + update_normal_vectors | + update_gradients); FEFaceValues elasticity_fe_face_values (elasticity_fe, - common_face_quadrature, - update_values); + common_face_quadrature, + update_values); FESubfaceValues stokes_fe_subface_values (stokes_fe, - common_face_quadrature, - update_JxW_values | - update_normal_vectors | - update_gradients); + common_face_quadrature, + update_JxW_values | + update_normal_vectors | + update_gradients); FESubfaceValues elasticity_fe_subface_values (elasticity_fe, - common_face_quadrature, - update_values); + common_face_quadrature, + update_values); - // ...to objects that are needed to - // describe the local contributions to the - // global linear system... + // ...to objects that are needed to + // describe the local contributions to the + // global linear system... const unsigned int stokes_dofs_per_cell = stokes_fe.dofs_per_cell; const unsigned int elasticity_dofs_per_cell = elasticity_fe.dofs_per_cell; FullMatrix local_matrix; FullMatrix local_interface_matrix (elasticity_dofs_per_cell, - stokes_dofs_per_cell); + stokes_dofs_per_cell); Vector local_rhs; std::vector local_dof_indices; @@ -613,11 +613,11 @@ namespace Step46 const RightHandSide right_hand_side; - // ...to variables that allow us to extract - // certain components of the shape - // functions and cache their values rather - // than having to recompute them at every - // quadrature point: + // ...to variables that allow us to extract + // certain components of the shape + // functions and cache their values rather + // than having to recompute them at every + // quadrature point: const FEValuesExtractors::Vector velocities (0); const FEValuesExtractors::Scalar pressure (dim); const FEValuesExtractors::Vector displacements (dim+1); @@ -630,328 +630,328 @@ namespace Step46 std::vector elasticity_phi_div (elasticity_dofs_per_cell); std::vector > elasticity_phi (elasticity_dofs_per_cell); - // Then comes the main loop over all cells - // and, as in step-27, the initialization - // of the hp::FEValues object for the - // current cell and the extraction of a - // FEValues object that is appropriate for - // the current cell: + // Then comes the main loop over all cells + // and, as in step-27, the initialization + // of the hp::FEValues object for the + // current cell and the extraction of a + // FEValues object that is appropriate for + // the current cell: typename hp::DoFHandler::active_cell_iterator cell = dof_handler.begin_active(), endc = dof_handler.end(); for (; cell!=endc; ++cell) { - hp_fe_values.reinit (cell); - - const FEValues &fe_values = hp_fe_values.get_present_fe_values(); - - local_matrix.reinit (cell->get_fe().dofs_per_cell, - cell->get_fe().dofs_per_cell); - local_rhs.reinit (cell->get_fe().dofs_per_cell); - - // With all of this done, we continue - // to assemble the cell terms for cells - // that are part of 
the Stokes and - // elastic regions. While we could in - // principle do this in one formula, in - // effect implementing the one bilinear - // form stated in the introduction, we - // realize that our finite element - // spaces are chosen in such a way that - // on each cell, one set of variables - // (either velocities and pressure, or - // displacements) are always zero, and - // consequently a more efficient way of - // computing local integrals is to do - // only what's necessary based on an - // if clause that tests - // which part of the domain we are in. - // - // The actual computation of the local - // matrix is the same as in step-22 as - // well as that given in the @ref - // vector_valued documentation module - // for the elasticity equations: - if (cell_is_in_fluid_domain (cell)) - { - const unsigned int dofs_per_cell = cell->get_fe().dofs_per_cell; - Assert (dofs_per_cell == stokes_dofs_per_cell, - ExcInternalError()); - - for (unsigned int q=0; qget_fe().dofs_per_cell; - Assert (dofs_per_cell == elasticity_dofs_per_cell, - ExcInternalError()); - - for (unsigned int q=0; qlocal_rhs variable, - // though we still need to pass it - // along since the elimination of - // nonzero boundary values requires the - // modification of local and - // consequently also global right hand - // side values: - local_dof_indices.resize (cell->get_fe().dofs_per_cell); - cell->get_dof_indices (local_dof_indices); - constraints.distribute_local_to_global (local_matrix, local_rhs, - local_dof_indices, - system_matrix, system_rhs); - - // The more interesting part of this - // function is where we see about face - // terms along the interface between - // the two subdomains. To this end, we - // first have to make sure that we only - // assemble them once even though a - // loop over all faces of all cells - // would encounter each part of the - // interface twice. We arbitrarily make - // the decision that we will only - // evaluate interface terms if the - // current cell is part of the solid - // subdomain and if, consequently, a - // face is not at the boundary and the - // potential neighbor behind it is part - // of the fluid domain. Let's start - // with these conditions: - if (cell_is_in_solid_domain (cell)) - for (unsigned int f=0; f::faces_per_cell; ++f) - if (cell->at_boundary(f) == false) - { - // At this point we know that - // the current cell is a - // candidate for integration - // and that a neighbor behind - // face f - // exists. There are now three - // possibilities: - // - // - The neighbor is at the - // same refinement level and - // has no children. - // - The neighbor has children. - // - The neighbor is coarser. - // - // In all three cases, we are - // only interested in it if it - // is part of the fluid - // subdomain. So let us start - // with the first and simplest - // case: if the neighbor is at - // the same level, has no - // children, and is a fluid - // cell, then the two cells - // share a boundary that is - // part of the interface along - // which we want to integrate - // interface terms. All we have - // to do is initialize two - // FEFaceValues object with the - // current face and the face of - // the neighboring cell (note - // how we find out which face - // of the neighboring cell - // borders on the current cell) - // and pass things off to the - // function that evaluates the - // interface terms (the third - // through fifth arguments to - // this function provide it - // with scratch arrays). 
The - // result is then again copied - // into the global matrix, - // using a function that knows - // that the DoF indices of rows - // and columns of the local - // matrix result from different - // cells: - if ((cell->neighbor(f)->level() == cell->level()) - && - (cell->neighbor(f)->has_children() == false) - && - cell_is_in_fluid_domain (cell->neighbor(f))) - { - elasticity_fe_face_values.reinit (cell, f); - stokes_fe_face_values.reinit (cell->neighbor(f), - cell->neighbor_of_neighbor(f)); - - assemble_interface_term (elasticity_fe_face_values, stokes_fe_face_values, - elasticity_phi, stokes_phi_grads_u, stokes_phi_p, - local_interface_matrix); - - cell->neighbor(f)->get_dof_indices (neighbor_dof_indices); - constraints.distribute_local_to_global(local_interface_matrix, - local_dof_indices, - neighbor_dof_indices, - system_matrix); - } - - // The second case is if the - // neighbor has further - // children. In that case, we - // have to loop over all the - // children of the neighbor to - // see if they are part of the - // fluid subdomain. If they - // are, then we integrate over - // the common interface, which - // is a face for the neighbor - // and a subface of the current - // cell, requiring us to use an - // FEFaceValues for the - // neighbor and an - // FESubfaceValues for the - // current cell: - else if ((cell->neighbor(f)->level() == cell->level()) - && - (cell->neighbor(f)->has_children() == true)) - { - for (unsigned int subface=0; - subfaceface(f)->n_children(); - ++subface) - if (cell_is_in_fluid_domain (cell->neighbor_child_on_subface - (f, subface))) - { - elasticity_fe_subface_values.reinit (cell, - f, - subface); - stokes_fe_face_values.reinit (cell->neighbor_child_on_subface (f, subface), - cell->neighbor_of_neighbor(f)); - - assemble_interface_term (elasticity_fe_subface_values, - stokes_fe_face_values, - elasticity_phi, - stokes_phi_grads_u, stokes_phi_p, - local_interface_matrix); - - cell->neighbor_child_on_subface (f, subface) - ->get_dof_indices (neighbor_dof_indices); - constraints.distribute_local_to_global(local_interface_matrix, - local_dof_indices, - neighbor_dof_indices, - system_matrix); - } - } - - // The last option is that the - // neighbor is coarser. In that - // case we have to use an - // FESubfaceValues object for - // the neighbor and a - // FEFaceValues for the current - // cell; the rest is the same - // as before: - else if (cell->neighbor_is_coarser(f) - && - cell_is_in_fluid_domain(cell->neighbor(f))) - { - elasticity_fe_face_values.reinit (cell, f); - stokes_fe_subface_values.reinit (cell->neighbor(f), - cell->neighbor_of_coarser_neighbor(f).first, - cell->neighbor_of_coarser_neighbor(f).second); - - assemble_interface_term (elasticity_fe_face_values, - stokes_fe_subface_values, - elasticity_phi, - stokes_phi_grads_u, stokes_phi_p, - local_interface_matrix); - - cell->neighbor(f)->get_dof_indices (neighbor_dof_indices); - constraints.distribute_local_to_global(local_interface_matrix, - local_dof_indices, - neighbor_dof_indices, - system_matrix); - - } - } + hp_fe_values.reinit (cell); + + const FEValues &fe_values = hp_fe_values.get_present_fe_values(); + + local_matrix.reinit (cell->get_fe().dofs_per_cell, + cell->get_fe().dofs_per_cell); + local_rhs.reinit (cell->get_fe().dofs_per_cell); + + // With all of this done, we continue + // to assemble the cell terms for cells + // that are part of the Stokes and + // elastic regions. 
While we could in + // principle do this in one formula, in + // effect implementing the one bilinear + // form stated in the introduction, we + // realize that our finite element + // spaces are chosen in such a way that + // on each cell, one set of variables + // (either velocities and pressure, or + // displacements) are always zero, and + // consequently a more efficient way of + // computing local integrals is to do + // only what's necessary based on an + // if clause that tests + // which part of the domain we are in. + // + // The actual computation of the local + // matrix is the same as in step-22 as + // well as that given in the @ref + // vector_valued documentation module + // for the elasticity equations: + if (cell_is_in_fluid_domain (cell)) + { + const unsigned int dofs_per_cell = cell->get_fe().dofs_per_cell; + Assert (dofs_per_cell == stokes_dofs_per_cell, + ExcInternalError()); + + for (unsigned int q=0; qget_fe().dofs_per_cell; + Assert (dofs_per_cell == elasticity_dofs_per_cell, + ExcInternalError()); + + for (unsigned int q=0; qlocal_rhs variable, + // though we still need to pass it + // along since the elimination of + // nonzero boundary values requires the + // modification of local and + // consequently also global right hand + // side values: + local_dof_indices.resize (cell->get_fe().dofs_per_cell); + cell->get_dof_indices (local_dof_indices); + constraints.distribute_local_to_global (local_matrix, local_rhs, + local_dof_indices, + system_matrix, system_rhs); + + // The more interesting part of this + // function is where we see about face + // terms along the interface between + // the two subdomains. To this end, we + // first have to make sure that we only + // assemble them once even though a + // loop over all faces of all cells + // would encounter each part of the + // interface twice. We arbitrarily make + // the decision that we will only + // evaluate interface terms if the + // current cell is part of the solid + // subdomain and if, consequently, a + // face is not at the boundary and the + // potential neighbor behind it is part + // of the fluid domain. Let's start + // with these conditions: + if (cell_is_in_solid_domain (cell)) + for (unsigned int f=0; f::faces_per_cell; ++f) + if (cell->at_boundary(f) == false) + { + // At this point we know that + // the current cell is a + // candidate for integration + // and that a neighbor behind + // face f + // exists. There are now three + // possibilities: + // + // - The neighbor is at the + // same refinement level and + // has no children. + // - The neighbor has children. + // - The neighbor is coarser. + // + // In all three cases, we are + // only interested in it if it + // is part of the fluid + // subdomain. So let us start + // with the first and simplest + // case: if the neighbor is at + // the same level, has no + // children, and is a fluid + // cell, then the two cells + // share a boundary that is + // part of the interface along + // which we want to integrate + // interface terms. All we have + // to do is initialize two + // FEFaceValues object with the + // current face and the face of + // the neighboring cell (note + // how we find out which face + // of the neighboring cell + // borders on the current cell) + // and pass things off to the + // function that evaluates the + // interface terms (the third + // through fifth arguments to + // this function provide it + // with scratch arrays). 
The + // result is then again copied + // into the global matrix, + // using a function that knows + // that the DoF indices of rows + // and columns of the local + // matrix result from different + // cells: + if ((cell->neighbor(f)->level() == cell->level()) + && + (cell->neighbor(f)->has_children() == false) + && + cell_is_in_fluid_domain (cell->neighbor(f))) + { + elasticity_fe_face_values.reinit (cell, f); + stokes_fe_face_values.reinit (cell->neighbor(f), + cell->neighbor_of_neighbor(f)); + + assemble_interface_term (elasticity_fe_face_values, stokes_fe_face_values, + elasticity_phi, stokes_phi_grads_u, stokes_phi_p, + local_interface_matrix); + + cell->neighbor(f)->get_dof_indices (neighbor_dof_indices); + constraints.distribute_local_to_global(local_interface_matrix, + local_dof_indices, + neighbor_dof_indices, + system_matrix); + } + + // The second case is if the + // neighbor has further + // children. In that case, we + // have to loop over all the + // children of the neighbor to + // see if they are part of the + // fluid subdomain. If they + // are, then we integrate over + // the common interface, which + // is a face for the neighbor + // and a subface of the current + // cell, requiring us to use an + // FEFaceValues for the + // neighbor and an + // FESubfaceValues for the + // current cell: + else if ((cell->neighbor(f)->level() == cell->level()) + && + (cell->neighbor(f)->has_children() == true)) + { + for (unsigned int subface=0; + subfaceface(f)->n_children(); + ++subface) + if (cell_is_in_fluid_domain (cell->neighbor_child_on_subface + (f, subface))) + { + elasticity_fe_subface_values.reinit (cell, + f, + subface); + stokes_fe_face_values.reinit (cell->neighbor_child_on_subface (f, subface), + cell->neighbor_of_neighbor(f)); + + assemble_interface_term (elasticity_fe_subface_values, + stokes_fe_face_values, + elasticity_phi, + stokes_phi_grads_u, stokes_phi_p, + local_interface_matrix); + + cell->neighbor_child_on_subface (f, subface) + ->get_dof_indices (neighbor_dof_indices); + constraints.distribute_local_to_global(local_interface_matrix, + local_dof_indices, + neighbor_dof_indices, + system_matrix); + } + } + + // The last option is that the + // neighbor is coarser. In that + // case we have to use an + // FESubfaceValues object for + // the neighbor and a + // FEFaceValues for the current + // cell; the rest is the same + // as before: + else if (cell->neighbor_is_coarser(f) + && + cell_is_in_fluid_domain(cell->neighbor(f))) + { + elasticity_fe_face_values.reinit (cell, f); + stokes_fe_subface_values.reinit (cell->neighbor(f), + cell->neighbor_of_coarser_neighbor(f).first, + cell->neighbor_of_coarser_neighbor(f).second); + + assemble_interface_term (elasticity_fe_face_values, + stokes_fe_subface_values, + elasticity_phi, + stokes_phi_grads_u, stokes_phi_p, + local_interface_matrix); + + cell->neighbor(f)->get_dof_indices (neighbor_dof_indices); + constraints.distribute_local_to_global(local_interface_matrix, + local_dof_indices, + neighbor_dof_indices, + system_matrix); + + } + } } } - // In the function that assembles the global - // system, we passed computing interface - // terms to a separate function we discuss - // here. 
The key is that even though we can't - // predict the combination of FEFaceValues - // and FESubfaceValues objects, they are both - // derived from the FEFaceValuesBase class - // and consequently we don't have to care: - // the function is simply called with two - // such objects denoting the values of the - // shape functions on the quadrature points - // of the two sides of the face. We then do - // what we always do: we fill the scratch - // arrays with the values of shape functions - // and their derivatives, and then loop over - // all entries of the matrix to compute the - // local integrals. The details of the - // bilinear form we evaluate here are given - // in the introduction. + // In the function that assembles the global + // system, we passed computing interface + // terms to a separate function we discuss + // here. The key is that even though we can't + // predict the combination of FEFaceValues + // and FESubfaceValues objects, they are both + // derived from the FEFaceValuesBase class + // and consequently we don't have to care: + // the function is simply called with two + // such objects denoting the values of the + // shape functions on the quadrature points + // of the two sides of the face. We then do + // what we always do: we fill the scratch + // arrays with the values of shape functions + // and their derivatives, and then loop over + // all entries of the matrix to compute the + // local integrals. The details of the + // bilinear form we evaluate here are given + // in the introduction. template void FluidStructureProblem:: assemble_interface_term (const FEFaceValuesBase &elasticity_fe_face_values, - const FEFaceValuesBase &stokes_fe_face_values, - std::vector > &elasticity_phi, - std::vector > &stokes_phi_grads_u, - std::vector &stokes_phi_p, - FullMatrix &local_interface_matrix) const + const FEFaceValuesBase &stokes_fe_face_values, + std::vector > &elasticity_phi, + std::vector > &stokes_phi_grads_u, + std::vector &stokes_phi_p, + FullMatrix &local_interface_matrix) const { Assert (stokes_fe_face_values.n_quadrature_points == - elasticity_fe_face_values.n_quadrature_points, - ExcInternalError()); + elasticity_fe_face_values.n_quadrature_points, + ExcInternalError()); const unsigned int n_face_quadrature_points = elasticity_fe_face_values.n_quadrature_points; @@ -962,37 +962,37 @@ namespace Step46 local_interface_matrix = 0; for (unsigned int q=0; q normal_vector = stokes_fe_face_values.normal_vector(q); - - for (unsigned int k=0; k normal_vector = stokes_fe_face_values.normal_vector(q); + + for (unsigned int k=0; kFluidStructureProblem::solve} + // @sect4{FluidStructureProblem::solve} - // As discussed in the introduction, we use a - // rather trivial solver here: we just pass - // the linear system off to the - // SparseDirectUMFPACK direct solver (see, - // for example, step-29). The only thing we - // have to do after solving is ensure that - // hanging node and boundary value - // constraints are correct. + // As discussed in the introduction, we use a + // rather trivial solver here: we just pass + // the linear system off to the + // SparseDirectUMFPACK direct solver (see, + // for example, step-29). The only thing we + // have to do after solving is ensure that + // hanging node and boundary value + // constraints are correct. 
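The call pattern behind this description is the standard deal.II direct-solver idiom sketched below. This is a fragment, not the verbatim body of solve() (which needs no whitespace changes and is therefore not reproduced in this patch); it assumes system_matrix, solution, system_rhs and constraints are the class members introduced earlier.

SparseDirectUMFPACK direct_solver;
direct_solver.initialize (system_matrix);    // compute the sparse LU factorization
direct_solver.vmult (solution, system_rhs);  // apply the inverse to the right hand side
constraints.distribute (solution);           // re-impose hanging node and boundary constraints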
template void FluidStructureProblem::solve () @@ -1006,17 +1006,17 @@ namespace Step46 - // @sect4{FluidStructureProblem::output_results} + // @sect4{FluidStructureProblem::output_results} - // Generating graphical output is rather - // trivial here: all we have to do is - // identify which components of the solution - // vector belong to scalars and/or vectors - // (see, for example, step-22 for a previous - // example), and then pass it all on to the - // DataOut class (with the second template - // argument equal to hp::DoFHandler instead - // of the usual default DoFHandler): + // Generating graphical output is rather + // trivial here: all we have to do is + // identify which components of the solution + // vector belong to scalars and/or vectors + // (see, for example, step-22 for a previous + // example), and then pass it all on to the + // DataOut class (with the second template + // argument equal to hp::DoFHandler instead + // of the usual default DoFHandler): template void FluidStructureProblem:: @@ -1034,42 +1034,42 @@ namespace Step46 .push_back (DataComponentInterpretation::component_is_scalar); for (unsigned int d=0; d > data_out; data_out.attach_dof_handler (dof_handler); data_out.add_data_vector (solution, solution_names, - DataOut >::type_dof_data, - data_component_interpretation); + DataOut >::type_dof_data, + data_component_interpretation); data_out.build_patches (); std::ostringstream filename; filename << "solution-" - << Utilities::int_to_string (refinement_cycle, 2) - << ".vtk"; + << Utilities::int_to_string (refinement_cycle, 2) + << ".vtk"; std::ofstream output (filename.str().c_str()); data_out.write_vtk (output); } - // @sect4{FluidStructureProblem::refine_mesh} - - // The next step is to refine the mesh. As - // was discussed in the introduction, this is - // a bit tricky primarily because the fluid - // and the solid subdomains use variables - // that have different physical dimensions - // and for which the absolute magnitude of - // error estimates is consequently not - // directly comparable. We will therefore - // have to scale them. At the top of the - // function, we therefore first compute error - // estimates for the different variables - // separately (using the velocities but not - // the pressure for the fluid domain, and the - // displacements in the solid domain): + // @sect4{FluidStructureProblem::refine_mesh} + + // The next step is to refine the mesh. As + // was discussed in the introduction, this is + // a bit tricky primarily because the fluid + // and the solid subdomains use variables + // that have different physical dimensions + // and for which the absolute magnitude of + // error estimates is consequently not + // directly comparable. We will therefore + // have to scale them. 
At the top of the + // function, we therefore first compute error + // estimates for the different variables + // separately (using the velocities but not + // the pressure for the fluid domain, and the + // displacements in the solid domain): template void FluidStructureProblem::refine_mesh () @@ -1090,29 +1090,29 @@ namespace Step46 for (unsigned int d=0; d::estimate (dof_handler, - face_q_collection, - typename FunctionMap::type(), - solution, - stokes_estimated_error_per_cell, - stokes_component_mask); + face_q_collection, + typename FunctionMap::type(), + solution, + stokes_estimated_error_per_cell, + stokes_component_mask); std::vector elasticity_component_mask (dim+1+dim, false); for (unsigned int d=0; d::estimate (dof_handler, - face_q_collection, - typename FunctionMap::type(), - solution, - elasticity_estimated_error_per_cell, - elasticity_component_mask); - - // We then normalize error estimates by - // dividing by their norm and scale the - // fluid error indicators by a factor of 4 - // as discussed in the introduction. The - // results are then added together into a - // vector that contains error indicators - // for all cells: + face_q_collection, + typename FunctionMap::type(), + solution, + elasticity_estimated_error_per_cell, + elasticity_component_mask); + + // We then normalize error estimates by + // dividing by their norm and scale the + // fluid error indicators by a factor of 4 + // as discussed in the introduction. The + // results are then added together into a + // vector that contains error indicators + // for all cells: stokes_estimated_error_per_cell *= 4. / stokes_estimated_error_per_cell.l2_norm(); elasticity_estimated_error_per_cell @@ -1124,129 +1124,129 @@ namespace Step46 estimated_error_per_cell += stokes_estimated_error_per_cell; estimated_error_per_cell += elasticity_estimated_error_per_cell; - // The second to last part of the function, - // before actually refining the mesh, - // involves a heuristic that we have - // already mentioned in the introduction: - // because the solution is discontinuous, - // the KellyErrorEstimator class gets all - // confused about cells that sit at the - // boundary between subdomains: it believes - // that the error is large there because - // the jump in the gradient is large, even - // though this is entirely expected and a - // feature that is in fact present in the - // exact solution as well and therefore not - // indicative of any numerical error. - // - // Consequently, we set the error - // indicators to zero for all cells at the - // interface; the conditions determining - // which cells this affects are slightly - // awkward because we have to account for - // the possibility of adaptively refined - // meshes, meaning that the neighboring - // cell can be coarser than the current - // one, or could in fact be refined some - // more. The structure of these nested - // conditions is much the same as we - // encountered when assembling interface - // terms in assemble_system. 
+ // The second to last part of the function, + // before actually refining the mesh, + // involves a heuristic that we have + // already mentioned in the introduction: + // because the solution is discontinuous, + // the KellyErrorEstimator class gets all + // confused about cells that sit at the + // boundary between subdomains: it believes + // that the error is large there because + // the jump in the gradient is large, even + // though this is entirely expected and a + // feature that is in fact present in the + // exact solution as well and therefore not + // indicative of any numerical error. + // + // Consequently, we set the error + // indicators to zero for all cells at the + // interface; the conditions determining + // which cells this affects are slightly + // awkward because we have to account for + // the possibility of adaptively refined + // meshes, meaning that the neighboring + // cell can be coarser than the current + // one, or could in fact be refined some + // more. The structure of these nested + // conditions is much the same as we + // encountered when assembling interface + // terms in assemble_system. { unsigned int cell_index = 0; for (typename hp::DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(); - cell != dof_handler.end(); ++cell, ++cell_index) - for (unsigned int f=0; f::faces_per_cell; ++f) - if (cell_is_in_solid_domain (cell)) - { - if ((cell->at_boundary(f) == false) - && - (((cell->neighbor(f)->level() == cell->level()) - && - (cell->neighbor(f)->has_children() == false) - && - cell_is_in_fluid_domain (cell->neighbor(f))) - || - ((cell->neighbor(f)->level() == cell->level()) - && - (cell->neighbor(f)->has_children() == true) - && - (cell_is_in_fluid_domain (cell->neighbor_child_on_subface - (f, 0)))) - || - (cell->neighbor_is_coarser(f) - && - cell_is_in_fluid_domain(cell->neighbor(f))) - )) - estimated_error_per_cell(cell_index) = 0; - } - else - { - if ((cell->at_boundary(f) == false) - && - (((cell->neighbor(f)->level() == cell->level()) - && - (cell->neighbor(f)->has_children() == false) - && - cell_is_in_solid_domain (cell->neighbor(f))) - || - ((cell->neighbor(f)->level() == cell->level()) - && - (cell->neighbor(f)->has_children() == true) - && - (cell_is_in_solid_domain (cell->neighbor_child_on_subface - (f, 0)))) - || - (cell->neighbor_is_coarser(f) - && - cell_is_in_solid_domain(cell->neighbor(f))) - )) - estimated_error_per_cell(cell_index) = 0; - } + cell = dof_handler.begin_active(); + cell != dof_handler.end(); ++cell, ++cell_index) + for (unsigned int f=0; f::faces_per_cell; ++f) + if (cell_is_in_solid_domain (cell)) + { + if ((cell->at_boundary(f) == false) + && + (((cell->neighbor(f)->level() == cell->level()) + && + (cell->neighbor(f)->has_children() == false) + && + cell_is_in_fluid_domain (cell->neighbor(f))) + || + ((cell->neighbor(f)->level() == cell->level()) + && + (cell->neighbor(f)->has_children() == true) + && + (cell_is_in_fluid_domain (cell->neighbor_child_on_subface + (f, 0)))) + || + (cell->neighbor_is_coarser(f) + && + cell_is_in_fluid_domain(cell->neighbor(f))) + )) + estimated_error_per_cell(cell_index) = 0; + } + else + { + if ((cell->at_boundary(f) == false) + && + (((cell->neighbor(f)->level() == cell->level()) + && + (cell->neighbor(f)->has_children() == false) + && + cell_is_in_solid_domain (cell->neighbor(f))) + || + ((cell->neighbor(f)->level() == cell->level()) + && + (cell->neighbor(f)->has_children() == true) + && + (cell_is_in_solid_domain (cell->neighbor_child_on_subface + (f, 0)))) + || + 
(cell->neighbor_is_coarser(f) + && + cell_is_in_solid_domain(cell->neighbor(f))) + )) + estimated_error_per_cell(cell_index) = 0; + } } GridRefinement::refine_and_coarsen_fixed_number (triangulation, - estimated_error_per_cell, - 0.3, 0.0); + estimated_error_per_cell, + 0.3, 0.0); triangulation.execute_coarsening_and_refinement (); } - // @sect4{FluidStructureProblem::run} + // @sect4{FluidStructureProblem::run} - // This is, as usual, the function that - // controls the overall flow of operation. If - // you've read through tutorial programs - // step-1 through step-6, for example, then - // you are already quite familiar with the - // following structure: + // This is, as usual, the function that + // controls the overall flow of operation. If + // you've read through tutorial programs + // step-1 through step-6, for example, then + // you are already quite familiar with the + // following structure: template void FluidStructureProblem::run () { make_grid (); for (unsigned int refinement_cycle = 0; refinement_cycle<10-2*dim; - ++refinement_cycle) + ++refinement_cycle) { - std::cout << "Refinement cycle " << refinement_cycle << std::endl; + std::cout << "Refinement cycle " << refinement_cycle << std::endl; - if (refinement_cycle > 0) - refine_mesh (); + if (refinement_cycle > 0) + refine_mesh (); - setup_dofs (); + setup_dofs (); - std::cout << " Assembling..." << std::endl; - assemble_system (); + std::cout << " Assembling..." << std::endl; + assemble_system (); - std::cout << " Solving..." << std::endl; - solve (); + std::cout << " Solving..." << std::endl; + solve (); - std::cout << " Writing output..." << std::endl; - output_results (refinement_cycle); + std::cout << " Writing output..." << std::endl; + output_results (refinement_cycle); - std::cout << std::endl; + std::cout << std::endl; } } } @@ -1255,9 +1255,9 @@ namespace Step46 // @sect4{The main() function} - // This, final, function contains pretty much - // exactly what most of the other tutorial - // programs have: + // This, final, function contains pretty much + // exactly what most of the other tutorial + // programs have: int main () { try diff --git a/deal.II/examples/step-47/step-47.cc b/deal.II/examples/step-47/step-47.cc index 86f44cd23e..d2974afb35 100644 --- a/deal.II/examples/step-47/step-47.cc +++ b/deal.II/examples/step-47/step-47.cc @@ -78,9 +78,9 @@ namespace Step47 bool interface_intersects_cell (const typename Triangulation::cell_iterator &cell) const; std::pair > compute_quadrature(const Quadrature &plain_quadrature, const typename hp::DoFHandler::active_cell_iterator &cell, const std::vector &level_set_values); void append_quadrature(const Quadrature &plain_quadrature, - const std::vector > &v , - std::vector > &xfem_points, - std::vector &xfem_weights); + const std::vector > &v , + std::vector > &xfem_points, + std::vector &xfem_weights); void setup_system (); void assemble_system (); @@ -113,18 +113,18 @@ namespace Step47 Coefficient () : Function() {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; virtual void value_list (const std::vector > &points, - std::vector &values, - const unsigned int component = 0) const; + std::vector &values, + const unsigned int component = 0) const; }; template double Coefficient::value (const Point &p, - const unsigned int) const + const unsigned int) const { if (p.square() < 0.5*0.5) return 20; @@ -136,23 +136,23 @@ namespace Step47 template void Coefficient::value_list (const std::vector > 
&points, - std::vector &values, - const unsigned int component) const + std::vector &values, + const unsigned int component) const { const unsigned int n_points = points.size(); Assert (values.size() == n_points, - ExcDimensionMismatch (values.size(), n_points)); + ExcDimensionMismatch (values.size(), n_points)); Assert (component == 0, - ExcIndexRange (component, 0, 1)); + ExcIndexRange (component, 0, 1)); for (unsigned int i=0; i LaplaceProblem::LaplaceProblem () - : - dof_handler (triangulation) + : + dof_handler (triangulation) { fe_collection.push_back (FESystem (FE_Q(1), 1, - FE_Nothing(), 1)); + FE_Nothing(), 1)); fe_collection.push_back (FESystem (FE_Q(1), 1, - FE_Q(1), 1)); + FE_Q(1), 1)); } @@ -217,11 +217,11 @@ namespace Step47 { for (unsigned int v=0; v::vertices_per_cell-1; ++v) if (level_set(cell->vertex(v)) * level_set(cell->vertex(v+1)) < 0) - return true; + return true; - // we get here only if all vertices - // have the same sign, which means - // that the cell is not intersected + // we get here only if all vertices + // have the same sign, which means + // that the cell is not intersected return false; } @@ -231,12 +231,12 @@ namespace Step47 void LaplaceProblem::setup_system () { for (typename hp::DoFHandler::cell_iterator cell - = dof_handler.begin_active(); - cell != dof_handler.end(); ++cell) + = dof_handler.begin_active(); + cell != dof_handler.end(); ++cell) if (interface_intersects_cell(cell) == false) - cell->set_active_fe_index(0); + cell->set_active_fe_index(0); else - cell->set_active_fe_index(1); + cell->set_active_fe_index(1); dof_handler.distribute_dofs (fe_collection); @@ -246,8 +246,8 @@ namespace Step47 constraints.clear (); //TODO: fix this, it currently crashes - // DoFTools::make_hanging_node_constraints (dof_handler, - // constraints); + // DoFTools::make_hanging_node_constraints (dof_handler, + // constraints); //TODO: component 1 must satisfy zero boundary conditions constraints.close(); @@ -271,11 +271,11 @@ namespace Step47 FEValues plain_fe_values (fe_collection[0], quadrature_formula, - update_values | update_gradients | - update_quadrature_points | update_JxW_values); + update_values | update_gradients | + update_quadrature_points | update_JxW_values); FEValues enriched_fe_values (fe_collection[1], quadrature_formula, - update_values | update_gradients | - update_quadrature_points | update_JxW_values); + update_values | update_gradients | + update_quadrature_points | update_JxW_values); const unsigned int n_q_points = quadrature_formula.size(); @@ -293,148 +293,148 @@ namespace Step47 for (; cell!=endc; ++cell) { - const unsigned int dofs_per_cell = cell->get_fe().dofs_per_cell; - cell_matrix.reinit (dofs_per_cell, dofs_per_cell); - cell_rhs.reinit (dofs_per_cell); - - cell_matrix = 0; - cell_rhs = 0; - - if (cell->active_fe_index() == 0) - { - plain_fe_values.reinit (cell); - - coefficient_values.resize (plain_fe_values.n_quadrature_points); - coefficient.value_list (plain_fe_values.get_quadrature_points(), - coefficient_values); - - for (unsigned int q_point=0; q_pointget_fe().dofs_per_cell; + cell_matrix.reinit (dofs_per_cell, dofs_per_cell); + cell_rhs.reinit (dofs_per_cell); + + cell_matrix = 0; + cell_rhs = 0; + + if (cell->active_fe_index() == 0) + { + plain_fe_values.reinit (cell); + + coefficient_values.resize (plain_fe_values.n_quadrature_points); + coefficient.value_list (plain_fe_values.get_quadrature_points(), + coefficient_values); + + for (unsigned int q_point=0; q_pointactive_fe_index() == 1, ExcInternalError()); - Assert 
(interface_intersects_cell(cell) == true, ExcInternalError()); - - std::vector level_set_values (GeometryInfo::vertices_per_cell); - for (unsigned int v=0; v::vertices_per_cell; ++v) - level_set_values[v] = level_set (cell->vertex(v)); - - FEValues this_fe_values (fe_collection[1], - compute_quadrature(quadrature_formula, cell, - level_set_values).second, - update_values | update_gradients | - update_quadrature_points | update_JxW_values ); - - this_fe_values.reinit (cell); - - coefficient_values.resize (this_fe_values.n_quadrature_points); - coefficient.value_list (this_fe_values.get_quadrature_points(), - coefficient_values); - - for (unsigned int q_point=0; q_pointget_fe().system_to_component_index(i).first == 0) - { - for (unsigned int j=0; jget_fe().system_to_component_index(j).first == 0) - cell_matrix(i,j) += (coefficient_values[q_point] * - this_fe_values.shape_grad(i,q_point) * - this_fe_values.shape_grad(j,q_point) * - this_fe_values.JxW(q_point)); - else - cell_matrix(i,j) += (coefficient_values[q_point] * - this_fe_values.shape_grad(i,q_point) - * - ((std::fabs(level_set(this_fe_values.quadrature_point(q_point))) - - - std::fabs(level_set(cell->vertex(cell->get_fe().system_to_component_index(j).second))))* - this_fe_values.shape_grad(j,q_point) - + - grad_level_set(this_fe_values.quadrature_point(q_point)) * - sign(level_set(this_fe_values.quadrature_point(q_point))) * - this_fe_values.shape_value(j,q_point)) * - this_fe_values.JxW(q_point)); - - cell_rhs(i) += (this_fe_values.shape_value(i,q_point) * - 1.0 * - this_fe_values.JxW(q_point)); - } - else - { - for (unsigned int j=0; jget_fe().system_to_component_index(j).first == 0) - cell_matrix(i,j) += (coefficient_values[q_point] * - ((std::fabs(level_set(this_fe_values.quadrature_point(q_point))) - - - std::fabs(level_set(cell->vertex(cell->get_fe().system_to_component_index(i).second))))* - this_fe_values.shape_grad(i,q_point) - + - grad_level_set(this_fe_values.quadrature_point(q_point)) * - sign(level_set(this_fe_values.quadrature_point(q_point))) * - this_fe_values.shape_value(i,q_point)) * - this_fe_values.shape_grad(j,q_point) * - this_fe_values.JxW(q_point)); - else - cell_matrix(i,j) += (coefficient_values[q_point] * - ((std::fabs(level_set(this_fe_values.quadrature_point(q_point))) - - - std::fabs(level_set(cell->vertex(cell->get_fe().system_to_component_index(i).second))))* - this_fe_values.shape_grad(i,q_point) - + - grad_level_set(this_fe_values.quadrature_point(q_point)) * - sign(level_set(this_fe_values.quadrature_point(q_point))) * - this_fe_values.shape_value(i,q_point)) * - ((std::fabs(level_set(this_fe_values.quadrature_point(q_point))) - - - std::fabs(level_set(cell->vertex(cell->get_fe().system_to_component_index(j).second))))* - this_fe_values.shape_grad(j,q_point) - + - grad_level_set(this_fe_values.quadrature_point(q_point)) * - sign(level_set(this_fe_values.quadrature_point(q_point))) * - this_fe_values.shape_value(j,q_point)) * - this_fe_values.JxW(q_point)); - - cell_rhs(i) += ((std::fabs(level_set(this_fe_values.quadrature_point(q_point))) - - - std::fabs(level_set(cell->vertex(cell->get_fe().system_to_component_index(i).second))))* - this_fe_values.shape_value(i,q_point) * - 1.0 * - this_fe_values.JxW(q_point)); - } - } - - local_dof_indices.resize (dofs_per_cell); - cell->get_dof_indices (local_dof_indices); - constraints.distribute_local_to_global (cell_matrix, cell_rhs, - local_dof_indices, - system_matrix, system_rhs); + Assert (cell->active_fe_index() == 1, ExcInternalError()); + Assert 
(interface_intersects_cell(cell) == true, ExcInternalError()); + + std::vector level_set_values (GeometryInfo::vertices_per_cell); + for (unsigned int v=0; v::vertices_per_cell; ++v) + level_set_values[v] = level_set (cell->vertex(v)); + + FEValues this_fe_values (fe_collection[1], + compute_quadrature(quadrature_formula, cell, + level_set_values).second, + update_values | update_gradients | + update_quadrature_points | update_JxW_values ); + + this_fe_values.reinit (cell); + + coefficient_values.resize (this_fe_values.n_quadrature_points); + coefficient.value_list (this_fe_values.get_quadrature_points(), + coefficient_values); + + for (unsigned int q_point=0; q_pointget_fe().system_to_component_index(i).first == 0) + { + for (unsigned int j=0; jget_fe().system_to_component_index(j).first == 0) + cell_matrix(i,j) += (coefficient_values[q_point] * + this_fe_values.shape_grad(i,q_point) * + this_fe_values.shape_grad(j,q_point) * + this_fe_values.JxW(q_point)); + else + cell_matrix(i,j) += (coefficient_values[q_point] * + this_fe_values.shape_grad(i,q_point) + * + ((std::fabs(level_set(this_fe_values.quadrature_point(q_point))) + - + std::fabs(level_set(cell->vertex(cell->get_fe().system_to_component_index(j).second))))* + this_fe_values.shape_grad(j,q_point) + + + grad_level_set(this_fe_values.quadrature_point(q_point)) * + sign(level_set(this_fe_values.quadrature_point(q_point))) * + this_fe_values.shape_value(j,q_point)) * + this_fe_values.JxW(q_point)); + + cell_rhs(i) += (this_fe_values.shape_value(i,q_point) * + 1.0 * + this_fe_values.JxW(q_point)); + } + else + { + for (unsigned int j=0; jget_fe().system_to_component_index(j).first == 0) + cell_matrix(i,j) += (coefficient_values[q_point] * + ((std::fabs(level_set(this_fe_values.quadrature_point(q_point))) + - + std::fabs(level_set(cell->vertex(cell->get_fe().system_to_component_index(i).second))))* + this_fe_values.shape_grad(i,q_point) + + + grad_level_set(this_fe_values.quadrature_point(q_point)) * + sign(level_set(this_fe_values.quadrature_point(q_point))) * + this_fe_values.shape_value(i,q_point)) * + this_fe_values.shape_grad(j,q_point) * + this_fe_values.JxW(q_point)); + else + cell_matrix(i,j) += (coefficient_values[q_point] * + ((std::fabs(level_set(this_fe_values.quadrature_point(q_point))) + - + std::fabs(level_set(cell->vertex(cell->get_fe().system_to_component_index(i).second))))* + this_fe_values.shape_grad(i,q_point) + + + grad_level_set(this_fe_values.quadrature_point(q_point)) * + sign(level_set(this_fe_values.quadrature_point(q_point))) * + this_fe_values.shape_value(i,q_point)) * + ((std::fabs(level_set(this_fe_values.quadrature_point(q_point))) + - + std::fabs(level_set(cell->vertex(cell->get_fe().system_to_component_index(j).second))))* + this_fe_values.shape_grad(j,q_point) + + + grad_level_set(this_fe_values.quadrature_point(q_point)) * + sign(level_set(this_fe_values.quadrature_point(q_point))) * + this_fe_values.shape_value(j,q_point)) * + this_fe_values.JxW(q_point)); + + cell_rhs(i) += ((std::fabs(level_set(this_fe_values.quadrature_point(q_point))) + - + std::fabs(level_set(cell->vertex(cell->get_fe().system_to_component_index(i).second))))* + this_fe_values.shape_value(i,q_point) * + 1.0 * + this_fe_values.JxW(q_point)); + } + } + + local_dof_indices.resize (dofs_per_cell); + cell->get_dof_indices (local_dof_indices); + constraints.distribute_local_to_global (cell_matrix, cell_rhs, + local_dof_indices, + system_matrix, system_rhs); } std::map boundary_values; VectorTools::interpolate_boundary_values 
(dof_handler, - 0, - ZeroFunction(2), - boundary_values); + 0, + ZeroFunction(2), + boundary_values); MatrixTools::apply_boundary_values (boundary_values, - system_matrix, - solution, - system_rhs); + system_matrix, + solution, + system_rhs); } @@ -451,26 +451,26 @@ namespace Step47 template std::pair > LaplaceProblem::compute_quadrature (const Quadrature &plain_quadrature, - const typename hp::DoFHandler::active_cell_iterator &cell, - const std::vector &level_set_values ) + const typename hp::DoFHandler::active_cell_iterator &cell, + const std::vector &level_set_values ) { unsigned int type = 0; - // find the type of cut + // find the type of cut int sign_ls[GeometryInfo::vertices_per_cell]; for (unsigned int v=0; v::vertices_per_cell; ++v) { - if (level_set_values[v] > 0) sign_ls[v] = 1; - else if (level_set_values[v] < 0) sign_ls[v] = -1; - else sign_ls[v] = 0; + if (level_set_values[v] > 0) sign_ls[v] = 1; + else if (level_set_values[v] < 0) sign_ls[v] = -1; + else sign_ls[v] = 0; } - // the sign of the level set function at the 4 nodes of the elements can be positive + or negative - - // depending on the sign of the level set function we have the folloing three classes of decomposition - // type 1: ++++, ---- - // type 2: -+++, +-++, ++-+, +++-, +---, -+--, --+-, ---+ - // type 3: +--+, ++--, +-+-, -++-, --++, -+-+ + // the sign of the level set function at the 4 nodes of the elements can be positive + or negative - + // depending on the sign of the level set function we have the folloing three classes of decomposition + // type 1: ++++, ---- + // type 2: -+++, +-++, ++-+, +++-, +---, -+--, --+-, ---+ + // type 3: +--+, ++--, +-+-, -++-, --++, -+-+ if ( sign_ls[0]==sign_ls[1] & sign_ls[0]==sign_ls[2] & sign_ls[0]==sign_ls[3] ) type =1; else if ( sign_ls[0]*sign_ls[1]*sign_ls[2]*sign_ls[3] < 0 ) type = 2; @@ -495,275 +495,275 @@ namespace Step47 if (type==2) { - const unsigned int n_q_points = plain_quadrature.size(); - - // loop over all subelements for integration - // in type 2 there are 5 subelements - - Quadrature xfem_quadrature(5*n_q_points); - - std::vector > v(GeometryInfo::vertices_per_cell); - - if (sign_ls[0]!=sign_ls[1] && sign_ls[0]!=sign_ls[2] && sign_ls[0]!=sign_ls[3]) Pos = 0; - else if (sign_ls[1]!=sign_ls[0] && sign_ls[1]!=sign_ls[2] && sign_ls[1]!=sign_ls[3]) Pos = 1; - else if (sign_ls[2]!=sign_ls[0] && sign_ls[2]!=sign_ls[1] && sign_ls[2]!=sign_ls[3]) Pos = 2; - else if (sign_ls[3]!=sign_ls[0] && sign_ls[3]!=sign_ls[1] && sign_ls[3]!=sign_ls[2]) Pos = 3; - else assert(0); // error message - - // Find cut coordinates - - // deal.ii local coordinates - - // 2-------3 - // | | - // | | - // | | - // 0-------1 - - if (Pos == 0) - { - A[0] = 1. - level_set_values[1]/(level_set_values[1]-level_set_values[0]); - B[1] = 1. - level_set_values[2]/(level_set_values[2]-level_set_values[0]); - A(1) = 0.; - B(0) = 0.; - C(0) = 0.5*( A(0) + B(0) ); - C(1) = 0.5*( A(1) + B(1) ); - D(0) = 2./3. * C(0); - D(1) = 2./3. * C(1); - E(0) = 0.5*A(0); - E(1) = 0.; - F(0) = 0.; - F(1) = 0.5*B(1); - } - else if (Pos == 1) - { - A[0] = level_set_values[0]/(level_set_values[0]-level_set_values[1]); - B[1] = 1 - level_set_values[3]/(level_set_values[3]-level_set_values[1]); - A(1) = 0.; - B(0) = 1.; - C(0) = 0.5*( A(0) + B(0) ); - C(1) = 0.5*( A(1) + B(1) ); - D(0) = 1./3. + 2./3. * C(0); - D(1) = 2./3. 
* C(1); - E(0) = 0.5*(1 + A(0)); - E(1) = 0.; - F(0) = 1.; - F(1) = 0.5*B(1); - } - else if (Pos == 2) - { - A[0] = 1 - level_set_values[3]/(level_set_values[3]-level_set_values[2]); - B[1] = level_set_values[0]/(level_set_values[0]-level_set_values[2]); - A(1) = 1.; - B(0) = 0.; - C(0) = 0.5*( A(0) + B(0) ); - C(1) = 0.5*( A(1) + B(1) ); - D(0) = 2./3. * C(0); - D(1) = 1./3. + 2./3. * C(1); - E(0) = 0.5* A(0); - E(1) = 1.; - F(0) = 0.; - F(1) = 0.5*( 1. + B(1) ); - } - else if (Pos == 3) - { - A[0] = level_set_values[2]/(level_set_values[2]-level_set_values[3]); - B[1] = level_set_values[1]/(level_set_values[1]-level_set_values[3]); - A(1) = 1.; - B(0) = 1.; - C(0) = 0.5*( A(0) + B(0) ); - C(1) = 0.5*( A(1) + B(1) ); - D(0) = 1./3. + 2./3. * C(0); - D(1) = 1./3. + 2./3. * C(1); - E(0) = 0.5*( 1. + A(0) ); - E(1) = 1.; - F(0) = 1.; - F(1) = 0.5*( 1. + B(1) ); - } - - //std::cout << A << std::endl; - //std::cout << B << std::endl; - //std::cout << C << std::endl; - //std::cout << D << std::endl; - //std::cout << E << std::endl; - //std::cout << F << std::endl; - - std::string filename = "vertices.dat"; - std::ofstream output (filename.c_str()); - output << "#vertices of xfem subcells" << std::endl; - output << v0(0) << " " << v0(1) << std::endl; - output << v1(0) << " " << v1(1) << std::endl; - output << v3(0) << " " << v3(1) << std::endl; - output << v2(0) << " " << v2(1) << std::endl; - output << std::endl; - output << A(0) << " " << A(1) << std::endl; - output << B(0) << " " << B(1) << std::endl; - output << std::endl; - output << C(0) << " " << C(1) << std::endl; - output << D(0) << " " << D(1) << std::endl; - output << std::endl; - output << D(0) << " " << D(1) << std::endl; - output << E(0) << " " << E(1) << std::endl; - output << std::endl; - output << D(0) << " " << D(1) << std::endl; - output << F(0) << " " << F(1) << std::endl; - output << std::endl; - - if (Pos==0) - output << v3(0) << " " << v3(1) << std::endl; - else if (Pos==1) - output << v2(0) << " " << v2(1) << std::endl; - else if (Pos==2) - output << v1(0) << " " << v1(1) << std::endl; - else if (Pos==3) - output << v0(0) << " " << v0(1) << std::endl; - output << C(0) << " " << C(1) << std::endl; - - Point subcell_vertices[10]; - subcell_vertices[0] = v0; - subcell_vertices[1] = v1; - subcell_vertices[2] = v2; - subcell_vertices[3] = v3; - subcell_vertices[4] = A; - subcell_vertices[5] = B; - subcell_vertices[6] = C; - subcell_vertices[7] = D; - subcell_vertices[8] = E; - subcell_vertices[9] = F; - - std::vector > xfem_points; - std::vector xfem_weights; - - // lookup table for the decomposition - - if (dim==2) - { - unsigned int subcell_v_indices[4][5][4] = { - {{0,8,9,7}, {9,7,5,6}, {8,4,7,6}, {5,6,2,3}, {6,4,3,1}}, - {{8,1,7,9}, {4,8,6,7}, {6,7,5,9}, {0,4,2,6}, {2,6,3,5}}, - {{9,7,2,8}, {5,6,9,7}, {6,4,7,8}, {0,1,5,6}, {6,1,4,3}}, - {{7,9,8,3}, {4,6,8,7}, {6,5,7,9}, {0,6,2,4}, {0,1,6,5}} - }; - - for (unsigned int subcell = 0; subcell<5; subcell++) - { - //std::cout << "subcell : " << subcell << std::endl; - std::vector > vertices; - for (unsigned int i=0; i<4; i++) - { - vertices.push_back( subcell_vertices[subcell_v_indices[Pos][subcell][i]] ); - //std::cout << "i : " << i << std::endl; - //std::cout << "subcell v : " << subcell_v_indices[Pos][subcell][i] << std::endl; - //std::cout << vertices[i](0) << " " << vertices[i](1) << std::endl; - } - //std::cout << std::endl; - // create quadrature rule - append_quadrature( plain_quadrature, - vertices, - xfem_points, - xfem_weights); - //initialize xfem_quadrature with 
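The cut coordinates A and B computed above are nothing but linear interpolation of the level-set values along a cut edge: on an edge whose endpoint values have opposite sign, the zero crossing sits at t = phi_a / (phi_a - phi_b), which is the same ratio that the branches above evaluate. A small standalone sketch (endpoint values are hypothetical):

    #include <array>
    #include <cassert>
    #include <cstdio>

    using Point2 = std::array<double,2>;

    // Zero crossing of the linearly interpolated level set on the edge a -> b:
    // phi(t) = (1-t)*phi_a + t*phi_b vanishes at t = phi_a / (phi_a - phi_b).
    Point2 edge_cut(const Point2 &a, const Point2 &b, double phi_a, double phi_b)
    {
      assert(phi_a * phi_b < 0);                 // the edge must actually be cut
      const double t = phi_a / (phi_a - phi_b);  // lies in (0,1)
      Point2 p;
      p[0] = a[0] + t * (b[0] - a[0]);
      p[1] = a[1] + t * (b[1] - a[1]);
      return p;
    }

    int main()
    {
      // Bottom edge of the reference cell, hypothetical level-set values:
      const Point2 a = {0.0, 0.0};
      const Point2 b = {1.0, 0.0};
      const Point2 p = edge_cut(a, b, -0.2, 0.6);
      std::printf("cut at (%g, %g)\n", p[0], p[1]);   // (0.25, 0)
      return 0;
    }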
quadrature points of all subelements - xfem_quadrature.initialize(xfem_points, xfem_weights); - } - } - - Assert (xfem_quadrature.size() == plain_quadrature.size() * 5, ExcInternalError()); - return std::pair >(2, xfem_quadrature); + const unsigned int n_q_points = plain_quadrature.size(); + + // loop over all subelements for integration + // in type 2 there are 5 subelements + + Quadrature xfem_quadrature(5*n_q_points); + + std::vector > v(GeometryInfo::vertices_per_cell); + + if (sign_ls[0]!=sign_ls[1] && sign_ls[0]!=sign_ls[2] && sign_ls[0]!=sign_ls[3]) Pos = 0; + else if (sign_ls[1]!=sign_ls[0] && sign_ls[1]!=sign_ls[2] && sign_ls[1]!=sign_ls[3]) Pos = 1; + else if (sign_ls[2]!=sign_ls[0] && sign_ls[2]!=sign_ls[1] && sign_ls[2]!=sign_ls[3]) Pos = 2; + else if (sign_ls[3]!=sign_ls[0] && sign_ls[3]!=sign_ls[1] && sign_ls[3]!=sign_ls[2]) Pos = 3; + else assert(0); // error message + + // Find cut coordinates + + // deal.ii local coordinates + + // 2-------3 + // | | + // | | + // | | + // 0-------1 + + if (Pos == 0) + { + A[0] = 1. - level_set_values[1]/(level_set_values[1]-level_set_values[0]); + B[1] = 1. - level_set_values[2]/(level_set_values[2]-level_set_values[0]); + A(1) = 0.; + B(0) = 0.; + C(0) = 0.5*( A(0) + B(0) ); + C(1) = 0.5*( A(1) + B(1) ); + D(0) = 2./3. * C(0); + D(1) = 2./3. * C(1); + E(0) = 0.5*A(0); + E(1) = 0.; + F(0) = 0.; + F(1) = 0.5*B(1); + } + else if (Pos == 1) + { + A[0] = level_set_values[0]/(level_set_values[0]-level_set_values[1]); + B[1] = 1 - level_set_values[3]/(level_set_values[3]-level_set_values[1]); + A(1) = 0.; + B(0) = 1.; + C(0) = 0.5*( A(0) + B(0) ); + C(1) = 0.5*( A(1) + B(1) ); + D(0) = 1./3. + 2./3. * C(0); + D(1) = 2./3. * C(1); + E(0) = 0.5*(1 + A(0)); + E(1) = 0.; + F(0) = 1.; + F(1) = 0.5*B(1); + } + else if (Pos == 2) + { + A[0] = 1 - level_set_values[3]/(level_set_values[3]-level_set_values[2]); + B[1] = level_set_values[0]/(level_set_values[0]-level_set_values[2]); + A(1) = 1.; + B(0) = 0.; + C(0) = 0.5*( A(0) + B(0) ); + C(1) = 0.5*( A(1) + B(1) ); + D(0) = 2./3. * C(0); + D(1) = 1./3. + 2./3. * C(1); + E(0) = 0.5* A(0); + E(1) = 1.; + F(0) = 0.; + F(1) = 0.5*( 1. + B(1) ); + } + else if (Pos == 3) + { + A[0] = level_set_values[2]/(level_set_values[2]-level_set_values[3]); + B[1] = level_set_values[1]/(level_set_values[1]-level_set_values[3]); + A(1) = 1.; + B(0) = 1.; + C(0) = 0.5*( A(0) + B(0) ); + C(1) = 0.5*( A(1) + B(1) ); + D(0) = 1./3. + 2./3. * C(0); + D(1) = 1./3. + 2./3. * C(1); + E(0) = 0.5*( 1. + A(0) ); + E(1) = 1.; + F(0) = 1.; + F(1) = 0.5*( 1. 
+ B(1) ); + } + + //std::cout << A << std::endl; + //std::cout << B << std::endl; + //std::cout << C << std::endl; + //std::cout << D << std::endl; + //std::cout << E << std::endl; + //std::cout << F << std::endl; + + std::string filename = "vertices.dat"; + std::ofstream output (filename.c_str()); + output << "#vertices of xfem subcells" << std::endl; + output << v0(0) << " " << v0(1) << std::endl; + output << v1(0) << " " << v1(1) << std::endl; + output << v3(0) << " " << v3(1) << std::endl; + output << v2(0) << " " << v2(1) << std::endl; + output << std::endl; + output << A(0) << " " << A(1) << std::endl; + output << B(0) << " " << B(1) << std::endl; + output << std::endl; + output << C(0) << " " << C(1) << std::endl; + output << D(0) << " " << D(1) << std::endl; + output << std::endl; + output << D(0) << " " << D(1) << std::endl; + output << E(0) << " " << E(1) << std::endl; + output << std::endl; + output << D(0) << " " << D(1) << std::endl; + output << F(0) << " " << F(1) << std::endl; + output << std::endl; + + if (Pos==0) + output << v3(0) << " " << v3(1) << std::endl; + else if (Pos==1) + output << v2(0) << " " << v2(1) << std::endl; + else if (Pos==2) + output << v1(0) << " " << v1(1) << std::endl; + else if (Pos==3) + output << v0(0) << " " << v0(1) << std::endl; + output << C(0) << " " << C(1) << std::endl; + + Point subcell_vertices[10]; + subcell_vertices[0] = v0; + subcell_vertices[1] = v1; + subcell_vertices[2] = v2; + subcell_vertices[3] = v3; + subcell_vertices[4] = A; + subcell_vertices[5] = B; + subcell_vertices[6] = C; + subcell_vertices[7] = D; + subcell_vertices[8] = E; + subcell_vertices[9] = F; + + std::vector > xfem_points; + std::vector xfem_weights; + + // lookup table for the decomposition + + if (dim==2) + { + unsigned int subcell_v_indices[4][5][4] = { + {{0,8,9,7}, {9,7,5,6}, {8,4,7,6}, {5,6,2,3}, {6,4,3,1}}, + {{8,1,7,9}, {4,8,6,7}, {6,7,5,9}, {0,4,2,6}, {2,6,3,5}}, + {{9,7,2,8}, {5,6,9,7}, {6,4,7,8}, {0,1,5,6}, {6,1,4,3}}, + {{7,9,8,3}, {4,6,8,7}, {6,5,7,9}, {0,6,2,4}, {0,1,6,5}} + }; + + for (unsigned int subcell = 0; subcell<5; subcell++) + { + //std::cout << "subcell : " << subcell << std::endl; + std::vector > vertices; + for (unsigned int i=0; i<4; i++) + { + vertices.push_back( subcell_vertices[subcell_v_indices[Pos][subcell][i]] ); + //std::cout << "i : " << i << std::endl; + //std::cout << "subcell v : " << subcell_v_indices[Pos][subcell][i] << std::endl; + //std::cout << vertices[i](0) << " " << vertices[i](1) << std::endl; + } + //std::cout << std::endl; + // create quadrature rule + append_quadrature( plain_quadrature, + vertices, + xfem_points, + xfem_weights); + //initialize xfem_quadrature with quadrature points of all subelements + xfem_quadrature.initialize(xfem_points, xfem_weights); + } + } + + Assert (xfem_quadrature.size() == plain_quadrature.size() * 5, ExcInternalError()); + return std::pair >(2, xfem_quadrature); } - // Type three decomposition - // (+--+, ++--, +-+-, -++-, --++, -+-+) + // Type three decomposition + // (+--+, ++--, +-+-, -++-, --++, -+-+) if (type==3) { - const unsigned int n_q_points = plain_quadrature.size(); - - // loop over all subelements for integration - // in type 2 there are 5 subelements - - Quadrature xfem_quadrature(5*n_q_points); - - std::vector > v(GeometryInfo::vertices_per_cell); - - if ( sign_ls[0]==sign_ls[1] && sign_ls[2]==sign_ls[3] ) - { - Pos = 0; - A(0) = 0.; - A(1) = level_set_values[0]/((level_set_values[0]-level_set_values[2])); - B(0) = 1.; - B(1) = 
level_set_values[1]/((level_set_values[1]-level_set_values[3])); - } - else if ( sign_ls[0]==sign_ls[2] && sign_ls[1]==sign_ls[3] ) - { - Pos = 1; - A(0) = level_set_values[0]/((level_set_values[0]-level_set_values[1])); - A(1) = 0.; - B(0) = level_set_values[2]/((level_set_values[2]-level_set_values[3])); - B(1) = 1.; - } - else if ( sign_ls[0]==sign_ls[3] && sign_ls[1]==sign_ls[2] ) - { - std::cout << "Error: the element has two cut lines and this is not allowed" << std::endl; - assert(0); - } - else - { - std::cout << "Error: the level set function has not the right values" << std::endl; - assert(0); - } - - //std::cout << "Pos " << Pos << std::endl; - //std::cout << A << std::endl; - //std::cout << B << std::endl; - std::string filename = "vertices.dat"; - std::ofstream output (filename.c_str()); - output << "#vertices of xfem subcells" << std::endl; - output << A(0) << " " << A(1) << std::endl; - output << B(0) << " " << B(1) << std::endl; - - //fill xfem_quadrature - Point subcell_vertices[6]; - subcell_vertices[0] = v0; - subcell_vertices[1] = v1; - subcell_vertices[2] = v2; - subcell_vertices[3] = v3; - subcell_vertices[4] = A; - subcell_vertices[5] = B; - - std::vector > xfem_points; - std::vector xfem_weights; - - if (dim==2) - { - unsigned int subcell_v_indices[2][2][4] = { - {{0,1,4,5}, {4,5,2,3}}, - {{0,4,2,5}, {4,1,5,3}} - }; - - //std::cout << "Pos : " << Pos << std::endl; - for (unsigned int subcell = 0; subcell<2; subcell++) - { - //std::cout << "subcell : " << subcell << std::endl; - std::vector > vertices; - for (unsigned int i=0; i<4; i++) - { - vertices.push_back( subcell_vertices[subcell_v_indices[Pos][subcell][i]] ); - //std::cout << "i : " << i << std::endl; - //std::cout << "subcell v : " << subcell_v_indices[Pos][subcell][i] << std::endl; - //std::cout << vertices[i](0) << " " << vertices[i](1) << std::endl; - } - //std::cout << std::endl; - // create quadrature rule - append_quadrature( plain_quadrature, - vertices, - xfem_points, - xfem_weights); - //initialize xfem_quadrature with quadrature points of all subelements - xfem_quadrature.initialize(xfem_points, xfem_weights); - } - } - Assert (xfem_quadrature.size() == plain_quadrature.size() * 2, ExcInternalError()); - return std::pair >(3, xfem_quadrature); + const unsigned int n_q_points = plain_quadrature.size(); + + // loop over all subelements for integration + // in type 2 there are 5 subelements + + Quadrature xfem_quadrature(5*n_q_points); + + std::vector > v(GeometryInfo::vertices_per_cell); + + if ( sign_ls[0]==sign_ls[1] && sign_ls[2]==sign_ls[3] ) + { + Pos = 0; + A(0) = 0.; + A(1) = level_set_values[0]/((level_set_values[0]-level_set_values[2])); + B(0) = 1.; + B(1) = level_set_values[1]/((level_set_values[1]-level_set_values[3])); + } + else if ( sign_ls[0]==sign_ls[2] && sign_ls[1]==sign_ls[3] ) + { + Pos = 1; + A(0) = level_set_values[0]/((level_set_values[0]-level_set_values[1])); + A(1) = 0.; + B(0) = level_set_values[2]/((level_set_values[2]-level_set_values[3])); + B(1) = 1.; + } + else if ( sign_ls[0]==sign_ls[3] && sign_ls[1]==sign_ls[2] ) + { + std::cout << "Error: the element has two cut lines and this is not allowed" << std::endl; + assert(0); + } + else + { + std::cout << "Error: the level set function has not the right values" << std::endl; + assert(0); + } + + //std::cout << "Pos " << Pos << std::endl; + //std::cout << A << std::endl; + //std::cout << B << std::endl; + std::string filename = "vertices.dat"; + std::ofstream output (filename.c_str()); + output << "#vertices of xfem 
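For the type-3 patterns handled here, the interface enters through one pair of opposite edges and the cell is split into only two sub-quadrilaterals, selected from the lookup table above. The following standalone sketch (sign pattern made up) mirrors that selection logic:

    #include <array>
    #include <cstdio>

    // For a type-3 cut (two vertices of each sign) the interface crosses either
    // the two vertical edges (vertex pairs {0,1} and {2,3} share a sign) or the
    // two horizontal edges (pairs {0,2} and {1,3} share a sign). The sub-quads
    // then take their corners from the list {v0, v1, v2, v3, A, B}.
    int type3_orientation(const std::array<int,4> &s)
    {
      if (s[0] == s[1] && s[2] == s[3]) return 0;  // cut runs left to right
      if (s[0] == s[2] && s[1] == s[3]) return 1;  // cut runs bottom to top
      return -1;                                   // diagonal pattern: rejected
    }

    int main()
    {
      const unsigned int subcell_v_indices[2][2][4] = {
        {{0,1,4,5}, {4,5,2,3}},   // orientation 0: below / above the cut
        {{0,4,2,5}, {4,1,5,3}}    // orientation 1: left of / right of the cut
      };
      const std::array<int,4> s = {1, 1, -1, -1};
      const int pos = type3_orientation(s);
      std::printf("orientation %d, first subcell: %u %u %u %u\n", pos,
                  subcell_v_indices[pos][0][0], subcell_v_indices[pos][0][1],
                  subcell_v_indices[pos][0][2], subcell_v_indices[pos][0][3]);
      return 0;
    }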
subcells" << std::endl; + output << A(0) << " " << A(1) << std::endl; + output << B(0) << " " << B(1) << std::endl; + + //fill xfem_quadrature + Point subcell_vertices[6]; + subcell_vertices[0] = v0; + subcell_vertices[1] = v1; + subcell_vertices[2] = v2; + subcell_vertices[3] = v3; + subcell_vertices[4] = A; + subcell_vertices[5] = B; + + std::vector > xfem_points; + std::vector xfem_weights; + + if (dim==2) + { + unsigned int subcell_v_indices[2][2][4] = { + {{0,1,4,5}, {4,5,2,3}}, + {{0,4,2,5}, {4,1,5,3}} + }; + + //std::cout << "Pos : " << Pos << std::endl; + for (unsigned int subcell = 0; subcell<2; subcell++) + { + //std::cout << "subcell : " << subcell << std::endl; + std::vector > vertices; + for (unsigned int i=0; i<4; i++) + { + vertices.push_back( subcell_vertices[subcell_v_indices[Pos][subcell][i]] ); + //std::cout << "i : " << i << std::endl; + //std::cout << "subcell v : " << subcell_v_indices[Pos][subcell][i] << std::endl; + //std::cout << vertices[i](0) << " " << vertices[i](1) << std::endl; + } + //std::cout << std::endl; + // create quadrature rule + append_quadrature( plain_quadrature, + vertices, + xfem_points, + xfem_weights); + //initialize xfem_quadrature with quadrature points of all subelements + xfem_quadrature.initialize(xfem_points, xfem_weights); + } + } + Assert (xfem_quadrature.size() == plain_quadrature.size() * 2, ExcInternalError()); + return std::pair >(3, xfem_quadrature); } return std::pair >(0, plain_quadrature);; @@ -772,16 +772,16 @@ namespace Step47 template void LaplaceProblem::append_quadrature ( const Quadrature &plain_quadrature, - const std::vector > &v, - std::vector > &xfem_points, - std::vector &xfem_weights) + const std::vector > &v, + std::vector > &xfem_points, + std::vector &xfem_weights) { - // Project integration points into sub-elements. - // This maps quadrature points from a reference element to a subelement of a reference element. - // To implement the action of this map the coordinates of the subelements have been calculated (A(0)...F(0),A(1)...F(1)) - // the coordinates of the quadrature points are given by the bi-linear map defined by the form functions - // $x^\prime_i = \sum_j v^\prime \phi_j(x^hat_i)$, where the $\phi_j$ are the shape functions of the FEQ. + // Project integration points into sub-elements. + // This maps quadrature points from a reference element to a subelement of a reference element. + // To implement the action of this map the coordinates of the subelements have been calculated (A(0)...F(0),A(1)...F(1)) + // the coordinates of the quadrature points are given by the bi-linear map defined by the form functions + // $x^\prime_i = \sum_j v^\prime \phi_j(x^hat_i)$, where the $\phi_j$ are the shape functions of the FEQ. unsigned int n_v = GeometryInfo::vertices_per_cell; @@ -797,59 +797,59 @@ namespace Step47 for ( unsigned int i = 0; i < n_q_points; i++) { - switch (dim) - { - case 2: - { - double xi = q_points[i](0); - double eta = q_points[i](1); - - // Define shape functions on reference element - // we consider a bi-linear mapping - phi[0] = (1. - xi) * (1. - eta); - phi[1] = xi * (1. - eta); - phi[2] = (1. - xi) * eta; - phi[3] = xi * eta; - - grad_phi[0][0] = (-1. + eta); - grad_phi[1][0] = (1. - eta); - grad_phi[2][0] = -eta; - grad_phi[3][0] = eta; - - grad_phi[0][1] = (-1. 
+ xi); - grad_phi[1][1] = -xi; - grad_phi[2][1] = 1-xi; - grad_phi[3][1] = xi; - - break; - } - - default: - Assert (false, ExcNotImplemented()); - } - - - Tensor<2,dim> jacobian; - - // Calculate Jacobian of transformation - for (unsigned int d=0; d::vertices_per_cell; j++) - { - jacobian[d][e] += grad_phi[j][e] * v[j](d); - } - } - - double detJ = determinant(jacobian); - xfem_weights.push_back (W[i] * detJ); - - // Map integration points from reference element to subcell of reference element - Point q_prime; - for (unsigned int d=0; d::vertices_per_cell; j++) - q_prime[d] += v[j](d) * phi[j]; - xfem_points.push_back(q_prime); + switch (dim) + { + case 2: + { + double xi = q_points[i](0); + double eta = q_points[i](1); + + // Define shape functions on reference element + // we consider a bi-linear mapping + phi[0] = (1. - xi) * (1. - eta); + phi[1] = xi * (1. - eta); + phi[2] = (1. - xi) * eta; + phi[3] = xi * eta; + + grad_phi[0][0] = (-1. + eta); + grad_phi[1][0] = (1. - eta); + grad_phi[2][0] = -eta; + grad_phi[3][0] = eta; + + grad_phi[0][1] = (-1. + xi); + grad_phi[1][1] = -xi; + grad_phi[2][1] = 1-xi; + grad_phi[3][1] = xi; + + break; + } + + default: + Assert (false, ExcNotImplemented()); + } + + + Tensor<2,dim> jacobian; + + // Calculate Jacobian of transformation + for (unsigned int d=0; d::vertices_per_cell; j++) + { + jacobian[d][e] += grad_phi[j][e] * v[j](d); + } + } + + double detJ = determinant(jacobian); + xfem_weights.push_back (W[i] * detJ); + + // Map integration points from reference element to subcell of reference element + Point q_prime; + for (unsigned int d=0; d::vertices_per_cell; j++) + q_prime[d] += v[j](d) * phi[j]; + xfem_points.push_back(q_prime); } } @@ -865,7 +865,7 @@ namespace Step47 preconditioner.initialize(system_matrix, 1.2); solver.solve (system_matrix, solution, system_rhs, - preconditioner); + preconditioner); constraints.distribute (solution); } @@ -878,14 +878,14 @@ namespace Step47 Vector estimated_error_per_cell (triangulation.n_active_cells()); KellyErrorEstimator::estimate (dof_handler, - QGauss(3), - typename FunctionMap::type(), - solution, - estimated_error_per_cell); + QGauss(3), + typename FunctionMap::type(), + solution, + estimated_error_per_cell); GridRefinement::refine_and_coarsen_fixed_number (triangulation, - estimated_error_per_cell, - 0.3, 0.03); + estimated_error_per_cell, + 0.3, 0.03); triangulation.execute_coarsening_and_refinement (); } @@ -899,11 +899,11 @@ namespace Step47 virtual void compute_derived_quantities_vector (const std::vector > &uh, - const std::vector > > &duh, - const std::vector > > &dduh, - const std::vector > &normals, - const std::vector > &evaluation_points, - std::vector > &computed_quantities) const; + const std::vector > > &duh, + const std::vector > > &dduh, + const std::vector > &normals, + const std::vector > &evaluation_points, + std::vector > &computed_quantities) const; virtual std::vector get_names () const; @@ -932,7 +932,7 @@ namespace Step47 { std::vector interpretation (2, - DataComponentInterpretation::component_is_scalar); + DataComponentInterpretation::component_is_scalar); return interpretation; } @@ -949,11 +949,11 @@ namespace Step47 void Postprocessor:: compute_derived_quantities_vector (const std::vector > &uh, - const std::vector > > &/*duh*/, - const std::vector > > &/*dduh*/, - const std::vector > &/*normals*/, - const std::vector > &evaluation_points, - std::vector > &computed_quantities) const + const std::vector > > &/*duh*/, + const std::vector > > &/*dduh*/, + const 
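The mapping performed by append_quadrature, x'_i = sum_j v_j phi_j(x_hat_i) with weights scaled by the Jacobian determinant, can be reproduced in a few lines of plain C++. The sketch below is independent of deal.II; it assumes the subcell vertices are given in deal.II's vertex order and are positively oriented (so detJ > 0), and maps a one-point midpoint rule onto the lower-left quarter of the reference cell as a check.

    #include <array>
    #include <cstdio>
    #include <vector>

    using Point2 = std::array<double,2>;

    // Map reference quadrature points (xi, eta) in [0,1]^2 onto a bilinear
    // sub-quadrilateral with vertices v[0..3] (0: bottom-left, 1: bottom-right,
    // 2: top-left, 3: top-right) and scale the weights by det J of the map.
    void map_quadrature(const std::vector<Point2> &q_points,
                        const std::vector<double> &weights,
                        const std::array<Point2,4> &v,
                        std::vector<Point2> &mapped_points,
                        std::vector<double> &mapped_weights)
    {
      for (std::size_t i = 0; i < q_points.size(); ++i)
        {
          const double xi = q_points[i][0], eta = q_points[i][1];

          const double phi[4] = { (1 - xi) * (1 - eta), xi * (1 - eta),
                                  (1 - xi) * eta,       xi * eta };
          // grad_phi[j] = (d phi_j / d xi, d phi_j / d eta)
          const double grad_phi[4][2] = { {eta - 1, xi - 1}, {1 - eta, -xi},
                                          {-eta,    1 - xi}, {eta,      xi} };

          // Jacobian J[d][e] = d x'_d / d(xi,eta)_e and its determinant
          double J[2][2] = {{0, 0}, {0, 0}};
          for (int d = 0; d < 2; ++d)
            for (int e = 0; e < 2; ++e)
              for (int j = 0; j < 4; ++j)
                J[d][e] += grad_phi[j][e] * v[j][d];
          const double detJ = J[0][0] * J[1][1] - J[0][1] * J[1][0];

          Point2 x = {0, 0};
          for (int j = 0; j < 4; ++j)
            {
              x[0] += v[j][0] * phi[j];
              x[1] += v[j][1] * phi[j];
            }
          mapped_points.push_back(x);
          mapped_weights.push_back(weights[i] * detJ);  // assumes detJ > 0
        }
    }

    int main()
    {
      const std::vector<Point2> q = { Point2{0.5, 0.5} };   // midpoint rule
      const std::vector<double> w = { 1.0 };
      const std::array<Point2,4> quarter = { Point2{0, 0},   Point2{0.5, 0},
                                             Point2{0, 0.5}, Point2{0.5, 0.5} };
      std::vector<Point2> qm;
      std::vector<double> wm;
      map_quadrature(q, w, quarter, qm, wm);
      std::printf("mapped point (%g, %g), weight %g\n", qm[0][0], qm[0][1], wm[0]);
      return 0;
    }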
std::vector > &/*normals*/, + const std::vector > &evaluation_points, + std::vector > &computed_quantities) const { const unsigned int n_quadrature_points = uh.size(); Assert (computed_quantities.size() == n_quadrature_points, ExcInternalError()); @@ -961,14 +961,14 @@ namespace Step47 for (unsigned int q=0; q(QGauss<1>(2), 4)); hp::FEValues hp_fe_values (fe_collection, q_collection, - update_values | update_q_points | update_JxW_values); + update_values | update_q_points | update_JxW_values); double l2_error_square = 0; @@ -1020,29 +1020,29 @@ namespace Step47 for (; cell!=endc; ++cell) { - hp_fe_values.reinit (cell); - - const FEValues &fe_values = hp_fe_values.get_present_fe_values (); - - solution_values.resize (fe_values.n_quadrature_points, - Vector(2)); - fe_values.get_function_values (solution, - solution_values); - - for (unsigned int q=0; q &fe_values = hp_fe_values.get_present_fe_values (); + + solution_values.resize (fe_values.n_quadrature_points, + Vector(2)); + fe_values.get_function_values (solution, + solution_values); + + for (unsigned int q=0; q boundary; - triangulation.set_boundary (0, boundary); + static const HyperBallBoundary boundary; + triangulation.set_boundary (0, boundary); - triangulation.refine_global (2); - } - else - triangulation.refine_global (1); -// refine_grid (); + triangulation.refine_global (2); + } + else + triangulation.refine_global (1); +// refine_grid (); - std::cout << " Number of active cells: " - << triangulation.n_active_cells() - << std::endl; + std::cout << " Number of active cells: " + << triangulation.n_active_cells() + << std::endl; - setup_system (); + setup_system (); - std::cout << " Number of degrees of freedom: " - << dof_handler.n_dofs() - << std::endl; + std::cout << " Number of degrees of freedom: " + << dof_handler.n_dofs() + << std::endl; - assemble_system (); - solve (); - compute_error (); - output_results (cycle); + assemble_system (); + solve (); + compute_error (); + output_results (cycle); } } } @@ -1106,25 +1106,25 @@ int main () catch (std::exception &exc) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Exception on processing: " << std::endl - << exc.what() << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << exc.what() << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } catch (...) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Unknown exception!" << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << "Aborting!" 
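The postprocessor and the error computation above combine the standard and enriched solution components into a single physical value. As a rough standalone sketch, assuming the simple combination u(x) = u_std(x) + |phi(x)| * u_enr(x) (the shifted variant used during assembly would additionally subtract the nodal offsets), with a made-up circular level set and made-up field values:

    #include <cmath>
    #include <cstdio>

    // Hypothetical level set: a circle of radius 0.5 around the origin.
    double level_set(double x, double y) { return std::sqrt(x * x + y * y) - 0.5; }

    // Combine the two solution components of the enriched space into one
    // physical value:  u(x) = u_std(x) + |phi(x)| * u_enr(x).
    double physical_value(double u_std, double u_enr, double x, double y)
    {
      return u_std + std::fabs(level_set(x, y)) * u_enr;
    }

    int main()
    {
      // Hypothetical field values at an evaluation point:
      std::printf("u = %g\n", physical_value(0.8, 0.1, 0.3, 0.0));
      return 0;
    }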
<< std::endl + << "----------------------------------------------------" + << std::endl; return 1; } diff --git a/deal.II/examples/step-5/step-5.cc b/deal.II/examples/step-5/step-5.cc index 82011e8f83..9d219ebca2 100644 --- a/deal.II/examples/step-5/step-5.cc +++ b/deal.II/examples/step-5/step-5.cc @@ -11,9 +11,9 @@ // @sect3{Include files} - // Again, the first few include files - // are already known, so we won't - // comment on them: + // Again, the first few include files + // are already known, so we won't + // comment on them: #include #include #include @@ -35,43 +35,43 @@ #include #include - // This one is new. We want to read a - // triangulation from disk, and the - // class which does this is declared - // in the following file: + // This one is new. We want to read a + // triangulation from disk, and the + // class which does this is declared + // in the following file: #include - // We will use a circular domain, and - // the object describing the boundary - // of it comes from this file: + // We will use a circular domain, and + // the object describing the boundary + // of it comes from this file: #include - // This is C++ ... + // This is C++ ... #include - // ... and this is too: We will - // convert integers to strings using - // the C++ stringstream class - // ostringstream: + // ... and this is too: We will + // convert integers to strings using + // the C++ stringstream class + // ostringstream: #include - // Finally, this has been discussed - // in previous tutorial programs - // before: + // Finally, this has been discussed + // in previous tutorial programs + // before: using namespace dealii; // @sect3{The Step5 class template} - // The main class is mostly as in the - // previous example. The most visible - // change is that the function - // make_grid_and_dofs has been - // removed, since creating the grid - // is now done in the run - // function and the rest of its - // functionality is now in - // setup_system. Apart from this, - // everything is as before. + // The main class is mostly as in the + // previous example. The most visible + // change is that the function + // make_grid_and_dofs has been + // removed, since creating the grid + // is now done in the run + // function and the rest of its + // functionality is now in + // setup_system. Apart from this, + // everything is as before. template class Step5 { @@ -99,31 +99,31 @@ class Step5 // @sect3{Nonconstant coefficients, using Assert} - // In step-4, we showed how to use - // non-constant boundary values and - // right hand side. In this example, - // we want to use a variable - // coefficient in the elliptic - // operator instead. Of course, the - // suitable object is a Function, - // as we have used for the right hand - // side and boundary values in the - // last example. We will use it - // again, but we implement another - // function value_list which - // takes a list of points and returns - // the values of the function at - // these points as a list. The reason - // why such a function is reasonable - // although we can get all the - // information from the value - // function as well will be explained - // below when assembling the matrix. - // - // The need to declare a seemingly - // useless default constructor exists - // here just as in the previous - // example. + // In step-4, we showed how to use + // non-constant boundary values and + // right hand side. In this example, + // we want to use a variable + // coefficient in the elliptic + // operator instead. 
Of course, the + // suitable object is a Function, + // as we have used for the right hand + // side and boundary values in the + // last example. We will use it + // again, but we implement another + // function value_list which + // takes a list of points and returns + // the values of the function at + // these points as a list. The reason + // why such a function is reasonable + // although we can get all the + // information from the value + // function as well will be explained + // below when assembling the matrix. + // + // The need to declare a seemingly + // useless default constructor exists + // here just as in the previous + // example. template class Coefficient : public Function { @@ -131,30 +131,30 @@ class Coefficient : public Function Coefficient () : Function() {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; virtual void value_list (const std::vector > &points, - std::vector &values, - const unsigned int component = 0) const; + std::vector &values, + const unsigned int component = 0) const; }; - // This is the implementation of the - // coefficient function for a single - // point. We let it return 20 if the - // distance to the origin is less - // than 0.5, and 1 otherwise. As in - // the previous example, we simply - // ignore the second parameter of the - // function that is used to denote - // different components of - // vector-valued functions (we deal - // only with a scalar function here, - // after all): + // This is the implementation of the + // coefficient function for a single + // point. We let it return 20 if the + // distance to the origin is less + // than 0.5, and 1 otherwise. As in + // the previous example, we simply + // ignore the second parameter of the + // function that is used to denote + // different components of + // vector-valued functions (we deal + // only with a scalar function here, + // after all): template double Coefficient::value (const Point &p, - const unsigned int /*component*/) const + const unsigned int /*component*/) const { if (p.square() < 0.5*0.5) return 20; @@ -164,191 +164,191 @@ double Coefficient::value (const Point &p, - // And this is the function that - // returns the value of the - // coefficient at a whole list of - // points at once. Of course, we need - // to make sure that the values are - // the same as if we would ask the - // value function for each point - // individually. - // - // This method takes three - // parameters: a list of points at - // which to evaluate the function, a - // list that will hold the values at - // these points, and the vector - // component that should be zero here - // since we only have a single scalar - // function. Now, of course the size - // of the output array (values) - // must be the same as that of the - // input array (points), and we - // could simply assume that. However, - // in practice, it turns out that - // more than 90 per cent of - // programming errors are invalid - // function parameters such as - // invalid array sizes, etc, so we - // should try to make sure that the - // parameters are valid. For this, - // the Assert macro is a good means, - // since it makes sure that the - // condition which is given as first - // argument is valid, and if not - // throws an exception (its second - // argument) which will usually - // terminate the program giving - // information where the error - // occured and what the reason - // was. 
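The coefficient described here (20 inside a disc of radius 0.5, 1 outside) together with its batched value_list interface can be mimicked without deal.II. A minimal sketch, using std::array points and the standard assert in place of the library's exception mechanism:

    #include <array>
    #include <cassert>
    #include <cstdio>
    #include <vector>

    using Point2 = std::array<double,2>;

    // Variable coefficient: 20 inside the disc of radius 0.5, 1 outside,
    // with a batched interface that fills values for a whole list of points.
    struct Coefficient
    {
      double value(const Point2 &p) const
      {
        return (p[0] * p[0] + p[1] * p[1] < 0.5 * 0.5) ? 20.0 : 1.0;
      }

      void value_list(const std::vector<Point2> &points,
                      std::vector<double> &values) const
      {
        assert(values.size() == points.size());   // cf. ExcDimensionMismatch
        for (std::size_t i = 0; i < points.size(); ++i)
          values[i] = value(points[i]);
      }
    };

    int main()
    {
      const Coefficient coefficient;
      const std::vector<Point2> points = { Point2{0.1, 0.1}, Point2{0.7, 0.0} };
      std::vector<double> values(points.size());
      coefficient.value_list(points, values);
      std::printf("%g %g\n", values[0], values[1]);   // 20 1
      return 0;
    }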
This generally reduces the - // time to find programming errors - // dramatically and we have found - // assertions an invaluable means to - // program fast. - // - // On the other hand, all these - // checks (there are more than 4200 - // of them in the library at present) - // should not slow down the program - // too much if you want to do large - // computations. To this end, the - // Assert macro is only used in - // debug mode and expands to nothing - // if in optimized mode. Therefore, - // while you test your program on - // small problems and debug it, the - // assertions will tell you where the - // problems are. Once your program - // is stable, you can switch off - // debugging and the program will run - // your real computations without the - // assertions and at maximum - // speed. (In fact, it turns out the - // switching off all the checks in - // the library that prevent you from - // calling functions with the wrong - // arguments by switching to - // optimized mode, makes most - // programs run faster by about a - // factor of four. This should, - // however, not try to induce you to - // always run in optimized mode: Most - // people who have tried that soon - // realize that they introduce lots - // of errors that would have easily - // been caught had they run the - // program in debug mode while - // developing.) For those who want to - // try: The way to switch from debug - // mode to optimized mode is to go - // edit the Makefile in this - // directory. It should have a line - // debug-mode = on; simply - // replace it by debug-mode = off - // and recompile your program. The - // output of the make program - // should already indicate to you - // that the program is now compiled - // in optimized mode, and it will - // later also be linked to libraries - // that have been compiled for - // optimized mode. - // - // Here, as has been said above, we - // would like to make sure that the - // size of the two arrays is equal, - // and if not throw an - // exception. Comparing the sizes of - // two arrays is one of the most - // frequent checks, which is why - // there is already an exception - // class ExcDimensionMismatch - // that takes the sizes of two - // vectors and prints some output in - // case the condition is violated: + // And this is the function that + // returns the value of the + // coefficient at a whole list of + // points at once. Of course, we need + // to make sure that the values are + // the same as if we would ask the + // value function for each point + // individually. + // + // This method takes three + // parameters: a list of points at + // which to evaluate the function, a + // list that will hold the values at + // these points, and the vector + // component that should be zero here + // since we only have a single scalar + // function. Now, of course the size + // of the output array (values) + // must be the same as that of the + // input array (points), and we + // could simply assume that. However, + // in practice, it turns out that + // more than 90 per cent of + // programming errors are invalid + // function parameters such as + // invalid array sizes, etc, so we + // should try to make sure that the + // parameters are valid. 
For this, + // the Assert macro is a good means, + // since it makes sure that the + // condition which is given as first + // argument is valid, and if not + // throws an exception (its second + // argument) which will usually + // terminate the program giving + // information where the error + // occured and what the reason + // was. This generally reduces the + // time to find programming errors + // dramatically and we have found + // assertions an invaluable means to + // program fast. + // + // On the other hand, all these + // checks (there are more than 4200 + // of them in the library at present) + // should not slow down the program + // too much if you want to do large + // computations. To this end, the + // Assert macro is only used in + // debug mode and expands to nothing + // if in optimized mode. Therefore, + // while you test your program on + // small problems and debug it, the + // assertions will tell you where the + // problems are. Once your program + // is stable, you can switch off + // debugging and the program will run + // your real computations without the + // assertions and at maximum + // speed. (In fact, it turns out the + // switching off all the checks in + // the library that prevent you from + // calling functions with the wrong + // arguments by switching to + // optimized mode, makes most + // programs run faster by about a + // factor of four. This should, + // however, not try to induce you to + // always run in optimized mode: Most + // people who have tried that soon + // realize that they introduce lots + // of errors that would have easily + // been caught had they run the + // program in debug mode while + // developing.) For those who want to + // try: The way to switch from debug + // mode to optimized mode is to go + // edit the Makefile in this + // directory. It should have a line + // debug-mode = on; simply + // replace it by debug-mode = off + // and recompile your program. The + // output of the make program + // should already indicate to you + // that the program is now compiled + // in optimized mode, and it will + // later also be linked to libraries + // that have been compiled for + // optimized mode. + // + // Here, as has been said above, we + // would like to make sure that the + // size of the two arrays is equal, + // and if not throw an + // exception. Comparing the sizes of + // two arrays is one of the most + // frequent checks, which is why + // there is already an exception + // class ExcDimensionMismatch + // that takes the sizes of two + // vectors and prints some output in + // case the condition is violated: template void Coefficient::value_list (const std::vector > &points, - std::vector &values, - const unsigned int component) const + std::vector &values, + const unsigned int component) const { Assert (values.size() == points.size(), - ExcDimensionMismatch (values.size(), points.size())); - // Since examples are not very good - // if they do not demonstrate their - // point, we will show how to - // trigger this exception at the - // end of the main program, and - // what output results from this - // (see the Results section of - // this example program). You will - // certainly notice that the output - // is quite well suited to quickly - // find what the problem is and - // what parameters are expected. 
An - // additional plus is that if the - // program is run inside a - // debugger, it will stop at the - // point where the exception is - // triggered, so you can go up the - // call stack to immediately find - // the place where the the array - // with the wrong size was set up. - - // While we're at it, we can do - // another check: the coefficient - // is a scalar, but the - // Function class also - // represents vector-valued - // function. A scalar function must - // therefore be considered as a - // vector-valued function with only - // one component, so the only valid - // component for which a user might - // ask is zero (we always count - // from zero). The following - // assertion checks this. If the - // condition in the Assert call - // is violated, an exception of - // type ExcRange will be - // triggered; that class takes the - // violating index as first - // argument, and the second and - // third arguments denote a range - // that includes the left point but - // is open at the right, i.e. here - // the interval [0,1). For integer - // arguments, this means that the - // only value in the range is the - // zero, of course. (The interval - // is half open since we also want - // to write exceptions like - // ExcRange(i,0,v.size()), - // where an index must be between - // zero but less than the size of - // an array. To save us the effort - // of writing v.size()-1 in - // many places, the range is - // defined as half-open.) + ExcDimensionMismatch (values.size(), points.size())); + // Since examples are not very good + // if they do not demonstrate their + // point, we will show how to + // trigger this exception at the + // end of the main program, and + // what output results from this + // (see the Results section of + // this example program). You will + // certainly notice that the output + // is quite well suited to quickly + // find what the problem is and + // what parameters are expected. An + // additional plus is that if the + // program is run inside a + // debugger, it will stop at the + // point where the exception is + // triggered, so you can go up the + // call stack to immediately find + // the place where the the array + // with the wrong size was set up. + + // While we're at it, we can do + // another check: the coefficient + // is a scalar, but the + // Function class also + // represents vector-valued + // function. A scalar function must + // therefore be considered as a + // vector-valued function with only + // one component, so the only valid + // component for which a user might + // ask is zero (we always count + // from zero). The following + // assertion checks this. If the + // condition in the Assert call + // is violated, an exception of + // type ExcRange will be + // triggered; that class takes the + // violating index as first + // argument, and the second and + // third arguments denote a range + // that includes the left point but + // is open at the right, i.e. here + // the interval [0,1). For integer + // arguments, this means that the + // only value in the range is the + // zero, of course. (The interval + // is half open since we also want + // to write exceptions like + // ExcRange(i,0,v.size()), + // where an index must be between + // zero but less than the size of + // an array. To save us the effort + // of writing v.size()-1 in + // many places, the range is + // defined as half-open.) 
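The half-open index range [begin, end) discussed above can be captured in a tiny checking helper. The sketch below uses the standard assert rather than deal.II's Assert/ExcIndexRange, purely to stay self-contained:

    #include <cassert>
    #include <cstdio>
    #include <vector>

    // Half-open range check in the spirit of ExcIndexRange: the index must
    // satisfy begin <= index < end, so "end" can simply be v.size().
    void check_index_range(std::size_t index, std::size_t begin, std::size_t end)
    {
      assert(begin <= index && index < end);
      (void)index; (void)begin; (void)end;   // silence warnings if NDEBUG is set
    }

    int main()
    {
      const std::vector<double> v(4, 1.0);
      check_index_range(3, 0, v.size());     // fine: 3 lies in [0,4)
      // check_index_range(4, 0, v.size()); // would trigger the assertion
      std::printf("all index checks passed\n");
      return 0;
    }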
Assert (component == 0, - ExcIndexRange (component, 0, 1)); - - // The rest of the function is - // uneventful: we define - // n_q_points as an - // abbreviation for the number of - // points for which function values - // are requested, and then simply - // fill the output value: + ExcIndexRange (component, 0, 1)); + + // The rest of the function is + // uneventful: we define + // n_q_points as an + // abbreviation for the number of + // points for which function values + // are requested, and then simply + // fill the output value: const unsigned int n_points = points.size(); for (unsigned int i=0; i::value_list (const std::vector > &points, // @sect4{Step5::Step5} - // This function is as before. + // This function is as before. template Step5::Step5 () : fe (1), - dof_handler (triangulation) + dof_handler (triangulation) {} // @sect4{Step5::setup_system} - // This is the function - // make_grid_and_dofs from the - // previous example, minus the - // generation of the grid. Everything - // else is unchanged: + // This is the function + // make_grid_and_dofs from the + // previous example, minus the + // generation of the grid. Everything + // else is unchanged: template void Step5::setup_system () { dof_handler.distribute_dofs (fe); std::cout << " Number of degrees of freedom: " - << dof_handler.n_dofs() - << std::endl; + << dof_handler.n_dofs() + << std::endl; CompressedSparsityPattern c_sparsity(dof_handler.n_dofs()); DoFTools::make_sparsity_pattern (dof_handler, c_sparsity); @@ -396,45 +396,45 @@ void Step5::setup_system () // @sect4{Step5::assemble_system} - // As in the previous examples, this - // function is not changed much with - // regard to its functionality, but - // there are still some optimizations - // which we will show. For this, it - // is important to note that if - // efficient solvers are used (such - // as the preconditions CG method), - // assembling the matrix and right - // hand side can take a comparable - // time, and you should think about - // using one or two optimizations at - // some places. - // - // What we will show here is how we - // can avoid calls to the - // shape_value, shape_grad, and - // quadrature_point functions of the - // FEValues object, and in particular - // optimize away most of the virtual - // function calls of the Function - // object. The way to do so will be - // explained in the following, while - // those parts of this function that - // are not changed with respect to - // the previous example are not - // commented on. - // - // The first parts of the function - // are completely unchanged from - // before: + // As in the previous examples, this + // function is not changed much with + // regard to its functionality, but + // there are still some optimizations + // which we will show. For this, it + // is important to note that if + // efficient solvers are used (such + // as the preconditions CG method), + // assembling the matrix and right + // hand side can take a comparable + // time, and you should think about + // using one or two optimizations at + // some places. + // + // What we will show here is how we + // can avoid calls to the + // shape_value, shape_grad, and + // quadrature_point functions of the + // FEValues object, and in particular + // optimize away most of the virtual + // function calls of the Function + // object. 
The way to do so will be + // explained in the following, while + // those parts of this function that + // are not changed with respect to + // the previous example are not + // commented on. + // + // The first parts of the function + // are completely unchanged from + // before: template void Step5::assemble_system () { QGauss quadrature_formula(2); FEValues fe_values (fe, quadrature_formula, - update_values | update_gradients | - update_quadrature_points | update_JxW_values); + update_values | update_gradients | + update_quadrature_points | update_JxW_values); const unsigned int dofs_per_cell = fe.dofs_per_cell; const unsigned int n_q_points = quadrature_formula.size(); @@ -444,88 +444,88 @@ void Step5::assemble_system () std::vector local_dof_indices (dofs_per_cell); - // Here is one difference: for this - // program, we will again use a - // constant right hand side - // function and zero boundary - // values, but a variable - // coefficient. We have already - // declared the class that - // represents this coefficient - // above, so we only have to - // declare a corresponding object - // here. - // - // Then, below, we will ask the - // coefficient function object - // to compute the values of the - // coefficient at all quadrature - // points on one cell at once. The - // reason for this is that, if you - // look back at how we did this in - // step-4, you will realize that we - // called the function computing - // the right hand side value inside - // nested loops over all degrees of - // freedom and over all quadrature - // points, - // i.e. dofs_per_cell*n_q_points - // times. For the coefficient that - // is used inside the matrix, this - // would actually be - // dofs_per_cell*dofs_per_cell*n_q_points. On - // the other hand, the function - // will of course return the same - // value everytime it is called - // with the same quadrature point, - // independently of what shape - // function we presently treat; - // secondly, these are virtual - // function calls, so are rather - // expensive. Obviously, there are - // only n_q_point different values, - // and we shouldn't call the - // function more often than - // that. Or, even better than this, - // compute all of these values at - // once, and get away with a single - // function call per cell. - // - // This is exactly what we are - // going to do. For this, we need - // some space to store the values - // in. We therefore also have to - // declare an array to hold these - // values: + // Here is one difference: for this + // program, we will again use a + // constant right hand side + // function and zero boundary + // values, but a variable + // coefficient. We have already + // declared the class that + // represents this coefficient + // above, so we only have to + // declare a corresponding object + // here. + // + // Then, below, we will ask the + // coefficient function object + // to compute the values of the + // coefficient at all quadrature + // points on one cell at once. The + // reason for this is that, if you + // look back at how we did this in + // step-4, you will realize that we + // called the function computing + // the right hand side value inside + // nested loops over all degrees of + // freedom and over all quadrature + // points, + // i.e. dofs_per_cell*n_q_points + // times. For the coefficient that + // is used inside the matrix, this + // would actually be + // dofs_per_cell*dofs_per_cell*n_q_points. 
On + // the other hand, the function + // will of course return the same + // value everytime it is called + // with the same quadrature point, + // independently of what shape + // function we presently treat; + // secondly, these are virtual + // function calls, so are rather + // expensive. Obviously, there are + // only n_q_point different values, + // and we shouldn't call the + // function more often than + // that. Or, even better than this, + // compute all of these values at + // once, and get away with a single + // function call per cell. + // + // This is exactly what we are + // going to do. For this, we need + // some space to store the values + // in. We therefore also have to + // declare an array to hold these + // values: const Coefficient coefficient; std::vector coefficient_values (n_q_points); - // Next is the typical loop over - // all cells to compute local - // contributions and then to - // transfer them into the global - // matrix and vector. - // - // The only two things in which - // this loop differs from step-4 is - // that we want to compute the - // value of the coefficient in all - // quadrature points on the present - // cell at the beginning, and then - // use it in the computation of the - // local contributions. This is - // what we do in the call to - // coefficient.value_list in - // the fourth line of the loop. - // - // The second change is how we make - // use of this coefficient in - // computing the cell matrix - // contributions. This is in the - // obvious way, and not worth more - // comments. For the right hand - // side, we use a constant value - // again. + // Next is the typical loop over + // all cells to compute local + // contributions and then to + // transfer them into the global + // matrix and vector. + // + // The only two things in which + // this loop differs from step-4 is + // that we want to compute the + // value of the coefficient in all + // quadrature points on the present + // cell at the beginning, and then + // use it in the computation of the + // local contributions. This is + // what we do in the call to + // coefficient.value_list in + // the fourth line of the loop. + // + // The second change is how we make + // use of this coefficient in + // computing the cell matrix + // contributions. This is in the + // obvious way, and not worth more + // comments. For the right hand + // side, we use a constant value + // again. typename DoFHandler::active_cell_iterator cell = dof_handler.begin_active(), endc = dof_handler.end(); @@ -537,100 +537,100 @@ void Step5::assemble_system () fe_values.reinit (cell); coefficient.value_list (fe_values.get_quadrature_points(), - coefficient_values); + coefficient_values); for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); for (unsigned int i=0; i boundary_values; VectorTools::interpolate_boundary_values (dof_handler, - 0, - ZeroFunction(), - boundary_values); + 0, + ZeroFunction(), + boundary_values); MatrixTools::apply_boundary_values (boundary_values, - system_matrix, - solution, - system_rhs); + system_matrix, + solution, + system_rhs); } // @sect4{Step5::solve} - // The solution process again looks - // mostly like in the previous - // examples. However, we will now use - // a preconditioned conjugate - // gradient algorithm. It is not very - // difficult to make this change. In - // fact, the only thing we have to - // alter is that we need an object - // which will act as a - // preconditioner. 
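The point of calling value_list once per cell, instead of asking for the coefficient inside the nested i/j/q loops, is that the values are computed n_q_points times per cell and then merely reused. A standalone sketch of this hoisting pattern, with made-up quadrature data for a single cell:

    #include <array>
    #include <cstdio>
    #include <vector>

    using Point2 = std::array<double,2>;

    // Hypothetical coefficient: 20 inside the disc of radius 0.5, 1 outside.
    double coefficient(const Point2 &p)
    {
      return (p[0] * p[0] + p[1] * p[1] < 0.25) ? 20.0 : 1.0;
    }

    int main()
    {
      // Hypothetical quadrature data on one cell.
      const std::vector<Point2> q_points = { Point2{0.2, 0.2}, Point2{0.8, 0.8} };
      const std::vector<double> JxW      = { 0.5, 0.5 };
      const unsigned int dofs_per_cell   = 4;

      // Evaluate the coefficient once per quadrature point, up front ...
      std::vector<double> coefficient_values(q_points.size());
      for (std::size_t q = 0; q < q_points.size(); ++q)
        coefficient_values[q] = coefficient(q_points[q]);

      // ... and only reuse the stored values inside the dof loops, instead of
      // re-evaluating the function dofs_per_cell^2 times per quadrature point.
      double accumulated = 0;                  // stand-in for real matrix entries
      for (std::size_t q = 0; q < q_points.size(); ++q)
        for (unsigned int i = 0; i < dofs_per_cell; ++i)
          accumulated += coefficient_values[q] * JxW[q];

      std::printf("accumulated %g\n", accumulated);
      return 0;
    }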
We will use SSOR - // (symmetric successive - // overrelaxation), with a relaxation - // factor of 1.2. For this purpose, - // the SparseMatrix class has a - // function which does one SSOR step, - // and we need to package the address - // of this function together with the - // matrix on which it should act - // (which is the matrix to be - // inverted) and the relaxation - // factor into one object. The - // PreconditionSSOR class does - // this for us. (PreconditionSSOR - // class takes a template argument - // denoting the matrix type it is - // supposed to work on. The default - // value is SparseMatrix@, - // which is exactly what we need - // here, so we simply stick with the - // default and do not specify - // anything in the angle brackets.) - // - // Note that for the present case, - // SSOR doesn't really perform much - // better than most other - // preconditioners (though better - // than no preconditioning at all). A - // brief comparison of different - // preconditioners is presented in - // the Results section of the next - // tutorial program, step-6. - // - // With this, the rest of the - // function is trivial: instead of - // the PreconditionIdentity - // object we have created before, we - // now use the preconditioner we have - // declared, and the CG solver will - // do the rest for us: + // The solution process again looks + // mostly like in the previous + // examples. However, we will now use + // a preconditioned conjugate + // gradient algorithm. It is not very + // difficult to make this change. In + // fact, the only thing we have to + // alter is that we need an object + // which will act as a + // preconditioner. We will use SSOR + // (symmetric successive + // overrelaxation), with a relaxation + // factor of 1.2. For this purpose, + // the SparseMatrix class has a + // function which does one SSOR step, + // and we need to package the address + // of this function together with the + // matrix on which it should act + // (which is the matrix to be + // inverted) and the relaxation + // factor into one object. The + // PreconditionSSOR class does + // this for us. (PreconditionSSOR + // class takes a template argument + // denoting the matrix type it is + // supposed to work on. The default + // value is SparseMatrix@, + // which is exactly what we need + // here, so we simply stick with the + // default and do not specify + // anything in the angle brackets.) + // + // Note that for the present case, + // SSOR doesn't really perform much + // better than most other + // preconditioners (though better + // than no preconditioning at all). A + // brief comparison of different + // preconditioners is presented in + // the Results section of the next + // tutorial program, step-6. + // + // With this, the rest of the + // function is trivial: instead of + // the PreconditionIdentity + // object we have created before, we + // now use the preconditioner we have + // declared, and the CG solver will + // do the rest for us: template void Step5::solve () { @@ -641,23 +641,23 @@ void Step5::solve () preconditioner.initialize(system_matrix, 1.2); solver.solve (system_matrix, solution, system_rhs, - preconditioner); + preconditioner); std::cout << " " << solver_control.last_step() - << " CG iterations needed to obtain convergence." - << std::endl; + << " CG iterations needed to obtain convergence." 
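The SSOR-preconditioned CG solver itself lives inside deal.II, but the way a preconditioner plugs into the iteration can be shown with a small dense example. The sketch below swaps in a Jacobi (diagonal) preconditioner for SSOR purely to stay short and self-contained; the matrix and right-hand side are made up:

    #include <array>
    #include <cmath>
    #include <cstdio>

    constexpr int N = 3;
    using Vec = std::array<double,N>;
    using Mat = std::array<std::array<double,N>,N>;

    Vec matvec(const Mat &A, const Vec &x)
    {
      Vec y{};
      for (int i = 0; i < N; ++i)
        for (int j = 0; j < N; ++j)
          y[i] += A[i][j] * x[j];
      return y;
    }

    double dot(const Vec &a, const Vec &b)
    {
      double s = 0;
      for (int i = 0; i < N; ++i) s += a[i] * b[i];
      return s;
    }

    int main()
    {
      // Small SPD test matrix and right-hand side (made up for the sketch).
      const Mat A = {{ {4, 1, 0}, {1, 3, 1}, {0, 1, 2} }};
      const Vec b = {1, 2, 3};

      Vec x{};                                   // start from zero
      Vec r = b;                                 // r = b - A*0
      Vec z;                                     // z = M^{-1} r, Jacobi: M = diag(A)
      for (int i = 0; i < N; ++i) z[i] = r[i] / A[i][i];
      Vec p = z;

      double rz = dot(r, z);
      for (int it = 0; it < 100 && std::sqrt(dot(r, r)) > 1e-12; ++it)
        {
          const Vec Ap = matvec(A, p);
          const double alpha = rz / dot(p, Ap);
          for (int i = 0; i < N; ++i) { x[i] += alpha * p[i]; r[i] -= alpha * Ap[i]; }

          for (int i = 0; i < N; ++i) z[i] = r[i] / A[i][i];   // apply preconditioner
          const double rz_new = dot(r, z);
          const double beta = rz_new / rz;
          for (int i = 0; i < N; ++i) p[i] = z[i] + beta * p[i];
          rz = rz_new;
        }

      std::printf("x = (%g, %g, %g)\n", x[0], x[1], x[2]);
      return 0;
    }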
+ << std::endl; } // @sect4{Step5::output_results and setting output flags} - // Writing output to a file is mostly - // the same as for the previous - // example, but here we will show how - // to modify some output options and - // how to construct a different - // filename for each refinement - // cycle. + // Writing output to a file is mostly + // the same as for the previous + // example, but here we will show how + // to modify some output options and + // how to construct a different + // filename for each refinement + // cycle. template void Step5::output_results (const unsigned int cycle) const { @@ -668,126 +668,126 @@ void Step5::output_results (const unsigned int cycle) const data_out.build_patches (); - // For this example, we would like - // to write the output directly to - // a file in Encapsulated - // Postscript (EPS) format. The - // library supports this, but - // things may be a bit more - // difficult sometimes, since EPS - // is a printing format, unlike - // most other supported formats - // which serve as input for - // graphical tools. Therefore, you - // can't scale or rotate the image - // after it has been written to - // disk, and you have to decide - // about the viewpoint or the - // scaling in advance. - // - // The defaults in the library are - // usually quite reasonable, and - // regarding viewpoint and scaling - // they coincide with the defaults - // of Gnuplot. However, since this - // is a tutorial, we will - // demonstrate how to change - // them. For this, we first have to - // generate an object describing - // the flags for EPS output - // (similar flag classes exist for - // all supported output formats): + // For this example, we would like + // to write the output directly to + // a file in Encapsulated + // Postscript (EPS) format. The + // library supports this, but + // things may be a bit more + // difficult sometimes, since EPS + // is a printing format, unlike + // most other supported formats + // which serve as input for + // graphical tools. Therefore, you + // can't scale or rotate the image + // after it has been written to + // disk, and you have to decide + // about the viewpoint or the + // scaling in advance. + // + // The defaults in the library are + // usually quite reasonable, and + // regarding viewpoint and scaling + // they coincide with the defaults + // of Gnuplot. However, since this + // is a tutorial, we will + // demonstrate how to change + // them. For this, we first have to + // generate an object describing + // the flags for EPS output + // (similar flag classes exist for + // all supported output formats): DataOutBase::EpsFlags eps_flags; - // They are initialized with the - // default values, so we only have - // to change those that we don't - // like. For example, we would like - // to scale the z-axis differently - // (stretch each data point in - // z-direction by a factor of four): + // They are initialized with the + // default values, so we only have + // to change those that we don't + // like. For example, we would like + // to scale the z-axis differently + // (stretch each data point in + // z-direction by a factor of four): eps_flags.z_scaling = 4; - // Then we would also like to alter - // the viewpoint from which we look - // at the solution surface. The - // default is at an angle of 60 - // degrees down from the vertical - // axis, and 30 degrees rotated - // against it in mathematical - // positive sense. 
We raise our - // viewpoint a bit and look more - // along the y-axis: + // Then we would also like to alter + // the viewpoint from which we look + // at the solution surface. The + // default is at an angle of 60 + // degrees down from the vertical + // axis, and 30 degrees rotated + // against it in mathematical + // positive sense. We raise our + // viewpoint a bit and look more + // along the y-axis: eps_flags.azimut_angle = 40; eps_flags.turn_angle = 10; - // That shall suffice. There are - // more flags, for example whether - // to draw the mesh lines, which - // data vectors to use for - // colorization of the interior of - // the cells, and so on. You may - // want to take a look at the - // documentation of the EpsFlags - // structure to get an overview of - // what is possible. - // - // The only thing still to be done, - // is to tell the output object to - // use these flags: + // That shall suffice. There are + // more flags, for example whether + // to draw the mesh lines, which + // data vectors to use for + // colorization of the interior of + // the cells, and so on. You may + // want to take a look at the + // documentation of the EpsFlags + // structure to get an overview of + // what is possible. + // + // The only thing still to be done, + // is to tell the output object to + // use these flags: data_out.set_flags (eps_flags); - // The above way to modify flags - // requires recompilation each time - // we would like to use different - // flags. This is inconvenient, and - // we will see more advanced ways - // in step-19 where the output - // flags are determined at run time - // using an input file (step-19 - // doesn't show many other things; - // you should feel free to read - // over it even if you haven't done - // step-6 to step-18 yet). - - // Finally, we need the filename to - // which the results are to be - // written. We would like to have - // it of the form - // solution-N.eps, where N is - // the number of the refinement - // cycle. Thus, we have to convert - // an integer to a part of a - // string; this can be done using - // the sprintf function, but in - // C++ there is a more elegant way: - // write everything into a special - // stream (just like writing into a - // file or to the screen) and - // retrieve what you wrote as a - // string. This applies the usual - // conversions from integer to - // strings, and one could as well - // use stream modifiers such as - // setw, setprecision, and - // so on. In C++, you can do this - // by using the so-called stringstream - // classes: + // The above way to modify flags + // requires recompilation each time + // we would like to use different + // flags. This is inconvenient, and + // we will see more advanced ways + // in step-19 where the output + // flags are determined at run time + // using an input file (step-19 + // doesn't show many other things; + // you should feel free to read + // over it even if you haven't done + // step-6 to step-18 yet). + + // Finally, we need the filename to + // which the results are to be + // written. We would like to have + // it of the form + // solution-N.eps, where N is + // the number of the refinement + // cycle. Thus, we have to convert + // an integer to a part of a + // string; this can be done using + // the sprintf function, but in + // C++ there is a more elegant way: + // write everything into a special + // stream (just like writing into a + // file or to the screen) and + // retrieve what you wrote as a + // string. 
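The viewpoint and scaling changes discussed above amount to just a handful of assignments. The sketch below assumes a DataOut object named data_out that has already been filled and had build_patches() called on it, as in the hunk; the field names are the ones shown there.

// Stretch the solution by a factor of four in the z-direction and
// move the viewpoint (the defaults are 60 and 30 degrees), then hand
// the flags to the DataOut object before writing EPS output.
DataOutBase::EpsFlags eps_flags;
eps_flags.z_scaling    = 4;
eps_flags.azimut_angle = 40;
eps_flags.turn_angle   = 10;
data_out.set_flags (eps_flags);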
This applies the usual + // conversions from integer to + // strings, and one could as well + // use stream modifiers such as + // setw, setprecision, and + // so on. In C++, you can do this + // by using the so-called stringstream + // classes: std::ostringstream filename; - // In order to now actually - // generate a filename, we fill the - // stringstream variable with the - // base of the filename, then the - // number part, and finally the - // suffix indicating the file type: + // In order to now actually + // generate a filename, we fill the + // stringstream variable with the + // base of the filename, then the + // number part, and finally the + // suffix indicating the file type: filename << "solution-" - << cycle - << ".eps"; + << cycle + << ".eps"; // We can get whatever we wrote to the - // stream using the str() function. The - // result is a string which we have to - // convert to a char* using the c_str() - // function. Use that as filename for the - // output stream and then write the data to - // the file: + // stream using the str() function. The + // result is a string which we have to + // convert to a char* using the c_str() + // function. Use that as filename for the + // output stream and then write the data to + // the file: std::ofstream output (filename.str().c_str()); data_out.write_eps (output); @@ -797,120 +797,120 @@ void Step5::output_results (const unsigned int cycle) const // @sect4{Step5::run} - // The second to last thing in this - // program is the definition of the - // run() function. In contrast to - // the previous programs, we will - // compute on a sequence of meshes - // that after each iteration is - // globall refined. The function - // therefore consists of a loop over - // 6 cycles. In each cycle, we first - // print the cycle number, and then - // have to decide what to do with the - // mesh. If this is not the first - // cycle, we simply refine the - // existing mesh once - // globally. Before running through - // these cycles, however, - // we have to generate a mesh: - - // In previous examples, we have - // already used some of the functions - // from the - // GridGenerator - // class. Here we would like to read - // a grid from a file where the cells - // are stored and which may originate - // from someone else, or may be the - // product of a mesh generator tool. - // - // In order to read a grid from a - // file, we generate an object of - // data type GridIn and associate the - // triangulation to it (i.e. we tell - // it to fill our triangulation - // object when we ask it to read the - // file). Then we open the respective - // file and initialize the - // triangulation with the data in the - // file: + // The second to last thing in this + // program is the definition of the + // run() function. In contrast to + // the previous programs, we will + // compute on a sequence of meshes + // that after each iteration is + // globall refined. The function + // therefore consists of a loop over + // 6 cycles. In each cycle, we first + // print the cycle number, and then + // have to decide what to do with the + // mesh. If this is not the first + // cycle, we simply refine the + // existing mesh once + // globally. Before running through + // these cycles, however, + // we have to generate a mesh: + + // In previous examples, we have + // already used some of the functions + // from the + // GridGenerator + // class. 
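The filename construction itself needs nothing from deal.II, so it can be tried out in isolation. The following small program only demonstrates the idiom described above; in the tutorial the placeholder output line is of course replaced by data_out.write_eps (output).

#include <fstream>
#include <iostream>
#include <sstream>

int main ()
{
  for (unsigned int cycle = 0; cycle < 6; ++cycle)
    {
      // Stream the pieces of the name into an ostringstream and get
      // the assembled string back via str().
      std::ostringstream filename;
      filename << "solution-" << cycle << ".eps";

      // c_str() provides the char* that the (pre-C++11) ofstream
      // constructor expects.
      std::ofstream output (filename.str().c_str());
      output << "placeholder for cycle " << cycle << std::endl;

      std::cout << "wrote " << filename.str() << std::endl;
    }
  return 0;
}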
Here we would like to read + // a grid from a file where the cells + // are stored and which may originate + // from someone else, or may be the + // product of a mesh generator tool. + // + // In order to read a grid from a + // file, we generate an object of + // data type GridIn and associate the + // triangulation to it (i.e. we tell + // it to fill our triangulation + // object when we ask it to read the + // file). Then we open the respective + // file and initialize the + // triangulation with the data in the + // file: template void Step5::run () { GridIn grid_in; grid_in.attach_triangulation (triangulation); std::ifstream input_file("circle-grid.inp"); - // We would now like to read the - // file. However, the input file is - // only for a two-dimensional - // triangulation, while this - // function is a template for - // arbitrary dimension. Since this - // is only a demonstration program, - // we will not use different input - // files for the different - // dimensions, but rather kill the - // whole program if we are not in - // 2D: + // We would now like to read the + // file. However, the input file is + // only for a two-dimensional + // triangulation, while this + // function is a template for + // arbitrary dimension. Since this + // is only a demonstration program, + // we will not use different input + // files for the different + // dimensions, but rather kill the + // whole program if we are not in + // 2D: Assert (dim==2, ExcInternalError()); - // ExcInternalError is a globally - // defined exception, which may be - // thrown whenever something is - // terribly wrong. Usually, one - // would like to use more specific - // exceptions, and particular in - // this case one would of course - // try to do something else if - // dim is not equal to - // two, e.g. create a grid using - // library functions. Aborting a - // program is usually not a good - // idea and assertions should - // really only be used for - // exceptional cases which should - // not occur, but might due to - // stupidity of the programmer, - // user, or someone else. The - // situation above is not a very - // clever use of Assert, but again: - // this is a tutorial and it might - // be worth to show what not to do, - // after all. - - // So if we got past the assertion, - // we know that dim==2, and we can - // now actually read the grid. It - // is in UCD (unstructured cell - // data) format (but the ending of - // the UCD-file is - // inp), as supported - // as input format by the AVS - // Explorer (a visualization - // program), for example: + // ExcInternalError is a globally + // defined exception, which may be + // thrown whenever something is + // terribly wrong. Usually, one + // would like to use more specific + // exceptions, and particular in + // this case one would of course + // try to do something else if + // dim is not equal to + // two, e.g. create a grid using + // library functions. Aborting a + // program is usually not a good + // idea and assertions should + // really only be used for + // exceptional cases which should + // not occur, but might due to + // stupidity of the programmer, + // user, or someone else. The + // situation above is not a very + // clever use of Assert, but again: + // this is a tutorial and it might + // be worth to show what not to do, + // after all. + + // So if we got past the assertion, + // we know that dim==2, and we can + // now actually read the grid. 
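Gathered into one place, the mesh-reading part of Step5<dim>::run() described above looks as follows. The sketch assumes the tutorial's class members and include list; only calls that actually appear in the hunks are used.

template <int dim>
void Step5<dim>::run ()
{
  // Tell the GridIn object which triangulation it should fill, and
  // open the mesh file.
  GridIn<dim> grid_in;
  grid_in.attach_triangulation (triangulation);
  std::ifstream input_file ("circle-grid.inp");

  // The file describes a 2d mesh only; abort in any other dimension
  // (admittedly a blunt use of Assert, as the comments concede).
  Assert (dim == 2, ExcInternalError());

  // The extension .inp notwithstanding, the file is in UCD format.
  grid_in.read_ucd (input_file);

  // ... boundary description and refinement cycles follow, see below.
}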
It + // is in UCD (unstructured cell + // data) format (but the ending of + // the UCD-file is + // inp), as supported + // as input format by the AVS + // Explorer (a visualization + // program), for example: grid_in.read_ucd (input_file); - // If you like to use another input - // format, you have to use an other - // grid_in.read_xxx - // function. (See the documentation - // of the GridIn class - // to find out what input formats - // are presently supported.) - - // The grid in the file describes a - // circle. Therefore we have to use - // a boundary object which tells - // the triangulation where to put - // new points on the boundary when - // the grid is refined. This works - // in the same way as in the first - // example. Note that the - // HyperBallBoundary constructor - // takes two parameters, the center - // of the ball and the radius, but - // that their default (the origin - // and 1.0) are the ones which we - // would like to use here. + // If you like to use another input + // format, you have to use an other + // grid_in.read_xxx + // function. (See the documentation + // of the GridIn class + // to find out what input formats + // are presently supported.) + + // The grid in the file describes a + // circle. Therefore we have to use + // a boundary object which tells + // the triangulation where to put + // new points on the boundary when + // the grid is refined. This works + // in the same way as in the first + // example. Note that the + // HyperBallBoundary constructor + // takes two parameters, the center + // of the ball and the radius, but + // that their default (the origin + // and 1.0) are the ones which we + // would like to use here. static const HyperBallBoundary boundary; triangulation.set_boundary (0, boundary); @@ -919,19 +919,19 @@ void Step5::run () std::cout << "Cycle " << cycle << ':' << std::endl; if (cycle != 0) - triangulation.refine_global (1); + triangulation.refine_global (1); - // Now that we have a mesh for - // sure, we write some output - // and do all the things that - // we have already seen in the - // previous examples. + // Now that we have a mesh for + // sure, we write some output + // and do all the things that + // we have already seen in the + // previous examples. std::cout << " Number of active cells: " - << triangulation.n_active_cells() - << std::endl - << " Total number of cells: " - << triangulation.n_cells() - << std::endl; + << triangulation.n_active_cells() + << std::endl + << " Total number of cells: " + << triangulation.n_cells() + << std::endl; setup_system (); assemble_system (); @@ -943,10 +943,10 @@ void Step5::run () // @sect3{The main function} - // The main function looks mostly - // like the one in the previous - // example, so we won't comment on it - // further: + // The main function looks mostly + // like the one in the previous + // example, so we won't comment on it + // further: int main () { deallog.depth_console (0); @@ -954,28 +954,28 @@ int main () Step5<2> laplace_problem_2d; laplace_problem_2d.run (); - // Finally, we have promised to - // trigger an exception in the - // Coefficient class through - // the Assert macro we have - // introduced there. For this, we - // have to call its value_list - // function with two arrays of - // different size (the number in - // parentheses behind the - // declaration of the object). 
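The boundary description and the cycle loop that the hunks above go through can be summarized in the following fragment, taken to live inside Step5<dim>::run() right after the grid has been read; triangulation and the member functions called at the end are the tutorial's.

// The mesh approximates a circle, so attach a boundary object that
// knows where to place new boundary vertices upon refinement; the
// default center (origin) and radius (1.0) are exactly what we need.
static const HyperBallBoundary<dim> boundary;
triangulation.set_boundary (0, boundary);

for (unsigned int cycle = 0; cycle < 6; ++cycle)
  {
    std::cout << "Cycle " << cycle << ':' << std::endl;

    // The coarse mesh came from the file; every later cycle refines
    // the previous mesh once globally.
    if (cycle != 0)
      triangulation.refine_global (1);

    std::cout << "   Number of active cells: "
              << triangulation.n_active_cells() << std::endl
              << "   Total number of cells: "
              << triangulation.n_cells() << std::endl;

    setup_system ();
    assemble_system ();
    solve ();
    output_results (cycle);
  }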
We - // have commented out these lines - // in order to allow the program to - // exit gracefully in normal - // situations (we use the program - // in day-to-day testing of changes - // to the library as well), so you - // will only get the exception by - // un-commenting the following - // lines. Take a look at the - // Results section of the program - // to see what happens when the - // code is actually run: + // Finally, we have promised to + // trigger an exception in the + // Coefficient class through + // the Assert macro we have + // introduced there. For this, we + // have to call its value_list + // function with two arrays of + // different size (the number in + // parentheses behind the + // declaration of the object). We + // have commented out these lines + // in order to allow the program to + // exit gracefully in normal + // situations (we use the program + // in day-to-day testing of changes + // to the library as well), so you + // will only get the exception by + // un-commenting the following + // lines. Take a look at the + // Results section of the program + // to see what happens when the + // code is actually run: /* Coefficient<2> coefficient; std::vector > points (2); diff --git a/deal.II/examples/step-6/step-6.cc b/deal.II/examples/step-6/step-6.cc index 4b6d4a04ef..fca0d404aa 100644 --- a/deal.II/examples/step-6/step-6.cc +++ b/deal.II/examples/step-6/step-6.cc @@ -11,10 +11,10 @@ // @sect3{Include files} - // The first few files have already - // been covered in previous examples - // and will thus not be further - // commented on. + // The first few files have already + // been covered in previous examples + // and will thus not be further + // commented on. #include #include #include @@ -40,86 +40,86 @@ #include #include - // From the following include file we - // will import the declaration of - // H1-conforming finite element shape - // functions. This family of finite - // elements is called FE_Q, and - // was used in all examples before - // already to define the usual bi- or - // tri-linear elements, but we will - // now use it for bi-quadratic - // elements: + // From the following include file we + // will import the declaration of + // H1-conforming finite element shape + // functions. This family of finite + // elements is called FE_Q, and + // was used in all examples before + // already to define the usual bi- or + // tri-linear elements, but we will + // now use it for bi-quadratic + // elements: #include - // We will not read the grid from a - // file as in the previous example, - // but generate it using a function - // of the library. However, we will - // want to write out the locally - // refined grids (just the grid, not - // the solution) in each step, so we - // need the following include file - // instead of grid_in.h: + // We will not read the grid from a + // file as in the previous example, + // but generate it using a function + // of the library. However, we will + // want to write out the locally + // refined grids (just the grid, not + // the solution) in each step, so we + // need the following include file + // instead of grid_in.h: #include - // When using locally refined grids, - // we will get so-called hanging - // nodes. However, the standard - // finite element methods assumes - // that the discrete solution spaces - // be continuous, so we need to make - // sure that the degrees of freedom - // on hanging nodes conform to some - // constraints such that the global - // solution is continuous. 
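Going back to the step-5 main() hunk above for a moment: the commented-out block it alludes to looks roughly like the following sketch. The exact lines may differ slightly from the distributed example, but the point is simply that the input array holds two points while the output array has room for only one value, so the Assert in Coefficient<2>::value_list() fires with an ExcDimensionMismatch message.

// Deliberately mismatched array sizes -- uncomment to see the
// exception; keep commented out for normal runs.
/*
  Coefficient<2>          coefficient;
  std::vector<Point<2> >  points (2);              // two evaluation points ...
  std::vector<double>     coefficient_values (1);  // ... but space for one value
  coefficient.value_list (points, coefficient_values);
*/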
The - // following file contains a class - // which is used to handle these - // constraints: + // When using locally refined grids, + // we will get so-called hanging + // nodes. However, the standard + // finite element methods assumes + // that the discrete solution spaces + // be continuous, so we need to make + // sure that the degrees of freedom + // on hanging nodes conform to some + // constraints such that the global + // solution is continuous. The + // following file contains a class + // which is used to handle these + // constraints: #include - // In order to refine our grids - // locally, we need a function from - // the library that decides which - // cells to flag for refinement or - // coarsening based on the error - // indicators we have computed. This - // function is defined here: + // In order to refine our grids + // locally, we need a function from + // the library that decides which + // cells to flag for refinement or + // coarsening based on the error + // indicators we have computed. This + // function is defined here: #include - // Finally, we need a simple way to - // actually compute the refinement - // indicators based on some error - // estimat. While in general, - // adaptivity is very - // problem-specific, the error - // indicator in the following file - // often yields quite nicely adapted - // grids for a wide class of - // problems. + // Finally, we need a simple way to + // actually compute the refinement + // indicators based on some error + // estimat. While in general, + // adaptivity is very + // problem-specific, the error + // indicator in the following file + // often yields quite nicely adapted + // grids for a wide class of + // problems. #include - // Finally, this is as in previous - // programs: + // Finally, this is as in previous + // programs: using namespace dealii; // @sect3{The Step6 class template} - // The main class is again almost - // unchanged. Two additions, however, - // are made: we have added the - // refine_grid function, which is - // used to adaptively refine the grid - // (instead of the global refinement - // in the previous examples), and a - // variable which will hold the - // constraints associated to the - // hanging nodes. In addition, we - // have added a destructor to the - // class for reasons that will become - // clear when we discuss its - // implementation. + // The main class is again almost + // unchanged. Two additions, however, + // are made: we have added the + // refine_grid function, which is + // used to adaptively refine the grid + // (instead of the global refinement + // in the previous examples), and a + // variable which will hold the + // constraints associated to the + // hanging nodes. In addition, we + // have added a destructor to the + // class for reasons that will become + // clear when we discuss its + // implementation. template class Step6 { @@ -141,11 +141,11 @@ class Step6 DoFHandler dof_handler; FE_Q fe; - // This is the new variable in - // the main class. We need an - // object which holds a list of - // constraints originating from - // the hanging nodes: + // This is the new variable in + // the main class. 
We need an + // object which holds a list of + // constraints originating from + // the hanging nodes: ConstraintMatrix hanging_node_constraints; SparsityPattern sparsity_pattern; @@ -158,9 +158,9 @@ class Step6 // @sect3{Nonconstant coefficients} - // The implementation of nonconstant - // coefficients is copied verbatim - // from step-5: + // The implementation of nonconstant + // coefficients is copied verbatim + // from step-5: template class Coefficient : public Function @@ -169,18 +169,18 @@ class Coefficient : public Function Coefficient () : Function() {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; virtual void value_list (const std::vector > &points, - std::vector &values, - const unsigned int component = 0) const; + std::vector &values, + const unsigned int component = 0) const; }; template double Coefficient::value (const Point &p, - const unsigned int) const + const unsigned int) const { if (p.square() < 0.5*0.5) return 20; @@ -192,23 +192,23 @@ double Coefficient::value (const Point &p, template void Coefficient::value_list (const std::vector > &points, - std::vector &values, - const unsigned int component) const + std::vector &values, + const unsigned int component) const { const unsigned int n_points = points.size(); Assert (values.size() == n_points, - ExcDimensionMismatch (values.size(), n_points)); + ExcDimensionMismatch (values.size(), n_points)); Assert (component == 0, - ExcIndexRange (component, 0, 1)); + ExcIndexRange (component, 0, 1)); for (unsigned int i=0; i::value_list (const std::vector > &points, // @sect4{Step6::Step6} - // The constructor of this class is - // mostly the same as before, but - // this time we want to use the - // quadratic element. To do so, we - // only have to replace the - // constructor argument (which was - // 1 in all previous examples) by - // the desired polynomial degree - // (here 2): + // The constructor of this class is + // mostly the same as before, but + // this time we want to use the + // quadratic element. To do so, we + // only have to replace the + // constructor argument (which was + // 1 in all previous examples) by + // the desired polynomial degree + // (here 2): template Step6::Step6 () - : - dof_handler (triangulation), + : + dof_handler (triangulation), fe (2) {} // @sect4{Step6::~Step6} - // Here comes the added destructor of - // the class. The reason why we want - // to add it is a subtle change in - // the order of data elements in the - // class as compared to all previous - // examples: the dof_handler - // object was defined before and not - // after the fe object. Of course - // we could have left this order - // unchanged, but we would like to - // show what happens if the order is - // reversed since this produces a - // rather nasty side-effect and - // results in an error which is - // difficult to track down if one - // does not know what happens. - // - // Basically what happens is the - // following: when we distribute the - // degrees of freedom using the - // function call - // dof_handler.distribute_dofs(), - // the dof_handler also stores a - // pointer to the finite element in - // use. Since this pointer is used - // every now and then until either - // the degrees of freedom are - // re-distributed using another - // finite element object or until the - // dof_handler object is - // destroyed, it would be unwise if - // we would allow the finite element - // object to be deleted before the - // dof_handler object. 
To - // disallow this, the DoF handler - // increases a counter inside the - // finite element object which counts - // how many objects use that finite - // element (this is what the - // Subscriptor/SmartPointer - // class pair is used for, in case - // you want something like this for - // your own programs; see step-7 for - // a more complete discussion - // of this topic). The finite - // element object will refuse its - // destruction if that counter is - // larger than zero, since then some - // other objects might rely on the - // persistence of the finite element - // object. An exception will then be - // thrown and the program will - // usually abort upon the attempt to - // destroy the finite element. - // - // To be fair, such exceptions about - // still used objects are not - // particularly popular among - // programmers using deal.II, since - // they only tell us that something - // is wrong, namely that some other - // object is still using the object - // that is presently being - // destructed, but most of the time - // not who this user is. It is - // therefore often rather - // time-consuming to find out where - // the problem exactly is, although - // it is then usually straightforward - // to remedy the situation. However, - // we believe that the effort to find - // invalid references to objects that - // do no longer exist is less if the - // problem is detected once the - // reference becomes invalid, rather - // than when non-existent objects are - // actually accessed again, since - // then usually only invalid data is - // accessed, but no error is - // immediately raised. - // - // Coming back to the present - // situation, if we did not write - // this destructor, the compiler will - // generate code that triggers - // exactly the behavior sketched - // above. The reason is that member - // variables of the - // Step6 class are - // destructed bottom-up (i.e. in - // reverse order of their declaration - // in the class), as always in - // C++. Thus, the finite element - // object will be destructed before - // the DoF handler object, since its - // declaration is below the one of - // the DoF handler. This triggers the - // situation above, and an exception - // will be raised when the fe - // object is destructed. What needs - // to be done is to tell the - // dof_handler object to release - // its lock to the finite element. Of - // course, the dof_handler will - // only release its lock if it really - // does not need the finite element - // any more, i.e. when all finite - // element related data is deleted - // from it. For this purpose, the - // DoFHandler class has a - // function clear which deletes - // all degrees of freedom, and - // releases its lock to the finite - // element. After this, you can - // safely destruct the finite element - // object since its internal counter - // is then zero. - // - // For completeness, we add the - // output of the exception that would - // have been triggered without this - // destructor, to the end of the - // results section of this example. + // Here comes the added destructor of + // the class. The reason why we want + // to add it is a subtle change in + // the order of data elements in the + // class as compared to all previous + // examples: the dof_handler + // object was defined before and not + // after the fe object. 
Of course + // we could have left this order + // unchanged, but we would like to + // show what happens if the order is + // reversed since this produces a + // rather nasty side-effect and + // results in an error which is + // difficult to track down if one + // does not know what happens. + // + // Basically what happens is the + // following: when we distribute the + // degrees of freedom using the + // function call + // dof_handler.distribute_dofs(), + // the dof_handler also stores a + // pointer to the finite element in + // use. Since this pointer is used + // every now and then until either + // the degrees of freedom are + // re-distributed using another + // finite element object or until the + // dof_handler object is + // destroyed, it would be unwise if + // we would allow the finite element + // object to be deleted before the + // dof_handler object. To + // disallow this, the DoF handler + // increases a counter inside the + // finite element object which counts + // how many objects use that finite + // element (this is what the + // Subscriptor/SmartPointer + // class pair is used for, in case + // you want something like this for + // your own programs; see step-7 for + // a more complete discussion + // of this topic). The finite + // element object will refuse its + // destruction if that counter is + // larger than zero, since then some + // other objects might rely on the + // persistence of the finite element + // object. An exception will then be + // thrown and the program will + // usually abort upon the attempt to + // destroy the finite element. + // + // To be fair, such exceptions about + // still used objects are not + // particularly popular among + // programmers using deal.II, since + // they only tell us that something + // is wrong, namely that some other + // object is still using the object + // that is presently being + // destructed, but most of the time + // not who this user is. It is + // therefore often rather + // time-consuming to find out where + // the problem exactly is, although + // it is then usually straightforward + // to remedy the situation. However, + // we believe that the effort to find + // invalid references to objects that + // do no longer exist is less if the + // problem is detected once the + // reference becomes invalid, rather + // than when non-existent objects are + // actually accessed again, since + // then usually only invalid data is + // accessed, but no error is + // immediately raised. + // + // Coming back to the present + // situation, if we did not write + // this destructor, the compiler will + // generate code that triggers + // exactly the behavior sketched + // above. The reason is that member + // variables of the + // Step6 class are + // destructed bottom-up (i.e. in + // reverse order of their declaration + // in the class), as always in + // C++. Thus, the finite element + // object will be destructed before + // the DoF handler object, since its + // declaration is below the one of + // the DoF handler. This triggers the + // situation above, and an exception + // will be raised when the fe + // object is destructed. What needs + // to be done is to tell the + // dof_handler object to release + // its lock to the finite element. Of + // course, the dof_handler will + // only release its lock if it really + // does not need the finite element + // any more, i.e. when all finite + // element related data is deleted + // from it. 
For this purpose, the + // DoFHandler class has a + // function clear which deletes + // all degrees of freedom, and + // releases its lock to the finite + // element. After this, you can + // safely destruct the finite element + // object since its internal counter + // is then zero. + // + // For completeness, we add the + // output of the exception that would + // have been triggered without this + // destructor, to the end of the + // results section of this example. template Step6::~Step6 () { @@ -368,42 +368,42 @@ Step6::~Step6 () // @sect4{Step6::setup_system} - // The next function is setting up - // all the variables that describe - // the linear finite element problem, - // such as the DoF handler, the - // matrices, and vectors. The - // difference to what we did in - // step-5 is only that we now also - // have to take care of handing node - // constraints. These constraints are - // handled almost transparently by - // the library, i.e. you only need to - // know that they exist and how to - // get them, but you do not have to - // know how they are formed or what - // exactly is done with them. - // - // At the beginning of the function, - // you find all the things that are - // the same as in step-5: setting up - // the degrees of freedom (this time - // we have quadratic elements, but - // there is no difference from a user - // code perspective to the linear -- - // or cubic, for that matter -- - // case), generating the sparsity - // pattern, and initializing the - // solution and right hand side - // vectors. Note that the sparsity - // pattern will have significantly - // more entries per row now, since - // there are now 9 degrees of freedom - // per cell, not only four, that can - // couple with each other. The - // dof_Handler.max_couplings_between_dofs() - // call will take care of this, - // however: + // The next function is setting up + // all the variables that describe + // the linear finite element problem, + // such as the DoF handler, the + // matrices, and vectors. The + // difference to what we did in + // step-5 is only that we now also + // have to take care of handing node + // constraints. These constraints are + // handled almost transparently by + // the library, i.e. you only need to + // know that they exist and how to + // get them, but you do not have to + // know how they are formed or what + // exactly is done with them. + // + // At the beginning of the function, + // you find all the things that are + // the same as in step-5: setting up + // the degrees of freedom (this time + // we have quadratic elements, but + // there is no difference from a user + // code perspective to the linear -- + // or cubic, for that matter -- + // case), generating the sparsity + // pattern, and initializing the + // solution and right hand side + // vectors. Note that the sparsity + // pattern will have significantly + // more entries per row now, since + // there are now 9 degrees of freedom + // per cell, not only four, that can + // couple with each other. The + // dof_Handler.max_couplings_between_dofs() + // call will take care of this, + // however: template void Step6::setup_system () { @@ -413,149 +413,149 @@ void Step6::setup_system () system_rhs.reinit (dof_handler.n_dofs()); - // After setting up all the degrees - // of freedoms, here are now the - // differences compared to step-5, - // all of which are related to - // constraints associated with the - // hanging nodes. 
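The destructor body itself lies outside the hunks shown here, but the long discussion above boils down to a single call; the following is a sketch consistent with that discussion, relying only on DoFHandler::clear(), the function named in the comments.

// Release the DoFHandler's lock on the finite element before the
// members are destroyed bottom-up; otherwise the fe object (declared
// after dof_handler) would be destructed while still in use and an
// exception would be thrown.
template <int dim>
Step6<dim>::~Step6 ()
{
  dof_handler.clear ();
}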
In the class - // desclaration, we have already - // allocated space for an object - // hanging_node_constraints - // that will hold a list of these - // constraints (they form a matrix, - // which is reflected in the name - // of the class, but that is - // immaterial for the moment). Now - // we have to fill this - // object. This is done using the - // following function calls (the - // first clears the contents of the - // object that may still be left - // over from computations on the - // previous mesh before the last - // adaptive refinement): + // After setting up all the degrees + // of freedoms, here are now the + // differences compared to step-5, + // all of which are related to + // constraints associated with the + // hanging nodes. In the class + // desclaration, we have already + // allocated space for an object + // hanging_node_constraints + // that will hold a list of these + // constraints (they form a matrix, + // which is reflected in the name + // of the class, but that is + // immaterial for the moment). Now + // we have to fill this + // object. This is done using the + // following function calls (the + // first clears the contents of the + // object that may still be left + // over from computations on the + // previous mesh before the last + // adaptive refinement): hanging_node_constraints.clear (); DoFTools::make_hanging_node_constraints (dof_handler, - hanging_node_constraints); - - // The next step is closing - // this object. For this note that, - // in principle, the - // ConstraintMatrix class can - // hold other constraints as well, - // i.e. constraints that do not - // stem from hanging - // nodes. Sometimes, it is useful - // to use such constraints, in - // which case they may be added to - // the ConstraintMatrix object - // after the hanging node - // constraints were computed. After - // all constraints have been added, - // they need to be sorted and - // rearranged to perform some - // actions more efficiently. This - // postprocessing is done using the - // close() function, after which - // no further constraints may be - // added any more: + hanging_node_constraints); + + // The next step is closing + // this object. For this note that, + // in principle, the + // ConstraintMatrix class can + // hold other constraints as well, + // i.e. constraints that do not + // stem from hanging + // nodes. Sometimes, it is useful + // to use such constraints, in + // which case they may be added to + // the ConstraintMatrix object + // after the hanging node + // constraints were computed. After + // all constraints have been added, + // they need to be sorted and + // rearranged to perform some + // actions more efficiently. This + // postprocessing is done using the + // close() function, after which + // no further constraints may be + // added any more: hanging_node_constraints.close (); - // Now we first build our - // compressed sparsity pattern like - // we did in the previous - // examples. Nevertheless, we do - // not copy it to the final - // sparsity pattern immediately. + // Now we first build our + // compressed sparsity pattern like + // we did in the previous + // examples. Nevertheless, we do + // not copy it to the final + // sparsity pattern immediately. CompressedSparsityPattern c_sparsity(dof_handler.n_dofs()); DoFTools::make_sparsity_pattern (dof_handler, c_sparsity); - // The constrained hanging nodes - // will later be eliminated from - // the linear system of - // equations. 
When doing so, some - // additional entries in the global - // matrix will be set to non-zero - // values, so we have to reserve - // some space for them here. Since - // the process of elimination of - // these constrained nodes is - // called condensation, the - // functions that eliminate them - // are called condense for both - // the system matrix and right hand - // side, as well as for the - // sparsity pattern. + // The constrained hanging nodes + // will later be eliminated from + // the linear system of + // equations. When doing so, some + // additional entries in the global + // matrix will be set to non-zero + // values, so we have to reserve + // some space for them here. Since + // the process of elimination of + // these constrained nodes is + // called condensation, the + // functions that eliminate them + // are called condense for both + // the system matrix and right hand + // side, as well as for the + // sparsity pattern. hanging_node_constraints.condense (c_sparsity); - // Now all non-zero entries of the - // matrix are known (i.e. those - // from regularly assembling the - // matrix and those that were - // introduced by eliminating - // constraints). We can thus copy - // our intermediate object to - // the sparsity pattern: + // Now all non-zero entries of the + // matrix are known (i.e. those + // from regularly assembling the + // matrix and those that were + // introduced by eliminating + // constraints). We can thus copy + // our intermediate object to + // the sparsity pattern: sparsity_pattern.copy_from(c_sparsity); - // Finally, the so-constructed - // sparsity pattern serves as the - // basis on top of which we will - // create the sparse matrix: + // Finally, the so-constructed + // sparsity pattern serves as the + // basis on top of which we will + // create the sparse matrix: system_matrix.reinit (sparsity_pattern); } // @sect4{Step6::assemble_system} - // Next, we have to assemble the - // matrix again. There are no code - // changes compared to step-5 except - // for a single place: We have to use - // a higher-order quadrature formula - // to account for the higher - // polynomial degree in the finite - // element shape functions. This is - // easy to change: the constructor of - // the QGauss class takes the - // number of quadrature points in - // each space direction. Previously, - // we had two points for bilinear - // elements. Now we should use three - // points for biquadratic elements. - // - // The rest of the code that forms - // the local contributions and - // transfers them into the global - // objects remains unchanged. It is - // worth noting, however, that under - // the hood several things are - // different than before. First, the - // variables dofs_per_cell and - // n_q_points now are 9 each, - // where they were 4 - // before. Introducing such variables - // as abbreviations is a good - // strategy to make code work with - // different elements without having - // to change too much code. Secondly, - // the fe_values object of course - // needs to do other things as well, - // since the shape functions are now - // quadratic, rather than linear, in - // each coordinate variable. Again, - // however, this is something that is - // completely transparent to user - // code and nothing that you have to - // worry about. + // Next, we have to assemble the + // matrix again. 
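Read in one piece, the constraint and sparsity handling of Step6<dim>::setup_system() that the hunks above walk through looks like this. The first three statements sit just outside the shown hunk and are assumed to follow the same pattern as in step-5; everything else appears verbatim above.

template <int dim>
void Step6<dim>::setup_system ()
{
  dof_handler.distribute_dofs (fe);

  solution.reinit (dof_handler.n_dofs());
  system_rhs.reinit (dof_handler.n_dofs());

  // Recompute the hanging-node constraints from scratch on the
  // current mesh, then close the object so it can be used.
  hanging_node_constraints.clear ();
  DoFTools::make_hanging_node_constraints (dof_handler,
                                           hanging_node_constraints);
  hanging_node_constraints.close ();

  // Build an intermediate (compressed) pattern, reserve space for the
  // entries that condensation will introduce, and only then copy it
  // into the static pattern on which the matrix lives.
  CompressedSparsityPattern c_sparsity (dof_handler.n_dofs());
  DoFTools::make_sparsity_pattern (dof_handler, c_sparsity);
  hanging_node_constraints.condense (c_sparsity);

  sparsity_pattern.copy_from (c_sparsity);
  system_matrix.reinit (sparsity_pattern);
}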
There are no code + // changes compared to step-5 except + // for a single place: We have to use + // a higher-order quadrature formula + // to account for the higher + // polynomial degree in the finite + // element shape functions. This is + // easy to change: the constructor of + // the QGauss class takes the + // number of quadrature points in + // each space direction. Previously, + // we had two points for bilinear + // elements. Now we should use three + // points for biquadratic elements. + // + // The rest of the code that forms + // the local contributions and + // transfers them into the global + // objects remains unchanged. It is + // worth noting, however, that under + // the hood several things are + // different than before. First, the + // variables dofs_per_cell and + // n_q_points now are 9 each, + // where they were 4 + // before. Introducing such variables + // as abbreviations is a good + // strategy to make code work with + // different elements without having + // to change too much code. Secondly, + // the fe_values object of course + // needs to do other things as well, + // since the shape functions are now + // quadratic, rather than linear, in + // each coordinate variable. Again, + // however, this is something that is + // completely transparent to user + // code and nothing that you have to + // worry about. template void Step6::assemble_system () { const QGauss quadrature_formula(3); FEValues fe_values (fe, quadrature_formula, - update_values | update_gradients | - update_quadrature_points | update_JxW_values); + update_values | update_gradients | + update_quadrature_points | update_JxW_values); const unsigned int dofs_per_cell = fe.dofs_per_cell; const unsigned int n_q_points = quadrature_formula.size(); @@ -579,117 +579,117 @@ void Step6::assemble_system () fe_values.reinit (cell); coefficient.value_list (fe_values.get_quadrature_points(), - coefficient_values); + coefficient_values); for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); for (unsigned int i=0; icondense - // function modifies the system so - // that the values in the solution - // corresponding to constrained - // nodes are invalid, but that the - // system still has a well-defined - // solution; we compute the correct - // values for these nodes at the - // end of the solve function). - - // As almost all the stuff before, - // the interpolation of boundary - // values works also for higher - // order elements without the need - // to change your code for that. We - // note that for proper results, it - // is important that the - // elimination of boundary nodes - // from the system of equations - // happens *after* the elimination - // of hanging nodes. + // Using them, degrees of freedom + // associated to hanging nodes have + // been removed from the linear + // system and the independent + // variables are only the regular + // nodes. The constrained nodes are + // still in the linear system + // (there is a one on the diagonal + // of the matrix and all other + // entries for this line are set to + // zero) but the computed values + // are invalid (the condense + // function modifies the system so + // that the values in the solution + // corresponding to constrained + // nodes are invalid, but that the + // system still has a well-defined + // solution; we compute the correct + // values for these nodes at the + // end of the solve function). 
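The innermost part of the assembly loop shown above, with the template brackets that the listing has lost restored, reads as follows; fe_values, cell_matrix, cell_rhs and coefficient_values are the local variables of assemble_system() as in the hunk.

// Local contributions for -div(a grad u) = 1: the variable
// coefficient multiplies the usual grad-grad term, and the right
// hand side uses the constant value 1.
for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
  for (unsigned int i = 0; i < dofs_per_cell; ++i)
    {
      for (unsigned int j = 0; j < dofs_per_cell; ++j)
        cell_matrix(i,j) += (coefficient_values[q_point] *
                             fe_values.shape_grad (i, q_point) *
                             fe_values.shape_grad (j, q_point) *
                             fe_values.JxW (q_point));

      cell_rhs(i) += (fe_values.shape_value (i, q_point) *
                      1.0 *
                      fe_values.JxW (q_point));
    }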
+ + // As almost all the stuff before, + // the interpolation of boundary + // values works also for higher + // order elements without the need + // to change your code for that. We + // note that for proper results, it + // is important that the + // elimination of boundary nodes + // from the system of equations + // happens *after* the elimination + // of hanging nodes. std::map boundary_values; VectorTools::interpolate_boundary_values (dof_handler, - 0, - ZeroFunction(), - boundary_values); + 0, + ZeroFunction(), + boundary_values); MatrixTools::apply_boundary_values (boundary_values, - system_matrix, - solution, - system_rhs); + system_matrix, + solution, + system_rhs); } // @sect4{Step6::solve} - // We continue with gradual - // improvements. The function that - // solves the linear system again - // uses the SSOR preconditioner, and - // is again unchanged except that we - // have to incorporate hanging node - // constraints. As mentioned above, - // the degrees of freedom - // corresponding to hanging node - // constraints have been removed from - // the linear system by giving the - // rows and columns of the matrix a - // special treatment. This way, the - // values for these degrees of - // freedom have wrong, but - // well-defined values after solving - // the linear system. What we then - // have to do is to use the - // constraints to assign to them the - // values that they should have. This - // process, called distributing - // hanging nodes, computes the values - // of constrained nodes from the - // values of the unconstrained ones, - // and requires only a single - // additional function call that you - // find at the end of this function: + // We continue with gradual + // improvements. The function that + // solves the linear system again + // uses the SSOR preconditioner, and + // is again unchanged except that we + // have to incorporate hanging node + // constraints. As mentioned above, + // the degrees of freedom + // corresponding to hanging node + // constraints have been removed from + // the linear system by giving the + // rows and columns of the matrix a + // special treatment. This way, the + // values for these degrees of + // freedom have wrong, but + // well-defined values after solving + // the linear system. What we then + // have to do is to use the + // constraints to assign to them the + // values that they should have. This + // process, called distributing + // hanging nodes, computes the values + // of constrained nodes from the + // values of the unconstrained ones, + // and requires only a single + // additional function call that you + // find at the end of this function: template void Step6::solve () @@ -701,7 +701,7 @@ void Step6::solve () preconditioner.initialize(system_matrix, 1.2); solver.solve (system_matrix, solution, system_rhs, - preconditioner); + preconditioner); hanging_node_constraints.distribute (solution); } @@ -709,261 +709,261 @@ void Step6::solve () // @sect4{Step6::refine_grid} - // Instead of global refinement, we - // now use a slightly more elaborate - // scheme. We will use the - // KellyErrorEstimator class - // which implements an error - // estimator for the Laplace - // equation; it can in principle - // handle variable coefficients, but - // we will not use these advanced - // features, but rather use its most - // simple form since we are not - // interested in quantitative results - // but only in a quick way to - // generate locally refined grids. 
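Putting the ordering requirement just stated into code: a condensed sketch of how condensation, boundary values and the post-solve distribution interact across assemble_system() and solve(). The member names are the tutorial's; the condense calls on the assembled matrix and right hand side are implied by the discussion even though they sit outside the shown hunks.

// In assemble_system(): eliminate hanging nodes first ...
hanging_node_constraints.condense (system_matrix);
hanging_node_constraints.condense (system_rhs);

// ... and only afterwards the Dirichlet boundary values.
std::map<unsigned int,double> boundary_values;
VectorTools::interpolate_boundary_values (dof_handler,
                                          0,                    // boundary indicator
                                          ZeroFunction<dim>(),  // homogeneous values
                                          boundary_values);
MatrixTools::apply_boundary_values (boundary_values,
                                    system_matrix,
                                    solution,
                                    system_rhs);

// In solve(): after the CG iteration, assign the constrained degrees
// of freedom the values their constraints dictate.
solver.solve (system_matrix, solution, system_rhs, preconditioner);
hanging_node_constraints.distribute (solution);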
- // - // Although the error estimator - // derived by Kelly et al. was - // originally developed for the Laplace - // equation, we have found that it is - // also well suited to quickly - // generate locally refined grids for - // a wide class of - // problems. Basically, it looks at - // the jumps of the gradients of the - // solution over the faces of cells - // (which is a measure for the second - // derivatives) and scales it by the - // size of the cell. It is therefore - // a measure for the local smoothness - // of the solution at the place of - // each cell and it is thus - // understandable that it yields - // reasonable grids also for - // hyperbolic transport problems or - // the wave equation as well, - // although these grids are certainly - // suboptimal compared to approaches - // specially tailored to the - // problem. This error estimator may - // therefore be understood as a quick - // way to test an adaptive program. - // - // The way the estimator works is to - // take a DoFHandler object - // describing the degrees of freedom - // and a vector of values for each - // degree of freedom as input and - // compute a single indicator value - // for each active cell of the - // triangulation (i.e. one value for - // each of the - // triangulation.n_active_cells() - // cells). To do so, it needs two - // additional pieces of information: - // a quadrature formula on the faces - // (i.e. quadrature formula on - // dim-1 dimensional objects. We - // use a 3-point Gauss rule again, a - // pick that is consistent and - // appropriate with the choice - // bi-quadratic finite element shape - // functions in this program. - // (What constitutes a suitable - // quadrature rule here of course - // depends on knowledge of the way - // the error estimator evaluates - // the solution field. As said - // above, the jump of the gradient - // is integrated over each face, - // which would be a quadratic - // function on each face for the - // quadratic elements in use in - // this example. In fact, however, - // it is the square of the jump of - // the gradient, as explained in - // the documentation of that class, - // and that is a quartic function, - // for which a 3 point Gauss - // formula is sufficient since it - // integrates polynomials up to - // order 5 exactly.) - // - // Secondly, the function wants a - // list of boundaries where we have - // imposed Neumann value, and the - // corresponding Neumann values. This - // information is represented by an - // object of type - // FunctionMap::type that is - // essentially a map from boundary - // indicators to function objects - // describing Neumann boundary values - // (in the present example program, - // we do not use Neumann boundary - // values, so this map is empty, and - // in fact constructed using the - // default constructor of the map in - // the place where the function call - // expects the respective function - // argument). - // - // The output, as mentioned is a - // vector of values for all - // cells. While it may make sense to - // compute the *value* of a degree of - // freedom very accurately, it is - // usually not helpful to compute the - // *error indicator* corresponding to - // a cell particularly accurately. We - // therefore typically use a vector - // of floats instead of a vector of - // doubles to represent error - // indicators. + // Instead of global refinement, we + // now use a slightly more elaborate + // scheme. 
We will use the + // KellyErrorEstimator class + // which implements an error + // estimator for the Laplace + // equation; it can in principle + // handle variable coefficients, but + // we will not use these advanced + // features, but rather use its most + // simple form since we are not + // interested in quantitative results + // but only in a quick way to + // generate locally refined grids. + // + // Although the error estimator + // derived by Kelly et al. was + // originally developed for the Laplace + // equation, we have found that it is + // also well suited to quickly + // generate locally refined grids for + // a wide class of + // problems. Basically, it looks at + // the jumps of the gradients of the + // solution over the faces of cells + // (which is a measure for the second + // derivatives) and scales it by the + // size of the cell. It is therefore + // a measure for the local smoothness + // of the solution at the place of + // each cell and it is thus + // understandable that it yields + // reasonable grids also for + // hyperbolic transport problems or + // the wave equation as well, + // although these grids are certainly + // suboptimal compared to approaches + // specially tailored to the + // problem. This error estimator may + // therefore be understood as a quick + // way to test an adaptive program. + // + // The way the estimator works is to + // take a DoFHandler object + // describing the degrees of freedom + // and a vector of values for each + // degree of freedom as input and + // compute a single indicator value + // for each active cell of the + // triangulation (i.e. one value for + // each of the + // triangulation.n_active_cells() + // cells). To do so, it needs two + // additional pieces of information: + // a quadrature formula on the faces + // (i.e. quadrature formula on + // dim-1 dimensional objects. We + // use a 3-point Gauss rule again, a + // pick that is consistent and + // appropriate with the choice + // bi-quadratic finite element shape + // functions in this program. + // (What constitutes a suitable + // quadrature rule here of course + // depends on knowledge of the way + // the error estimator evaluates + // the solution field. As said + // above, the jump of the gradient + // is integrated over each face, + // which would be a quadratic + // function on each face for the + // quadratic elements in use in + // this example. In fact, however, + // it is the square of the jump of + // the gradient, as explained in + // the documentation of that class, + // and that is a quartic function, + // for which a 3 point Gauss + // formula is sufficient since it + // integrates polynomials up to + // order 5 exactly.) + // + // Secondly, the function wants a + // list of boundaries where we have + // imposed Neumann value, and the + // corresponding Neumann values. This + // information is represented by an + // object of type + // FunctionMap::type that is + // essentially a map from boundary + // indicators to function objects + // describing Neumann boundary values + // (in the present example program, + // we do not use Neumann boundary + // values, so this map is empty, and + // in fact constructed using the + // default constructor of the map in + // the place where the function call + // expects the respective function + // argument). + // + // The output, as mentioned is a + // vector of values for all + // cells. 
While it may make sense to + // compute the *value* of a degree of + // freedom very accurately, it is + // usually not helpful to compute the + // *error indicator* corresponding to + // a cell particularly accurately. We + // therefore typically use a vector + // of floats instead of a vector of + // doubles to represent error + // indicators. template void Step6::refine_grid () { Vector estimated_error_per_cell (triangulation.n_active_cells()); KellyErrorEstimator::estimate (dof_handler, - QGauss(3), - typename FunctionMap::type(), - solution, - estimated_error_per_cell); - - // The above function returned one - // error indicator value for each - // cell in the - // estimated_error_per_cell - // array. Refinement is now done as - // follows: refine those 30 per - // cent of the cells with the - // highest error values, and - // coarsen the 3 per cent of cells - // with the lowest values. - // - // One can easily verify that if - // the second number were zero, - // this would approximately result - // in a doubling of cells in each - // step in two space dimensions, - // since for each of the 30 per - // cent of cells, four new would be - // replaced, while the remaining 70 - // per cent of cells remain - // untouched. In practice, some - // more cells are usually produced - // since it is disallowed that a - // cell is refined twice while the - // neighbor cell is not refined; in - // that case, the neighbor cell - // would be refined as well. - // - // In many applications, the number - // of cells to be coarsened would - // be set to something larger than - // only three per cent. A non-zero - // value is useful especially if - // for some reason the initial - // (coarse) grid is already rather - // refined. In that case, it might - // be necessary to refine it in - // some regions, while coarsening - // in some other regions is - // useful. In our case here, the - // initial grid is very coarse, so - // coarsening is only necessary in - // a few regions where - // over-refinement may have taken - // place. Thus a small, non-zero - // value is appropriate here. - // - // The following function now takes - // these refinement indicators and - // flags some cells of the - // triangulation for refinement or - // coarsening using the method - // described above. It is from a - // class that implements - // several different algorithms to - // refine a triangulation based on - // cell-wise error indicators. + QGauss(3), + typename FunctionMap::type(), + solution, + estimated_error_per_cell); + + // The above function returned one + // error indicator value for each + // cell in the + // estimated_error_per_cell + // array. Refinement is now done as + // follows: refine those 30 per + // cent of the cells with the + // highest error values, and + // coarsen the 3 per cent of cells + // with the lowest values. + // + // One can easily verify that if + // the second number were zero, + // this would approximately result + // in a doubling of cells in each + // step in two space dimensions, + // since for each of the 30 per + // cent of cells, four new would be + // replaced, while the remaining 70 + // per cent of cells remain + // untouched. In practice, some + // more cells are usually produced + // since it is disallowed that a + // cell is refined twice while the + // neighbor cell is not refined; in + // that case, the neighbor cell + // would be refined as well. 
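With the stripped template arguments filled back in, the call that the hunk above makes is the following; the 3-point face quadrature and the empty Neumann map are exactly the two extra pieces of information discussed.

// One indicator per active cell; float precision is plenty for
// refinement indicators.
Vector<float> estimated_error_per_cell (triangulation.n_active_cells());

KellyErrorEstimator<dim>::estimate (dof_handler,
                                    QGauss<dim-1>(3),                   // face quadrature
                                    typename FunctionMap<dim>::type(),  // no Neumann data
                                    solution,
                                    estimated_error_per_cell);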
+ // + // In many applications, the number + // of cells to be coarsened would + // be set to something larger than + // only three per cent. A non-zero + // value is useful especially if + // for some reason the initial + // (coarse) grid is already rather + // refined. In that case, it might + // be necessary to refine it in + // some regions, while coarsening + // in some other regions is + // useful. In our case here, the + // initial grid is very coarse, so + // coarsening is only necessary in + // a few regions where + // over-refinement may have taken + // place. Thus a small, non-zero + // value is appropriate here. + // + // The following function now takes + // these refinement indicators and + // flags some cells of the + // triangulation for refinement or + // coarsening using the method + // described above. It is from a + // class that implements + // several different algorithms to + // refine a triangulation based on + // cell-wise error indicators. GridRefinement::refine_and_coarsen_fixed_number (triangulation, - estimated_error_per_cell, - 0.3, 0.03); - - // After the previous function has - // exited, some cells are flagged - // for refinement, and some other - // for coarsening. The refinement - // or coarsening itself is not - // performed by now, however, since - // there are cases where further - // modifications of these flags is - // useful. Here, we don't want to - // do any such thing, so we can - // tell the triangulation to - // perform the actions for which - // the cells are flagged: + estimated_error_per_cell, + 0.3, 0.03); + + // After the previous function has + // exited, some cells are flagged + // for refinement, and some other + // for coarsening. The refinement + // or coarsening itself is not + // performed by now, however, since + // there are cases where further + // modifications of these flags is + // useful. Here, we don't want to + // do any such thing, so we can + // tell the triangulation to + // perform the actions for which + // the cells are flagged: triangulation.execute_coarsening_and_refinement (); } // @sect4{Step6::output_results} - // At the end of computations on each - // grid, and just before we continue - // the next cycle with mesh - // refinement, we want to output the - // results from this cycle. - // - // In the present program, we will - // not write the solution (except for - // in the last step, see the next - // function), but only the meshes - // that we generated, as a - // two-dimensional Encapsulated - // Postscript (EPS) file. - // - // We have already seen in step-1 how - // this can be achieved. The only - // thing we have to change is the - // generation of the file name, since - // it should contain the number of - // the present refinement cycle - // provided to this function as an - // argument. The most general way is - // to use the std::stringstream class - // as shown in step-5, but here's a - // little hack that makes it simpler - // if we know that we have less than - // 10 iterations: assume that the - // %numbers `0' through `9' are - // represented consecutively in the - // character set used on your machine - // (this is in fact the case in all - // known character sets), then - // '0'+cycle gives the character - // corresponding to the present cycle - // number. 
Of course, this will only - // work if the number of cycles is - // actually less than 10, and rather - // than waiting for the disaster to - // happen, we safeguard our little - // hack with an explicit assertion at - // the beginning of the function. If - // this assertion is triggered, - // i.e. when cycle is larger than - // or equal to 10, an exception of - // type ExcNotImplemented is - // raised, indicating that some - // functionality is not implemented - // for this case (the functionality - // that is missing, of course, is the - // generation of file names for that - // case): + // At the end of computations on each + // grid, and just before we continue + // the next cycle with mesh + // refinement, we want to output the + // results from this cycle. + // + // In the present program, we will + // not write the solution (except for + // in the last step, see the next + // function), but only the meshes + // that we generated, as a + // two-dimensional Encapsulated + // Postscript (EPS) file. + // + // We have already seen in step-1 how + // this can be achieved. The only + // thing we have to change is the + // generation of the file name, since + // it should contain the number of + // the present refinement cycle + // provided to this function as an + // argument. The most general way is + // to use the std::stringstream class + // as shown in step-5, but here's a + // little hack that makes it simpler + // if we know that we have less than + // 10 iterations: assume that the + // %numbers `0' through `9' are + // represented consecutively in the + // character set used on your machine + // (this is in fact the case in all + // known character sets), then + // '0'+cycle gives the character + // corresponding to the present cycle + // number. Of course, this will only + // work if the number of cycles is + // actually less than 10, and rather + // than waiting for the disaster to + // happen, we safeguard our little + // hack with an explicit assertion at + // the beginning of the function. If + // this assertion is triggered, + // i.e. when cycle is larger than + // or equal to 10, an exception of + // type ExcNotImplemented is + // raised, indicating that some + // functionality is not implemented + // for this case (the functionality + // that is missing, of course, is the + // generation of file names for that + // case): template void Step6::output_results (const unsigned int cycle) const { @@ -983,55 +983,55 @@ void Step6::output_results (const unsigned int cycle) const // @sect4{Step6::run} - // The final function before - // main() is again the main - // driver of the class, run(). It - // is similar to the one of step-5, - // except that we generate a file in - // the program again instead of - // reading it from disk, in that we - // adaptively instead of globally - // refine the mesh, and that we - // output the solution on the final - // mesh in the present function. - // - // The first block in the main loop - // of the function deals with mesh - // generation. If this is the first - // cycle of the program, instead of - // reading the grid from a file on - // disk as in the previous example, - // we now again create it using a - // library function. The domain is - // again a circle, which is why we - // have to provide a suitable - // boundary object as well. We place - // the center of the circle at the - // origin and have the radius be one - // (these are the two hidden - // arguments to the function, which - // have default values). 
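As an aside, the file-name trick for output_results() described above, together with its safeguard, amounts to only a handful of lines; the stem "grid-" and the EPS suffix are used here only as an illustration of the pattern:

    Assert (cycle < 10, ExcNotImplemented());

    std::string filename = "grid-";
    filename += ('0' + cycle);     // valid because '0'..'9' are consecutive
    filename += ".eps";

    std::ofstream output (filename.c_str());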
- // - // You will notice by looking at the - // coarse grid that it is of inferior - // quality than the one which we read - // from the file in the previous - // example: the cells are less - // equally formed. However, using the - // library function this program - // works in any space dimension, - // which was not the case before. - // - // In case we find that this is not - // the first cycle, we want to refine - // the grid. Unlike the global - // refinement employed in the last - // example program, we now use the - // adaptive procedure described - // above. - // - // The rest of the loop looks as - // before: + // The final function before + // main() is again the main + // driver of the class, run(). It + // is similar to the one of step-5, + // except that we generate a file in + // the program again instead of + // reading it from disk, in that we + // adaptively instead of globally + // refine the mesh, and that we + // output the solution on the final + // mesh in the present function. + // + // The first block in the main loop + // of the function deals with mesh + // generation. If this is the first + // cycle of the program, instead of + // reading the grid from a file on + // disk as in the previous example, + // we now again create it using a + // library function. The domain is + // again a circle, which is why we + // have to provide a suitable + // boundary object as well. We place + // the center of the circle at the + // origin and have the radius be one + // (these are the two hidden + // arguments to the function, which + // have default values). + // + // You will notice by looking at the + // coarse grid that it is of inferior + // quality than the one which we read + // from the file in the previous + // example: the cells are less + // equally formed. However, using the + // library function this program + // works in any space dimension, + // which was not the case before. + // + // In case we find that this is not + // the first cycle, we want to refine + // the grid. Unlike the global + // refinement employed in the last + // example program, we now use the + // adaptive procedure described + // above. + // + // The rest of the loop looks as + // before: template void Step6::run () { @@ -1040,45 +1040,45 @@ void Step6::run () std::cout << "Cycle " << cycle << ':' << std::endl; if (cycle == 0) - { - GridGenerator::hyper_ball (triangulation); + { + GridGenerator::hyper_ball (triangulation); - static const HyperBallBoundary boundary; - triangulation.set_boundary (0, boundary); + static const HyperBallBoundary boundary; + triangulation.set_boundary (0, boundary); - triangulation.refine_global (1); - } + triangulation.refine_global (1); + } else - refine_grid (); + refine_grid (); std::cout << " Number of active cells: " - << triangulation.n_active_cells() - << std::endl; + << triangulation.n_active_cells() + << std::endl; setup_system (); std::cout << " Number of degrees of freedom: " - << dof_handler.n_dofs() - << std::endl; + << dof_handler.n_dofs() + << std::endl; assemble_system (); solve (); output_results (cycle); } - // After we have finished computing - // the solution on the finesh mesh, - // and writing all the grids to - // disk, we want to also write the - // actual solution on this final - // mesh to a file. As already done - // in one of the previous examples, - // we use the EPS format for - // output, and to obtain a - // reasonable view on the solution, - // we rescale the z-axis by a - // factor of four. 
+ // After we have finished computing + // the solution on the finesh mesh, + // and writing all the grids to + // disk, we want to also write the + // actual solution on this final + // mesh to a file. As already done + // in one of the previous examples, + // we use the EPS format for + // output, and to obtain a + // reasonable view on the solution, + // we rescale the z-axis by a + // factor of four. DataOutBase::EpsFlags eps_flags; eps_flags.z_scaling = 4; @@ -1096,52 +1096,52 @@ void Step6::run () // @sect3{The main function} - // The main function is unaltered in - // its functionality from the - // previous example, but we have - // taken a step of additional - // caution. Sometimes, something goes - // wrong (such as insufficient disk - // space upon writing an output file, - // not enough memory when trying to - // allocate a vector or a matrix, or - // if we can't read from or write to - // a file for whatever reason), and - // in these cases the library will - // throw exceptions. Since these are - // run-time problems, not programming - // errors that can be fixed once and - // for all, this kind of exceptions - // is not switched off in optimized - // mode, in contrast to the - // Assert macro which we have - // used to test against programming - // errors. If uncaught, these - // exceptions propagate the call tree - // up to the main function, and - // if they are not caught there - // either, the program is aborted. In - // many cases, like if there is not - // enough memory or disk space, we - // can't do anything but we can at - // least print some text trying to - // explain the reason why the program - // failed. A way to do so is shown in - // the following. It is certainly - // useful to write any larger program - // in this way, and you can do so by - // more or less copying this function - // except for the try block that - // actually encodes the functionality - // particular to the present - // application. + // The main function is unaltered in + // its functionality from the + // previous example, but we have + // taken a step of additional + // caution. Sometimes, something goes + // wrong (such as insufficient disk + // space upon writing an output file, + // not enough memory when trying to + // allocate a vector or a matrix, or + // if we can't read from or write to + // a file for whatever reason), and + // in these cases the library will + // throw exceptions. Since these are + // run-time problems, not programming + // errors that can be fixed once and + // for all, this kind of exceptions + // is not switched off in optimized + // mode, in contrast to the + // Assert macro which we have + // used to test against programming + // errors. If uncaught, these + // exceptions propagate the call tree + // up to the main function, and + // if they are not caught there + // either, the program is aborted. In + // many cases, like if there is not + // enough memory or disk space, we + // can't do anything but we can at + // least print some text trying to + // explain the reason why the program + // failed. A way to do so is shown in + // the following. It is certainly + // useful to write any larger program + // in this way, and you can do so by + // more or less copying this function + // except for the try block that + // actually encodes the functionality + // particular to the present + // application. int main () { - // The general idea behind the - // layout of this function is as - // follows: let's try to run the - // program as we did before... 
+ // The general idea behind the + // layout of this function is as + // follows: let's try to run the + // program as we did before... try { deallog.depth_console (0); @@ -1149,81 +1149,81 @@ int main () Step6<2> laplace_problem_2d; laplace_problem_2d.run (); } - // ...and if this should fail, try - // to gather as much information as - // possible. Specifically, if the - // exception that was thrown is an - // object of a class that is - // derived from the C++ standard - // class exception, then we can - // use the what member function - // to get a string which describes - // the reason why the exception was - // thrown. - // - // The deal.II exception classes - // are all derived from the - // standard class, and in - // particular, the exc.what() - // function will return - // approximately the same string as - // would be generated if the - // exception was thrown using the - // Assert macro. You have seen - // the output of such an exception - // in the previous example, and you - // then know that it contains the - // file and line number of where - // the exception occured, and some - // other information. This is also - // what the following statements - // would print. - // - // Apart from this, there isn't - // much that we can do except - // exiting the program with an - // error code (this is what the - // return 1; does): + // ...and if this should fail, try + // to gather as much information as + // possible. Specifically, if the + // exception that was thrown is an + // object of a class that is + // derived from the C++ standard + // class exception, then we can + // use the what member function + // to get a string which describes + // the reason why the exception was + // thrown. + // + // The deal.II exception classes + // are all derived from the + // standard class, and in + // particular, the exc.what() + // function will return + // approximately the same string as + // would be generated if the + // exception was thrown using the + // Assert macro. You have seen + // the output of such an exception + // in the previous example, and you + // then know that it contains the + // file and line number of where + // the exception occured, and some + // other information. This is also + // what the following statements + // would print. + // + // Apart from this, there isn't + // much that we can do except + // exiting the program with an + // error code (this is what the + // return 1; does): catch (std::exception &exc) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Exception on processing: " << std::endl - << exc.what() << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << exc.what() << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } - // If the exception that was thrown - // somewhere was not an object of a - // class derived from the standard - // exception class, then we - // can't do anything at all. We - // then simply print an error - // message and exit. + // If the exception that was thrown + // somewhere was not an object of a + // class derived from the standard + // exception class, then we + // can't do anything at all. We + // then simply print an error + // message and exit. catch (...) 
{ std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Unknown exception!" << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } - // If we got to this point, there - // was no exception which - // propagated up to the main - // function (there may have been - // exceptions, but they were caught - // somewhere in the program or the - // library). Therefore, the program - // performed as was expected and we - // can return without error. + // If we got to this point, there + // was no exception which + // propagated up to the main + // function (there may have been + // exceptions, but they were caught + // somewhere in the program or the + // library). Therefore, the program + // performed as was expected and we + // can return without error. return 0; } diff --git a/deal.II/examples/step-7/step-7.cc b/deal.II/examples/step-7/step-7.cc index d2c94aef7a..c6e3cce6ce 100644 --- a/deal.II/examples/step-7/step-7.cc +++ b/deal.II/examples/step-7/step-7.cc @@ -12,9 +12,9 @@ // @sect3{Include files} // These first include files have all - // been treated in previous examples, - // so we won't explain what is in - // them again. + // been treated in previous examples, + // so we won't explain what is in + // them again. #include #include #include @@ -38,90 +38,90 @@ #include #include - // In this example, we will not use the - // numeration scheme which is used per - // default by the DoFHandler class, but - // will renumber them using the Cuthill-McKee - // algorithm. As has already been explained - // in step-2, the necessary functions are - // declared in the following file: + // In this example, we will not use the + // numeration scheme which is used per + // default by the DoFHandler class, but + // will renumber them using the Cuthill-McKee + // algorithm. As has already been explained + // in step-2, the necessary functions are + // declared in the following file: #include - // Then we will show a little trick - // how we can make sure that objects - // are not deleted while they are - // still in use. For this purpose, - // deal.II has the SmartPointer - // helper class, which is declared in - // this file: + // Then we will show a little trick + // how we can make sure that objects + // are not deleted while they are + // still in use. For this purpose, + // deal.II has the SmartPointer + // helper class, which is declared in + // this file: #include - // Next, we will want to use the function - // VectorTools::integrate_difference() - // mentioned in the introduction, and we are - // going to use a ConvergenceTable that - // collects all important data during a run - // and prints it at the end as a table. These - // comes from the following two files: + // Next, we will want to use the function + // VectorTools::integrate_difference() + // mentioned in the introduction, and we are + // going to use a ConvergenceTable that + // collects all important data during a run + // and prints it at the end as a table. 
These + // comes from the following two files: #include #include - // And finally, we need to use the - // FEFaceValues class, which is - // declared in the same file as the - // FEValues class: + // And finally, we need to use the + // FEFaceValues class, which is + // declared in the same file as the + // FEValues class: #include - // We need one more include from standard - // C++, which is necessary when we try to - // find out the actual type behind a pointer - // to a base class. We will explain this in - // slightly more detail below. The other two - // include files are obvious then: + // We need one more include from standard + // C++, which is necessary when we try to + // find out the actual type behind a pointer + // to a base class. We will explain this in + // slightly more detail below. The other two + // include files are obvious then: #include #include #include - // The last step before we go on with the - // actual implementation is to open a - // namespace Step7 into which we - // will put everything, as discussed at the - // end of the introduction, and to import the - // members of namespace dealii - // into it: + // The last step before we go on with the + // actual implementation is to open a + // namespace Step7 into which we + // will put everything, as discussed at the + // end of the introduction, and to import the + // members of namespace dealii + // into it: namespace Step7 { using namespace dealii; - // @sect3{Equation data} - - // Before implementing the classes that - // actually solve something, we first declare - // and define some function classes that - // represent right hand side and solution - // classes. Since we want to compare the - // numerically obtained solution to the exact - // continuous one, we need a function object - // that represents the continuous - // solution. On the other hand, we need the - // right hand side function, and that one of - // course shares some characteristics with - // the solution. In order to reduce - // dependencies which arise if we have to - // change something in both classes at the - // same time, we move the common - // characteristics of both functions into a - // base class. - // - // The common characteristics for solution - // (as explained in the introduction, we - // choose a sum of three exponentials) and - // right hand side, are these: the number of - // exponentials, their centers, and their - // half width. We declare them in the - // following class. Since the number of - // exponentials is a constant scalar integral - // quantity, C++ allows its definition - // (i.e. assigning a value) right at the - // place of declaration (i.e. where we - // declare that such a variable exists). + // @sect3{Equation data} + + // Before implementing the classes that + // actually solve something, we first declare + // and define some function classes that + // represent right hand side and solution + // classes. Since we want to compare the + // numerically obtained solution to the exact + // continuous one, we need a function object + // that represents the continuous + // solution. On the other hand, we need the + // right hand side function, and that one of + // course shares some characteristics with + // the solution. In order to reduce + // dependencies which arise if we have to + // change something in both classes at the + // same time, we move the common + // characteristics of both functions into a + // base class. 
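Written out compactly, the base-class pattern described above has roughly the following structure; it is the same arrangement that the declarations below spell out in full, shown here only so the shared pieces are visible at a glance:

    template <int dim>
    struct SolutionBase
    {
      static const unsigned int n_source_centers = 3;
      static const Point<dim>   source_centers[n_source_centers];
      static const double       width;
    };

    template <int dim>
    class Solution : public Function<dim>,
                     protected SolutionBase<dim>
    { /* value() and gradient() overridden below */ };

    template <int dim>
    class RightHandSide : public Function<dim>,
                          protected SolutionBase<dim>
    { /* value() overridden below */ };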
+ // + // The common characteristics for solution + // (as explained in the introduction, we + // choose a sum of three exponentials) and + // right hand side, are these: the number of + // exponentials, their centers, and their + // half width. We declare them in the + // following class. Since the number of + // exponentials is a constant scalar integral + // quantity, C++ allows its definition + // (i.e. assigning a value) right at the + // place of declaration (i.e. where we + // declare that such a variable exists). template class SolutionBase { @@ -132,39 +132,39 @@ namespace Step7 }; - // The variables which denote the - // centers and the width of the - // exponentials have just been - // declared, now we still need to - // assign values to them. Here, we - // can show another small piece of - // template sorcery, namely how we - // can assign different values to - // these variables depending on the - // dimension. We will only use the 2d - // case in the program, but we show - // the 1d case for exposition of a - // useful technique. - // - // First we assign values to the centers for - // the 1d case, where we place the centers - // equidistantly at -1/3, 0, and 1/3. The - // template <> header for this definition - // indicates an explicit specialization. This - // means, that the variable belongs to a - // template, but that instead of providing - // the compiler with a template from which it - // can specialize a concrete variable by - // substituting dim with some concrete - // value, we provide a specialization - // ourselves, in this case for dim=1. If - // the compiler then sees a reference to this - // variable in a place where the template - // argument equals one, it knows that it - // doesn't have to generate the variable from - // a template by substituting dim, but - // can immediately use the following - // definition: + // The variables which denote the + // centers and the width of the + // exponentials have just been + // declared, now we still need to + // assign values to them. Here, we + // can show another small piece of + // template sorcery, namely how we + // can assign different values to + // these variables depending on the + // dimension. We will only use the 2d + // case in the program, but we show + // the 1d case for exposition of a + // useful technique. + // + // First we assign values to the centers for + // the 1d case, where we place the centers + // equidistantly at -1/3, 0, and 1/3. The + // template <> header for this definition + // indicates an explicit specialization. This + // means, that the variable belongs to a + // template, but that instead of providing + // the compiler with a template from which it + // can specialize a concrete variable by + // substituting dim with some concrete + // value, we provide a specialization + // ourselves, in this case for dim=1. If + // the compiler then sees a reference to this + // variable in a place where the template + // argument equals one, it knows that it + // doesn't have to generate the variable from + // a template by substituting dim, but + // can immediately use the following + // definition: template <> const Point<1> SolutionBase<1>::source_centers[SolutionBase<1>::n_source_centers] @@ -172,9 +172,9 @@ namespace Step7 Point<1>(0.0), Point<1>(+1.0 / 3.0) }; - // Likewise, we can provide an explicit - // specialization for dim=2. We place the - // centers for the 2d case as follows: + // Likewise, we can provide an explicit + // specialization for dim=2. 
We place the + // centers for the 2d case as follows: template <> const Point<2> SolutionBase<2>::source_centers[SolutionBase<2>::n_source_centers] @@ -182,170 +182,170 @@ namespace Step7 Point<2>(-0.5, -0.5), Point<2>(+0.5, -0.5) }; - // There remains to assign a value to the - // half-width of the exponentials. We would - // like to use the same value for all - // dimensions. In this case, we simply - // provide the compiler with a template from - // which it can generate a concrete - // instantiation by substituting dim with - // a concrete value: + // There remains to assign a value to the + // half-width of the exponentials. We would + // like to use the same value for all + // dimensions. In this case, we simply + // provide the compiler with a template from + // which it can generate a concrete + // instantiation by substituting dim with + // a concrete value: template const double SolutionBase::width = 1./3.; - // After declaring and defining the - // characteristics of solution and - // right hand side, we can declare - // the classes representing these - // two. They both represent - // continuous functions, so they are - // derived from the Function<dim> - // base class, and they also inherit - // the characteristics defined in the - // SolutionBase class. - // - // The actual classes are declared in the - // following. Note that in order to compute - // the error of the numerical solution - // against the continuous one in the L2 and - // H1 norms, we have to provide value and - // gradient of the exact solution. This is - // more than we have done in previous - // examples, where all we provided was the - // value at one or a list of - // points. Fortunately, the Function - // class also has virtual functions for the - // gradient, so we can simply overload the - // respective virtual member functions in the - // Function base class. Note that the - // gradient of a function in dim space - // dimensions is a vector of size dim, - // i.e. a tensor of rank 1 and dimension - // dim. As for so many other things, the - // library provides a suitable class for - // this. - // - // Just as in previous examples, we - // are forced by the C++ language - // specification to declare a - // seemingly useless default - // constructor. + // After declaring and defining the + // characteristics of solution and + // right hand side, we can declare + // the classes representing these + // two. They both represent + // continuous functions, so they are + // derived from the Function<dim> + // base class, and they also inherit + // the characteristics defined in the + // SolutionBase class. + // + // The actual classes are declared in the + // following. Note that in order to compute + // the error of the numerical solution + // against the continuous one in the L2 and + // H1 norms, we have to provide value and + // gradient of the exact solution. This is + // more than we have done in previous + // examples, where all we provided was the + // value at one or a list of + // points. Fortunately, the Function + // class also has virtual functions for the + // gradient, so we can simply overload the + // respective virtual member functions in the + // Function base class. Note that the + // gradient of a function in dim space + // dimensions is a vector of size dim, + // i.e. a tensor of rank 1 and dimension + // dim. As for so many other things, the + // library provides a suitable class for + // this. 
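For reference, the exact solution represented by this class hierarchy, and the gradient that the member functions below implement, read
$u(\mathbf{x}) = \sum_{i=1}^{3} e^{-|\mathbf{x}-\mathbf{x}_i|^2/w^2}$ and
$\nabla u(\mathbf{x}) = -\frac{2}{w^2} \sum_{i=1}^{3} e^{-|\mathbf{x}-\mathbf{x}_i|^2/w^2}\,(\mathbf{x}-\mathbf{x}_i)$,
with the centers $\mathbf{x}_i$ and the half width $w$ taken from SolutionBase.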
+ // + // Just as in previous examples, we + // are forced by the C++ language + // specification to declare a + // seemingly useless default + // constructor. template class Solution : public Function, - protected SolutionBase + protected SolutionBase { public: Solution () : Function() {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; virtual Tensor<1,dim> gradient (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; }; - // The actual definition of the values and - // gradients of the exact solution class is - // according to their mathematical definition - // and does not need much explanation. - // - // The only thing that is worth - // mentioning is that if we access - // elements of a base class that is - // template dependent (in this case - // the elements of - // SolutionBase<dim>), then the - // C++ language forces us to write - // this->n_source_centers (for - // example). Note that the this-> - // qualification is not necessary if - // the base class is not template - // dependent, and also that the gcc - // compilers prior to version 3.4 don't - // enforce this requirement of the - // C++ standard. The reason why this - // is necessary is complicated; some - // books on C++ may explain it, so if - // you are interested you can look it - // up under the phrase two-stage - // (name) lookup. + // The actual definition of the values and + // gradients of the exact solution class is + // according to their mathematical definition + // and does not need much explanation. + // + // The only thing that is worth + // mentioning is that if we access + // elements of a base class that is + // template dependent (in this case + // the elements of + // SolutionBase<dim>), then the + // C++ language forces us to write + // this->n_source_centers (for + // example). Note that the this-> + // qualification is not necessary if + // the base class is not template + // dependent, and also that the gcc + // compilers prior to version 3.4 don't + // enforce this requirement of the + // C++ standard. The reason why this + // is necessary is complicated; some + // books on C++ may explain it, so if + // you are interested you can look it + // up under the phrase two-stage + // (name) lookup. template double Solution::value (const Point &p, - const unsigned int) const + const unsigned int) const { double return_value = 0; for (unsigned int i=0; in_source_centers; ++i) { - const Point x_minus_xi = p - this->source_centers[i]; - return_value += std::exp(-x_minus_xi.square() / - (this->width * this->width)); + const Point x_minus_xi = p - this->source_centers[i]; + return_value += std::exp(-x_minus_xi.square() / + (this->width * this->width)); } return return_value; } - // Likewise, this is the computation of the - // gradient of the solution. In order to - // accumulate the gradient from the - // contributions of the exponentials, we - // allocate an object return_value that - // denotes the mathematical quantity of a - // tensor of rank 1 and dimension - // dim. Its default constructor sets it - // to the vector containing only zeroes, so - // we need not explicitly care for its - // initialization. - // - // Note that we could as well have taken the - // type of the object to be Point<dim> - // instead of Tensor<1,dim>. Tensors of - // rank 1 and points are almost exchangeable, - // and have only very slightly different - // mathematical meanings. 
In fact, the - // Point<dim> class is derived from the - // Tensor<1,dim> class, which makes up - // for their mutual exchange ability. Their - // main difference is in what they logically - // mean: points are points in space, such as - // the location at which we want to evaluate - // a function (see the type of the first - // argument of this function for example). On - // the other hand, tensors of rank 1 share - // the same transformation properties, for - // example that they need to be rotated in a - // certain way when we change the coordinate - // system; however, they do not share the - // same connotation that points have and are - // only objects in a more abstract space than - // the one spanned by the coordinate - // directions. (In fact, gradients live in - // `reciprocal' space, since the dimension of - // their components is not that of a length, - // but one over length). + // Likewise, this is the computation of the + // gradient of the solution. In order to + // accumulate the gradient from the + // contributions of the exponentials, we + // allocate an object return_value that + // denotes the mathematical quantity of a + // tensor of rank 1 and dimension + // dim. Its default constructor sets it + // to the vector containing only zeroes, so + // we need not explicitly care for its + // initialization. + // + // Note that we could as well have taken the + // type of the object to be Point<dim> + // instead of Tensor<1,dim>. Tensors of + // rank 1 and points are almost exchangeable, + // and have only very slightly different + // mathematical meanings. In fact, the + // Point<dim> class is derived from the + // Tensor<1,dim> class, which makes up + // for their mutual exchange ability. Their + // main difference is in what they logically + // mean: points are points in space, such as + // the location at which we want to evaluate + // a function (see the type of the first + // argument of this function for example). On + // the other hand, tensors of rank 1 share + // the same transformation properties, for + // example that they need to be rotated in a + // certain way when we change the coordinate + // system; however, they do not share the + // same connotation that points have and are + // only objects in a more abstract space than + // the one spanned by the coordinate + // directions. (In fact, gradients live in + // `reciprocal' space, since the dimension of + // their components is not that of a length, + // but one over length). template Tensor<1,dim> Solution::gradient (const Point &p, - const unsigned int) const + const unsigned int) const { Tensor<1,dim> return_value; for (unsigned int i=0; in_source_centers; ++i) { - const Point x_minus_xi = p - this->source_centers[i]; - - // For the gradient, note that - // its direction is along - // (x-x_i), so we add up - // multiples of this distance - // vector, where the factor is - // given by the exponentials. - return_value += (-2 / (this->width * this->width) * - std::exp(-x_minus_xi.square() / - (this->width * this->width)) * - x_minus_xi); + const Point x_minus_xi = p - this->source_centers[i]; + + // For the gradient, note that + // its direction is along + // (x-x_i), so we add up + // multiples of this distance + // vector, where the factor is + // given by the exponentials. 
+ return_value += (-2 / (this->width * this->width) * + std::exp(-x_minus_xi.square() / + (this->width * this->width)) * + x_minus_xi); } return return_value; @@ -353,98 +353,98 @@ namespace Step7 - // Besides the function that - // represents the exact solution, we - // also need a function which we can - // use as right hand side when - // assembling the linear system of - // discretized equations. This is - // accomplished using the following - // class and the following definition - // of its function. Note that here we - // only need the value of the - // function, not its gradients or - // higher derivatives. + // Besides the function that + // represents the exact solution, we + // also need a function which we can + // use as right hand side when + // assembling the linear system of + // discretized equations. This is + // accomplished using the following + // class and the following definition + // of its function. Note that here we + // only need the value of the + // function, not its gradients or + // higher derivatives. template class RightHandSide : public Function, - protected SolutionBase + protected SolutionBase { public: RightHandSide () : Function() {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; }; - // The value of the right hand side - // is given by the negative Laplacian - // of the solution plus the solution - // itself, since we wanted to solve - // Helmholtz's equation: + // The value of the right hand side + // is given by the negative Laplacian + // of the solution plus the solution + // itself, since we wanted to solve + // Helmholtz's equation: template double RightHandSide::value (const Point &p, - const unsigned int) const + const unsigned int) const { double return_value = 0; for (unsigned int i=0; in_source_centers; ++i) { - const Point x_minus_xi = p - this->source_centers[i]; - - // The first contribution is - // the Laplacian: - return_value += ((2*dim - 4*x_minus_xi.square()/ - (this->width * this->width)) / - (this->width * this->width) * - std::exp(-x_minus_xi.square() / - (this->width * this->width))); - // And the second is the - // solution itself: - return_value += std::exp(-x_minus_xi.square() / - (this->width * this->width)); + const Point x_minus_xi = p - this->source_centers[i]; + + // The first contribution is + // the Laplacian: + return_value += ((2*dim - 4*x_minus_xi.square()/ + (this->width * this->width)) / + (this->width * this->width) * + std::exp(-x_minus_xi.square() / + (this->width * this->width))); + // And the second is the + // solution itself: + return_value += std::exp(-x_minus_xi.square() / + (this->width * this->width)); } return return_value; } - // @sect3{The Helmholtz solver class} - - // Then we need the class that does all the - // work. Except for its name, its interface - // is mostly the same as in previous - // examples. - // - // One of the differences is that we will use - // this class in several modes: for different - // finite elements, as well as for adaptive - // and global refinement. The decision - // whether global or adaptive refinement - // shall be used is communicated to the - // constructor of this class through an - // enumeration type declared at the top of - // the class. The constructor then takes a - // finite element object and the refinement - // mode as arguments. 
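It may help to see why the loop in RightHandSide::value() above accumulates exactly these two contributions: for the Helmholtz equation $-\Delta u + u = f$ and a single exponential term one computes
$-\Delta\, e^{-|\mathbf{x}-\mathbf{x}_i|^2/w^2} = \frac{2\,\mathrm{dim} - 4\,|\mathbf{x}-\mathbf{x}_i|^2/w^2}{w^2}\; e^{-|\mathbf{x}-\mathbf{x}_i|^2/w^2}$,
which is the first term added in the loop, while the second term is simply the exponential itself, i.e. the value of the solution.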
- // - // The rest of the member functions are as - // before except for the process_solution - // function: After the solution has been - // computed, we perform some analysis on it, - // such as computing the error in various - // norms. To enable some output, it requires - // the number of the refinement cycle, and - // consequently gets it as an argument. + // @sect3{The Helmholtz solver class} + + // Then we need the class that does all the + // work. Except for its name, its interface + // is mostly the same as in previous + // examples. + // + // One of the differences is that we will use + // this class in several modes: for different + // finite elements, as well as for adaptive + // and global refinement. The decision + // whether global or adaptive refinement + // shall be used is communicated to the + // constructor of this class through an + // enumeration type declared at the top of + // the class. The constructor then takes a + // finite element object and the refinement + // mode as arguments. + // + // The rest of the member functions are as + // before except for the process_solution + // function: After the solution has been + // computed, we perform some analysis on it, + // such as computing the error in various + // norms. To enable some output, it requires + // the number of the refinement cycle, and + // consequently gets it as an argument. template class HelmholtzProblem { public: enum RefinementMode { - global_refinement, adaptive_refinement + global_refinement, adaptive_refinement }; HelmholtzProblem (const FiniteElement &fe, - const RefinementMode refinement_mode); + const RefinementMode refinement_mode); ~HelmholtzProblem (); @@ -457,192 +457,192 @@ namespace Step7 void refine_grid (); void process_solution (const unsigned int cycle); - // Now for the data elements of - // this class. Among the variables - // that we have already used in - // previous examples, only the - // finite element object differs: - // The finite elements which the - // objects of this class operate - // on are passed to the - // constructor of this class. It - // has to store a pointer to the - // finite element for the member - // functions to use. Now, for the - // present class there is no big - // deal in that, but since we - // want to show techniques rather - // than solutions in these - // programs, we will here point - // out a problem that often - // occurs -- and of course the - // right solution as well. - // - // Consider the following - // situation that occurs in all - // the example programs: we have - // a triangulation object, and we - // have a finite element object, - // and we also have an object of - // type DoFHandler that uses - // both of the first two. These - // three objects all have a - // lifetime that is rather long - // compared to most other - // objects: they are basically - // set at the beginning of the - // program or an outer loop, and - // they are destroyed at the very - // end. The question is: can we - // guarantee that the two objects - // which the DoFHandler uses, - // live at least as long as they - // are in use? This means that - // the DoFHandler must have some - // kind of lock on the - // destruction of the other - // objects, and it can only - // release this lock once it has - // cleared all active references - // to these objects. 
We have seen - // what happens if we violate - // this order of destruction in - // the previous example program: - // an exception is thrown that - // terminates the program in - // order to notify the programmer - // of this potentially dangerous - // state where an object is - // pointed to that no longer - // persists. - // - // We will show here how the - // library managed to find out - // that there are still active - // references to an - // object. Basically, the method - // is along the following line: - // all objects that are subject - // to such potentially dangerous - // pointers are derived from a - // class called - // Subscriptor. For example, - // the Triangulation, - // DoFHandler, and a base - // class of the FiniteElement - // class are derived from - // Subscriptor. This latter - // class does not offer much - // functionality, but it has a - // built-in counter which we can - // subscribe to, thus the name of - // the class. Whenever we - // initialize a pointer to that - // object, we can increase its use - // counter, and when we move away - // our pointer or do not need it - // any more, we decrease the - // counter again. This way, we - // can always check how many - // objects still use that - // object. - // - // On the other hand, if an object of a - // class that is derived from the - // Subscriptor class is destroyed, it - // also has to call the destructor of the - // Subscriptor class. In this - // destructor, there - // will then be a check whether the - // counter is really zero. If - // yes, then there are no active - // references to this object any - // more, and we can safely - // destroy it. If the counter is - // non-zero, however, then the - // destruction would result in - // stale and thus potentially - // dangerous pointers, and we - // rather throw an exception to - // alert the programmer that this - // is doing something dangerous - // and the program better be - // fixed. - // - // While this certainly all - // sounds very well, it has some - // problems in terms of - // usability: what happens if I - // forget to increase the counter - // when I let a pointer point to - // such an object? And what - // happens if I forget to - // decrease it again? Note that - // this may lead to extremely - // difficult to find bugs, since - // the place where we have - // forgotten something may be - // far away from the place - // where the check for zeroness - // of the counter upon - // destruction actually - // fails. This kind of bug is - // rather annoying and usually very - // hard to fix. - // - // The solution to this problem - // is to again use some C++ - // trickery: we create a class - // that acts just like a pointer, - // i.e. can be dereferenced, can - // be assigned to and from other - // pointers, and so on. This can - // be done by overloading the - // several dereferencing - // operators of that - // class. Within the - // constructors, destructors, and - // assignment operators of that - // class, we can however also - // manage increasing or - // decreasing the use counters of - // the objects we point - // to. Objects of that class - // therefore can be used just - // like ordinary pointers to - // objects, but they also serve - // to change the use counters of - // those objects without the need - // for the programmer to do so - // herself. The class that - // actually does all this is - // called SmartPointer and - // takes as template parameter - // the data type of the object - // which it shall point to. 
The - // latter type may be any class, - // as long as it is derived from - // the Subscriptor class. - // - // In the present example program, we - // want to protect the finite element - // object from the situation that for - // some reason the finite element pointed - // to is destroyed while still in use. We - // therefore use a SmartPointer to - // the finite element object; since the - // finite element object is actually - // never changed in our computations, we - // pass a const FiniteElement<dim> as - // template argument to the - // SmartPointer class. Note that the - // pointer so declared is assigned at - // construction time of the solve object, - // and destroyed upon destruction, so the - // lock on the destruction of the finite - // element object extends throughout the - // lifetime of this HelmholtzProblem - // object. + // Now for the data elements of + // this class. Among the variables + // that we have already used in + // previous examples, only the + // finite element object differs: + // The finite elements which the + // objects of this class operate + // on are passed to the + // constructor of this class. It + // has to store a pointer to the + // finite element for the member + // functions to use. Now, for the + // present class there is no big + // deal in that, but since we + // want to show techniques rather + // than solutions in these + // programs, we will here point + // out a problem that often + // occurs -- and of course the + // right solution as well. + // + // Consider the following + // situation that occurs in all + // the example programs: we have + // a triangulation object, and we + // have a finite element object, + // and we also have an object of + // type DoFHandler that uses + // both of the first two. These + // three objects all have a + // lifetime that is rather long + // compared to most other + // objects: they are basically + // set at the beginning of the + // program or an outer loop, and + // they are destroyed at the very + // end. The question is: can we + // guarantee that the two objects + // which the DoFHandler uses, + // live at least as long as they + // are in use? This means that + // the DoFHandler must have some + // kind of lock on the + // destruction of the other + // objects, and it can only + // release this lock once it has + // cleared all active references + // to these objects. We have seen + // what happens if we violate + // this order of destruction in + // the previous example program: + // an exception is thrown that + // terminates the program in + // order to notify the programmer + // of this potentially dangerous + // state where an object is + // pointed to that no longer + // persists. + // + // We will show here how the + // library managed to find out + // that there are still active + // references to an + // object. Basically, the method + // is along the following line: + // all objects that are subject + // to such potentially dangerous + // pointers are derived from a + // class called + // Subscriptor. For example, + // the Triangulation, + // DoFHandler, and a base + // class of the FiniteElement + // class are derived from + // Subscriptor. This latter + // class does not offer much + // functionality, but it has a + // built-in counter which we can + // subscribe to, thus the name of + // the class. 
Whenever we + // initialize a pointer to that + // object, we can increase its use + // counter, and when we move away + // our pointer or do not need it + // any more, we decrease the + // counter again. This way, we + // can always check how many + // objects still use that + // object. + // + // On the other hand, if an object of a + // class that is derived from the + // Subscriptor class is destroyed, it + // also has to call the destructor of the + // Subscriptor class. In this + // destructor, there + // will then be a check whether the + // counter is really zero. If + // yes, then there are no active + // references to this object any + // more, and we can safely + // destroy it. If the counter is + // non-zero, however, then the + // destruction would result in + // stale and thus potentially + // dangerous pointers, and we + // rather throw an exception to + // alert the programmer that this + // is doing something dangerous + // and the program better be + // fixed. + // + // While this certainly all + // sounds very well, it has some + // problems in terms of + // usability: what happens if I + // forget to increase the counter + // when I let a pointer point to + // such an object? And what + // happens if I forget to + // decrease it again? Note that + // this may lead to extremely + // difficult to find bugs, since + // the place where we have + // forgotten something may be + // far away from the place + // where the check for zeroness + // of the counter upon + // destruction actually + // fails. This kind of bug is + // rather annoying and usually very + // hard to fix. + // + // The solution to this problem + // is to again use some C++ + // trickery: we create a class + // that acts just like a pointer, + // i.e. can be dereferenced, can + // be assigned to and from other + // pointers, and so on. This can + // be done by overloading the + // several dereferencing + // operators of that + // class. Within the + // constructors, destructors, and + // assignment operators of that + // class, we can however also + // manage increasing or + // decreasing the use counters of + // the objects we point + // to. Objects of that class + // therefore can be used just + // like ordinary pointers to + // objects, but they also serve + // to change the use counters of + // those objects without the need + // for the programmer to do so + // herself. The class that + // actually does all this is + // called SmartPointer and + // takes as template parameter + // the data type of the object + // which it shall point to. The + // latter type may be any class, + // as long as it is derived from + // the Subscriptor class. + // + // In the present example program, we + // want to protect the finite element + // object from the situation that for + // some reason the finite element pointed + // to is destroyed while still in use. We + // therefore use a SmartPointer to + // the finite element object; since the + // finite element object is actually + // never changed in our computations, we + // pass a const FiniteElement<dim> as + // template argument to the + // SmartPointer class. Note that the + // pointer so declared is assigned at + // construction time of the solve object, + // and destroyed upon destruction, so the + // lock on the destruction of the finite + // element object extends throughout the + // lifetime of this HelmholtzProblem + // object. 
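A condensed sketch of the locking mechanism just described, in isolation rather than as it appears in this program; FE_Q is used only because it is a FiniteElement and therefore derived from Subscriptor:

    FE_Q<2> fe (1);
    {
      // subscribing: this constructor increases the use counter of fe
      SmartPointer<const FiniteElement<2> > fe_ptr (&fe);

      // a SmartPointer is used like an ordinary pointer:
      std::cout << fe_ptr->get_name() << std::endl;
    }
    // fe_ptr has gone out of scope here and released its subscription
    // again; only now can fe be destroyed without the counter check in
    // ~Subscriptor() complaining about a remaining user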
Triangulation triangulation; DoFHandler dof_handler; @@ -656,58 +656,58 @@ namespace Step7 Vector solution; Vector system_rhs; - // The second to last variable - // stores the refinement mode - // passed to the - // constructor. Since it is only - // set in the constructor, we can - // declare this variable - // constant, to avoid that - // someone sets it involuntarily - // (e.g. in an `if'-statement - // where == was written as = by - // chance). + // The second to last variable + // stores the refinement mode + // passed to the + // constructor. Since it is only + // set in the constructor, we can + // declare this variable + // constant, to avoid that + // someone sets it involuntarily + // (e.g. in an `if'-statement + // where == was written as = by + // chance). const RefinementMode refinement_mode; - // For each refinement level some data - // (like the number of cells, or the L2 - // error of the numerical solution) will - // be generated and later printed. The - // TableHandler can be used to - // collect all this data and to output it - // at the end of the run as a table in a - // simple text or in LaTeX - // format. Here we don't only use the - // TableHandler but we use the - // derived class ConvergenceTable - // that additionally evaluates rates of - // convergence: + // For each refinement level some data + // (like the number of cells, or the L2 + // error of the numerical solution) will + // be generated and later printed. The + // TableHandler can be used to + // collect all this data and to output it + // at the end of the run as a table in a + // simple text or in LaTeX + // format. Here we don't only use the + // TableHandler but we use the + // derived class ConvergenceTable + // that additionally evaluates rates of + // convergence: ConvergenceTable convergence_table; }; - // @sect3{The HelmholtzProblem class implementation} + // @sect3{The HelmholtzProblem class implementation} - // @sect4{HelmholtzProblem::HelmholtzProblem} + // @sect4{HelmholtzProblem::HelmholtzProblem} - // In the constructor of this class, - // we only set the variables passed - // as arguments, and associate the - // DoF handler object with the - // triangulation (which is empty at - // present, however). + // In the constructor of this class, + // we only set the variables passed + // as arguments, and associate the + // DoF handler object with the + // triangulation (which is empty at + // present, however). template HelmholtzProblem::HelmholtzProblem (const FiniteElement &fe, - const RefinementMode refinement_mode) : - dof_handler (triangulation), - fe (&fe), - refinement_mode (refinement_mode) + const RefinementMode refinement_mode) : + dof_handler (triangulation), + fe (&fe), + refinement_mode (refinement_mode) {} - // @sect4{HelmholtzProblem::~HelmholtzProblem} + // @sect4{HelmholtzProblem::~HelmholtzProblem} - // This is no different than before: + // This is no different than before: template HelmholtzProblem::~HelmholtzProblem () { @@ -715,55 +715,55 @@ namespace Step7 } - // @sect4{HelmholtzProblem::setup_system} - - // The following function sets up the - // degrees of freedom, sizes of - // matrices and vectors, etc. Most of - // its functionality has been showed - // in previous examples, the only - // difference being the renumbering - // step immediately after first - // distributing degrees of freedom. - // - // Renumbering the degrees of - // freedom is not overly difficult, - // as long as you use one of the - // algorithms included in the - // library. 
It requires only a single - // line of code. Some more information - // on this can be found in step-2. - // - // Note, however, that when you - // renumber the degrees of freedom, - // you must do so immediately after - // distributing them, since such - // things as hanging nodes, the - // sparsity pattern etc. depend on - // the absolute numbers which are - // altered by renumbering. - // - // The reason why we introduce renumbering - // here is that it is a relatively cheap - // operation but often has a beneficial - // effect: While the CG iteration itself is - // independent of the actual ordering of - // degrees of freedom, we will use SSOR as a - // preconditioner. SSOR goes through all - // degrees of freedom and does some - // operations that depend on what happened - // before; the SSOR operation is therefore - // not independent of the numbering of - // degrees of freedom, and it is known that - // its performance improves by using - // renumbering techniques. A little - // experiment shows that indeed, for example, - // the number of CG iterations for the fifth - // refinement cycle of adaptive refinement - // with the Q1 program used here is 40 - // without, but 36 with renumbering. Similar - // savings can generally be observed for all - // the computations in this program. + // @sect4{HelmholtzProblem::setup_system} + + // The following function sets up the + // degrees of freedom, sizes of + // matrices and vectors, etc. Most of + // its functionality has been showed + // in previous examples, the only + // difference being the renumbering + // step immediately after first + // distributing degrees of freedom. + // + // Renumbering the degrees of + // freedom is not overly difficult, + // as long as you use one of the + // algorithms included in the + // library. It requires only a single + // line of code. Some more information + // on this can be found in step-2. + // + // Note, however, that when you + // renumber the degrees of freedom, + // you must do so immediately after + // distributing them, since such + // things as hanging nodes, the + // sparsity pattern etc. depend on + // the absolute numbers which are + // altered by renumbering. + // + // The reason why we introduce renumbering + // here is that it is a relatively cheap + // operation but often has a beneficial + // effect: While the CG iteration itself is + // independent of the actual ordering of + // degrees of freedom, we will use SSOR as a + // preconditioner. SSOR goes through all + // degrees of freedom and does some + // operations that depend on what happened + // before; the SSOR operation is therefore + // not independent of the numbering of + // degrees of freedom, and it is known that + // its performance improves by using + // renumbering techniques. A little + // experiment shows that indeed, for example, + // the number of CG iterations for the fifth + // refinement cycle of adaptive refinement + // with the Q1 program used here is 40 + // without, but 36 with renumbering. Similar + // savings can generally be observed for all + // the computations in this program. 
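The "single line of code" referred to above is a call into the DoFRenumbering namespace, placed immediately after the degrees of freedom have been distributed. As a sketch (assuming the Cuthill-McKee algorithm, declared in <deal.II/dofs/dof_renumbering.h>):

    dof_handler.distribute_dofs (*fe);
    DoFRenumbering::Cuthill_McKee (dof_handler);  // renumber right away, before the
                                                  // constraints and the sparsity
                                                  // pattern are built from DoF indices
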
template void HelmholtzProblem::setup_system () { @@ -772,12 +772,12 @@ namespace Step7 hanging_node_constraints.clear (); DoFTools::make_hanging_node_constraints (dof_handler, - hanging_node_constraints); + hanging_node_constraints); hanging_node_constraints.close (); sparsity_pattern.reinit (dof_handler.n_dofs(), - dof_handler.n_dofs(), - dof_handler.max_couplings_between_dofs()); + dof_handler.n_dofs(), + dof_handler.max_couplings_between_dofs()); DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); hanging_node_constraints.condense (sparsity_pattern); sparsity_pattern.compress(); @@ -789,27 +789,27 @@ namespace Step7 } - // @sect4{HelmholtzProblem::assemble_system} - - // Assembling the system of equations - // for the problem at hand is mostly - // as for the example programs - // before. However, some things have - // changed anyway, so we comment on - // this function fairly extensively. - // - // At the top of the function you will find - // the usual assortment of variable - // declarations. Compared to previous - // programs, of importance is only that we - // expect to solve problems also with - // bi-quadratic elements and therefore have - // to use sufficiently accurate quadrature - // formula. In addition, we need to compute - // integrals over faces, i.e. dim-1 - // dimensional objects. The declaration of a - // face quadrature formula is then - // straightforward: + // @sect4{HelmholtzProblem::assemble_system} + + // Assembling the system of equations + // for the problem at hand is mostly + // as for the example programs + // before. However, some things have + // changed anyway, so we comment on + // this function fairly extensively. + // + // At the top of the function you will find + // the usual assortment of variable + // declarations. Compared to previous + // programs, of importance is only that we + // expect to solve problems also with + // bi-quadratic elements and therefore have + // to use sufficiently accurate quadrature + // formula. In addition, we need to compute + // integrals over faces, i.e. dim-1 + // dimensional objects. The declaration of a + // face quadrature formula is then + // straightforward: template void HelmholtzProblem::assemble_system () { @@ -826,272 +826,272 @@ namespace Step7 std::vector local_dof_indices (dofs_per_cell); - // Then we need objects which can - // evaluate the values, gradients, - // etc of the shape functions at - // the quadrature points. While it - // seems that it should be feasible - // to do it with one object for - // both domain and face integrals, - // there is a subtle difference - // since the weights in the domain - // integrals include the measure of - // the cell in the domain, while - // the face integral quadrature - // requires the measure of the face - // in a lower-dimensional - // manifold. Internally these two - // classes are rooted in a common - // base class which does most of - // the work and offers the same - // interface to both domain and - // interface integrals. - // - // For the domain integrals in the - // bilinear form for Helmholtz's - // equation, we need to compute the - // values and gradients, as well as - // the weights at the quadrature - // points. Furthermore, we need the - // quadrature points on the real - // cell (rather than on the unit - // cell) to evaluate the right hand - // side function. The object we use - // to get at this information is - // the FEValues class discussed - // previously. 
- // - // For the face integrals, we only - // need the values of the shape - // functions, as well as the - // weights. We also need the normal - // vectors and quadrature points on - // the real cell since we want to - // determine the Neumann values - // from the exact solution object - // (see below). The class that gives - // us this information is called - // FEFaceValues: + // Then we need objects which can + // evaluate the values, gradients, + // etc of the shape functions at + // the quadrature points. While it + // seems that it should be feasible + // to do it with one object for + // both domain and face integrals, + // there is a subtle difference + // since the weights in the domain + // integrals include the measure of + // the cell in the domain, while + // the face integral quadrature + // requires the measure of the face + // in a lower-dimensional + // manifold. Internally these two + // classes are rooted in a common + // base class which does most of + // the work and offers the same + // interface to both domain and + // interface integrals. + // + // For the domain integrals in the + // bilinear form for Helmholtz's + // equation, we need to compute the + // values and gradients, as well as + // the weights at the quadrature + // points. Furthermore, we need the + // quadrature points on the real + // cell (rather than on the unit + // cell) to evaluate the right hand + // side function. The object we use + // to get at this information is + // the FEValues class discussed + // previously. + // + // For the face integrals, we only + // need the values of the shape + // functions, as well as the + // weights. We also need the normal + // vectors and quadrature points on + // the real cell since we want to + // determine the Neumann values + // from the exact solution object + // (see below). The class that gives + // us this information is called + // FEFaceValues: FEValues fe_values (*fe, quadrature_formula, - update_values | update_gradients | - update_quadrature_points | update_JxW_values); + update_values | update_gradients | + update_quadrature_points | update_JxW_values); FEFaceValues fe_face_values (*fe, face_quadrature_formula, - update_values | update_quadrature_points | - update_normal_vectors | update_JxW_values); - - // Then we need some objects - // already known from previous - // examples: An object denoting the - // right hand side function, its - // values at the quadrature points - // on a cell, the cell matrix and - // right hand side, and the indices - // of the degrees of freedom on a - // cell. - // - // Note that the operations we will do with - // the right hand side object are only - // querying data, never changing the - // object. We can therefore declare it - // const: + update_values | update_quadrature_points | + update_normal_vectors | update_JxW_values); + + // Then we need some objects + // already known from previous + // examples: An object denoting the + // right hand side function, its + // values at the quadrature points + // on a cell, the cell matrix and + // right hand side, and the indices + // of the degrees of freedom on a + // cell. + // + // Note that the operations we will do with + // the right hand side object are only + // querying data, never changing the + // object. We can therefore declare it + // const: const RightHandSide right_hand_side; std::vector rhs_values (n_q_points); - // Finally we define an object - // denoting the exact solution - // function. 
We will use it to - // compute the Neumann values at - // the boundary from it. Usually, - // one would of course do so using - // a separate object, in particular - // since the exact solution is generally - // unknown while the Neumann values - // are prescribed. We will, - // however, be a little bit lazy - // and use what we already have in - // information. Real-life programs - // would to go other ways here, of - // course. + // Finally we define an object + // denoting the exact solution + // function. We will use it to + // compute the Neumann values at + // the boundary from it. Usually, + // one would of course do so using + // a separate object, in particular + // since the exact solution is generally + // unknown while the Neumann values + // are prescribed. We will, + // however, be a little bit lazy + // and use what we already have in + // information. Real-life programs + // would to go other ways here, of + // course. const Solution exact_solution; - // Now for the main loop over all - // cells. This is mostly unchanged - // from previous examples, so we - // only comment on the things that - // have changed. + // Now for the main loop over all + // cells. This is mostly unchanged + // from previous examples, so we + // only comment on the things that + // have changed. typename DoFHandler::active_cell_iterator cell = dof_handler.begin_active(), endc = dof_handler.end(); for (; cell!=endc; ++cell) { - cell_matrix = 0; - cell_rhs = 0; - - fe_values.reinit (cell); - - right_hand_side.value_list (fe_values.get_quadrature_points(), - rhs_values); - - for (unsigned int q_point=0; q_point1, - // which is the value that we - // have assigned to that - // portions of the boundary - // composing Gamma2 in the - // run() function further - // below. (The - // default value of boundary - // indicators is 0, so faces - // can only have an indicator - // equal to 1 if we have - // explicitly set it.) - for (unsigned int face=0; face::faces_per_cell; ++face) - if (cell->face(face)->at_boundary() - && - (cell->face(face)->boundary_indicator() == 1)) - { - // If we came into here, - // then we have found an - // external face - // belonging to - // Gamma2. Next, we have - // to compute the values - // of the shape functions - // and the other - // quantities which we - // will need for the - // computation of the - // contour integral. This - // is done using the - // reinit function - // which we already know - // from the FEValue - // class: - fe_face_values.reinit (cell, face); - - // And we can then - // perform the - // integration by using a - // loop over all - // quadrature points. - // - // On each quadrature point, we - // first compute the value of the - // normal derivative. We do so - // using the gradient of the - // exact solution and the normal - // vector to the face at the - // present quadrature point - // obtained from the - // fe_face_values - // object. This is then used to - // compute the additional - // contribution of this face to - // the right hand side: - for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); - for (unsigned int i=0; i1, + // which is the value that we + // have assigned to that + // portions of the boundary + // composing Gamma2 in the + // run() function further + // below. (The + // default value of boundary + // indicators is 0, so faces + // can only have an indicator + // equal to 1 if we have + // explicitly set it.) 
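For reference, the two contributions accumulated in the loops that follow -- the cell term of the Helmholtz bilinear form and, on faces belonging to Gamma2, the Neumann boundary term -- look schematically as follows (variable names as declared earlier in this function; n_face_q_points denotes the number of face quadrature points):

    // Cell term: (grad phi_i, grad phi_j) + (phi_i, phi_j) and (f, phi_i)
    for (unsigned int q_point=0; q_point<n_q_points; ++q_point)
      for (unsigned int i=0; i<dofs_per_cell; ++i)
        {
          for (unsigned int j=0; j<dofs_per_cell; ++j)
            cell_matrix(i,j) += ((fe_values.shape_grad(i,q_point) *
                                  fe_values.shape_grad(j,q_point)
                                  +
                                  fe_values.shape_value(i,q_point) *
                                  fe_values.shape_value(j,q_point)) *
                                 fe_values.JxW(q_point));

          cell_rhs(i) += (fe_values.shape_value(i,q_point) *
                          rhs_values[q_point] *
                          fe_values.JxW(q_point));
        }

    // Face term on Gamma2: the Neumann value g = n . grad(u_exact) is
    // reconstructed from the exact solution and enters the right hand side
    for (unsigned int q_point=0; q_point<n_face_q_points; ++q_point)
      {
        const double neumann_value
          = (exact_solution.gradient (fe_face_values.quadrature_point(q_point)) *
             fe_face_values.normal_vector(q_point));

        for (unsigned int i=0; i<dofs_per_cell; ++i)
          cell_rhs(i) += (neumann_value *
                          fe_face_values.shape_value(i,q_point) *
                          fe_face_values.JxW(q_point));
      }
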
+ for (unsigned int face=0; face::faces_per_cell; ++face) + if (cell->face(face)->at_boundary() + && + (cell->face(face)->boundary_indicator() == 1)) + { + // If we came into here, + // then we have found an + // external face + // belonging to + // Gamma2. Next, we have + // to compute the values + // of the shape functions + // and the other + // quantities which we + // will need for the + // computation of the + // contour integral. This + // is done using the + // reinit function + // which we already know + // from the FEValue + // class: + fe_face_values.reinit (cell, face); + + // And we can then + // perform the + // integration by using a + // loop over all + // quadrature points. + // + // On each quadrature point, we + // first compute the value of the + // normal derivative. We do so + // using the gradient of the + // exact solution and the normal + // vector to the face at the + // present quadrature point + // obtained from the + // fe_face_values + // object. This is then used to + // compute the additional + // contribution of this face to + // the right hand side: + for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); + for (unsigned int i=0; iinterpolate_boundary_values) - // does not represent the whole - // boundary any more. Rather, it is - // that portion of the boundary - // which we have not assigned - // another indicator (see - // below). The degrees of freedom - // at the boundary that do not - // belong to Gamma1 are therefore - // excluded from the interpolation - // of boundary values, just as - // we want. + // Likewise, elimination and treatment of + // boundary values has been shown + // previously. + // + // We note, however that now + // the boundary indicator for which + // we interpolate boundary values + // (denoted by the second parameter + // to + // interpolate_boundary_values) + // does not represent the whole + // boundary any more. Rather, it is + // that portion of the boundary + // which we have not assigned + // another indicator (see + // below). The degrees of freedom + // at the boundary that do not + // belong to Gamma1 are therefore + // excluded from the interpolation + // of boundary values, just as + // we want. hanging_node_constraints.condense (system_matrix); hanging_node_constraints.condense (system_rhs); std::map boundary_values; VectorTools::interpolate_boundary_values (dof_handler, - 0, - Solution(), - boundary_values); + 0, + Solution(), + boundary_values); MatrixTools::apply_boundary_values (boundary_values, - system_matrix, - solution, - system_rhs); + system_matrix, + solution, + system_rhs); } - // @sect4{HelmholtzProblem::solve} + // @sect4{HelmholtzProblem::solve} - // Solving the system of equations is - // done in the same way as before: + // Solving the system of equations is + // done in the same way as before: template void HelmholtzProblem::solve () { @@ -1102,255 +1102,255 @@ namespace Step7 preconditioner.initialize(system_matrix, 1.2); cg.solve (system_matrix, solution, system_rhs, - preconditioner); + preconditioner); hanging_node_constraints.distribute (solution); } - // @sect4{HelmholtzProblem::refine_grid} - - // Now for the function doing grid - // refinement. Depending on the - // refinement mode passed to the - // constructor, we do global or - // adaptive refinement. - // - // Global refinement is simple, - // so there is - // not much to comment on. - // In case of adaptive - // refinement, we use the same - // functions and classes as in - // the previous example - // program. 
Note that one - // could treat Neumann - // boundaries differently than - // Dirichlet boundaries, and - // one should in fact do so - // here since we have Neumann - // boundary conditions on part - // of the boundaries, but - // since we don't have a - // function here that - // describes the Neumann - // values (we only construct - // these values from the exact - // solution when assembling - // the matrix), we omit this - // detail even though they would - // not be hard to add. - // - // At the end of the switch, we have a - // default case that looks slightly strange: - // an Assert statement with a false - // condition. Since the Assert macro - // raises an error whenever the condition is - // false, this means that whenever we hit - // this statement the program will be - // aborted. This in intentional: Right now we - // have only implemented two refinement - // strategies (global and adaptive), but - // someone might want to add a third strategy - // (for example adaptivity with a different - // refinement criterion) and add a third - // member to the enumeration that determines - // the refinement mode. If it weren't for the - // default case of the switch statement, this - // function would simply run to its end - // without doing anything. This is most - // likely not what was intended. One of the - // defensive programming techniques that you - // will find all over the deal.II library is - // therefore to always have default cases - // that abort, to make sure that values not - // considered when listing the cases in the - // switch statement are eventually caught, - // and forcing programmers to add code to - // handle them. We will use this same - // technique in other places further down as - // well. + // @sect4{HelmholtzProblem::refine_grid} + + // Now for the function doing grid + // refinement. Depending on the + // refinement mode passed to the + // constructor, we do global or + // adaptive refinement. + // + // Global refinement is simple, + // so there is + // not much to comment on. + // In case of adaptive + // refinement, we use the same + // functions and classes as in + // the previous example + // program. Note that one + // could treat Neumann + // boundaries differently than + // Dirichlet boundaries, and + // one should in fact do so + // here since we have Neumann + // boundary conditions on part + // of the boundaries, but + // since we don't have a + // function here that + // describes the Neumann + // values (we only construct + // these values from the exact + // solution when assembling + // the matrix), we omit this + // detail even though they would + // not be hard to add. + // + // At the end of the switch, we have a + // default case that looks slightly strange: + // an Assert statement with a false + // condition. Since the Assert macro + // raises an error whenever the condition is + // false, this means that whenever we hit + // this statement the program will be + // aborted. This in intentional: Right now we + // have only implemented two refinement + // strategies (global and adaptive), but + // someone might want to add a third strategy + // (for example adaptivity with a different + // refinement criterion) and add a third + // member to the enumeration that determines + // the refinement mode. If it weren't for the + // default case of the switch statement, this + // function would simply run to its end + // without doing anything. This is most + // likely not what was intended. 
One of the + // defensive programming techniques that you + // will find all over the deal.II library is + // therefore to always have default cases + // that abort, to make sure that values not + // considered when listing the cases in the + // switch statement are eventually caught, + // and forcing programmers to add code to + // handle them. We will use this same + // technique in other places further down as + // well. template void HelmholtzProblem::refine_grid () { switch (refinement_mode) { - case global_refinement: - { - triangulation.refine_global (1); - break; - } - - case adaptive_refinement: - { - Vector estimated_error_per_cell (triangulation.n_active_cells()); - - typename FunctionMap::type neumann_boundary; - KellyErrorEstimator::estimate (dof_handler, - QGauss(3), - neumann_boundary, - solution, - estimated_error_per_cell); - - GridRefinement::refine_and_coarsen_fixed_number (triangulation, - estimated_error_per_cell, - 0.3, 0.03); - - triangulation.execute_coarsening_and_refinement (); - - break; - } - - default: - { - Assert (false, ExcNotImplemented()); - } + case global_refinement: + { + triangulation.refine_global (1); + break; + } + + case adaptive_refinement: + { + Vector estimated_error_per_cell (triangulation.n_active_cells()); + + typename FunctionMap::type neumann_boundary; + KellyErrorEstimator::estimate (dof_handler, + QGauss(3), + neumann_boundary, + solution, + estimated_error_per_cell); + + GridRefinement::refine_and_coarsen_fixed_number (triangulation, + estimated_error_per_cell, + 0.3, 0.03); + + triangulation.execute_coarsening_and_refinement (); + + break; + } + + default: + { + Assert (false, ExcNotImplemented()); + } } } - // @sect4{HelmholtzProblem::process_solution} + // @sect4{HelmholtzProblem::process_solution} - // Finally we want to process the solution - // after it has been computed. For this, we - // integrate the error in various norms, and - // we generate tables that will later be used - // to display the convergence against the - // continuous solution in a nice format. + // Finally we want to process the solution + // after it has been computed. For this, we + // integrate the error in various norms, and + // we generate tables that will later be used + // to display the convergence against the + // continuous solution in a nice format. template void HelmholtzProblem::process_solution (const unsigned int cycle) { - // Our first task is to compute - // error norms. In order to integrate - // the difference between computed - // numerical solution and the - // continuous solution (described - // by the Solution class - // defined at the top of this - // file), we first need a vector - // that will hold the norm of the - // error on each cell. Since - // accuracy with 16 digits is not - // so important for these - // quantities, we save some memory - // by using float instead of - // double values. - // - // The next step is to use a function - // from the library which computes the - // error in the L2 norm on each cell. - // We have to pass it the DoF handler - // object, the vector holding the - // nodal values of the numerical - // solution, the continuous - // solution as a function object, - // the vector into which it shall - // place the norm of the error on - // each cell, a quadrature rule by - // which this norm shall be - // computed, and the type of norm - // to be used. Here, we use a Gauss - // formula with three points in - // each space direction, and - // compute the L2 norm. 
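In formulas, the global error norms assembled below from the cell-wise values are (with u the exact and u_h the computed solution):

    \|u-u_h\|_{L^2(\Omega)}      = \Bigl(\sum_K \|u-u_h\|^2_{L^2(K)}\Bigr)^{1/2},
    |u-u_h|_{H^1(\Omega)}        = \Bigl(\sum_K |u-u_h|^2_{H^1(K)}\Bigr)^{1/2},
    \|u-u_h\|_{L^\infty(\Omega)} \approx \max_K \|u-u_h\|_{L^\infty(K)},

where the last quantity is only evaluated at quadrature points, as the comments below explain.
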
- // - // Finally, we want to get the - // global L2 norm. This can of - // course be obtained by summing - // the squares of the norms on each - // cell, and taking the square root - // of that value. This is - // equivalent to taking the l2 - // (lower case l) norm of the - // vector of norms on each cell: + // Our first task is to compute + // error norms. In order to integrate + // the difference between computed + // numerical solution and the + // continuous solution (described + // by the Solution class + // defined at the top of this + // file), we first need a vector + // that will hold the norm of the + // error on each cell. Since + // accuracy with 16 digits is not + // so important for these + // quantities, we save some memory + // by using float instead of + // double values. + // + // The next step is to use a function + // from the library which computes the + // error in the L2 norm on each cell. + // We have to pass it the DoF handler + // object, the vector holding the + // nodal values of the numerical + // solution, the continuous + // solution as a function object, + // the vector into which it shall + // place the norm of the error on + // each cell, a quadrature rule by + // which this norm shall be + // computed, and the type of norm + // to be used. Here, we use a Gauss + // formula with three points in + // each space direction, and + // compute the L2 norm. + // + // Finally, we want to get the + // global L2 norm. This can of + // course be obtained by summing + // the squares of the norms on each + // cell, and taking the square root + // of that value. This is + // equivalent to taking the l2 + // (lower case l) norm of the + // vector of norms on each cell: Vector difference_per_cell (triangulation.n_active_cells()); VectorTools::integrate_difference (dof_handler, - solution, - Solution(), - difference_per_cell, - QGauss(3), - VectorTools::L2_norm); + solution, + Solution(), + difference_per_cell, + QGauss(3), + VectorTools::L2_norm); const double L2_error = difference_per_cell.l2_norm(); - // By same procedure we get the H1 - // semi-norm. We re-use the - // difference_per_cell vector since it - // is no longer used after computing the - // L2_error variable above. + // By same procedure we get the H1 + // semi-norm. We re-use the + // difference_per_cell vector since it + // is no longer used after computing the + // L2_error variable above. VectorTools::integrate_difference (dof_handler, - solution, - Solution(), - difference_per_cell, - QGauss(3), - VectorTools::H1_seminorm); + solution, + Solution(), + difference_per_cell, + QGauss(3), + VectorTools::H1_seminorm); const double H1_error = difference_per_cell.l2_norm(); - // Finally, we compute the maximum - // norm. Of course, we can't - // actually compute the true maximum, - // but only the maximum at the - // quadrature points. Since this - // depends quite sensitively on the - // quadrature rule being used, and - // since we would like to avoid - // false results due to - // super-convergence effects at - // some points, we use a special - // quadrature rule that is obtained - // by iterating the trapezoidal - // rule five times in each space - // direction. Note that the - // constructor of the QIterated - // class takes a one-dimensional - // quadrature rule and a number - // that tells it how often it shall - // use this rule in each space - // direction. - // - // Using this special quadrature rule, we - // can then try to find the maximal error - // on each cell. 
Finally, we compute the - // global L infinity error from the L - // infinite errors on each cell. Instead of - // summing squares, we now have to take the - // maximum value over all cell-wise - // entries, an operation that is - // conveniently done using the - // Vector::linfty() function: + // Finally, we compute the maximum + // norm. Of course, we can't + // actually compute the true maximum, + // but only the maximum at the + // quadrature points. Since this + // depends quite sensitively on the + // quadrature rule being used, and + // since we would like to avoid + // false results due to + // super-convergence effects at + // some points, we use a special + // quadrature rule that is obtained + // by iterating the trapezoidal + // rule five times in each space + // direction. Note that the + // constructor of the QIterated + // class takes a one-dimensional + // quadrature rule and a number + // that tells it how often it shall + // use this rule in each space + // direction. + // + // Using this special quadrature rule, we + // can then try to find the maximal error + // on each cell. Finally, we compute the + // global L infinity error from the L + // infinite errors on each cell. Instead of + // summing squares, we now have to take the + // maximum value over all cell-wise + // entries, an operation that is + // conveniently done using the + // Vector::linfty() function: const QTrapez<1> q_trapez; const QIterated q_iterated (q_trapez, 5); VectorTools::integrate_difference (dof_handler, - solution, - Solution(), - difference_per_cell, - q_iterated, - VectorTools::Linfty_norm); + solution, + Solution(), + difference_per_cell, + q_iterated, + VectorTools::Linfty_norm); const double Linfty_error = difference_per_cell.linfty_norm(); - // After all these errors have been - // computed, we finally write some - // output. In addition, we add the - // important data to the - // TableHandler by specifying - // the key of the column and the value. - // Note that it is not necessary to - // define column keys beforehand -- it is - // sufficient to just add values, - // and columns will be - // introduced into the table in the - // order values are added the - // first time. + // After all these errors have been + // computed, we finally write some + // output. In addition, we add the + // important data to the + // TableHandler by specifying + // the key of the column and the value. + // Note that it is not necessary to + // define column keys beforehand -- it is + // sufficient to just add values, + // and columns will be + // introduced into the table in the + // order values are added the + // first time. const unsigned int n_active_cells=triangulation.n_active_cells(); const unsigned int n_dofs=dof_handler.n_dofs(); std::cout << "Cycle " << cycle << ':' - << std::endl - << " Number of active cells: " - << n_active_cells - << std::endl - << " Number of degrees of freedom: " - << n_dofs - << std::endl; + << std::endl + << " Number of active cells: " + << n_active_cells + << std::endl + << " Number of degrees of freedom: " + << n_dofs + << std::endl; convergence_table.add_value("cycle", cycle); convergence_table.add_value("cells", n_active_cells); @@ -1361,196 +1361,196 @@ namespace Step7 } - // @sect4{HelmholtzProblem::run} - - // As in previous example programs, - // the run function controls the - // flow of execution. 
The basic - // layout is as in previous examples: - // an outer loop over successively - // refined grids, and in this loop - // first problem setup, assembling - // the linear system, solution, and - // post-processing. - // - // The first task in the main loop is - // creation and refinement of - // grids. This is as in previous - // examples, with the only difference - // that we want to have part of the - // boundary marked as Neumann type, - // rather than Dirichlet. - // - // For this, we will use the - // following convention: Faces - // belonging to Gamma1 will have the - // boundary indicator 0 - // (which is the default, so we don't - // have to set it explicitely), and - // faces belonging to Gamma2 will use - // 1 as boundary - // indicator. To set these values, - // we loop over all cells, then over - // all faces of a given cell, check - // whether it is part of the boundary - // that we want to denote by Gamma2, - // and if so set its boundary - // indicator to 1. For - // the present program, we consider - // the left and bottom boundaries as - // Gamma2. We determine whether a - // face is part of that boundary by - // asking whether the x or y - // coordinates (i.e. vector - // components 0 and 1) of the - // midpoint of a face equals -1, up - // to some small wiggle room that we - // have to give since it is instable - // to compare floating point numbers - // that are subject to round off in - // intermediate computations. - // - // It is worth noting that we have to - // loop over all cells here, not only - // the active ones. The reason is - // that upon refinement, newly - // created faces inherit the boundary - // indicator of their parent face. If - // we now only set the boundary - // indicator for active faces, - // coarsen some cells and refine them - // later on, they will again have the - // boundary indicator of the parent - // cell which we have not modified, - // instead of the one we - // intended. Consequently, we have to - // change the boundary indicators of - // faces of all cells on Gamma2, - // whether they are active or not. - // Alternatively, we could of course - // have done this job on the coarsest - // mesh (i.e. before the first - // refinement step) and refined the - // mesh only after that. + // @sect4{HelmholtzProblem::run} + + // As in previous example programs, + // the run function controls the + // flow of execution. The basic + // layout is as in previous examples: + // an outer loop over successively + // refined grids, and in this loop + // first problem setup, assembling + // the linear system, solution, and + // post-processing. + // + // The first task in the main loop is + // creation and refinement of + // grids. This is as in previous + // examples, with the only difference + // that we want to have part of the + // boundary marked as Neumann type, + // rather than Dirichlet. + // + // For this, we will use the + // following convention: Faces + // belonging to Gamma1 will have the + // boundary indicator 0 + // (which is the default, so we don't + // have to set it explicitely), and + // faces belonging to Gamma2 will use + // 1 as boundary + // indicator. To set these values, + // we loop over all cells, then over + // all faces of a given cell, check + // whether it is part of the boundary + // that we want to denote by Gamma2, + // and if so set its boundary + // indicator to 1. For + // the present program, we consider + // the left and bottom boundaries as + // Gamma2. 
We determine whether a + // face is part of that boundary by + // asking whether the x or y + // coordinates (i.e. vector + // components 0 and 1) of the + // midpoint of a face equals -1, up + // to some small wiggle room that we + // have to give since it is instable + // to compare floating point numbers + // that are subject to round off in + // intermediate computations. + // + // It is worth noting that we have to + // loop over all cells here, not only + // the active ones. The reason is + // that upon refinement, newly + // created faces inherit the boundary + // indicator of their parent face. If + // we now only set the boundary + // indicator for active faces, + // coarsen some cells and refine them + // later on, they will again have the + // boundary indicator of the parent + // cell which we have not modified, + // instead of the one we + // intended. Consequently, we have to + // change the boundary indicators of + // faces of all cells on Gamma2, + // whether they are active or not. + // Alternatively, we could of course + // have done this job on the coarsest + // mesh (i.e. before the first + // refinement step) and refined the + // mesh only after that. template void HelmholtzProblem::run () { for (unsigned int cycle=0; cycle<7; ++cycle) { - if (cycle == 0) - { - GridGenerator::hyper_cube (triangulation, -1, 1); - triangulation.refine_global (1); - - typename Triangulation::cell_iterator - cell = triangulation.begin (), - endc = triangulation.end(); - for (; cell!=endc; ++cell) - for (unsigned int face=0; - face::faces_per_cell; - ++face) - if ((std::fabs(cell->face(face)->center()(0) - (-1)) < 1e-12) - || - (std::fabs(cell->face(face)->center()(1) - (-1)) < 1e-12)) - cell->face(face)->set_boundary_indicator (1); - } - else - refine_grid (); - - - // The next steps are already - // known from previous - // examples. This is mostly the - // basic set-up of every finite - // element program: - setup_system (); - - assemble_system (); - solve (); - - // The last step in this chain - // of function calls is usually - // the evaluation of the computed - // solution for the quantities - // one is interested in. This - // is done in the following - // function. Since the function - // generates output that indicates - // the number of the present - // refinement step, we pass this - // number as an argument. - process_solution (cycle); + if (cycle == 0) + { + GridGenerator::hyper_cube (triangulation, -1, 1); + triangulation.refine_global (1); + + typename Triangulation::cell_iterator + cell = triangulation.begin (), + endc = triangulation.end(); + for (; cell!=endc; ++cell) + for (unsigned int face=0; + face::faces_per_cell; + ++face) + if ((std::fabs(cell->face(face)->center()(0) - (-1)) < 1e-12) + || + (std::fabs(cell->face(face)->center()(1) - (-1)) < 1e-12)) + cell->face(face)->set_boundary_indicator (1); + } + else + refine_grid (); + + + // The next steps are already + // known from previous + // examples. This is mostly the + // basic set-up of every finite + // element program: + setup_system (); + + assemble_system (); + solve (); + + // The last step in this chain + // of function calls is usually + // the evaluation of the computed + // solution for the quantities + // one is interested in. This + // is done in the following + // function. Since the function + // generates output that indicates + // the number of the present + // refinement step, we pass this + // number as an argument. 
+ process_solution (cycle); } - // @sect5{Output of graphical data} - - // After the last iteration we output the - // solution on the finest grid. This is - // done using the following sequence of - // statements which we have already - // discussed in previous examples. The - // first step is to generate a suitable - // filename (called gmv_filename here, - // since we want to output data in GMV - // format; we add the prefix to distinguish - // the filename from that used for other - // output files further down below). Here, - // we augment the name by the mesh - // refinement algorithm, and as above we - // make sure that we abort the program if - // another refinement method is added and - // not handled by the following switch - // statement: + // @sect5{Output of graphical data} + + // After the last iteration we output the + // solution on the finest grid. This is + // done using the following sequence of + // statements which we have already + // discussed in previous examples. The + // first step is to generate a suitable + // filename (called gmv_filename here, + // since we want to output data in GMV + // format; we add the prefix to distinguish + // the filename from that used for other + // output files further down below). Here, + // we augment the name by the mesh + // refinement algorithm, and as above we + // make sure that we abort the program if + // another refinement method is added and + // not handled by the following switch + // statement: std::string gmv_filename; switch (refinement_mode) { - case global_refinement: - gmv_filename = "solution-global"; - break; - case adaptive_refinement: - gmv_filename = "solution-adaptive"; - break; - default: - Assert (false, ExcNotImplemented()); + case global_refinement: + gmv_filename = "solution-global"; + break; + case adaptive_refinement: + gmv_filename = "solution-adaptive"; + break; + default: + Assert (false, ExcNotImplemented()); } - // We augment the filename by a postfix - // denoting the finite element which we - // have used in the computation. To this - // end, the finite element base class - // stores the maximal polynomial degree of - // shape functions in each coordinate - // variable as a variable degree, and - // we use for the switch statement (note - // that the polynomial degree of bilinear - // shape functions is really 2, since they - // contain the term x*y; however, the - // polynomial degree in each coordinate - // variable is still only 1). We again use - // the same defensive programming technique - // to safeguard against the case that the - // polynomial degree has an unexpected - // value, using the Assert (false, - // ExcNotImplemented()) idiom in the - // default branch of the switch statement: + // We augment the filename by a postfix + // denoting the finite element which we + // have used in the computation. To this + // end, the finite element base class + // stores the maximal polynomial degree of + // shape functions in each coordinate + // variable as a variable degree, and + // we use for the switch statement (note + // that the polynomial degree of bilinear + // shape functions is really 2, since they + // contain the term x*y; however, the + // polynomial degree in each coordinate + // variable is still only 1). 
We again use + // the same defensive programming technique + // to safeguard against the case that the + // polynomial degree has an unexpected + // value, using the Assert (false, + // ExcNotImplemented()) idiom in the + // default branch of the switch statement: switch (fe->degree) { - case 1: - gmv_filename += "-q1"; - break; - case 2: - gmv_filename += "-q2"; - break; - - default: - Assert (false, ExcNotImplemented()); + case 1: + gmv_filename += "-q1"; + break; + case 2: + gmv_filename += "-q2"; + break; + + default: + Assert (false, ExcNotImplemented()); } - // Once we have the base name for the - // output file, we add an extension - // appropriate for GMV output, open a file, - // and add the solution vector to the - // object that will do the actual output: + // Once we have the base name for the + // output file, we add an extension + // appropriate for GMV output, open a file, + // and add the solution vector to the + // object that will do the actual output: gmv_filename += ".gmv"; std::ofstream output (gmv_filename.c_str()); @@ -1558,92 +1558,92 @@ namespace Step7 data_out.attach_dof_handler (dof_handler); data_out.add_data_vector (solution, "solution"); - // Now building the intermediate - // format as before is the next - // step. We introduce one more - // feature of deal.II here. The - // background is the following: in - // some of the runs of this - // function, we have used - // biquadratic finite - // elements. However, since almost - // all output formats only support - // bilinear data, the data is - // written only bilinear, and - // information is consequently lost. - // Of course, we can't - // change the format in which - // graphic programs accept their - // inputs, but we can write the - // data differently such that we - // more closely resemble the - // information available in the - // quadratic approximation. We can, - // for example, write each cell as - // four sub-cells with bilinear data - // each, such that we have nine - // data points for each cell in the - // triangulation. The graphic - // programs will, of course, - // display this data still only - // bilinear, but at least we have - // given some more of the - // information we have. - // - // In order to allow writing more - // than one sub-cell per actual - // cell, the build_patches - // function accepts a parameter - // (the default is 1, which is - // why you haven't seen this - // parameter in previous - // examples). This parameter - // denotes into how many sub-cells - // per space direction each cell - // shall be subdivided for - // output. For example, if you give - // 2, this leads to 4 cells in - // 2D and 8 cells in 3D. For - // quadratic elements, two - // sub-cells per space direction is - // obviously the right choice, so - // this is what we choose. In - // general, for elements of - // polynomial order q, we use - // q subdivisions, and the - // order of the elements is - // determined in the same way as - // above. - // - // With the intermediate format - // so generated, we can then actually - // write the graphical output in GMV - // format: + // Now building the intermediate + // format as before is the next + // step. We introduce one more + // feature of deal.II here. The + // background is the following: in + // some of the runs of this + // function, we have used + // biquadratic finite + // elements. However, since almost + // all output formats only support + // bilinear data, the data is + // written only bilinear, and + // information is consequently lost. 
+ // Of course, we can't + // change the format in which + // graphic programs accept their + // inputs, but we can write the + // data differently such that we + // more closely resemble the + // information available in the + // quadratic approximation. We can, + // for example, write each cell as + // four sub-cells with bilinear data + // each, such that we have nine + // data points for each cell in the + // triangulation. The graphic + // programs will, of course, + // display this data still only + // bilinear, but at least we have + // given some more of the + // information we have. + // + // In order to allow writing more + // than one sub-cell per actual + // cell, the build_patches + // function accepts a parameter + // (the default is 1, which is + // why you haven't seen this + // parameter in previous + // examples). This parameter + // denotes into how many sub-cells + // per space direction each cell + // shall be subdivided for + // output. For example, if you give + // 2, this leads to 4 cells in + // 2D and 8 cells in 3D. For + // quadratic elements, two + // sub-cells per space direction is + // obviously the right choice, so + // this is what we choose. In + // general, for elements of + // polynomial order q, we use + // q subdivisions, and the + // order of the elements is + // determined in the same way as + // above. + // + // With the intermediate format + // so generated, we can then actually + // write the graphical output in GMV + // format: data_out.build_patches (fe->degree); data_out.write_gmv (output); - // @sect5{Output of convergence tables} - - // After graphical output, we would also - // like to generate tables from the error - // computations we have done in - // process_solution. There, we have - // filled a table object with the number of - // cells for each refinement step as well - // as the errors in different norms. - - // For a nicer textual output of this data, - // one may want to set the precision with - // which the values will be written upon - // output. We use 3 digits for this, which - // is usually sufficient for error - // norms. By default, data is written in - // fixed point notation. However, for - // columns one would like to see in - // scientific notation another function - // call sets the scientific_flag to - // true, leading to floating point - // representation of numbers. + // @sect5{Output of convergence tables} + + // After graphical output, we would also + // like to generate tables from the error + // computations we have done in + // process_solution. There, we have + // filled a table object with the number of + // cells for each refinement step as well + // as the errors in different norms. + + // For a nicer textual output of this data, + // one may want to set the precision with + // which the values will be written upon + // output. We use 3 digits for this, which + // is usually sufficient for error + // norms. By default, data is written in + // fixed point notation. However, for + // columns one would like to see in + // scientific notation another function + // call sets the scientific_flag to + // true, leading to floating point + // representation of numbers. 
convergence_table.set_precision("L2", 3); convergence_table.set_precision("H1", 3); convergence_table.set_precision("Linfty", 3); @@ -1652,75 +1652,75 @@ namespace Step7 convergence_table.set_scientific("H1", true); convergence_table.set_scientific("Linfty", true); - // For the output of a table into a LaTeX - // file, the default captions of the - // columns are the keys given as argument - // to the add_value functions. To have - // TeX captions that differ from the - // default ones you can specify them by the - // following function calls. - // Note, that `\\' is reduced to - // `\' by the compiler such that the - // real TeX caption is, e.g., - // `$L^\infty$-error'. + // For the output of a table into a LaTeX + // file, the default captions of the + // columns are the keys given as argument + // to the add_value functions. To have + // TeX captions that differ from the + // default ones you can specify them by the + // following function calls. + // Note, that `\\' is reduced to + // `\' by the compiler such that the + // real TeX caption is, e.g., + // `$L^\infty$-error'. convergence_table.set_tex_caption("cells", "\\# cells"); convergence_table.set_tex_caption("dofs", "\\# dofs"); convergence_table.set_tex_caption("L2", "$L^2$-error"); convergence_table.set_tex_caption("H1", "$H^1$-error"); convergence_table.set_tex_caption("Linfty", "$L^\\infty$-error"); - // Finally, the default LaTeX format for - // each column of the table is `c' - // (centered). To specify a different - // (e.g. `right') one, the following - // function may be used: + // Finally, the default LaTeX format for + // each column of the table is `c' + // (centered). To specify a different + // (e.g. `right') one, the following + // function may be used: convergence_table.set_tex_format("cells", "r"); convergence_table.set_tex_format("dofs", "r"); - // After this, we can finally write the - // table to the standard output stream - // std::cout (after one extra empty - // line, to make things look - // prettier). Note, that the output in text - // format is quite simple and that - // captions may not be printed directly - // above the specific columns. + // After this, we can finally write the + // table to the standard output stream + // std::cout (after one extra empty + // line, to make things look + // prettier). Note, that the output in text + // format is quite simple and that + // captions may not be printed directly + // above the specific columns. std::cout << std::endl; convergence_table.write_text(std::cout); - // The table can also be written - // into a LaTeX file. The (nicely) - // formatted table can be viewed at - // after calling `latex filename' - // and e.g. `xdvi filename', where - // filename is the name of the file - // to which we will write output - // now. We construct the file name - // in the same way as before, but - // with a different prefix "error": + // The table can also be written + // into a LaTeX file. The (nicely) + // formatted table can be viewed at + // after calling `latex filename' + // and e.g. `xdvi filename', where + // filename is the name of the file + // to which we will write output + // now. 
We construct the file name + // in the same way as before, but + // with a different prefix "error": std::string error_filename = "error"; switch (refinement_mode) { - case global_refinement: - error_filename += "-global"; - break; - case adaptive_refinement: - error_filename += "-adaptive"; - break; - default: - Assert (false, ExcNotImplemented()); + case global_refinement: + error_filename += "-global"; + break; + case adaptive_refinement: + error_filename += "-adaptive"; + break; + default: + Assert (false, ExcNotImplemented()); } switch (fe->degree) { - case 1: - error_filename += "-q1"; - break; - case 2: - error_filename += "-q2"; - break; - default: - Assert (false, ExcNotImplemented()); + case 1: + error_filename += "-q1"; + break; + case 2: + error_filename += "-q2"; + break; + default: + Assert (false, ExcNotImplemented()); } error_filename += ".tex"; @@ -1729,156 +1729,156 @@ namespace Step7 convergence_table.write_tex(error_table_file); - // @sect5{Further table manipulations} - - // In case of global refinement, it - // might be of interest to also - // output the convergence - // rates. This may be done by the - // functionality the - // ConvergenceTable offers over - // the regular - // TableHandler. However, we do - // it only for global refinement, - // since for adaptive refinement - // the determination of something - // like an order of convergence is - // somewhat more involved. While we - // are at it, we also show a few - // other things that can be done - // with tables. + // @sect5{Further table manipulations} + + // In case of global refinement, it + // might be of interest to also + // output the convergence + // rates. This may be done by the + // functionality the + // ConvergenceTable offers over + // the regular + // TableHandler. However, we do + // it only for global refinement, + // since for adaptive refinement + // the determination of something + // like an order of convergence is + // somewhat more involved. While we + // are at it, we also show a few + // other things that can be done + // with tables. if (refinement_mode==global_refinement) { - // The first thing is that one - // can group individual columns - // together to form so-called - // super columns. Essentially, - // the columns remain the same, - // but the ones that were - // grouped together will get a - // caption running across all - // columns in a group. For - // example, let's merge the - // "cycle" and "cells" columns - // into a super column named "n - // cells": - convergence_table.add_column_to_supercolumn("cycle", "n cells"); - convergence_table.add_column_to_supercolumn("cells", "n cells"); - - // Next, it isn't necessary to - // always output all columns, - // or in the order in which - // they were originally added - // during the run. Selecting - // and re-ordering the columns - // works as follows (note that - // this includes super - // columns): - std::vector new_order; - new_order.push_back("n cells"); - new_order.push_back("H1"); - new_order.push_back("L2"); - convergence_table.set_column_order (new_order); - - // For everything that happened - // to the ConvergenceTable - // until this point, it would - // have been sufficient to use - // a simple - // TableHandler. Indeed, the - // ConvergenceTable is - // derived from the - // TableHandler but it offers - // the additional functionality - // of automatically evaluating - // convergence rates. 
For - // example, here is how we can - // let the table compute - // reduction and convergence - // rates (convergence rates are - // the binary logarithm of the - // reduction rate): - convergence_table - .evaluate_convergence_rates("L2", ConvergenceTable::reduction_rate); - convergence_table - .evaluate_convergence_rates("L2", ConvergenceTable::reduction_rate_log2); - convergence_table - .evaluate_convergence_rates("H1", ConvergenceTable::reduction_rate_log2); - // Each of these - // function calls produces an - // additional column that is - // merged with the original - // column (in our example the - // `L2' and the `H1' column) to - // a supercolumn. - - // Finally, we want to write - // this convergence chart - // again, first to the screen - // and then, in LaTeX format, - // to disk. The filename is - // again constructed as above. - std::cout << std::endl; - convergence_table.write_text(std::cout); - - std::string conv_filename = "convergence"; - switch (refinement_mode) - { - case global_refinement: - conv_filename += "-global"; - break; - case adaptive_refinement: - conv_filename += "-adaptive"; - break; - default: - Assert (false, ExcNotImplemented()); - } - switch (fe->degree) - { - case 1: - conv_filename += "-q1"; - break; - case 2: - conv_filename += "-q2"; - break; - default: - Assert (false, ExcNotImplemented()); - } - conv_filename += ".tex"; - - std::ofstream table_file(conv_filename.c_str()); - convergence_table.write_tex(table_file); + // The first thing is that one + // can group individual columns + // together to form so-called + // super columns. Essentially, + // the columns remain the same, + // but the ones that were + // grouped together will get a + // caption running across all + // columns in a group. For + // example, let's merge the + // "cycle" and "cells" columns + // into a super column named "n + // cells": + convergence_table.add_column_to_supercolumn("cycle", "n cells"); + convergence_table.add_column_to_supercolumn("cells", "n cells"); + + // Next, it isn't necessary to + // always output all columns, + // or in the order in which + // they were originally added + // during the run. Selecting + // and re-ordering the columns + // works as follows (note that + // this includes super + // columns): + std::vector new_order; + new_order.push_back("n cells"); + new_order.push_back("H1"); + new_order.push_back("L2"); + convergence_table.set_column_order (new_order); + + // For everything that happened + // to the ConvergenceTable + // until this point, it would + // have been sufficient to use + // a simple + // TableHandler. Indeed, the + // ConvergenceTable is + // derived from the + // TableHandler but it offers + // the additional functionality + // of automatically evaluating + // convergence rates. For + // example, here is how we can + // let the table compute + // reduction and convergence + // rates (convergence rates are + // the binary logarithm of the + // reduction rate): + convergence_table + .evaluate_convergence_rates("L2", ConvergenceTable::reduction_rate); + convergence_table + .evaluate_convergence_rates("L2", ConvergenceTable::reduction_rate_log2); + convergence_table + .evaluate_convergence_rates("H1", ConvergenceTable::reduction_rate_log2); + // Each of these + // function calls produces an + // additional column that is + // merged with the original + // column (in our example the + // `L2' and the `H1' column) to + // a supercolumn. 
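In formulas, with e_k denoting the value recorded in the respective column at refinement cycle k, the two evaluations requested above add columns containing (a sketch of the "reduction rate" and its binary logarithm mentioned in the comment):

    \text{reduction rate}:\quad \frac{e_{k-1}}{e_k},
    \qquad
    \text{convergence rate}:\quad \log_2\frac{e_{k-1}}{e_k}.

Since global refinement halves the mesh size in every cycle, one expects the latter to approach p+1 for the L^2 error and p for the H^1 seminorm error of degree-p elements, provided the solution is smooth.
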
+ + // Finally, we want to write + // this convergence chart + // again, first to the screen + // and then, in LaTeX format, + // to disk. The filename is + // again constructed as above. + std::cout << std::endl; + convergence_table.write_text(std::cout); + + std::string conv_filename = "convergence"; + switch (refinement_mode) + { + case global_refinement: + conv_filename += "-global"; + break; + case adaptive_refinement: + conv_filename += "-adaptive"; + break; + default: + Assert (false, ExcNotImplemented()); + } + switch (fe->degree) + { + case 1: + conv_filename += "-q1"; + break; + case 2: + conv_filename += "-q2"; + break; + default: + Assert (false, ExcNotImplemented()); + } + conv_filename += ".tex"; + + std::ofstream table_file(conv_filename.c_str()); + convergence_table.write_tex(table_file); } } - // The final step before going to - // main() is then to close the - // namespace Step7 into which - // we have put everything we needed for - // this program: + // The final step before going to + // main() is then to close the + // namespace Step7 into which + // we have put everything we needed for + // this program: } - // @sect3{Main function} - - // The main function is mostly as - // before. The only difference is - // that we solve three times, once - // for Q1 and adaptive refinement, - // once for Q1 elements and global - // refinement, and once for Q2 - // elements and global refinement. - // - // Since we instantiate several - // template classes below for two - // space dimensions, we make this - // more generic by declaring a - // constant at the beginning of the - // function denoting the number of - // space dimensions. If you want to - // run the program in 1d or 2d, you - // will then only have to change this - // one instance, rather than all uses - // below: + // @sect3{Main function} + + // The main function is mostly as + // before. The only difference is + // that we solve three times, once + // for Q1 and adaptive refinement, + // once for Q1 elements and global + // refinement, and once for Q2 + // elements and global refinement. + // + // Since we instantiate several + // template classes below for two + // space dimensions, we make this + // more generic by declaring a + // constant at the beginning of the + // function denoting the number of + // space dimensions. If you want to + // run the program in 1d or 2d, you + // will then only have to change this + // one instance, rather than all uses + // below: int main () { const unsigned int dim = 2; @@ -1890,87 +1890,87 @@ int main () deallog.depth_console (0); - // Now for the three calls to - // the main class. Each call is - // blocked into curly braces in - // order to destroy the - // respective objects (i.e. the - // finite element and the - // HelmholtzProblem object) - // at the end of the block and - // before we go to the next - // run. This avoids conflicts - // with variable names, and - // also makes sure that memory - // is released immediately - // after one of the three runs - // has finished, and not only - // at the end of the try - // block. + // Now for the three calls to + // the main class. Each call is + // blocked into curly braces in + // order to destroy the + // respective objects (i.e. the + // finite element and the + // HelmholtzProblem object) + // at the end of the block and + // before we go to the next + // run. 
This avoids conflicts + // with variable names, and + // also makes sure that memory + // is released immediately + // after one of the three runs + // has finished, and not only + // at the end of the try + // block. { - std::cout << "Solving with Q1 elements, adaptive refinement" << std::endl - << "=============================================" << std::endl - << std::endl; + std::cout << "Solving with Q1 elements, adaptive refinement" << std::endl + << "=============================================" << std::endl + << std::endl; - FE_Q fe(1); - HelmholtzProblem - helmholtz_problem_2d (fe, HelmholtzProblem::adaptive_refinement); + FE_Q fe(1); + HelmholtzProblem + helmholtz_problem_2d (fe, HelmholtzProblem::adaptive_refinement); - helmholtz_problem_2d.run (); + helmholtz_problem_2d.run (); - std::cout << std::endl; + std::cout << std::endl; } { - std::cout << "Solving with Q1 elements, global refinement" << std::endl - << "===========================================" << std::endl - << std::endl; + std::cout << "Solving with Q1 elements, global refinement" << std::endl + << "===========================================" << std::endl + << std::endl; - FE_Q fe(1); - HelmholtzProblem - helmholtz_problem_2d (fe, HelmholtzProblem::global_refinement); + FE_Q fe(1); + HelmholtzProblem + helmholtz_problem_2d (fe, HelmholtzProblem::global_refinement); - helmholtz_problem_2d.run (); + helmholtz_problem_2d.run (); - std::cout << std::endl; + std::cout << std::endl; } { - std::cout << "Solving with Q2 elements, global refinement" << std::endl - << "===========================================" << std::endl - << std::endl; + std::cout << "Solving with Q2 elements, global refinement" << std::endl + << "===========================================" << std::endl + << std::endl; - FE_Q fe(2); - HelmholtzProblem - helmholtz_problem_2d (fe, HelmholtzProblem::global_refinement); + FE_Q fe(2); + HelmholtzProblem + helmholtz_problem_2d (fe, HelmholtzProblem::global_refinement); - helmholtz_problem_2d.run (); + helmholtz_problem_2d.run (); - std::cout << std::endl; + std::cout << std::endl; } } catch (std::exception &exc) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Exception on processing: " << std::endl - << exc.what() << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << exc.what() << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } catch (...) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Unknown exception!" << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } @@ -1978,22 +1978,22 @@ int main () } - // What comes here is basically just - // an annoyance that you can ignore - // if you are not working on an AIX - // system: on this system, static - // member variables are not - // instantiated automatically when - // their enclosing class is - // instantiated. This leads to linker - // errors if these variables are not - // explicitly instantiated. 
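A stripped-down illustration of the explicit-instantiation workaround discussed here; the class and member names mirror the program, the value is arbitrary, and the final template line is the one that keeps the AIX linker happy:

#include <iostream>

template <int dim>
struct SolutionBase
{
  static const double width;
};

// Generic out-of-class definition of the static member ...
template <int dim>
const double SolutionBase<dim>::width = 0.125;

// ... and the explicit instantiation for the dimension actually used.
// On standards-conforming compilers this is redundant but harmless.
template const double SolutionBase<2>::width;

int main ()
{
  std::cout << SolutionBase<2>::width << std::endl;
}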
As said, - // this is, strictly C++ standards - // speaking, not necessary, but it - // doesn't hurt either on other - // systems, and since it is necessary - // to get things running on AIX, why - // not do it: + // What comes here is basically just + // an annoyance that you can ignore + // if you are not working on an AIX + // system: on this system, static + // member variables are not + // instantiated automatically when + // their enclosing class is + // instantiated. This leads to linker + // errors if these variables are not + // explicitly instantiated. As said, + // this is, strictly C++ standards + // speaking, not necessary, but it + // doesn't hurt either on other + // systems, and since it is necessary + // to get things running on AIX, why + // not do it: namespace Step7 { template const double SolutionBase<2>::width; diff --git a/deal.II/examples/step-8/step-8.cc b/deal.II/examples/step-8/step-8.cc index 4f05ba00f7..37c3d8bdf7 100644 --- a/deal.II/examples/step-8/step-8.cc +++ b/deal.II/examples/step-8/step-8.cc @@ -11,9 +11,9 @@ // @sect3{Include files} - // As usual, the first few include - // files are already known, so we - // will not comment on them further. + // As usual, the first few include + // files are already known, so we + // will not comment on them further. #include #include #include @@ -38,54 +38,54 @@ #include #include - // In this example, we need - // vector-valued finite elements. The - // support for these can be found in - // the following include file: + // In this example, we need + // vector-valued finite elements. The + // support for these can be found in + // the following include file: #include - // We will compose the vector-valued - // finite elements from regular Q1 - // elements which can be found here, - // as usual: + // We will compose the vector-valued + // finite elements from regular Q1 + // elements which can be found here, + // as usual: #include - // This again is C++: + // This again is C++: #include #include - // The last step is as in previous - // programs. In particular, just like in - // step-7, we pack everything that's specific - // to this program into a namespace of its - // own. + // The last step is as in previous + // programs. In particular, just like in + // step-7, we pack everything that's specific + // to this program into a namespace of its + // own. namespace Step8 { using namespace dealii; - // @sect3{The ElasticProblem class template} - - // The main class is, except for its - // name, almost unchanged with - // respect to the step-6 example. - // - // The only change is the use of a - // different class for the fe - // variable: Instead of a concrete - // finite element class such as - // FE_Q, we now use a more - // generic one, FESystem. In - // fact, FESystem is not really a - // finite element itself in that it - // does not implement shape functions - // of its own. Rather, it is a class - // that can be used to stack several - // other elements together to form - // one vector-valued finite - // element. In our case, we will - // compose the vector-valued element - // of FE_Q(1) objects, as shown - // below in the constructor of this - // class. + // @sect3{The ElasticProblem class template} + + // The main class is, except for its + // name, almost unchanged with + // respect to the step-6 example. + // + // The only change is the use of a + // different class for the fe + // variable: Instead of a concrete + // finite element class such as + // FE_Q, we now use a more + // generic one, FESystem. 
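As a small standalone sketch of what such a composed element looks like, the following program builds a vector-valued element from two copies of a scalar Q1 element, which is the same pattern the ElasticProblem constructor further below uses, and prints its basic data:

#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_system.h>

#include <iostream>

int main ()
{
  // A vector-valued element made of dim = 2 copies of a scalar Q1 element.
  dealii::FESystem<2> fe (dealii::FE_Q<2>(1), 2);

  std::cout << fe.get_name ()                          // the composed name
            << " with " << fe.n_components ()          // 2 components
            << " components and " << fe.dofs_per_cell  // 8 DoFs per cell
            << " degrees of freedom per cell" << std::endl;
}

The printed name is something like FESystem<2>[FE_Q<2>(1)^2], which makes the stacking explicit.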
In + // fact, FESystem is not really a + // finite element itself in that it + // does not implement shape functions + // of its own. Rather, it is a class + // that can be used to stack several + // other elements together to form + // one vector-valued finite + // element. In our case, we will + // compose the vector-valued element + // of FE_Q(1) objects, as shown + // below in the constructor of this + // class. template class ElasticProblem { @@ -116,185 +116,185 @@ namespace Step8 }; - // @sect3{Right hand side values} - - // Before going over to the - // implementation of the main class, - // we declare and define the class - // which describes the right hand - // side. This time, the right hand - // side is vector-valued, as is the - // solution, so we will describe the - // changes required for this in some - // more detail. - // - // The first thing is that - // vector-valued functions have to - // have a constructor, since they - // need to pass down to the base - // class of how many components the - // function consists. The default - // value in the constructor of the - // base class is one (i.e.: a scalar - // function), which is why we did not - // need not define a constructor for - // the scalar function used in - // previous programs. + // @sect3{Right hand side values} + + // Before going over to the + // implementation of the main class, + // we declare and define the class + // which describes the right hand + // side. This time, the right hand + // side is vector-valued, as is the + // solution, so we will describe the + // changes required for this in some + // more detail. + // + // The first thing is that + // vector-valued functions have to + // have a constructor, since they + // need to pass down to the base + // class of how many components the + // function consists. The default + // value in the constructor of the + // base class is one (i.e.: a scalar + // function), which is why we did not + // need not define a constructor for + // the scalar function used in + // previous programs. template class RightHandSide : public Function { public: RightHandSide (); - // The next change is that we - // want a replacement for the - // value function of the - // previous examples. There, a - // second parameter component - // was given, which denoted which - // component was requested. Here, - // we implement a function that - // returns the whole vector of - // values at the given place at - // once, in the second argument - // of the function. The obvious - // name for such a replacement - // function is vector_value. - // - // Secondly, in analogy to the - // value_list function, there - // is a function - // vector_value_list, which - // returns the values of the - // vector-valued function at - // several points at once: + // The next change is that we + // want a replacement for the + // value function of the + // previous examples. There, a + // second parameter component + // was given, which denoted which + // component was requested. Here, + // we implement a function that + // returns the whole vector of + // values at the given place at + // once, in the second argument + // of the function. The obvious + // name for such a replacement + // function is vector_value. 
+ // + // Secondly, in analogy to the + // value_list function, there + // is a function + // vector_value_list, which + // returns the values of the + // vector-valued function at + // several points at once: virtual void vector_value (const Point &p, - Vector &values) const; + Vector &values) const; virtual void vector_value_list (const std::vector > &points, - std::vector > &value_list) const; + std::vector > &value_list) const; }; - // This is the constructor of the - // right hand side class. As said - // above, it only passes down to the - // base class the number of - // components, which is dim in - // the present case (one force - // component in each of the dim - // space directions). - // - // Some people would have moved the - // definition of such a short - // function right into the class - // declaration. We do not do that, as - // a matter of style: the deal.II - // style guides require that class - // declarations contain only - // declarations, and that definitions - // are always to be found - // outside. This is, obviously, as - // much as matter of taste as - // indentation, but we try to be - // consistent in this direction. + // This is the constructor of the + // right hand side class. As said + // above, it only passes down to the + // base class the number of + // components, which is dim in + // the present case (one force + // component in each of the dim + // space directions). + // + // Some people would have moved the + // definition of such a short + // function right into the class + // declaration. We do not do that, as + // a matter of style: the deal.II + // style guides require that class + // declarations contain only + // declarations, and that definitions + // are always to be found + // outside. This is, obviously, as + // much as matter of taste as + // indentation, but we try to be + // consistent in this direction. template RightHandSide::RightHandSide () - : - Function (dim) + : + Function (dim) {} - // Next the function that returns - // the whole vector of values at the - // point p at once. - // - // To prevent cases where the return - // vector has not previously been set - // to the right size we test for this - // case and otherwise throw an - // exception at the beginning of the - // function. Note that enforcing that - // output arguments already have the - // correct size is a convention in - // deal.II, and enforced almost - // everywhere. The reason is that we - // would otherwise have to check at - // the beginning of the function and - // possibly change the size of the - // output vector. This is expensive, - // and would almost always be - // unnecessary (the first call to the - // function would set the vector to - // the right size, and subsequent - // calls would only have to do - // redundant checks). In addition, - // checking and possibly resizing the - // vector is an operation that can - // not be removed if we can't rely on - // the assumption that the vector - // already has the correct size; this - // is in contract to the Assert - // call that is completely removed if - // the program is compiled in - // optimized mode. - // - // Likewise, if by some accident - // someone tried to compile and run - // the program in only one space - // dimension (in which the elastic - // equations do not make much sense - // since they reduce to the ordinary - // Laplace equation), we terminate - // the program in the second - // assertion. The program will work - // just fine in 3d, however. 
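Condensed into a hypothetical miniature (invented class name, a single force circle instead of two), the kind of vector-valued Function described here looks as follows:

#include <deal.II/base/function.h>
#include <deal.II/base/point.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

template <int dim>
class SketchRightHandSide : public Function<dim>
{
  public:
    // Tell the base class that this function has dim components.
    SketchRightHandSide () : Function<dim> (dim) {}

    virtual void vector_value (const Point<dim> &p,
                               Vector<double>   &values) const
    {
      // By convention the output vector must already have the right size:
      Assert (values.size() == dim,
              ExcDimensionMismatch (values.size(), dim));
      Assert (dim >= 2, ExcNotImplemented());

      values = 0;
      // Unit force in x-direction inside a circle of radius 0.2 around
      // the origin, zero everywhere else and in all other components.
      if (p.square() < 0.2*0.2)
        values(0) = 1;
    }
};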
+ // Next the function that returns + // the whole vector of values at the + // point p at once. + // + // To prevent cases where the return + // vector has not previously been set + // to the right size we test for this + // case and otherwise throw an + // exception at the beginning of the + // function. Note that enforcing that + // output arguments already have the + // correct size is a convention in + // deal.II, and enforced almost + // everywhere. The reason is that we + // would otherwise have to check at + // the beginning of the function and + // possibly change the size of the + // output vector. This is expensive, + // and would almost always be + // unnecessary (the first call to the + // function would set the vector to + // the right size, and subsequent + // calls would only have to do + // redundant checks). In addition, + // checking and possibly resizing the + // vector is an operation that can + // not be removed if we can't rely on + // the assumption that the vector + // already has the correct size; this + // is in contract to the Assert + // call that is completely removed if + // the program is compiled in + // optimized mode. + // + // Likewise, if by some accident + // someone tried to compile and run + // the program in only one space + // dimension (in which the elastic + // equations do not make much sense + // since they reduce to the ordinary + // Laplace equation), we terminate + // the program in the second + // assertion. The program will work + // just fine in 3d, however. template inline void RightHandSide::vector_value (const Point &p, - Vector &values) const + Vector &values) const { Assert (values.size() == dim, - ExcDimensionMismatch (values.size(), dim)); + ExcDimensionMismatch (values.size(), dim)); Assert (dim >= 2, ExcNotImplemented()); - // The rest of the function - // implements computing force - // values. We will use a constant - // (unit) force in x-direction - // located in two little circles - // (or spheres, in 3d) around - // points (0.5,0) and (-0.5,0), and - // y-force in an area around the - // origin; in 3d, the z-component - // of these centers is zero as - // well. - // - // For this, let us first define - // two objects that denote the - // centers of these areas. Note - // that upon construction of the - // Point objects, all - // components are set to zero. + // The rest of the function + // implements computing force + // values. We will use a constant + // (unit) force in x-direction + // located in two little circles + // (or spheres, in 3d) around + // points (0.5,0) and (-0.5,0), and + // y-force in an area around the + // origin; in 3d, the z-component + // of these centers is zero as + // well. + // + // For this, let us first define + // two objects that denote the + // centers of these areas. Note + // that upon construction of the + // Point objects, all + // components are set to zero. 
Point point_1, point_2; point_1(0) = 0.5; point_2(0) = -0.5; - // If now the point p is in a - // circle (sphere) of radius 0.2 - // around one of these points, then - // set the force in x-direction to - // one, otherwise to zero: + // If now the point p is in a + // circle (sphere) of radius 0.2 + // around one of these points, then + // set the force in x-direction to + // one, otherwise to zero: if (((p-point_1).square() < 0.2*0.2) || - ((p-point_2).square() < 0.2*0.2)) + ((p-point_2).square() < 0.2*0.2)) values(0) = 1; else values(0) = 0; - // Likewise, if p is in the - // vicinity of the origin, then set - // the y-force to 1, otherwise to - // zero: + // Likewise, if p is in the + // vicinity of the origin, then set + // the y-force to 1, otherwise to + // zero: if (p.square() < 0.2*0.2) values(1) = 1; else @@ -303,155 +303,155 @@ namespace Step8 - // Now, this is the function of the - // right hand side class that returns - // the values at several points at - // once. The function starts out with - // checking that the number of input - // and output arguments is equal (the - // sizes of the individual output - // vectors will be checked in the - // function that we call further down - // below). Next, we define an - // abbreviation for the number of - // points which we shall work on, to - // make some things simpler below. + // Now, this is the function of the + // right hand side class that returns + // the values at several points at + // once. The function starts out with + // checking that the number of input + // and output arguments is equal (the + // sizes of the individual output + // vectors will be checked in the + // function that we call further down + // below). Next, we define an + // abbreviation for the number of + // points which we shall work on, to + // make some things simpler below. template void RightHandSide::vector_value_list (const std::vector > &points, - std::vector > &value_list) const + std::vector > &value_list) const { Assert (value_list.size() == points.size(), - ExcDimensionMismatch (value_list.size(), points.size())); + ExcDimensionMismatch (value_list.size(), points.size())); const unsigned int n_points = points.size(); - // Finally we treat each of the - // points. In one of the previous - // examples, we have explained why - // the - // value_list/vector_value_list - // function had been introduced: to - // prevent us from calling virtual - // functions too frequently. On the - // other hand, we now need to - // implement the same function - // twice, which can lead to - // confusion if one function is - // changed but the other is - // not. - // - // We can prevent this situation by - // calling - // RightHandSide::vector_value - // on each point in the input - // list. Note that by giving the - // full name of the function, - // including the class name, we - // instruct the compiler to - // explicitly call this function, - // and not to use the virtual - // function call mechanism that - // would be used if we had just - // called vector_value. This is - // important, since the compiler - // generally can't make any - // assumptions which function is - // called when using virtual - // functions, and it therefore - // can't inline the called function - // into the site of the call. 
On - // the contrary, here we give the - // fully qualified name, which - // bypasses the virtual function - // call, and consequently the - // compiler knows exactly which - // function is called and will - // inline above function into the - // present location. (Note that we - // have declared the - // vector_value function above - // inline, though modern - // compilers are also able to - // inline functions even if they - // have not been declared as - // inline). - // - // It is worth noting why we go to - // such length explaining what we - // do. Using this construct, we - // manage to avoid any - // inconsistency: if we want to - // change the right hand side - // function, it would be difficult - // to always remember that we - // always have to change two - // functions in the same way. Using - // this forwarding mechanism, we - // only have to change a single - // place (the vector_value - // function), and the second place - // (the vector_value_list - // function) will always be - // consistent with it. At the same - // time, using virtual function - // call bypassing, the code is no - // less efficient than if we had - // written it twice in the first - // place: + // Finally we treat each of the + // points. In one of the previous + // examples, we have explained why + // the + // value_list/vector_value_list + // function had been introduced: to + // prevent us from calling virtual + // functions too frequently. On the + // other hand, we now need to + // implement the same function + // twice, which can lead to + // confusion if one function is + // changed but the other is + // not. + // + // We can prevent this situation by + // calling + // RightHandSide::vector_value + // on each point in the input + // list. Note that by giving the + // full name of the function, + // including the class name, we + // instruct the compiler to + // explicitly call this function, + // and not to use the virtual + // function call mechanism that + // would be used if we had just + // called vector_value. This is + // important, since the compiler + // generally can't make any + // assumptions which function is + // called when using virtual + // functions, and it therefore + // can't inline the called function + // into the site of the call. On + // the contrary, here we give the + // fully qualified name, which + // bypasses the virtual function + // call, and consequently the + // compiler knows exactly which + // function is called and will + // inline above function into the + // present location. (Note that we + // have declared the + // vector_value function above + // inline, though modern + // compilers are also able to + // inline functions even if they + // have not been declared as + // inline). + // + // It is worth noting why we go to + // such length explaining what we + // do. Using this construct, we + // manage to avoid any + // inconsistency: if we want to + // change the right hand side + // function, it would be difficult + // to always remember that we + // always have to change two + // functions in the same way. Using + // this forwarding mechanism, we + // only have to change a single + // place (the vector_value + // function), and the second place + // (the vector_value_list + // function) will always be + // consistent with it. 
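The forwarding idiom described in this comment is not specific to deal.II; reduced to plain C++ with invented class names, it is simply this:

#include <cstdio>
#include <cstddef>
#include <vector>

struct Base
{
  virtual ~Base () {}
  virtual double value (const double x) const = 0;

  // Generic loop: one virtual call per point.
  virtual void value_list (const std::vector<double> &x,
                           std::vector<double>       &y) const
  {
    for (std::size_t i = 0; i < x.size(); ++i)
      y[i] = value (x[i]);
  }
};

struct Derived : public Base
{
  virtual double value (const double x) const { return 2. * x; }

  // Re-implemented loop that forwards to the qualified name: no virtual
  // dispatch inside the loop, and it can never get out of sync with the
  // single implementation in value().
  virtual void value_list (const std::vector<double> &x,
                           std::vector<double>       &y) const
  {
    for (std::size_t i = 0; i < x.size(); ++i)
      y[i] = Derived::value (x[i]);
  }
};

int main ()
{
  const std::vector<double> x (3, 1.5);
  std::vector<double>       y (x.size());   // output sized by the caller
  Derived d;
  d.value_list (x, y);
  std::printf ("%g %g %g\n", y[0], y[1], y[2]);
}

Because the qualified call is resolved at compile time, the compiler is free to inline it, while the unqualified loop in the base class still works for any derived class that does not bother to re-implement value_list.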
At the same + // time, using virtual function + // call bypassing, the code is no + // less efficient than if we had + // written it twice in the first + // place: for (unsigned int p=0; p::vector_value (points[p], - value_list[p]); + value_list[p]); } - // @sect3{The ElasticProblem class implementation} - - // @sect4{ElasticProblem::ElasticProblem} - - // Following is the constructor of - // the main class. As said before, we - // would like to construct a - // vector-valued finite element that - // is composed of several scalar - // finite elements (i.e., we want to - // build the vector-valued element so - // that each of its vector components - // consists of the shape functions of - // a scalar element). Of course, the - // number of scalar finite elements we - // would like to stack together - // equals the number of components - // the solution function has, which - // is dim since we consider - // displacement in each space - // direction. The FESystem class - // can handle this: we pass it the - // finite element of which we would - // like to compose the system of, and - // how often it shall be repeated: + // @sect3{The ElasticProblem class implementation} + + // @sect4{ElasticProblem::ElasticProblem} + + // Following is the constructor of + // the main class. As said before, we + // would like to construct a + // vector-valued finite element that + // is composed of several scalar + // finite elements (i.e., we want to + // build the vector-valued element so + // that each of its vector components + // consists of the shape functions of + // a scalar element). Of course, the + // number of scalar finite elements we + // would like to stack together + // equals the number of components + // the solution function has, which + // is dim since we consider + // displacement in each space + // direction. The FESystem class + // can handle this: we pass it the + // finite element of which we would + // like to compose the system of, and + // how often it shall be repeated: template ElasticProblem::ElasticProblem () - : - dof_handler (triangulation), - fe (FE_Q(1), dim) + : + dof_handler (triangulation), + fe (FE_Q(1), dim) {} - // In fact, the FESystem class - // has several more constructors - // which can perform more complex - // operations than just stacking - // together several scalar finite - // elements of the same type into - // one; we will get to know these - // possibilities in later examples. + // In fact, the FESystem class + // has several more constructors + // which can perform more complex + // operations than just stacking + // together several scalar finite + // elements of the same type into + // one; we will get to know these + // possibilities in later examples. - // @sect4{ElasticProblem::~ElasticProblem} + // @sect4{ElasticProblem::~ElasticProblem} - // The destructor, on the other hand, - // is exactly as in step-6: + // The destructor, on the other hand, + // is exactly as in step-6: template ElasticProblem::~ElasticProblem () { @@ -459,40 +459,40 @@ namespace Step8 } - // @sect4{ElasticProblem::setup_system} - - // Setting up the system of equations - // is identitical to the function - // used in the step-6 example. The - // DoFHandler class and all other - // classes used here are fully aware - // that the finite element we want to - // use is vector-valued, and take - // care of the vector-valuedness of - // the finite element themselves. 
(In - // fact, they do not, but this does - // not need to bother you: since they - // only need to know how many degrees - // of freedom there are per vertex, - // line and cell, and they do not ask - // what they represent, i.e. whether - // the finite element under - // consideration is vector-valued or - // whether it is, for example, a - // scalar Hermite element with - // several degrees of freedom on each - // vertex). + // @sect4{ElasticProblem::setup_system} + + // Setting up the system of equations + // is identitical to the function + // used in the step-6 example. The + // DoFHandler class and all other + // classes used here are fully aware + // that the finite element we want to + // use is vector-valued, and take + // care of the vector-valuedness of + // the finite element themselves. (In + // fact, they do not, but this does + // not need to bother you: since they + // only need to know how many degrees + // of freedom there are per vertex, + // line and cell, and they do not ask + // what they represent, i.e. whether + // the finite element under + // consideration is vector-valued or + // whether it is, for example, a + // scalar Hermite element with + // several degrees of freedom on each + // vertex). template void ElasticProblem::setup_system () { dof_handler.distribute_dofs (fe); hanging_node_constraints.clear (); DoFTools::make_hanging_node_constraints (dof_handler, - hanging_node_constraints); + hanging_node_constraints); hanging_node_constraints.close (); sparsity_pattern.reinit (dof_handler.n_dofs(), - dof_handler.n_dofs(), - dof_handler.max_couplings_between_dofs()); + dof_handler.n_dofs(), + dof_handler.max_couplings_between_dofs()); DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); hanging_node_constraints.condense (sparsity_pattern); @@ -506,45 +506,45 @@ namespace Step8 } - // @sect4{ElasticProblem::assemble_system} - - // The big changes in this program - // are in the creation of matrix and - // right hand side, since they are - // problem-dependent. We will go - // through that process step-by-step, - // since it is a bit more complicated - // than in previous examples. - // - // The first parts of this function - // are the same as before, however: - // setting up a suitable quadrature - // formula, initializing an - // FEValues object for the - // (vector-valued) finite element we - // use as well as the quadrature - // object, and declaring a number of - // auxiliary arrays. In addition, we - // declare the ever same two - // abbreviations: n_q_points and - // dofs_per_cell. The number of - // degrees of freedom per cell we now - // obviously ask from the composed - // finite element rather than from - // the underlying scalar Q1 - // element. Here, it is dim times - // the number of degrees of freedom - // per cell of the Q1 element, though - // this is not explicit knowledge we - // need to care about: + // @sect4{ElasticProblem::assemble_system} + + // The big changes in this program + // are in the creation of matrix and + // right hand side, since they are + // problem-dependent. We will go + // through that process step-by-step, + // since it is a bit more complicated + // than in previous examples. + // + // The first parts of this function + // are the same as before, however: + // setting up a suitable quadrature + // formula, initializing an + // FEValues object for the + // (vector-valued) finite element we + // use as well as the quadrature + // object, and declaring a number of + // auxiliary arrays. 
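For reference, and with the template arguments written out, the quadrature and FEValues setup described in this paragraph amounts to the following fragment (not a standalone program; fe and dim are the member and template parameter of the surrounding class):

QGauss<dim>   quadrature_formula(2);
FEValues<dim> fe_values (fe, quadrature_formula,
                         update_values   | update_gradients |
                         update_quadrature_points | update_JxW_values);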
In addition, we + // declare the ever same two + // abbreviations: n_q_points and + // dofs_per_cell. The number of + // degrees of freedom per cell we now + // obviously ask from the composed + // finite element rather than from + // the underlying scalar Q1 + // element. Here, it is dim times + // the number of degrees of freedom + // per cell of the Q1 element, though + // this is not explicit knowledge we + // need to care about: template void ElasticProblem::assemble_system () { QGauss quadrature_formula(2); FEValues fe_values (fe, quadrature_formula, - update_values | update_gradients | - update_quadrature_points | update_JxW_values); + update_values | update_gradients | + update_quadrature_points | update_JxW_values); const unsigned int dofs_per_cell = fe.dofs_per_cell; const unsigned int n_q_points = quadrature_formula.size(); @@ -554,309 +554,309 @@ namespace Step8 std::vector local_dof_indices (dofs_per_cell); - // As was shown in previous - // examples as well, we need a - // place where to store the values - // of the coefficients at all the - // quadrature points on a cell. In - // the present situation, we have - // two coefficients, lambda and mu. + // As was shown in previous + // examples as well, we need a + // place where to store the values + // of the coefficients at all the + // quadrature points on a cell. In + // the present situation, we have + // two coefficients, lambda and mu. std::vector lambda_values (n_q_points); std::vector mu_values (n_q_points); - // Well, we could as well have - // omitted the above two arrays - // since we will use constant - // coefficients for both lambda and - // mu, which can be declared like - // this. They both represent - // functions always returning the - // constant value 1.0. Although we - // could omit the respective - // factors in the assemblage of the - // matrix, we use them here for - // purpose of demonstration. + // Well, we could as well have + // omitted the above two arrays + // since we will use constant + // coefficients for both lambda and + // mu, which can be declared like + // this. They both represent + // functions always returning the + // constant value 1.0. Although we + // could omit the respective + // factors in the assemblage of the + // matrix, we use them here for + // purpose of demonstration. ConstantFunction lambda(1.), mu(1.); - // Then again, we need to have the - // same for the right hand - // side. This is exactly as before - // in previous examples. However, - // we now have a vector-valued - // right hand side, which is why - // the data type of the - // rhs_values array is - // changed. We initialize it by - // n_q_points elements, each of - // which is a Vector@ - // with dim elements. + // Then again, we need to have the + // same for the right hand + // side. This is exactly as before + // in previous examples. However, + // we now have a vector-valued + // right hand side, which is why + // the data type of the + // rhs_values array is + // changed. We initialize it by + // n_q_points elements, each of + // which is a Vector@ + // with dim elements. 
RightHandSide right_hand_side; std::vector > rhs_values (n_q_points, - Vector(dim)); + Vector(dim)); - // Now we can begin with the loop - // over all cells: + // Now we can begin with the loop + // over all cells: typename DoFHandler::active_cell_iterator cell = dof_handler.begin_active(), - endc = dof_handler.end(); + endc = dof_handler.end(); for (; cell!=endc; ++cell) { - cell_matrix = 0; - cell_rhs = 0; - - fe_values.reinit (cell); - - // Next we get the values of - // the coefficients at the - // quadrature points. Likewise - // for the right hand side: - lambda.value_list (fe_values.get_quadrature_points(), lambda_values); - mu.value_list (fe_values.get_quadrature_points(), mu_values); - - right_hand_side.vector_value_list (fe_values.get_quadrature_points(), - rhs_values); - - // Then assemble the entries of - // the local stiffness matrix - // and right hand side - // vector. This follows almost - // one-to-one the pattern - // described in the - // introduction of this - // example. One of the few - // comments in place is that we - // can compute the number - // comp(i), i.e. the index - // of the only nonzero vector - // component of shape function - // i using the - // fe.system_to_component_index(i).first - // function call below. - // - // (By accessing the - // first variable of - // the return value of the - // system_to_component_index - // function, you might - // already have guessed - // that there is more in - // it. In fact, the - // function returns a - // std::pair@, of - // which the first element - // is comp(i) and the - // second is the value - // base(i) also noted - // in the introduction, i.e. - // the index - // of this shape function - // within all the shape - // functions that are nonzero - // in this component, - // i.e. base(i) in the - // diction of the - // introduction. This is not a - // number that we are usually - // interested in, however.) - // - // With this knowledge, we can - // assemble the local matrix - // contributions: - for (unsigned int i=0; ishape_grad(i,q_point) - // returns the - // gradient of - // the only - // nonzero - // component of - // the i-th shape - // function at - // quadrature - // point - // q_point. The - // component - // comp(i) of - // the gradient, - // which is the - // derivative of - // this only - // nonzero vector - // component of - // the i-th shape - // function with - // respect to the - // comp(i)th - // coordinate is - // accessed by - // the appended - // brackets. - ( - (fe_values.shape_grad(i,q_point)[component_i] * - fe_values.shape_grad(j,q_point)[component_j] * - lambda_values[q_point]) - + - (fe_values.shape_grad(i,q_point)[component_j] * - fe_values.shape_grad(j,q_point)[component_i] * - mu_values[q_point]) - + - // The second term is - // (mu nabla u_i, nabla v_j). - // We need not - // access a - // specific - // component of - // the - // gradient, - // since we - // only have to - // compute the - // scalar - // product of - // the two - // gradients, - // of which an - // overloaded - // version of - // the - // operator* - // takes care, - // as in - // previous - // examples. - // - // Note that by - // using the ?: - // operator, we - // only do this - // if comp(i) - // equals - // comp(j), - // otherwise a - // zero is - // added (which - // will be - // optimized - // away by the - // compiler). - ((component_i == component_j) ? 
- (fe_values.shape_grad(i,q_point) * - fe_values.shape_grad(j,q_point) * - mu_values[q_point]) : - 0) - ) - * - fe_values.JxW(q_point); - } - } - } - - // Assembling the right hand - // side is also just as - // discussed in the - // introduction: - for (unsigned int i=0; iget_dof_indices (local_dof_indices); - for (unsigned int i=0; icomp(i), i.e. the index + // of the only nonzero vector + // component of shape function + // i using the + // fe.system_to_component_index(i).first + // function call below. + // + // (By accessing the + // first variable of + // the return value of the + // system_to_component_index + // function, you might + // already have guessed + // that there is more in + // it. In fact, the + // function returns a + // std::pair@, of + // which the first element + // is comp(i) and the + // second is the value + // base(i) also noted + // in the introduction, i.e. + // the index + // of this shape function + // within all the shape + // functions that are nonzero + // in this component, + // i.e. base(i) in the + // diction of the + // introduction. This is not a + // number that we are usually + // interested in, however.) + // + // With this knowledge, we can + // assemble the local matrix + // contributions: + for (unsigned int i=0; ishape_grad(i,q_point) + // returns the + // gradient of + // the only + // nonzero + // component of + // the i-th shape + // function at + // quadrature + // point + // q_point. The + // component + // comp(i) of + // the gradient, + // which is the + // derivative of + // this only + // nonzero vector + // component of + // the i-th shape + // function with + // respect to the + // comp(i)th + // coordinate is + // accessed by + // the appended + // brackets. + ( + (fe_values.shape_grad(i,q_point)[component_i] * + fe_values.shape_grad(j,q_point)[component_j] * + lambda_values[q_point]) + + + (fe_values.shape_grad(i,q_point)[component_j] * + fe_values.shape_grad(j,q_point)[component_i] * + mu_values[q_point]) + + + // The second term is + // (mu nabla u_i, nabla v_j). + // We need not + // access a + // specific + // component of + // the + // gradient, + // since we + // only have to + // compute the + // scalar + // product of + // the two + // gradients, + // of which an + // overloaded + // version of + // the + // operator* + // takes care, + // as in + // previous + // examples. + // + // Note that by + // using the ?: + // operator, we + // only do this + // if comp(i) + // equals + // comp(j), + // otherwise a + // zero is + // added (which + // will be + // optimized + // away by the + // compiler). + ((component_i == component_j) ? + (fe_values.shape_grad(i,q_point) * + fe_values.shape_grad(j,q_point) * + mu_values[q_point]) : + 0) + ) + * + fe_values.JxW(q_point); + } + } + } + + // Assembling the right hand + // side is also just as + // discussed in the + // introduction: + for (unsigned int i=0; iget_dof_indices (local_dof_indices); + for (unsigned int i=0; iZeroFunction - // constructor accepts a parameter - // that tells it that it shall - // represent a vector valued, - // constant zero function with that - // many components. By default, - // this parameter is equal to one, - // in which case the - // ZeroFunction object would - // represent a scalar - // function. Since the solution - // vector has dim components, - // we need to pass dim as - // number of components to the zero - // function as well. 
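Written out compactly, the vector-valued boundary treatment described in these comments is the following fragment (it assumes the dof_handler, system_matrix, solution and system_rhs members of the surrounding class, and the map key/value types used by this generation of deal.II):

std::map<unsigned int,double> boundary_values;
VectorTools::interpolate_boundary_values (dof_handler,
                                          0,                       // boundary indicator
                                          ZeroFunction<dim>(dim),  // dim zero components
                                          boundary_values);
MatrixTools::apply_boundary_values (boundary_values,
                                    system_matrix,
                                    solution,
                                    system_rhs);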
+ // The interpolation of the + // boundary values needs a small + // modification: since the solution + // function is vector-valued, so + // need to be the boundary + // values. The ZeroFunction + // constructor accepts a parameter + // that tells it that it shall + // represent a vector valued, + // constant zero function with that + // many components. By default, + // this parameter is equal to one, + // in which case the + // ZeroFunction object would + // represent a scalar + // function. Since the solution + // vector has dim components, + // we need to pass dim as + // number of components to the zero + // function as well. std::map boundary_values; VectorTools::interpolate_boundary_values (dof_handler, - 0, - ZeroFunction(dim), - boundary_values); + 0, + ZeroFunction(dim), + boundary_values); MatrixTools::apply_boundary_values (boundary_values, - system_matrix, - solution, - system_rhs); + system_matrix, + solution, + system_rhs); } - // @sect4{ElasticProblem::solve} + // @sect4{ElasticProblem::solve} - // The solver does not care about - // where the system of equations - // comes, as long as it stays - // positive definite and symmetric - // (which are the requirements for - // the use of the CG solver), which - // the system indeed is. Therefore, - // we need not change anything. + // The solver does not care about + // where the system of equations + // comes, as long as it stays + // positive definite and symmetric + // (which are the requirements for + // the use of the CG solver), which + // the system indeed is. Therefore, + // we need not change anything. template void ElasticProblem::solve () { @@ -867,38 +867,38 @@ namespace Step8 preconditioner.initialize(system_matrix, 1.2); cg.solve (system_matrix, solution, system_rhs, - preconditioner); + preconditioner); hanging_node_constraints.distribute (solution); } - // @sect4{ElasticProblem::refine_grid} - - // The function that does the - // refinement of the grid is the same - // as in the step-6 example. The - // quadrature formula is adapted to - // the linear elements again. Note - // that the error estimator by - // default adds up the estimated - // obtained from all components of - // the finite element solution, i.e., - // it uses the displacement in all - // directions with the same - // weight. If we would like the grid - // to be adapted to the - // x-displacement only, we could pass - // the function an additional - // parameter which tells it to do so - // and do not consider the - // displacements in all other - // directions for the error - // indicators. However, for the - // current problem, it seems - // appropriate to consider all - // displacement components with equal - // weight. + // @sect4{ElasticProblem::refine_grid} + + // The function that does the + // refinement of the grid is the same + // as in the step-6 example. The + // quadrature formula is adapted to + // the linear elements again. Note + // that the error estimator by + // default adds up the estimated + // obtained from all components of + // the finite element solution, i.e., + // it uses the displacement in all + // directions with the same + // weight. If we would like the grid + // to be adapted to the + // x-displacement only, we could pass + // the function an additional + // parameter which tells it to do so + // and do not consider the + // displacements in all other + // directions for the error + // indicators. 
However, for the + // current problem, it seems + // appropriate to consider all + // displacement components with equal + // weight. template void ElasticProblem::refine_grid () { @@ -906,30 +906,30 @@ namespace Step8 typename FunctionMap::type neumann_boundary; KellyErrorEstimator::estimate (dof_handler, - QGauss(2), - neumann_boundary, - solution, - estimated_error_per_cell); + QGauss(2), + neumann_boundary, + solution, + estimated_error_per_cell); GridRefinement::refine_and_coarsen_fixed_number (triangulation, - estimated_error_per_cell, - 0.3, 0.03); + estimated_error_per_cell, + 0.3, 0.03); triangulation.execute_coarsening_and_refinement (); } - // @sect4{ElasticProblem::output_results} + // @sect4{ElasticProblem::output_results} - // The output happens mostly as has - // been shown in previous examples - // already. The only difference is - // that the solution function is - // vector valued. The DataOut - // class takes care of this - // automatically, but we have to give - // each component of the solution - // vector a different name. + // The output happens mostly as has + // been shown in previous examples + // already. The only difference is + // that the solution function is + // vector valued. The DataOut + // class takes care of this + // automatically, but we have to give + // each component of the solution + // vector a different name. template void ElasticProblem::output_results (const unsigned int cycle) const { @@ -945,83 +945,83 @@ namespace Step8 - // As said above, we need a - // different name for each - // component of the solution - // function. To pass one name for - // each component, a vector of - // strings is used. Since the - // number of components is the same - // as the number of dimensions we - // are working in, the following - // switch statement is used. - // - // We note that some graphics - // programs have restriction as to - // what characters are allowed in - // the names of variables. The - // library therefore supports only - // the minimal subset of these - // characters that is supported by - // all programs. Basically, these - // are letters, numbers, - // underscores, and some other - // characters, but in particular no - // whitespace and minus/hyphen. The - // library will throw an exception - // otherwise, at least if in debug - // mode. - // - // After listing the 1d, 2d, and 3d - // case, it is good style to let - // the program die if we run upon a - // case which we did not - // consider. Remember that the - // Assert macro generates an - // exception if the condition in - // the first parameter is not - // satisfied. Of course, the - // condition false can never be - // satisfied, so the program will - // always abort whenever it gets to - // the default statement: + // As said above, we need a + // different name for each + // component of the solution + // function. To pass one name for + // each component, a vector of + // strings is used. Since the + // number of components is the same + // as the number of dimensions we + // are working in, the following + // switch statement is used. + // + // We note that some graphics + // programs have restriction as to + // what characters are allowed in + // the names of variables. The + // library therefore supports only + // the minimal subset of these + // characters that is supported by + // all programs. Basically, these + // are letters, numbers, + // underscores, and some other + // characters, but in particular no + // whitespace and minus/hyphen. 
The + // library will throw an exception + // otherwise, at least if in debug + // mode. + // + // After listing the 1d, 2d, and 3d + // case, it is good style to let + // the program die if we run upon a + // case which we did not + // consider. Remember that the + // Assert macro generates an + // exception if the condition in + // the first parameter is not + // satisfied. Of course, the + // condition false can never be + // satisfied, so the program will + // always abort whenever it gets to + // the default statement: std::vector solution_names; switch (dim) { - case 1: - solution_names.push_back ("displacement"); - break; - case 2: - solution_names.push_back ("x_displacement"); - solution_names.push_back ("y_displacement"); - break; - case 3: - solution_names.push_back ("x_displacement"); - solution_names.push_back ("y_displacement"); - solution_names.push_back ("z_displacement"); - break; - default: - Assert (false, ExcNotImplemented()); + case 1: + solution_names.push_back ("displacement"); + break; + case 2: + solution_names.push_back ("x_displacement"); + solution_names.push_back ("y_displacement"); + break; + case 3: + solution_names.push_back ("x_displacement"); + solution_names.push_back ("y_displacement"); + solution_names.push_back ("z_displacement"); + break; + default: + Assert (false, ExcNotImplemented()); } - // After setting up the names for - // the different components of the - // solution vector, we can add the - // solution vector to the list of - // data vectors scheduled for - // output. Note that the following - // function takes a vector of - // strings as second argument, - // whereas the one which we have - // used in all previous examples - // accepted a string there. In - // fact, the latter function is - // only a shortcut for the function - // which we call here: it puts the - // single string that is passed to - // it into a vector of strings with - // only one element and forwards - // that to the other function. + // After setting up the names for + // the different components of the + // solution vector, we can add the + // solution vector to the list of + // data vectors scheduled for + // output. Note that the following + // function takes a vector of + // strings as second argument, + // whereas the one which we have + // used in all previous examples + // accepted a string there. In + // fact, the latter function is + // only a shortcut for the function + // which we call here: it puts the + // single string that is passed to + // it into a vector of strings with + // only one element and forwards + // that to the other function. data_out.add_data_vector (solution, solution_names); data_out.build_patches (); data_out.write_gmv (output); @@ -1029,124 +1029,124 @@ namespace Step8 - // @sect4{ElasticProblem::run} - - // The run function does the same - // things as in step-6, for - // example. This time, we use the - // square [-1,1]^d as domain, and we - // refine it twice globally before - // starting the first iteration. - // - // The reason is the following: we - // use the Gauss quadrature - // formula with two points in each - // direction for integration of the - // right hand side; that means that - // there are four quadrature points - // on each cell (in 2D). If we only - // refine the initial grid once - // globally, then there will be only - // four quadrature points in each - // direction on the domain. 
However, - // the right hand side function was - // chosen to be rather localized and - // in that case all quadrature points - // lie outside the support of the - // right hand side function. The - // right hand side vector will then - // contain only zeroes and the - // solution of the system of - // equations is the zero vector, - // i.e. a finite element function - // that it zero everywhere. We should - // not be surprised about such things - // happening, since we have chosen an - // initial grid that is totally - // unsuitable for the problem at - // hand. - // - // The unfortunate thing is that if - // the discrete solution is constant, - // then the error indicators computed - // by the KellyErrorEstimator - // class are zero for each cell as - // well, and the call to - // refine_and_coarsen_fixed_number - // on the triangulation object - // will not flag any cells for - // refinement (why should it if the - // indicated error is zero for each - // cell?). The grid in the next - // iteration will therefore consist - // of four cells only as well, and - // the same problem occurs again. - // - // The conclusion needs to be: while - // of course we will not choose the - // initial grid to be well-suited for - // the accurate solution of the - // problem, we must at least choose - // it such that it has the chance to - // capture the most striking features - // of the solution. In this case, it - // needs to be able to see the right - // hand side. Thus, we refine twice - // globally. (Note that the - // refine_global function is not - // part of the GridRefinement - // class in which - // refine_and_coarsen_fixed_number - // is declared, for example. The - // reason is first that it is not an - // algorithm that computed refinement - // flags from indicators, but more - // importantly that it actually - // performs the refinement, in - // contrast to the functions in - // GridRefinement that only flag - // cells without actually refining - // the grid.) + // @sect4{ElasticProblem::run} + + // The run function does the same + // things as in step-6, for + // example. This time, we use the + // square [-1,1]^d as domain, and we + // refine it twice globally before + // starting the first iteration. + // + // The reason is the following: we + // use the Gauss quadrature + // formula with two points in each + // direction for integration of the + // right hand side; that means that + // there are four quadrature points + // on each cell (in 2D). If we only + // refine the initial grid once + // globally, then there will be only + // four quadrature points in each + // direction on the domain. However, + // the right hand side function was + // chosen to be rather localized and + // in that case all quadrature points + // lie outside the support of the + // right hand side function. The + // right hand side vector will then + // contain only zeroes and the + // solution of the system of + // equations is the zero vector, + // i.e. a finite element function + // that it zero everywhere. We should + // not be surprised about such things + // happening, since we have chosen an + // initial grid that is totally + // unsuitable for the problem at + // hand. 
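A tiny standalone sketch of the initial mesh this argument calls for; the counting in the comment is the whole point:

#include <deal.II/grid/tria.h>
#include <deal.II/grid/grid_generator.h>

#include <iostream>

int main ()
{
  dealii::Triangulation<2> triangulation;
  dealii::GridGenerator::hyper_cube (triangulation, -1, 1);
  triangulation.refine_global (2);

  // 16 cells, i.e. 4 per direction; with a 2-point Gauss rule that gives
  // 8 quadrature points per direction across the domain, enough to hit
  // the circles of radius 0.2 in which the right hand side is nonzero.
  std::cout << triangulation.n_active_cells() << std::endl;
}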
+ // + // The unfortunate thing is that if + // the discrete solution is constant, + // then the error indicators computed + // by the KellyErrorEstimator + // class are zero for each cell as + // well, and the call to + // refine_and_coarsen_fixed_number + // on the triangulation object + // will not flag any cells for + // refinement (why should it if the + // indicated error is zero for each + // cell?). The grid in the next + // iteration will therefore consist + // of four cells only as well, and + // the same problem occurs again. + // + // The conclusion needs to be: while + // of course we will not choose the + // initial grid to be well-suited for + // the accurate solution of the + // problem, we must at least choose + // it such that it has the chance to + // capture the most striking features + // of the solution. In this case, it + // needs to be able to see the right + // hand side. Thus, we refine twice + // globally. (Note that the + // refine_global function is not + // part of the GridRefinement + // class in which + // refine_and_coarsen_fixed_number + // is declared, for example. The + // reason is first that it is not an + // algorithm that computed refinement + // flags from indicators, but more + // importantly that it actually + // performs the refinement, in + // contrast to the functions in + // GridRefinement that only flag + // cells without actually refining + // the grid.) template void ElasticProblem::run () { for (unsigned int cycle=0; cycle<8; ++cycle) { - std::cout << "Cycle " << cycle << ':' << std::endl; + std::cout << "Cycle " << cycle << ':' << std::endl; - if (cycle == 0) - { - GridGenerator::hyper_cube (triangulation, -1, 1); - triangulation.refine_global (2); - } - else - refine_grid (); + if (cycle == 0) + { + GridGenerator::hyper_cube (triangulation, -1, 1); + triangulation.refine_global (2); + } + else + refine_grid (); - std::cout << " Number of active cells: " - << triangulation.n_active_cells() - << std::endl; + std::cout << " Number of active cells: " + << triangulation.n_active_cells() + << std::endl; - setup_system (); + setup_system (); - std::cout << " Number of degrees of freedom: " - << dof_handler.n_dofs() - << std::endl; + std::cout << " Number of degrees of freedom: " + << dof_handler.n_dofs() + << std::endl; - assemble_system (); - solve (); - output_results (cycle); + assemble_system (); + solve (); + output_results (cycle); } } } // @sect3{The main function} - // After closing the Step8 - // namespace in the last line above, the - // following is the main function of the - // program and is again exactly like in - // step-6 (apart from the changed class - // names, of course). + // After closing the Step8 + // namespace in the last line above, the + // following is the main function of the + // program and is again exactly like in + // step-6 (apart from the changed class + // names, of course). int main () { try @@ -1159,25 +1159,25 @@ int main () catch (std::exception &exc) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Exception on processing: " << std::endl - << exc.what() << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << exc.what() << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } catch (...) 
{ std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Unknown exception!" << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } diff --git a/deal.II/examples/step-9/step-9.cc b/deal.II/examples/step-9/step-9.cc index 08a1780616..53374f6cb7 100644 --- a/deal.II/examples/step-9/step-9.cc +++ b/deal.II/examples/step-9/step-9.cc @@ -9,10 +9,10 @@ /* to the file deal.II/doc/license.html for the text and */ /* further information on this license. */ - // Just as in previous examples, we - // have to include several files of - // which the meaning has already been - // discussed: + // Just as in previous examples, we + // have to include several files of + // which the meaning has already been + // discussed: #include #include #include @@ -38,54 +38,54 @@ #include #include - // The following two files provide classes - // and information for multi-threaded - // programs. In the first one, the classes - // and functions are declared which we need - // to start new threads and to wait for - // threads to return (i.e. the - // Thread class and the - // new_thread functions). The - // second file has a class - // MultithreadInfo (and a global - // object multithread_info of - // that type) which can be used to query the - // number of processors in your system, which - // is often useful when deciding how many - // threads to start in parallel. + // The following two files provide classes + // and information for multi-threaded + // programs. In the first one, the classes + // and functions are declared which we need + // to start new threads and to wait for + // threads to return (i.e. the + // Thread class and the + // new_thread functions). The + // second file has a class + // MultithreadInfo (and a global + // object multithread_info of + // that type) which can be used to query the + // number of processors in your system, which + // is often useful when deciding how many + // threads to start in parallel. #include #include - // The next new include file declares - // a base class TensorFunction - // not unlike the Function class, - // but with the difference that the - // return value is tensor-valued - // rather than scalar of - // vector-valued. + // The next new include file declares + // a base class TensorFunction + // not unlike the Function class, + // but with the difference that the + // return value is tensor-valued + // rather than scalar of + // vector-valued. #include #include - // This is C++, as we want to write - // some output to disk: + // This is C++, as we want to write + // some output to disk: #include #include - // The last step is as in previous - // programs: + // The last step is as in previous + // programs: namespace Step9 { using namespace dealii; - // @sect3{AdvectionProblem class declaration} + // @sect3{AdvectionProblem class declaration} - // Following we declare the main - // class of this program. It is very - // much alike the main classes of - // previous examples, so we again - // only comment on the differences. + // Following we declare the main + // class of this program. It is very + // much alike the main classes of + // previous examples, so we again + // only comment on the differences. 
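                                   // Before we do so, here is a tiny illustrative
                                   // sketch of how the two threading headers included
                                   // above are typically used (the function
                                   // do_some_work and the way the work is numbered
                                   // are made up solely for this example): we ask the
                                   // global multithread_info object how many threads
                                   // to use, start that many workers with
                                   // Threads::new_thread, and wait for all of them
                                   // through a Threads::ThreadGroup.
  void do_some_work (const unsigned int part)
  {
    // ... operate on chunk number 'part' of some larger task ...
  }

  void run_some_work_in_parallel ()
  {
    const unsigned int n_threads = multithread_info.n_default_threads;

    Threads::ThreadGroup<> threads;
    for (unsigned int thread=0; thread<n_threads; ++thread)
      threads += Threads::new_thread (&do_some_work, thread);

    threads.join_all ();                   // wait until every worker has finished
  }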
template class AdvectionProblem { @@ -96,51 +96,51 @@ namespace Step9 private: void setup_system (); - // The next function will be used - // to assemble the - // matrix. However, unlike in the - // previous examples, the - // function will not do the work - // itself, but rather it will - // split the range of active - // cells into several chunks and - // then call the following - // function on each of these - // chunks. The rationale is that - // matrix assembly can be - // parallelized quite well, as - // the computation of the local - // contributions on each cell is - // entirely independent of other - // cells, and we only have to - // synchronize when we add the - // contribution of a cell to the - // global matrix. The second - // function, doing the actual - // work, accepts two parameters - // which denote the first cell on - // which it shall operate, and - // the one past the last. - // - // The strategy for parallelization we - // choose here is one of the - // possibilities mentioned in detail in - // the @ref threads module in the - // documentation. While it is a - // straightforward way to distribute the - // work for assembling the system onto - // multiple processor cores. As mentioned - // in the module, there are other, and - // possibly better suited, ways to - // achieve the same goal. + // The next function will be used + // to assemble the + // matrix. However, unlike in the + // previous examples, the + // function will not do the work + // itself, but rather it will + // split the range of active + // cells into several chunks and + // then call the following + // function on each of these + // chunks. The rationale is that + // matrix assembly can be + // parallelized quite well, as + // the computation of the local + // contributions on each cell is + // entirely independent of other + // cells, and we only have to + // synchronize when we add the + // contribution of a cell to the + // global matrix. The second + // function, doing the actual + // work, accepts two parameters + // which denote the first cell on + // which it shall operate, and + // the one past the last. + // + // The strategy for parallelization we + // choose here is one of the + // possibilities mentioned in detail in + // the @ref threads module in the + // documentation. While it is a + // straightforward way to distribute the + // work for assembling the system onto + // multiple processor cores. As mentioned + // in the module, there are other, and + // possibly better suited, ways to + // achieve the same goal. void assemble_system (); void assemble_system_interval (const typename DoFHandler::active_cell_iterator &begin, - const typename DoFHandler::active_cell_iterator &end); + const typename DoFHandler::active_cell_iterator &end); - // The following functions again - // are as in previous examples, - // as are the subsequent - // variables. + // The following functions again + // are as in previous examples, + // as are the subsequent + // variables. void solve (); void refine_grid (); void output_results (const unsigned int cycle) const; @@ -158,89 +158,89 @@ namespace Step9 Vector solution; Vector system_rhs; - // When assembling the matrix in - // parallel, we have to - // synchronize when several - // threads attempt to write the - // local contributions of a cell - // to the global matrix at the - // same time. This is done using - // a Mutex, which is an - // object that can be owned by - // only one thread at a time. 
If - // a thread wants to write to the - // matrix, it has to acquire this - // lock (if it is presently owned - // by another thread, then it has - // to wait), then write to the - // matrix and finally release the - // lock. Note that if the library - // was not compiled to support - // multithreading (which you have - // to specify at the time you - // call the ./configure - // script in the top-level - // directory), then a dummy the - // actual data type of the - // typedef - // Threads::ThreadMutex is a - // class that provides all the - // functions needed for a mutex, - // but does nothing when they are - // called; this is reasonable, of - // course, since if only one - // thread is running at a time, - // there is no need to - // synchronize with other - // threads. + // When assembling the matrix in + // parallel, we have to + // synchronize when several + // threads attempt to write the + // local contributions of a cell + // to the global matrix at the + // same time. This is done using + // a Mutex, which is an + // object that can be owned by + // only one thread at a time. If + // a thread wants to write to the + // matrix, it has to acquire this + // lock (if it is presently owned + // by another thread, then it has + // to wait), then write to the + // matrix and finally release the + // lock. Note that if the library + // was not compiled to support + // multithreading (which you have + // to specify at the time you + // call the ./configure + // script in the top-level + // directory), then a dummy the + // actual data type of the + // typedef + // Threads::ThreadMutex is a + // class that provides all the + // functions needed for a mutex, + // but does nothing when they are + // called; this is reasonable, of + // course, since if only one + // thread is running at a time, + // there is no need to + // synchronize with other + // threads. Threads::ThreadMutex assembler_lock; }; - // @sect3{Equation data declaration} - - // Next we declare a class that - // describes the advection - // field. This, of course, is a - // vector field with as many compents - // as there are space dimensions. One - // could now use a class derived from - // the Function base class, as we - // have done for boundary values and - // coefficients in previous examples, - // but there is another possibility - // in the library, namely a base - // class that describes tensor valued - // functions. In contrast to the - // usual Function objects, we - // provide the compiler with - // knowledge on the size of the - // objects of the return type. This - // enables the compiler to generate - // efficient code, which is not so - // simple for usual vector-valued - // functions where memory has to be - // allocated on the heap (thus, the - // Function::vector_value - // function has to be given the - // address of an object into which - // the result is to be written, in - // order to avoid copying and memory - // allocation and deallocation on the - // heap). In addition to the known - // size, it is possible not only to - // return vectors, but also tensors - // of higher rank; however, this is - // not very often requested by - // applications, to be honest... - // - // The interface of the - // TensorFunction class is - // relatively close to that of the - // Function class, so there is - // probably no need to comment in - // detail the following declaration: + // @sect3{Equation data declaration} + + // Next we declare a class that + // describes the advection + // field. 
This, of course, is a + // vector field with as many compents + // as there are space dimensions. One + // could now use a class derived from + // the Function base class, as we + // have done for boundary values and + // coefficients in previous examples, + // but there is another possibility + // in the library, namely a base + // class that describes tensor valued + // functions. In contrast to the + // usual Function objects, we + // provide the compiler with + // knowledge on the size of the + // objects of the return type. This + // enables the compiler to generate + // efficient code, which is not so + // simple for usual vector-valued + // functions where memory has to be + // allocated on the heap (thus, the + // Function::vector_value + // function has to be given the + // address of an object into which + // the result is to be written, in + // order to avoid copying and memory + // allocation and deallocation on the + // heap). In addition to the known + // size, it is possible not only to + // return vectors, but also tensors + // of higher rank; however, this is + // not very often requested by + // applications, to be honest... + // + // The interface of the + // TensorFunction class is + // relatively close to that of the + // Function class, so there is + // probably no need to comment in + // detail the following declaration: template class AdvectionField : public TensorFunction<1,dim> { @@ -250,90 +250,90 @@ namespace Step9 virtual Tensor<1,dim> value (const Point &p) const; virtual void value_list (const std::vector > &points, - std::vector > &values) const; - - // In previous examples, we have - // used assertions that throw - // exceptions in several - // places. However, we have never - // seen how such exceptions are - // declared. This can be done as - // follows: + std::vector > &values) const; + + // In previous examples, we have + // used assertions that throw + // exceptions in several + // places. However, we have never + // seen how such exceptions are + // declared. This can be done as + // follows: DeclException2 (ExcDimensionMismatch, - unsigned int, unsigned int, - << "The vector has size " << arg1 << " but should have " - << arg2 << " elements."); - // The syntax may look a little - // strange, but is - // reasonable. The format is - // basically as follows: use the - // name of one of the macros - // DeclExceptionN, where - // N denotes the number of - // additional parameters which - // the exception object shall - // take. In this case, as we want - // to throw the exception when - // the sizes of two vectors - // differ, we need two arguments, - // so we use - // DeclException2. The first - // parameter then describes the - // name of the exception, while - // the following declare the data - // types of the parameters. The - // last argument is a sequence of - // output directives that will be - // piped into the std::cerr - // object, thus the strange - // format with the leading @<@< - // operator and the like. Note - // that we can access the - // parameters which are passed to - // the exception upon - // construction (i.e. within the - // Assert call) by using the - // names arg1 through - // argN, where N is the - // number of arguments as defined - // by the use of the respective - // macro DeclExceptionN. - // - // To learn how the preprocessor - // expands this macro into actual - // code, please refer to the - // documentation of the exception - // classes in the base - // library. 
Suffice it to say - // that by this macro call, the - // respective exception class is - // declared, which also has error - // output functions already - // implemented. + unsigned int, unsigned int, + << "The vector has size " << arg1 << " but should have " + << arg2 << " elements."); + // The syntax may look a little + // strange, but is + // reasonable. The format is + // basically as follows: use the + // name of one of the macros + // DeclExceptionN, where + // N denotes the number of + // additional parameters which + // the exception object shall + // take. In this case, as we want + // to throw the exception when + // the sizes of two vectors + // differ, we need two arguments, + // so we use + // DeclException2. The first + // parameter then describes the + // name of the exception, while + // the following declare the data + // types of the parameters. The + // last argument is a sequence of + // output directives that will be + // piped into the std::cerr + // object, thus the strange + // format with the leading @<@< + // operator and the like. Note + // that we can access the + // parameters which are passed to + // the exception upon + // construction (i.e. within the + // Assert call) by using the + // names arg1 through + // argN, where N is the + // number of arguments as defined + // by the use of the respective + // macro DeclExceptionN. + // + // To learn how the preprocessor + // expands this macro into actual + // code, please refer to the + // documentation of the exception + // classes in the base + // library. Suffice it to say + // that by this macro call, the + // respective exception class is + // declared, which also has error + // output functions already + // implemented. }; - // The following two functions - // implement the interface described - // above. The first simply implements - // the function as described in the - // introduction, while the second - // uses the same trick to avoid - // calling a virtual function as has - // already been introduced in the - // previous example program. Note the - // check for the right sizes of the - // arguments in the second function, - // which should always be present in - // such functions; it is our - // experience that many if not most - // programming errors result from - // incorrectly initialized arrays, - // incompatible parameters to - // functions and the like; using - // assertion as in this case can - // eliminate many of these problems. + // The following two functions + // implement the interface described + // above. The first simply implements + // the function as described in the + // introduction, while the second + // uses the same trick to avoid + // calling a virtual function as has + // already been introduced in the + // previous example program. Note the + // check for the right sizes of the + // arguments in the second function, + // which should always be present in + // such functions; it is our + // experience that many if not most + // programming errors result from + // incorrectly initialized arrays, + // incompatible parameters to + // functions and the like; using + // assertion as in this case can + // eliminate many of these problems. 
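                                   // (A short illustrative sketch: the helper below
                                   // is not used anywhere in the program, and the
                                   // number and locations of the evaluation points
                                   // are chosen arbitrarily.  It shows how the class
                                   // just declared is meant to be used: allocate the
                                   // output array with as many elements as there are
                                   // evaluation points and let value_list fill it in
                                   // one call.  Passing an output array of the wrong
                                   // size would trigger the ExcDimensionMismatch
                                   // exception declared above -- though only in debug
                                   // mode, since Assert expands to nothing in
                                   // optimized builds.)
  void advection_field_usage_example ()
  {
    AdvectionField<2> advection_field;

    std::vector<Point<2> >    points (4);    // e.g. the quadrature points of a cell
    points[1] = Point<2> (0.5, 0.25);
    std::vector<Tensor<1,2> > values (points.size());

    advection_field.value_list (points, values);
    // values[q] now holds the advection direction at points[q]
  }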
template Tensor<1,dim> AdvectionField::value (const Point &p) const @@ -351,10 +351,10 @@ namespace Step9 template void AdvectionField::value_list (const std::vector > &points, - std::vector > &values) const + std::vector > &values) const { Assert (values.size() == points.size(), - ExcDimensionMismatch (values.size(), points.size())); + ExcDimensionMismatch (values.size(), points.size())); for (unsigned int i=0; i::value (points[i]); @@ -363,26 +363,26 @@ namespace Step9 - // Besides the advection field, we - // need two functions describing the - // source terms (right hand side) - // and the boundary values. First for - // the right hand side, which follows - // the same pattern as in previous - // examples. As described in the - // introduction, the source is a - // constant function in the vicinity - // of a source point, which we denote - // by the constant static variable - // center_point. We set the - // values of this center using the - // same template tricks as we have - // shown in the step-7 example - // program. The rest is simple and - // has been shown previously, - // including the way to avoid virtual - // function calls in the - // value_list function. + // Besides the advection field, we + // need two functions describing the + // source terms (right hand side) + // and the boundary values. First for + // the right hand side, which follows + // the same pattern as in previous + // examples. As described in the + // introduction, the source is a + // constant function in the vicinity + // of a source point, which we denote + // by the constant static variable + // center_point. We set the + // values of this center using the + // same template tricks as we have + // shown in the step-7 example + // program. The rest is simple and + // has been shown previously, + // including the way to avoid virtual + // function calls in the + // value_list function. template class RightHandSide : public Function { @@ -390,11 +390,11 @@ namespace Step9 RightHandSide () : Function() {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; virtual void value_list (const std::vector > &points, - std::vector &values, - const unsigned int component = 0) const; + std::vector &values, + const unsigned int component = 0) const; private: static const Point center_point; @@ -412,36 +412,36 @@ namespace Step9 - // The only new thing here is that we - // check for the value of the - // component parameter. As this - // is a scalar function, it is - // obvious that it only makes sense - // if the desired component has the - // index zero, so we assert that this - // is indeed the - // case. ExcIndexRange is a - // global predefined exception - // (probably the one most often used, - // we therefore made it global - // instead of local to some class), - // that takes three parameters: the - // index that is outside the allowed - // range, the first element of the - // valid range and the one past the - // last (i.e. again the half-open - // interval so often used in the C++ - // standard library): + // The only new thing here is that we + // check for the value of the + // component parameter. As this + // is a scalar function, it is + // obvious that it only makes sense + // if the desired component has the + // index zero, so we assert that this + // is indeed the + // case. 
ExcIndexRange is a + // global predefined exception + // (probably the one most often used, + // we therefore made it global + // instead of local to some class), + // that takes three parameters: the + // index that is outside the allowed + // range, the first element of the + // valid range and the one past the + // last (i.e. again the half-open + // interval so often used in the C++ + // standard library): template double RightHandSide::value (const Point &p, - const unsigned int component) const + const unsigned int component) const { Assert (component == 0, ExcIndexRange (component, 0, 1)); const double diameter = 0.1; return ( (p-center_point).square() < diameter*diameter ? - .1/std::pow(diameter,dim) : - 0); + .1/std::pow(diameter,dim) : + 0); } @@ -449,11 +449,11 @@ namespace Step9 template void RightHandSide::value_list (const std::vector > &points, - std::vector &values, - const unsigned int component) const + std::vector &values, + const unsigned int component) const { Assert (values.size() == points.size(), - ExcDimensionMismatch (values.size(), points.size())); + ExcDimensionMismatch (values.size(), points.size())); for (unsigned int i=0; i::value (points[i], component); @@ -461,10 +461,10 @@ namespace Step9 - // Finally for the boundary values, - // which is just another class - // derived from the Function base - // class: + // Finally for the boundary values, + // which is just another class + // derived from the Function base + // class: template class BoundaryValues : public Function { @@ -472,11 +472,11 @@ namespace Step9 BoundaryValues () : Function() {} virtual double value (const Point &p, - const unsigned int component = 0) const; + const unsigned int component = 0) const; virtual void value_list (const std::vector > &points, - std::vector &values, - const unsigned int component = 0) const; + std::vector &values, + const unsigned int component = 0) const; }; @@ -484,7 +484,7 @@ namespace Step9 template double BoundaryValues::value (const Point &p, - const unsigned int component) const + const unsigned int component) const { Assert (component == 0, ExcIndexRange (component, 0, 1)); @@ -498,11 +498,11 @@ namespace Step9 template void BoundaryValues::value_list (const std::vector > &points, - std::vector &values, - const unsigned int component) const + std::vector &values, + const unsigned int component) const { Assert (values.size() == points.size(), - ExcDimensionMismatch (values.size(), points.size())); + ExcDimensionMismatch (values.size(), points.size())); for (unsigned int i=0; i::value (points[i], component); @@ -510,119 +510,119 @@ namespace Step9 - // @sect3{GradientEstimation class declaration} - - // Now, finally, here comes the class - // that will compute the difference - // approximation of the gradient on - // each cell and weighs that with a - // power of the mesh size, as - // described in the introduction. - // This class is a simple version of - // the DerivativeApproximation - // class in the library, that uses - // similar techniques to obtain - // finite difference approximations - // of the gradient of a finite - // element field, or if higher - // derivatives. - // - // The - // class has one public static - // function estimate that is - // called to compute a vector of - // error indicators, and one private - // function that does the actual work - // on an interval of all active - // cells. 
The latter is called by the - // first one in order to be able to - // do the computations in parallel if - // your computer has more than one - // processor. While the first - // function accepts as parameter a - // vector into which the error - // indicator is written for each - // cell. This vector is passed on to - // the second function that actually - // computes the error indicators on - // some cells, and the respective - // elements of the vector are - // written. By the way, we made it - // somewhat of a convention to use - // vectors of floats for error - // indicators rather than the common - // vectors of doubles, as the - // additional accuracy is not - // necessary for estimated values. - // - // In addition to these two - // functions, the class declares to - // exceptions which are raised when a - // cell has no neighbors in each of - // the space directions (in which - // case the matrix described in the - // introduction would be singular and - // can't be inverted), while the - // other one is used in the more - // common case of invalid parameters - // to a function, namely a vector of - // wrong size. - // - // Two annotations to this class are - // still in order: the first is that - // the class has no non-static member - // functions or variables, so this is - // not really a class, but rather - // serves the purpose of a - // namespace in C++. The reason - // that we chose a class over a - // namespace is that this way we can - // declare functions that are - // private, i.e. visible to the - // outside world but not - // callable. This can be done with - // namespaces as well, if one - // declares some functions in header - // files in the namespace and - // implements these and other - // functions in the implementation - // file. The functions not declared - // in the header file are still in - // the namespace but are not callable - // from outside. However, as we have - // only one file here, it is not - // possible to hide functions in the - // present case. - // - // The second is that the dimension - // template parameter is attached to - // the function rather than to the - // class itself. This way, you don't - // have to specify the template - // parameter yourself as in most - // other cases, but the compiler can - // figure its value out itself from - // the dimension of the DoF handler - // object that one passes as first - // argument. - // - // Finally note that the - // IndexInterval typedef is - // introduced as a convenient - // abbreviation for an otherwise - // lengthy type name. + // @sect3{GradientEstimation class declaration} + + // Now, finally, here comes the class + // that will compute the difference + // approximation of the gradient on + // each cell and weighs that with a + // power of the mesh size, as + // described in the introduction. + // This class is a simple version of + // the DerivativeApproximation + // class in the library, that uses + // similar techniques to obtain + // finite difference approximations + // of the gradient of a finite + // element field, or if higher + // derivatives. + // + // The + // class has one public static + // function estimate that is + // called to compute a vector of + // error indicators, and one private + // function that does the actual work + // on an interval of all active + // cells. The latter is called by the + // first one in order to be able to + // do the computations in parallel if + // your computer has more than one + // processor. 
While the first + // function accepts as parameter a + // vector into which the error + // indicator is written for each + // cell. This vector is passed on to + // the second function that actually + // computes the error indicators on + // some cells, and the respective + // elements of the vector are + // written. By the way, we made it + // somewhat of a convention to use + // vectors of floats for error + // indicators rather than the common + // vectors of doubles, as the + // additional accuracy is not + // necessary for estimated values. + // + // In addition to these two + // functions, the class declares to + // exceptions which are raised when a + // cell has no neighbors in each of + // the space directions (in which + // case the matrix described in the + // introduction would be singular and + // can't be inverted), while the + // other one is used in the more + // common case of invalid parameters + // to a function, namely a vector of + // wrong size. + // + // Two annotations to this class are + // still in order: the first is that + // the class has no non-static member + // functions or variables, so this is + // not really a class, but rather + // serves the purpose of a + // namespace in C++. The reason + // that we chose a class over a + // namespace is that this way we can + // declare functions that are + // private, i.e. visible to the + // outside world but not + // callable. This can be done with + // namespaces as well, if one + // declares some functions in header + // files in the namespace and + // implements these and other + // functions in the implementation + // file. The functions not declared + // in the header file are still in + // the namespace but are not callable + // from outside. However, as we have + // only one file here, it is not + // possible to hide functions in the + // present case. + // + // The second is that the dimension + // template parameter is attached to + // the function rather than to the + // class itself. This way, you don't + // have to specify the template + // parameter yourself as in most + // other cases, but the compiler can + // figure its value out itself from + // the dimension of the DoF handler + // object that one passes as first + // argument. + // + // Finally note that the + // IndexInterval typedef is + // introduced as a convenient + // abbreviation for an otherwise + // lengthy type name. class GradientEstimation { public: template static void estimate (const DoFHandler &dof, - const Vector &solution, - Vector &error_per_cell); + const Vector &solution, + Vector &error_per_cell); DeclException2 (ExcInvalidVectorLength, - int, int, - << "Vector has length " << arg1 << ", but should have " - << arg2); + int, int, + << "Vector has length " << arg1 << ", but should have " + << arg2); DeclException0 (ExcInsufficientDirections); private: @@ -630,27 +630,27 @@ namespace Step9 template static void estimate_interval (const DoFHandler &dof, - const Vector &solution, - const IndexInterval &index_interval, - Vector &error_per_cell); + const Vector &solution, + const IndexInterval &index_interval, + Vector &error_per_cell); }; - // @sect3{AdvectionProblem class implementation} + // @sect3{AdvectionProblem class implementation} - // Now for the implementation of the - // main class. Constructor, - // destructor and the function - // setup_system follow the same - // pattern that was used previously, - // so we need not comment on these - // three function: + // Now for the implementation of the + // main class. 
Constructor, + // destructor and the function + // setup_system follow the same + // pattern that was used previously, + // so we need not comment on these + // three function: template AdvectionProblem::AdvectionProblem () : - dof_handler (triangulation), - fe(1) + dof_handler (triangulation), + fe(1) {} @@ -670,12 +670,12 @@ namespace Step9 hanging_node_constraints.clear (); DoFTools::make_hanging_node_constraints (dof_handler, - hanging_node_constraints); + hanging_node_constraints); hanging_node_constraints.close (); sparsity_pattern.reinit (dof_handler.n_dofs(), - dof_handler.n_dofs(), - dof_handler.max_couplings_between_dofs()); + dof_handler.n_dofs(), + dof_handler.max_couplings_between_dofs()); DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); hanging_node_constraints.condense (sparsity_pattern); @@ -690,657 +690,657 @@ namespace Step9 - // In the following function, the - // matrix and right hand side are - // assembled. As stated in the - // documentation of the main class - // above, it does not do this itself, - // but rather delegates to the - // function following next, by - // splitting up the range of cells - // into chunks of approximately the - // same size and assembling on each - // of these chunks in parallel. + // In the following function, the + // matrix and right hand side are + // assembled. As stated in the + // documentation of the main class + // above, it does not do this itself, + // but rather delegates to the + // function following next, by + // splitting up the range of cells + // into chunks of approximately the + // same size and assembling on each + // of these chunks in parallel. template void AdvectionProblem::assemble_system () { - // First, we want to find out how - // many threads shall assemble the - // matrix in parallel. A reasonable - // choice would be that each - // processor in your system - // processes one chunk of cells; if - // we were to use this information, - // we could use the value of the - // global variable - // multithread_info.n_cpus, - // which is determined at start-up - // time of your program - // automatically. (Note that if the - // library was not configured for - // multi-threading, then the number - // of CPUs is set to one.) However, - // sometimes there might be reasons - // to use another value. For - // example, you might want to use - // less processors than there are - // in your system in order not to - // use too many computational - // ressources. On the other hand, - // if there are several jobs - // running on a computer and you - // want to get a higher percentage - // of CPU time, it might be worth - // to start more threads than there - // are CPUs, as most operating - // systems assign roughly the same - // CPU ressources to all threads - // presently running. For this - // reason, the MultithreadInfo - // class contains a read-write - // variable n_default_threads - // which is set to n_cpus by - // default, but can be set to - // another value. This variable is - // also queried by functions inside - // the library to determine how - // many threads they shall create. + // First, we want to find out how + // many threads shall assemble the + // matrix in parallel. A reasonable + // choice would be that each + // processor in your system + // processes one chunk of cells; if + // we were to use this information, + // we could use the value of the + // global variable + // multithread_info.n_cpus, + // which is determined at start-up + // time of your program + // automatically. 
(Note that if the + // library was not configured for + // multi-threading, then the number + // of CPUs is set to one.) However, + // sometimes there might be reasons + // to use another value. For + // example, you might want to use + // less processors than there are + // in your system in order not to + // use too many computational + // ressources. On the other hand, + // if there are several jobs + // running on a computer and you + // want to get a higher percentage + // of CPU time, it might be worth + // to start more threads than there + // are CPUs, as most operating + // systems assign roughly the same + // CPU ressources to all threads + // presently running. For this + // reason, the MultithreadInfo + // class contains a read-write + // variable n_default_threads + // which is set to n_cpus by + // default, but can be set to + // another value. This variable is + // also queried by functions inside + // the library to determine how + // many threads they shall create. const unsigned int n_threads = multithread_info.n_default_threads; - // It is worth noting, however, that this - // setup determines the load distribution - // onto processor in a static way: it does - // not take into account that some other - // part of our program may also be running - // something in parallel at the same time - // as we get here (this is not the case in - // the current program, but may easily be - // the case in more complex - // applications). A discussion of how to - // deal with this case can be found in the - // @ref threads module. - // - // Next, we need an object which is - // capable of keeping track of the - // threads we created, and allows - // us to wait until they all have - // finished (to join them in - // the language of threads). The - // Threads::ThreadGroup class - // does this, which is basically - // just a container for objects of - // type Threads::Thread that - // represent a single thread; - // Threads::Thread is what the - // Threads::new_thread function below will - // return when we start a new - // thread. - // - // Note that both Threads::ThreadGroup - // and Threads::Thread have a template - // argument that represents the - // return type of the function - // being called on a separate - // thread. Since most of the - // functions that we will call on - // different threads have return - // type void, the template - // argument has a default value - // void, so that in that case - // it can be omitted. (However, you - // still need to write the angle - // brackets, even if they are - // empty.) - // - // If you did not configure for - // multi-threading, then the - // new_thread function that is - // supposed to start a new thread - // in parallel only executes the - // function which should be run in - // parallel, waits for it to return - // (i.e. the function is executed - // sequentially), and puts the - // return value into the Thread - // object. Likewise, the function - // join that is supposed to - // wait for all spawned threads to - // return, returns immediately, as - // there can't be any threads running. + // It is worth noting, however, that this + // setup determines the load distribution + // onto processor in a static way: it does + // not take into account that some other + // part of our program may also be running + // something in parallel at the same time + // as we get here (this is not the case in + // the current program, but may easily be + // the case in more complex + // applications). 
A discussion of how to + // deal with this case can be found in the + // @ref threads module. + // + // Next, we need an object which is + // capable of keeping track of the + // threads we created, and allows + // us to wait until they all have + // finished (to join them in + // the language of threads). The + // Threads::ThreadGroup class + // does this, which is basically + // just a container for objects of + // type Threads::Thread that + // represent a single thread; + // Threads::Thread is what the + // Threads::new_thread function below will + // return when we start a new + // thread. + // + // Note that both Threads::ThreadGroup + // and Threads::Thread have a template + // argument that represents the + // return type of the function + // being called on a separate + // thread. Since most of the + // functions that we will call on + // different threads have return + // type void, the template + // argument has a default value + // void, so that in that case + // it can be omitted. (However, you + // still need to write the angle + // brackets, even if they are + // empty.) + // + // If you did not configure for + // multi-threading, then the + // new_thread function that is + // supposed to start a new thread + // in parallel only executes the + // function which should be run in + // parallel, waits for it to return + // (i.e. the function is executed + // sequentially), and puts the + // return value into the Thread + // object. Likewise, the function + // join that is supposed to + // wait for all spawned threads to + // return, returns immediately, as + // there can't be any threads running. Threads::ThreadGroup<> threads; - // Now we have to split the range - // of cells into chunks of - // approximately the same - // size. Each thread will then - // assemble the local contributions - // of the cells within its chunk - // and transfer these contributions - // to the global matrix. As - // splitting a range of cells is a - // rather common task when using - // multi-threading, there is a - // function in the Threads - // namespace that does exactly - // this. In fact, it does this not - // only for a range of cell - // iterators, but for iterators in - // general, so you could use it for - // std::vector::iterator or - // usual pointers as well. - // - // The function returns a vector of - // pairs of iterators, where the - // first denotes the first cell of - // each chunk, while the second - // denotes the one past the last - // (this half-open interval is the - // usual convention in the C++ - // standard library, so we keep to - // it). Note that we have to - // specify the actual data type of - // the iterators in angle brackets - // to the function. This is - // necessary, since it is a - // template function which takes - // the data type of the iterators - // as template argument; in the - // present case, however, the data - // types of the two first - // parameters differ - // (begin_active returns an - // active_iterator, while - // end returns a - // raw_iterator), and in this - // case the C++ language requires - // us to specify the template type - // explicitely. For brevity, we - // first typedef this data type to - // an alias. + // Now we have to split the range + // of cells into chunks of + // approximately the same + // size. Each thread will then + // assemble the local contributions + // of the cells within its chunk + // and transfer these contributions + // to the global matrix. 
As + // splitting a range of cells is a + // rather common task when using + // multi-threading, there is a + // function in the Threads + // namespace that does exactly + // this. In fact, it does this not + // only for a range of cell + // iterators, but for iterators in + // general, so you could use it for + // std::vector::iterator or + // usual pointers as well. + // + // The function returns a vector of + // pairs of iterators, where the + // first denotes the first cell of + // each chunk, while the second + // denotes the one past the last + // (this half-open interval is the + // usual convention in the C++ + // standard library, so we keep to + // it). Note that we have to + // specify the actual data type of + // the iterators in angle brackets + // to the function. This is + // necessary, since it is a + // template function which takes + // the data type of the iterators + // as template argument; in the + // present case, however, the data + // types of the two first + // parameters differ + // (begin_active returns an + // active_iterator, while + // end returns a + // raw_iterator), and in this + // case the C++ language requires + // us to specify the template type + // explicitely. For brevity, we + // first typedef this data type to + // an alias. typedef typename DoFHandler::active_cell_iterator active_cell_iterator; std::vector > thread_ranges = Threads::split_range (dof_handler.begin_active (), - dof_handler.end (), - n_threads); - - // Finally, for each of the chunks - // of iterators we have computed, - // start one thread (or if not in - // multi-thread mode: execute - // assembly on these chunks - // sequentially). This is done - // using the following sequence of - // function calls: + dof_handler.end (), + n_threads); + + // Finally, for each of the chunks + // of iterators we have computed, + // start one thread (or if not in + // multi-thread mode: execute + // assembly on these chunks + // sequentially). This is done + // using the following sequence of + // function calls: for (unsigned int thread=0; thread::assemble_system_interval, - *this, - thread_ranges[thread].first, - thread_ranges[thread].second); - // The reasons and internal - // workings of these functions can - // be found in the report on the - // subject of multi-threading, - // which is available online as - // well. Suffice it to say that we - // create a new thread that calls - // the assemble_system_interval - // function on the present object - // (the this pointer), with the - // arguments following in the - // second set of parentheses passed - // as parameters. The Threads::new_thread - // function returns an object of - // type Threads::Thread, which - // we put into the threads - // container. If a thread exits, - // the return value of the function - // being called is put into a place - // such that the thread objects can - // access it using their - // return_value function; since - // the function we call doesn't - // have a return value, this does - // not apply here. Note that you - // can copy around thread objects - // freely, and that of course they - // will still represent the same - // thread. - - // When all the threads are - // running, the only thing we have - // to do is wait for them to - // finish. This is necessary of - // course, as we can't proceed with - // our tasks before the matrix and - // right hand side are - // assemblesd. 
Waiting for all the - // threads to finish can be done - // using the joint_all function - // in the ThreadGroup - // container, which just calls - // join on each of the thread - // objects it stores. - // - // Again, if the library was not - // configured to use - // multi-threading, then no threads - // can run in parallel and the - // function returns immediately. + *this, + thread_ranges[thread].first, + thread_ranges[thread].second); + // The reasons and internal + // workings of these functions can + // be found in the report on the + // subject of multi-threading, + // which is available online as + // well. Suffice it to say that we + // create a new thread that calls + // the assemble_system_interval + // function on the present object + // (the this pointer), with the + // arguments following in the + // second set of parentheses passed + // as parameters. The Threads::new_thread + // function returns an object of + // type Threads::Thread, which + // we put into the threads + // container. If a thread exits, + // the return value of the function + // being called is put into a place + // such that the thread objects can + // access it using their + // return_value function; since + // the function we call doesn't + // have a return value, this does + // not apply here. Note that you + // can copy around thread objects + // freely, and that of course they + // will still represent the same + // thread. + + // When all the threads are + // running, the only thing we have + // to do is wait for them to + // finish. This is necessary of + // course, as we can't proceed with + // our tasks before the matrix and + // right hand side are + // assemblesd. Waiting for all the + // threads to finish can be done + // using the joint_all function + // in the ThreadGroup + // container, which just calls + // join on each of the thread + // objects it stores. + // + // Again, if the library was not + // configured to use + // multi-threading, then no threads + // can run in parallel and the + // function returns immediately. threads.join_all (); - // After the matrix has been - // assembled in parallel, we stil - // have to eliminate hanging node - // constraints. This is something - // that can't be done on each of - // the threads separately, so we - // have to do it now. + // After the matrix has been + // assembled in parallel, we stil + // have to eliminate hanging node + // constraints. This is something + // that can't be done on each of + // the threads separately, so we + // have to do it now. hanging_node_constraints.condense (system_matrix); hanging_node_constraints.condense (system_rhs); - // Note also, that unlike in - // previous examples, there are no - // boundary conditions to be - // applied to the system of - // equations. This, of course, is - // due to the fact that we have - // included them into the weak - // formulation of the problem. + // Note also, that unlike in + // previous examples, there are no + // boundary conditions to be + // applied to the system of + // equations. This, of course, is + // due to the fact that we have + // included them into the weak + // formulation of the problem. } - // Now, this is the function that - // does the actual work. It is not - // very different from the - // assemble_system functions of - // previous example programs, so we - // will again only comment on the - // differences. The mathematical - // stuff follows closely what we have - // said in the introduction. 
+ // Now, this is the function that + // does the actual work. It is not + // very different from the + // assemble_system functions of + // previous example programs, so we + // will again only comment on the + // differences. The mathematical + // stuff follows closely what we have + // said in the introduction. template void AdvectionProblem:: assemble_system_interval (const typename DoFHandler::active_cell_iterator &begin, - const typename DoFHandler::active_cell_iterator &end) + const typename DoFHandler::active_cell_iterator &end) { - // First of all, we will need some - // objects that describe boundary - // values, right hand side function - // and the advection field. As we - // will only perform actions on - // these objects that do not change - // them, we declare them as - // constant, which can enable the - // compiler in some cases to - // perform additional - // optimizations. + // First of all, we will need some + // objects that describe boundary + // values, right hand side function + // and the advection field. As we + // will only perform actions on + // these objects that do not change + // them, we declare them as + // constant, which can enable the + // compiler in some cases to + // perform additional + // optimizations. const AdvectionField advection_field; const RightHandSide right_hand_side; const BoundaryValues boundary_values; - // Next we need quadrature formula - // for the cell terms, but also for - // the integral over the inflow - // boundary, which will be a face - // integral. As we use bilinear - // elements, Gauss formulae with - // two points in each space - // direction are sufficient. + // Next we need quadrature formula + // for the cell terms, but also for + // the integral over the inflow + // boundary, which will be a face + // integral. As we use bilinear + // elements, Gauss formulae with + // two points in each space + // direction are sufficient. QGauss quadrature_formula(2); QGauss face_quadrature_formula(2); - // Finally, we need objects of type - // FEValues and - // FEFaceValues. For the cell - // terms we need the values and - // gradients of the shape - // functions, the quadrature points - // in order to determine the source - // density and the advection field - // at a given point, and the - // weights of the quadrature points - // times the determinant of the - // Jacobian at these points. In - // contrast, for the boundary - // integrals, we don't need the - // gradients, but rather the normal - // vectors to the cells. + // Finally, we need objects of type + // FEValues and + // FEFaceValues. For the cell + // terms we need the values and + // gradients of the shape + // functions, the quadrature points + // in order to determine the source + // density and the advection field + // at a given point, and the + // weights of the quadrature points + // times the determinant of the + // Jacobian at these points. In + // contrast, for the boundary + // integrals, we don't need the + // gradients, but rather the normal + // vectors to the cells. 
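                                     // (A purely illustrative aside, not something the
                                     // program needs to do: the statement about the
                                     // two-point Gauss formula can be made concrete.
                                     // An n-point Gauss rule in 1d integrates
                                     // polynomials up to degree 2n-1 exactly, so the
                                     // two-point rule reproduces, for example, the
                                     // integral of x^3 over the reference interval
                                     // [0,1], which equals 1/4.  The tolerance below
                                     // is chosen arbitrarily for this check.)
    {
      const QGauss<1> gauss_check (2);
      double integral_of_x_cubed = 0;
      for (unsigned int q=0; q<gauss_check.size(); ++q)
        integral_of_x_cubed += std::pow (gauss_check.point(q)(0), 3.)
                               * gauss_check.weight (q);
      Assert (std::fabs (integral_of_x_cubed - 0.25) < 1e-12,
              ExcInternalError());
    }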
FEValues fe_values (fe, quadrature_formula, - update_values | update_gradients | - update_quadrature_points | update_JxW_values); + update_values | update_gradients | + update_quadrature_points | update_JxW_values); FEFaceValues fe_face_values (fe, face_quadrature_formula, - update_values | update_quadrature_points | - update_JxW_values | update_normal_vectors); + update_values | update_quadrature_points | + update_JxW_values | update_normal_vectors); - // Then we define some - // abbreviations to avoid - // unnecessarily long lines: + // Then we define some + // abbreviations to avoid + // unnecessarily long lines: const unsigned int dofs_per_cell = fe.dofs_per_cell; const unsigned int n_q_points = quadrature_formula.size(); const unsigned int n_face_q_points = face_quadrature_formula.size(); - // We declare cell matrix and cell - // right hand side... + // We declare cell matrix and cell + // right hand side... FullMatrix cell_matrix (dofs_per_cell, dofs_per_cell); Vector cell_rhs (dofs_per_cell); - // ... an array to hold the global - // indices of the degrees of - // freedom of the cell on which we - // are presently working... + // ... an array to hold the global + // indices of the degrees of + // freedom of the cell on which we + // are presently working... std::vector local_dof_indices (dofs_per_cell); - // ... and array in which the - // values of right hand side, - // advection direction, and - // boundary values will be stored, - // for cell and face integrals - // respectively: + // ... and array in which the + // values of right hand side, + // advection direction, and + // boundary values will be stored, + // for cell and face integrals + // respectively: std::vector rhs_values (n_q_points); std::vector > advection_directions (n_q_points); std::vector face_boundary_values (n_face_q_points); std::vector > face_advection_directions (n_face_q_points); - // Then we start the main loop over - // the cells: + // Then we start the main loop over + // the cells: typename DoFHandler::active_cell_iterator cell; for (cell=begin; cell!=end; ++cell) { - // First clear old contents of - // the cell contributions... - cell_matrix = 0; - cell_rhs = 0; - - // ... then initialize - // the FEValues object... - fe_values.reinit (cell); - - // ... obtain the values of - // right hand side and - // advection directions at the - // quadrature points... - advection_field.value_list (fe_values.get_quadrature_points(), - advection_directions); - right_hand_side.value_list (fe_values.get_quadrature_points(), - rhs_values); - - // ... set the value of the - // streamline diffusion - // parameter as described in - // the introduction... - const double delta = 0.1 * cell->diameter (); - - // ... and assemble the local - // contributions to the system - // matrix and right hand side - // as also discussed above: - for (unsigned int q_point=0; q_pointinflow part of the - // boundary, but to find out - // whether a certain part of a - // face of the present cell is - // part of the inflow boundary, - // we have to have information - // on the exact location of the - // quadrature points and on the - // direction of flow at this - // point; we obtain this - // information using the - // FEFaceValues object and only - // decide within the main loop - // whether a quadrature point - // is on the inflow boundary. - for (unsigned int face=0; face::faces_per_cell; ++face) - if (cell->face(face)->at_boundary()) - { - // Ok, this face of the - // present cell is on the - // boundary of the - // domain. 
Just as for - // the usual FEValues - // object which we have - // used in previous - // examples and also - // above, we have to - // reinitialize the - // FEFaceValues object - // for the present face: - fe_face_values.reinit (cell, face); - - // For the quadrature - // points at hand, we ask - // for the values of the - // inflow function and - // for the direction of - // flow: - boundary_values.value_list (fe_face_values.get_quadrature_points(), - face_boundary_values); - advection_field.value_list (fe_face_values.get_quadrature_points(), - face_advection_directions); - - // Now loop over all - // quadrature points and - // see whether it is on - // the inflow or outflow - // part of the - // boundary. This is - // determined by a test - // whether the advection - // direction points - // inwards or outwards of - // the domain (note that - // the normal vector - // points outwards of the - // cell, and since the - // cell is at the - // boundary, the normal - // vector points outward - // of the domain, so if - // the advection - // direction points into - // the domain, its scalar - // product with the - // normal vector must be - // negative): - for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); - - // Up until now we have not - // taken care of the fact that - // this function might run more - // than once in parallel, as - // the operations above only - // work on variables that are - // local to this function, or - // if they are global (such as - // the information on the grid, - // the DoF handler, or the DoF - // numbers) they are only - // read. Thus, the different - // threads do not disturb each - // other. - // - // On the other hand, we would - // now like to write the local - // contributions to the global - // system of equations into the - // global objects. This needs - // some kind of - // synchronisation, as if we - // would not take care of the - // fact that multiple threads - // write into the matrix at the - // same time, we might be - // surprised that one threads - // reads data from the matrix - // that another thread is - // presently overwriting, or - // similar things. Thus, to - // make sure that only one - // thread operates on these - // objects at a time, we have - // to lock it. This is done - // using a Mutex, which is - // short for mutually - // exclusive: a thread that - // wants to write to the global - // objects acquires this lock, - // but has to wait if it is - // presently owned by another - // thread. If it has acquired - // the lock, it can be sure - // that no other thread is - // presently writing to the - // matrix, and can do so - // freely. When finished, we - // release the lock again so as - // to allow other threads to - // acquire it and write to the - // matrix. - assembler_lock.acquire (); - for (unsigned int i=0; ilock and release - // functions are no-ops, - // i.e. they return without - // doing anything. - // - // 2. In order to work - // properly, it is essential - // that all threads try to - // acquire the same lock. This, - // of course, can not be - // achieved if the lock is a - // local variable, as then each - // thread would acquire its own - // lock. Therefore, the lock - // variable is a member - // variable of the class; since - // all threads execute member - // functions of the same - // object, they have the same - // this pointer and - // therefore also operate on - // the same lock. + // First clear old contents of + // the cell contributions... 
+ cell_matrix = 0; + cell_rhs = 0; + + // ... then initialize + // the FEValues object... + fe_values.reinit (cell); + + // ... obtain the values of + // right hand side and + // advection directions at the + // quadrature points... + advection_field.value_list (fe_values.get_quadrature_points(), + advection_directions); + right_hand_side.value_list (fe_values.get_quadrature_points(), + rhs_values); + + // ... set the value of the + // streamline diffusion + // parameter as described in + // the introduction... + const double delta = 0.1 * cell->diameter (); + + // ... and assemble the local + // contributions to the system + // matrix and right hand side + // as also discussed above: + for (unsigned int q_point=0; q_pointinflow part of the + // boundary, but to find out + // whether a certain part of a + // face of the present cell is + // part of the inflow boundary, + // we have to have information + // on the exact location of the + // quadrature points and on the + // direction of flow at this + // point; we obtain this + // information using the + // FEFaceValues object and only + // decide within the main loop + // whether a quadrature point + // is on the inflow boundary. + for (unsigned int face=0; face::faces_per_cell; ++face) + if (cell->face(face)->at_boundary()) + { + // Ok, this face of the + // present cell is on the + // boundary of the + // domain. Just as for + // the usual FEValues + // object which we have + // used in previous + // examples and also + // above, we have to + // reinitialize the + // FEFaceValues object + // for the present face: + fe_face_values.reinit (cell, face); + + // For the quadrature + // points at hand, we ask + // for the values of the + // inflow function and + // for the direction of + // flow: + boundary_values.value_list (fe_face_values.get_quadrature_points(), + face_boundary_values); + advection_field.value_list (fe_face_values.get_quadrature_points(), + face_advection_directions); + + // Now loop over all + // quadrature points and + // see whether it is on + // the inflow or outflow + // part of the + // boundary. This is + // determined by a test + // whether the advection + // direction points + // inwards or outwards of + // the domain (note that + // the normal vector + // points outwards of the + // cell, and since the + // cell is at the + // boundary, the normal + // vector points outward + // of the domain, so if + // the advection + // direction points into + // the domain, its scalar + // product with the + // normal vector must be + // negative): + for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); + + // Up until now we have not + // taken care of the fact that + // this function might run more + // than once in parallel, as + // the operations above only + // work on variables that are + // local to this function, or + // if they are global (such as + // the information on the grid, + // the DoF handler, or the DoF + // numbers) they are only + // read. Thus, the different + // threads do not disturb each + // other. + // + // On the other hand, we would + // now like to write the local + // contributions to the global + // system of equations into the + // global objects. This needs + // some kind of + // synchronisation, as if we + // would not take care of the + // fact that multiple threads + // write into the matrix at the + // same time, we might be + // surprised that one threads + // reads data from the matrix + // that another thread is + // presently overwriting, or + // similar things. 
Thus, to + // make sure that only one + // thread operates on these + // objects at a time, we have + // to lock it. This is done + // using a Mutex, which is + // short for mutually + // exclusive: a thread that + // wants to write to the global + // objects acquires this lock, + // but has to wait if it is + // presently owned by another + // thread. If it has acquired + // the lock, it can be sure + // that no other thread is + // presently writing to the + // matrix, and can do so + // freely. When finished, we + // release the lock again so as + // to allow other threads to + // acquire it and write to the + // matrix. + assembler_lock.acquire (); + for (unsigned int i=0; ilock and release + // functions are no-ops, + // i.e. they return without + // doing anything. + // + // 2. In order to work + // properly, it is essential + // that all threads try to + // acquire the same lock. This, + // of course, can not be + // achieved if the lock is a + // local variable, as then each + // thread would acquire its own + // lock. Therefore, the lock + // variable is a member + // variable of the class; since + // all threads execute member + // functions of the same + // object, they have the same + // this pointer and + // therefore also operate on + // the same lock. }; } - // Following is the function that - // solves the linear system of - // equations. As the system is no - // more symmetric positive definite - // as in all the previous examples, - // we can't use the Conjugate - // Gradients method anymore. Rather, - // we use a solver that is tailored - // to nonsymmetric systems like the - // one at hand, the BiCGStab - // method. As preconditioner, we use - // the Jacobi method. + // Following is the function that + // solves the linear system of + // equations. As the system is no + // more symmetric positive definite + // as in all the previous examples, + // we can't use the Conjugate + // Gradients method anymore. Rather, + // we use a solver that is tailored + // to nonsymmetric systems like the + // one at hand, the BiCGStab + // method. As preconditioner, we use + // the Jacobi method. template void AdvectionProblem::solve () { @@ -1351,43 +1351,43 @@ namespace Step9 preconditioner.initialize(system_matrix, 1.0); bicgstab.solve (system_matrix, solution, system_rhs, - preconditioner); + preconditioner); hanging_node_constraints.distribute (solution); } - // The following function refines the - // grid according to the quantity - // described in the introduction. The - // respective computations are made - // in the class - // GradientEstimation. The only - // difference to previous examples is - // that we refine a little more - // aggressively (0.5 instead of 0.3 - // of the number of cells). + // The following function refines the + // grid according to the quantity + // described in the introduction. The + // respective computations are made + // in the class + // GradientEstimation. The only + // difference to previous examples is + // that we refine a little more + // aggressively (0.5 instead of 0.3 + // of the number of cells). 
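The synchronisation discussion above uses deal.II's Threads::Mutex with explicit acquire()/release() calls. Purely as a point of comparison, not what the patch itself does, the same "one thread at a time copies its local contributions into the global objects" pattern looks as follows with the standard library's std::mutex and a scope-bound lock; all data structures here are simplified stand-ins.

#include <mutex>
#include <vector>

// Simplified stand-ins for the global matrix and right hand side.
struct GlobalSystem
{
  std::vector<std::vector<double>> matrix;
  std::vector<double>              rhs;
  std::mutex                       assembler_mutex;  // one mutex shared by all threads
};

void copy_local_to_global (GlobalSystem                           &system,
                           const std::vector<std::vector<double>> &cell_matrix,
                           const std::vector<double>              &cell_rhs,
                           const std::vector<unsigned int>        &local_dof_indices)
{
  // Equivalent of acquire()/release(): the lock is taken here and released
  // automatically when 'guard' goes out of scope, even if an exception is
  // thrown while copying.
  std::lock_guard<std::mutex> guard (system.assembler_mutex);

  for (unsigned int i = 0; i < local_dof_indices.size(); ++i)
    {
      for (unsigned int j = 0; j < local_dof_indices.size(); ++j)
        system.matrix[local_dof_indices[i]][local_dof_indices[j]]
          += cell_matrix[i][j];
      system.rhs[local_dof_indices[i]] += cell_rhs[i];
    }
}

The scope-bound form also illustrates the second remark in the comments: because the mutex lives in the shared object rather than in a local variable, every thread contends for the same lock.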
template void AdvectionProblem::refine_grid () { Vector estimated_error_per_cell (triangulation.n_active_cells()); GradientEstimation::estimate (dof_handler, - solution, - estimated_error_per_cell); + solution, + estimated_error_per_cell); GridRefinement::refine_and_coarsen_fixed_number (triangulation, - estimated_error_per_cell, - 0.5, 0.03); + estimated_error_per_cell, + 0.5, 0.03); triangulation.execute_coarsening_and_refinement (); } - // Writing output to disk is done in - // the same way as in the previous - // examples... + // Writing output to disk is done in + // the same way as in the previous + // examples... template void AdvectionProblem::output_results (const unsigned int cycle) const { @@ -1403,39 +1403,39 @@ namespace Step9 } - // ... as is the main loop (setup -- - // solve -- refine) + // ... as is the main loop (setup -- + // solve -- refine) template void AdvectionProblem::run () { for (unsigned int cycle=0; cycle<6; ++cycle) { - std::cout << "Cycle " << cycle << ':' << std::endl; + std::cout << "Cycle " << cycle << ':' << std::endl; - if (cycle == 0) - { - GridGenerator::hyper_cube (triangulation, -1, 1); - triangulation.refine_global (4); - } - else - { - refine_grid (); - }; + if (cycle == 0) + { + GridGenerator::hyper_cube (triangulation, -1, 1); + triangulation.refine_global (4); + } + else + { + refine_grid (); + }; - std::cout << " Number of active cells: " - << triangulation.n_active_cells() - << std::endl; + std::cout << " Number of active cells: " + << triangulation.n_active_cells() + << std::endl; - setup_system (); + setup_system (); - std::cout << " Number of degrees of freedom: " - << dof_handler.n_dofs() - << std::endl; + std::cout << " Number of degrees of freedom: " + << dof_handler.n_dofs() + << std::endl; - assemble_system (); - solve (); - output_results (cycle); + assemble_system (); + solve (); + output_results (cycle); }; DataOut data_out; @@ -1449,196 +1449,196 @@ namespace Step9 - // @sect3{GradientEstimation class implementation} + // @sect3{GradientEstimation class implementation} - // Now for the implementation of the - // GradientEstimation class. The - // first function does not much - // except for delegating work to the - // other function: + // Now for the implementation of the + // GradientEstimation class. The + // first function does not much + // except for delegating work to the + // other function: template void GradientEstimation::estimate (const DoFHandler &dof_handler, - const Vector &solution, - Vector &error_per_cell) + const Vector &solution, + Vector &error_per_cell) { - // Before starting with the work, - // we check that the vector into - // which the results are written, - // has the right size. It is a - // common error that such - // parameters have the wrong size, - // but the resulting damage by not - // catching these errors are very - // subtle as they are usually - // corruption of data somewhere in - // memory. Often, the problems - // emerging from this are not - // reproducible, and we found that - // it is well worth the effort to - // check for such things. + // Before starting with the work, + // we check that the vector into + // which the results are written, + // has the right size. It is a + // common error that such + // parameters have the wrong size, + // but the resulting damage by not + // catching these errors are very + // subtle as they are usually + // corruption of data somewhere in + // memory. 
Often, the problems + // emerging from this are not + // reproducible, and we found that + // it is well worth the effort to + // check for such things. Assert (error_per_cell.size() == dof_handler.get_tria().n_active_cells(), - ExcInvalidVectorLength (error_per_cell.size(), - dof_handler.get_tria().n_active_cells())); - - // Next, we subdivide the range of - // cells into chunks of equal - // size. Just as we have used the - // function - // Threads::split_range when - // assembling above, there is a - // function that computes intervals - // of roughly equal size from a - // larger interval. This is used - // here: + ExcInvalidVectorLength (error_per_cell.size(), + dof_handler.get_tria().n_active_cells())); + + // Next, we subdivide the range of + // cells into chunks of equal + // size. Just as we have used the + // function + // Threads::split_range when + // assembling above, there is a + // function that computes intervals + // of roughly equal size from a + // larger interval. This is used + // here: const unsigned int n_threads = multithread_info.n_default_threads; std::vector index_intervals = Threads::split_interval (0, dof_handler.get_tria().n_active_cells(), - n_threads); - - // In the same way as before, we use a - // Threads::ThreadGroup object - // to collect the descriptor objects of - // different threads. Note that as the - // function called is not a member - // function, but rather a static function, - // we need not (and can not) pass a - // this pointer to the - // new_thread function in this - // case. - // - // Taking pointers to templated - // functions seems to be - // notoriously difficult for many - // compilers (since there are - // several functions with the same - // name -- just as with overloaded - // functions). It therefore happens - // quite frequently that we can't - // directly insert taking the - // address of a function in the - // call to encapsulate for one - // or the other compiler, but have - // to take a temporary variable for - // that purpose. Here, in this - // case, Compaq's cxx compiler - // choked on the code so we use - // this workaround with the - // function pointer: + n_threads); + + // In the same way as before, we use a + // Threads::ThreadGroup object + // to collect the descriptor objects of + // different threads. Note that as the + // function called is not a member + // function, but rather a static function, + // we need not (and can not) pass a + // this pointer to the + // new_thread function in this + // case. + // + // Taking pointers to templated + // functions seems to be + // notoriously difficult for many + // compilers (since there are + // several functions with the same + // name -- just as with overloaded + // functions). It therefore happens + // quite frequently that we can't + // directly insert taking the + // address of a function in the + // call to encapsulate for one + // or the other compiler, but have + // to take a temporary variable for + // that purpose. 
Here, in this + // case, Compaq's cxx compiler + // choked on the code so we use + // this workaround with the + // function pointer: Threads::ThreadGroup<> threads; void (*estimate_interval_ptr) (const DoFHandler &, - const Vector &, - const IndexInterval &, - Vector &) + const Vector &, + const IndexInterval &, + Vector &) = &GradientEstimation::template estimate_interval; for (unsigned int i=0; imultithread_info.n_default_threads - // was one, or if the library was - // not configured to use threads, - // then the sequence of commands - // above reduced to a complicated - // way to simply call the - // estimate_interval function - // with the whole range of cells to - // work on. However, using the way - // above, we are able to write the - // program such that it makes no - // difference whether we presently - // work with multiple threads or in - // single-threaded mode, thus - // eliminating the need to write - // code included in conditional - // preprocessor sections. + // Note that if the value of the + // variable + // multithread_info.n_default_threads + // was one, or if the library was + // not configured to use threads, + // then the sequence of commands + // above reduced to a complicated + // way to simply call the + // estimate_interval function + // with the whole range of cells to + // work on. However, using the way + // above, we are able to write the + // program such that it makes no + // difference whether we presently + // work with multiple threads or in + // single-threaded mode, thus + // eliminating the need to write + // code included in conditional + // preprocessor sections. } - // Following now the function that - // actually computes the finite - // difference approximation to the - // gradient. The general outline of - // the function is to loop over all - // the cells in the range of - // iterators designated by the third - // argument, and on each cell first - // compute the list of active - // neighbors of the present cell and - // then compute the quantities - // described in the introduction for - // each of the neighbors. The reason - // for this order is that it is not a - // one-liner to find a given neighbor - // with locally refined meshes. In - // principle, an optimized - // implementation would find - // neighbors and the quantities - // depending on them in one step, - // rather than first building a list - // of neighbors and in a second step - // their contributions. - // - // Now for the details: + // Following now the function that + // actually computes the finite + // difference approximation to the + // gradient. The general outline of + // the function is to loop over all + // the cells in the range of + // iterators designated by the third + // argument, and on each cell first + // compute the list of active + // neighbors of the present cell and + // then compute the quantities + // described in the introduction for + // each of the neighbors. The reason + // for this order is that it is not a + // one-liner to find a given neighbor + // with locally refined meshes. In + // principle, an optimized + // implementation would find + // neighbors and the quantities + // depending on them in one step, + // rather than first building a list + // of neighbors and in a second step + // their contributions. 
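The two workarounds described above, splitting the cell range into roughly equal chunks and naming the instantiated function through an explicitly typed pointer before spawning threads, can be illustrated with nothing but the standard library. The sketch below is an analogy rather than the deal.II code: Threads::ThreadGroup and new_thread are replaced by std::thread, and the made-up estimate_chunk stands in for estimate_interval.

#include <functional>
#include <thread>
#include <utility>
#include <vector>

// Hypothetical stand-in for GradientEstimation::estimate_interval.
template <int dim>
void estimate_chunk (const std::vector<double>                   &solution,
                     const std::pair<unsigned int, unsigned int> &range,
                     std::vector<double>                         &error_per_cell)
{
  for (unsigned int c = range.first; c < range.second; ++c)
    error_per_cell[c] = solution[c];        // placeholder for the real estimator
}

int main ()
{
  const unsigned int n_cells = 100, n_threads = 4;
  std::vector<double> solution (n_cells, 1.0), error (n_cells, 0.0);

  // Naming the instantiation through a typed function pointer first avoids
  // the template/overload deduction troubles described in the comment above.
  void (*estimate_ptr) (const std::vector<double> &,
                        const std::pair<unsigned int, unsigned int> &,
                        std::vector<double> &)
    = &estimate_chunk<2>;

  // Split [0, n_cells) into n_threads chunks of roughly equal size and hand
  // each chunk to its own thread.
  std::vector<std::thread> threads;
  for (unsigned int t = 0; t < n_threads; ++t)
    threads.emplace_back (estimate_ptr,
                          std::cref (solution),
                          std::make_pair (t * n_cells / n_threads,
                                          (t + 1) * n_cells / n_threads),
                          std::ref (error));

  // The analogue of threads.join_all():
  for (unsigned int t = 0; t < threads.size(); ++t)
    threads[t].join ();
}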
+ // + // Now for the details: template void GradientEstimation::estimate_interval (const DoFHandler &dof_handler, - const Vector &solution, - const IndexInterval &index_interval, - Vector &error_per_cell) + const Vector &solution, + const IndexInterval &index_interval, + Vector &error_per_cell) { - // First we need a way to extract - // the values of the given finite - // element function at the center - // of the cells. As usual with - // values of finite element - // functions, we use an object of - // type FEValues, and we use - // (or mis-use in this case) the - // midpoint quadrature rule to get - // at the values at the - // center. Note that the - // FEValues object only needs - // to compute the values at the - // centers, and the location of the - // quadrature points in real space - // in order to get at the vectors - // y. + // First we need a way to extract + // the values of the given finite + // element function at the center + // of the cells. As usual with + // values of finite element + // functions, we use an object of + // type FEValues, and we use + // (or mis-use in this case) the + // midpoint quadrature rule to get + // at the values at the + // center. Note that the + // FEValues object only needs + // to compute the values at the + // centers, and the location of the + // quadrature points in real space + // in order to get at the vectors + // y. QMidpoint midpoint_rule; FEValues fe_midpoint_value (dof_handler.get_fe(), - midpoint_rule, - update_values | update_quadrature_points); + midpoint_rule, + update_values | update_quadrature_points); - // Then we need space foe the - // tensor Y, which is the sum - // of outer products of the - // y-vectors. + // Then we need space foe the + // tensor Y, which is the sum + // of outer products of the + // y-vectors. Tensor<2,dim> Y; - // Then define iterators into the - // cells and into the output - // vector, which are to be looped - // over by the present instance of - // this function. We get start and - // end iterators over cells by - // setting them to the first active - // cell and advancing them using - // the given start and end - // index. Note that we can use the - // advance function of the - // standard C++ library, but that - // we have to cast the distance by - // which the iterator is to be - // moved forward to a signed - // quantity in order to avoid - // warnings by the compiler. + // Then define iterators into the + // cells and into the output + // vector, which are to be looped + // over by the present instance of + // this function. We get start and + // end iterators over cells by + // setting them to the first active + // cell and advancing them using + // the given start and end + // index. Note that we can use the + // advance function of the + // standard C++ library, but that + // we have to cast the distance by + // which the iterator is to be + // moved forward to a signed + // quantity in order to avoid + // warnings by the compiler. typename DoFHandler::active_cell_iterator cell, endc; cell = dof_handler.begin_active(); @@ -1647,435 +1647,435 @@ namespace Step9 endc = dof_handler.begin_active(); advance (endc, static_cast(index_interval.second)); - // Getting an iterator into the - // output array is simpler. We - // don't need an end iterator, as - // we always move this iterator - // forward by one element for each - // cell we are on, but stop the - // loop when we hit the end cell, - // so we need not have an end - // element for this iterator. 
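The std::advance idiom with the signed cast mentioned in the comment above is easy to try in isolation; in the sketch below a std::list plays the role of the cell range, since its iterators, like active cell iterators, are not random access and therefore need std::advance. The container and interval bounds are arbitrary.

#include <iterator>
#include <list>
#include <utility>

int main ()
{
  std::list<double> cells (10, 0.0);
  const std::pair<unsigned int, unsigned int> index_interval (2, 7);

  // Advance begin/end iterators by the (unsigned) interval bounds; the cast
  // to a signed type silences the conversion warnings mentioned above.
  std::list<double>::iterator chunk_begin = cells.begin ();
  std::advance (chunk_begin, static_cast<signed int>(index_interval.first));

  std::list<double>::iterator chunk_end = cells.begin ();
  std::advance (chunk_end, static_cast<signed int>(index_interval.second));

  // [chunk_begin, chunk_end) now covers elements 2..6.
  for (std::list<double>::iterator it = chunk_begin; it != chunk_end; ++it)
    *it = 1.0;

  return 0;
}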
+ // Getting an iterator into the + // output array is simpler. We + // don't need an end iterator, as + // we always move this iterator + // forward by one element for each + // cell we are on, but stop the + // loop when we hit the end cell, + // so we need not have an end + // element for this iterator. Vector::iterator error_on_this_cell = error_per_cell.begin() + index_interval.first; - // Then we allocate a vector to - // hold iterators to all active - // neighbors of a cell. We reserve - // the maximal number of active - // neighbors in order to avoid - // later reallocations. Note how - // this maximal number of active - // neighbors is computed here. + // Then we allocate a vector to + // hold iterators to all active + // neighbors of a cell. We reserve + // the maximal number of active + // neighbors in order to avoid + // later reallocations. Note how + // this maximal number of active + // neighbors is computed here. std::vector::active_cell_iterator> active_neighbors; active_neighbors.reserve (GeometryInfo::faces_per_cell * - GeometryInfo::max_children_per_face); + GeometryInfo::max_children_per_face); - // Well then, after all these - // preliminaries, lets start the - // computations: + // Well then, after all these + // preliminaries, lets start the + // computations: for (; cell!=endc; ++cell, ++error_on_this_cell) { - // First initialize the - // FEValues object, as well - // as the Y tensor: - fe_midpoint_value.reinit (cell); - Y.clear (); - - // Then allocate the vector - // that will be the sum over - // the y-vectors times the - // approximate directional - // derivative: - Tensor<1,dim> projected_gradient; - - - // Now before going on first - // compute a list of all active - // neighbors of the present - // cell. We do so by first - // looping over all faces and - // see whether the neighbor - // there is active, which would - // be the case if it is on the - // same level as the present - // cell or one level coarser - // (note that a neighbor can - // only be once coarser than - // the present cell, as we only - // allow a maximal difference - // of one refinement over a - // face in - // deal.II). Alternatively, the - // neighbor could be on the - // same level and be further - // refined; then we have to - // find which of its children - // are next to the present cell - // and select these (note that - // if a child of of neighbor of - // an active cell that is next - // to this active cell, needs - // necessarily be active - // itself, due to the - // one-refinement rule cited - // above). - // - // Things are slightly - // different in one space - // dimension, as there the - // one-refinement rule does not - // exist: neighboring active - // cells may differ in as many - // refinement levels as they - // like. In this case, the - // computation becomes a little - // more difficult, but we will - // explain this below. - // - // Before starting the loop - // over all neighbors of the - // present cell, we have to - // clear the array storing the - // iterators to the active - // neighbors, of course. - active_neighbors.clear (); - for (unsigned int face_no=0; face_no::faces_per_cell; ++face_no) - if (! cell->at_boundary(face_no)) - { - // First define an - // abbreviation for the - // iterator to the face - // and the neighbor - const typename DoFHandler::face_iterator - face = cell->face(face_no); - const typename DoFHandler::cell_iterator - neighbor = cell->neighbor(face_no); - - // Then check whether the - // neighbor is active. 
If - // it is, then it is on - // the same level or one - // level coarser (if we - // are not in 1D), and we - // are interested in it - // in any case. - if (neighbor->active()) - active_neighbors.push_back (neighbor); - else - { - // If the neighbor is - // not active, then - // check its - // children. - if (dim == 1) - { - // To find the - // child of the - // neighbor which - // bounds to the - // present cell, - // successively - // go to its - // right child if - // we are left of - // the present - // cell (n==0), - // or go to the - // left child if - // we are on the - // right (n==1), - // until we find - // an active - // cell. - typename DoFHandler::cell_iterator - neighbor_child = neighbor; - while (neighbor_child->has_children()) - neighbor_child = neighbor_child->child (face_no==0 ? 1 : 0); - - // As this used - // some - // non-trivial - // geometrical - // intuition, we - // might want to - // check whether - // we did it - // right, - // i.e. check - // whether the - // neighbor of - // the cell we - // found is - // indeed the - // cell we are - // presently - // working - // on. Checks - // like this are - // often useful - // and have - // frequently - // uncovered - // errors both in - // algorithms - // like the line - // above (where - // it is simple - // to - // involuntarily - // exchange - // n==1 for - // n==0 or - // the like) and - // in the library - // (the - // assumptions - // underlying the - // algorithm - // above could - // either be - // wrong, wrongly - // documented, or - // are violated - // due to an - // error in the - // library). One - // could in - // principle - // remove such - // checks after - // the program - // works for some - // time, but it - // might be a - // good things to - // leave it in - // anyway to - // check for - // changes in the - // library or in - // the algorithm - // above. - // - // Note that if - // this check - // fails, then - // this is - // certainly an - // error that is - // irrecoverable - // and probably - // qualifies as - // an internal - // error. We - // therefore use - // a predefined - // exception - // class to throw - // here. - Assert (neighbor_child->neighbor(face_no==0 ? 1 : 0)==cell, - ExcInternalError()); - - // If the check - // succeeded, we - // push the - // active - // neighbor we - // just found to - // the stack we - // keep: - active_neighbors.push_back (neighbor_child); - } - else - // If we are not in - // 1d, we collect - // all neighbor - // children - // `behind' the - // subfaces of the - // current face - for (unsigned int subface_no=0; subface_non_children(); ++subface_no) - active_neighbors.push_back ( - cell->neighbor_child_on_subface(face_no, subface_no)); - }; - }; - - // OK, now that we have all the - // neighbors, lets start the - // computation on each of - // them. First we do some - // preliminaries: find out - // about the center of the - // present cell and the - // solution at this point. The - // latter is obtained as a - // vector of function values at - // the quadrature points, of - // which there are only one, of - // course. Likewise, the - // position of the center is - // the position of the first - // (and only) quadrature point - // in real space. - const Point this_center = fe_midpoint_value.quadrature_point(0); - - std::vector this_midpoint_value(1); - fe_midpoint_value.get_function_values (solution, this_midpoint_value); - - - // Now loop over all active neighbors - // and collect the data we - // need. 
Allocate a vector just like - // this_midpoint_value which we - // will use to store the value of the - // solution in the midpoint of the - // neighbor cell. We allocate it here - // already, since that way we don't - // have to allocate memory repeatedly - // in each iteration of this inner loop - // (memory allocation is a rather - // expensive operation): - std::vector neighbor_midpoint_value(1); - typename std::vector::active_cell_iterator>::const_iterator - neighbor_ptr = active_neighbors.begin(); - for (; neighbor_ptr!=active_neighbors.end(); ++neighbor_ptr) - { - // First define an - // abbreviation for the - // iterator to the active - // neighbor cell: - const typename DoFHandler::active_cell_iterator - neighbor = *neighbor_ptr; - - // Then get the center of - // the neighbor cell and - // the value of the finite - // element function - // thereon. Note that for - // this information we - // have to reinitialize the - // FEValues object for - // the neighbor cell. - fe_midpoint_value.reinit (neighbor); - const Point neighbor_center = fe_midpoint_value.quadrature_point(0); - - fe_midpoint_value.get_function_values (solution, - neighbor_midpoint_value); - - // Compute the vector y - // connecting the centers - // of the two cells. Note - // that as opposed to the - // introduction, we denote - // by y the normalized - // difference vector, as - // this is the quantity - // used everywhere in the - // computations. - Point y = neighbor_center - this_center; - const double distance = std::sqrt(y.square()); - y /= distance; - - // Then add up the - // contribution of this - // cell to the Y matrix... - for (unsigned int i=0; iy - // which span the whole space, - // otherwise we would not have - // all components of the - // gradient. This is indicated - // by the invertability of the - // matrix. - // - // If the matrix should not be - // invertible, this means that - // the present cell had an - // insufficient number of - // active neighbors. In - // contrast to all previous - // cases, where we raised - // exceptions, this is, - // however, not a programming - // error: it is a runtime error - // that can happen in optimized - // mode even if it ran well in - // debug mode, so it is - // reasonable to try to catch - // this error also in optimized - // mode. For this case, there - // is the AssertThrow - // macro: it checks the - // condition like the - // Assert macro, but not - // only in debug mode; it then - // outputs an error message, - // but instead of terminating - // the program as in the case - // of the Assert macro, the - // exception is thrown using - // the throw command of - // C++. This way, one has the - // possibility to catch this - // error and take reasonable - // counter actions. One such - // measure would be to refine - // the grid globally, as the - // case of insufficient - // directions can not occur if - // every cell of the initial - // grid has been refined at - // least once. 
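Condensed to its arithmetic, the recovery procedure described above accumulates Y as a sum of outer products of the normalized connection vectors y, accumulates a vector z of directional differences, and then applies the inverse of Y. The self-contained 2D sketch below reproduces that arithmetic with made-up neighbor data sampled from u(x,y) = 3x + 2y, so the recovered gradient should come out as (3, 2); none of the names are deal.II types.

#include <array>
#include <cmath>
#include <cstdio>
#include <vector>

int main ()
{
  const std::array<double,2> this_center = {0.0, 0.0};
  const double               this_value  = 0.0;

  // centers and midpoint values of the active neighbors (invented numbers,
  // sampled from u(x,y) = 3x + 2y)
  const std::vector<std::array<double,2>> neighbor_centers =
    { {0.1, 0.0}, {-0.1, 0.0}, {0.0, 0.1}, {0.0, -0.1} };
  const std::vector<double> neighbor_values = {0.3, -0.3, 0.2, -0.2};

  double Y[2][2] = {{0, 0}, {0, 0}};
  double z[2]    = {0, 0};

  for (unsigned int n = 0; n < neighbor_centers.size(); ++n)
    {
      // normalized vector from this cell's center to the neighbor's center
      double y[2] = { neighbor_centers[n][0] - this_center[0],
                      neighbor_centers[n][1] - this_center[1] };
      const double distance = std::sqrt (y[0]*y[0] + y[1]*y[1]);
      y[0] /= distance;
      y[1] /= distance;

      // Y += y y^T
      for (unsigned int i = 0; i < 2; ++i)
        for (unsigned int j = 0; j < 2; ++j)
          Y[i][j] += y[i]*y[j];

      // z += y * (u_neighbor - u_this)/distance
      const double directional = (neighbor_values[n] - this_value) / distance;
      z[0] += y[0]*directional;
      z[1] += y[1]*directional;
    }

  // invert the 2x2 matrix Y and apply it to z
  const double det = Y[0][0]*Y[1][1] - Y[0][1]*Y[1][0];
  const double gradient[2] = { ( Y[1][1]*z[0] - Y[0][1]*z[1]) / det,
                               (-Y[1][0]*z[0] + Y[0][0]*z[1]) / det };

  std::printf ("recovered gradient: (%g, %g)\n", gradient[0], gradient[1]);
}

In the patch itself the same steps use Tensor<2,dim>, invert() and contract() instead of the explicit 2x2 formulas.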
- AssertThrow (determinant(Y) != 0, - ExcInsufficientDirections()); - - // If, on the other hand the - // matrix is invertible, then - // invert it, multiply the - // other quantity with it and - // compute the estimated error - // using this quantity and the - // right powers of the mesh - // width: - const Tensor<2,dim> Y_inverse = invert(Y); - - Point gradient; - contract (gradient, Y_inverse, projected_gradient); - - *error_on_this_cell = (std::pow(cell->diameter(), - 1+1.0*dim/2) * - std::sqrt(gradient.square())); + // First initialize the + // FEValues object, as well + // as the Y tensor: + fe_midpoint_value.reinit (cell); + Y.clear (); + + // Then allocate the vector + // that will be the sum over + // the y-vectors times the + // approximate directional + // derivative: + Tensor<1,dim> projected_gradient; + + + // Now before going on first + // compute a list of all active + // neighbors of the present + // cell. We do so by first + // looping over all faces and + // see whether the neighbor + // there is active, which would + // be the case if it is on the + // same level as the present + // cell or one level coarser + // (note that a neighbor can + // only be once coarser than + // the present cell, as we only + // allow a maximal difference + // of one refinement over a + // face in + // deal.II). Alternatively, the + // neighbor could be on the + // same level and be further + // refined; then we have to + // find which of its children + // are next to the present cell + // and select these (note that + // if a child of of neighbor of + // an active cell that is next + // to this active cell, needs + // necessarily be active + // itself, due to the + // one-refinement rule cited + // above). + // + // Things are slightly + // different in one space + // dimension, as there the + // one-refinement rule does not + // exist: neighboring active + // cells may differ in as many + // refinement levels as they + // like. In this case, the + // computation becomes a little + // more difficult, but we will + // explain this below. + // + // Before starting the loop + // over all neighbors of the + // present cell, we have to + // clear the array storing the + // iterators to the active + // neighbors, of course. + active_neighbors.clear (); + for (unsigned int face_no=0; face_no::faces_per_cell; ++face_no) + if (! cell->at_boundary(face_no)) + { + // First define an + // abbreviation for the + // iterator to the face + // and the neighbor + const typename DoFHandler::face_iterator + face = cell->face(face_no); + const typename DoFHandler::cell_iterator + neighbor = cell->neighbor(face_no); + + // Then check whether the + // neighbor is active. If + // it is, then it is on + // the same level or one + // level coarser (if we + // are not in 1D), and we + // are interested in it + // in any case. + if (neighbor->active()) + active_neighbors.push_back (neighbor); + else + { + // If the neighbor is + // not active, then + // check its + // children. + if (dim == 1) + { + // To find the + // child of the + // neighbor which + // bounds to the + // present cell, + // successively + // go to its + // right child if + // we are left of + // the present + // cell (n==0), + // or go to the + // left child if + // we are on the + // right (n==1), + // until we find + // an active + // cell. + typename DoFHandler::cell_iterator + neighbor_child = neighbor; + while (neighbor_child->has_children()) + neighbor_child = neighbor_child->child (face_no==0 ? 
1 : 0); + + // As this used + // some + // non-trivial + // geometrical + // intuition, we + // might want to + // check whether + // we did it + // right, + // i.e. check + // whether the + // neighbor of + // the cell we + // found is + // indeed the + // cell we are + // presently + // working + // on. Checks + // like this are + // often useful + // and have + // frequently + // uncovered + // errors both in + // algorithms + // like the line + // above (where + // it is simple + // to + // involuntarily + // exchange + // n==1 for + // n==0 or + // the like) and + // in the library + // (the + // assumptions + // underlying the + // algorithm + // above could + // either be + // wrong, wrongly + // documented, or + // are violated + // due to an + // error in the + // library). One + // could in + // principle + // remove such + // checks after + // the program + // works for some + // time, but it + // might be a + // good things to + // leave it in + // anyway to + // check for + // changes in the + // library or in + // the algorithm + // above. + // + // Note that if + // this check + // fails, then + // this is + // certainly an + // error that is + // irrecoverable + // and probably + // qualifies as + // an internal + // error. We + // therefore use + // a predefined + // exception + // class to throw + // here. + Assert (neighbor_child->neighbor(face_no==0 ? 1 : 0)==cell, + ExcInternalError()); + + // If the check + // succeeded, we + // push the + // active + // neighbor we + // just found to + // the stack we + // keep: + active_neighbors.push_back (neighbor_child); + } + else + // If we are not in + // 1d, we collect + // all neighbor + // children + // `behind' the + // subfaces of the + // current face + for (unsigned int subface_no=0; subface_non_children(); ++subface_no) + active_neighbors.push_back ( + cell->neighbor_child_on_subface(face_no, subface_no)); + }; + }; + + // OK, now that we have all the + // neighbors, lets start the + // computation on each of + // them. First we do some + // preliminaries: find out + // about the center of the + // present cell and the + // solution at this point. The + // latter is obtained as a + // vector of function values at + // the quadrature points, of + // which there are only one, of + // course. Likewise, the + // position of the center is + // the position of the first + // (and only) quadrature point + // in real space. + const Point this_center = fe_midpoint_value.quadrature_point(0); + + std::vector this_midpoint_value(1); + fe_midpoint_value.get_function_values (solution, this_midpoint_value); + + + // Now loop over all active neighbors + // and collect the data we + // need. Allocate a vector just like + // this_midpoint_value which we + // will use to store the value of the + // solution in the midpoint of the + // neighbor cell. 
We allocate it here + // already, since that way we don't + // have to allocate memory repeatedly + // in each iteration of this inner loop + // (memory allocation is a rather + // expensive operation): + std::vector neighbor_midpoint_value(1); + typename std::vector::active_cell_iterator>::const_iterator + neighbor_ptr = active_neighbors.begin(); + for (; neighbor_ptr!=active_neighbors.end(); ++neighbor_ptr) + { + // First define an + // abbreviation for the + // iterator to the active + // neighbor cell: + const typename DoFHandler::active_cell_iterator + neighbor = *neighbor_ptr; + + // Then get the center of + // the neighbor cell and + // the value of the finite + // element function + // thereon. Note that for + // this information we + // have to reinitialize the + // FEValues object for + // the neighbor cell. + fe_midpoint_value.reinit (neighbor); + const Point neighbor_center = fe_midpoint_value.quadrature_point(0); + + fe_midpoint_value.get_function_values (solution, + neighbor_midpoint_value); + + // Compute the vector y + // connecting the centers + // of the two cells. Note + // that as opposed to the + // introduction, we denote + // by y the normalized + // difference vector, as + // this is the quantity + // used everywhere in the + // computations. + Point y = neighbor_center - this_center; + const double distance = std::sqrt(y.square()); + y /= distance; + + // Then add up the + // contribution of this + // cell to the Y matrix... + for (unsigned int i=0; iy + // which span the whole space, + // otherwise we would not have + // all components of the + // gradient. This is indicated + // by the invertability of the + // matrix. + // + // If the matrix should not be + // invertible, this means that + // the present cell had an + // insufficient number of + // active neighbors. In + // contrast to all previous + // cases, where we raised + // exceptions, this is, + // however, not a programming + // error: it is a runtime error + // that can happen in optimized + // mode even if it ran well in + // debug mode, so it is + // reasonable to try to catch + // this error also in optimized + // mode. For this case, there + // is the AssertThrow + // macro: it checks the + // condition like the + // Assert macro, but not + // only in debug mode; it then + // outputs an error message, + // but instead of terminating + // the program as in the case + // of the Assert macro, the + // exception is thrown using + // the throw command of + // C++. This way, one has the + // possibility to catch this + // error and take reasonable + // counter actions. One such + // measure would be to refine + // the grid globally, as the + // case of insufficient + // directions can not occur if + // every cell of the initial + // grid has been refined at + // least once. 
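As a plain-C++ analogy for the Assert/AssertThrow distinction drawn above: assert() from <cassert> vanishes when NDEBUG is defined, roughly matching a check that is active only in debug mode, while an explicit throw stays in every build, which is the behaviour wanted for the singular-Y case. The function below is illustrative only, and the data in main are made up.

#include <cassert>
#include <stdexcept>

// Invert the 2x2 matrix Y and apply it to z, refusing to continue if Y is
// singular. assert() documents a programming-error check (compiled out with
// -DNDEBUG); the throw is a genuine run-time failure that survives optimization.
void invert_or_fail (const double Y[2][2], const double z[2], double gradient[2])
{
  // debug-only consistency check: Y is a sum of outer products, hence symmetric
  assert (Y[0][1] == Y[1][0]);

  const double det = Y[0][0]*Y[1][1] - Y[0][1]*Y[1][0];
  if (det == 0)                             // always-on run-time check
    throw std::runtime_error ("insufficient directions: Y is singular");

  gradient[0] = ( Y[1][1]*z[0] - Y[0][1]*z[1]) / det;
  gradient[1] = (-Y[1][0]*z[0] + Y[0][0]*z[1]) / det;
}

int main ()
{
  const double Y[2][2] = {{2, 0}, {0, 2}}, z[2] = {6, 4};
  double gradient[2];
  try
    {
      invert_or_fail (Y, z, gradient);
    }
  catch (const std::runtime_error &)
    {
      // a caller could react here, e.g. by refining the mesh globally
      return 1;
    }
  return 0;
}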
+ AssertThrow (determinant(Y) != 0, + ExcInsufficientDirections()); + + // If, on the other hand the + // matrix is invertible, then + // invert it, multiply the + // other quantity with it and + // compute the estimated error + // using this quantity and the + // right powers of the mesh + // width: + const Tensor<2,dim> Y_inverse = invert(Y); + + Point gradient; + contract (gradient, Y_inverse, projected_gradient); + + *error_on_this_cell = (std::pow(cell->diameter(), + 1+1.0*dim/2) * + std::sqrt(gradient.square())); }; } } - // @sect3{Main function} + // @sect3{Main function} - // The main function is exactly - // like in previous examples, with - // the only difference in the name of - // the main class that actually does - // the computation. + // The main function is exactly + // like in previous examples, with + // the only difference in the name of + // the main class that actually does + // the computation. int main () { try @@ -2088,24 +2088,24 @@ int main () catch (std::exception &exc) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Exception on processing: " << std::endl - << exc.what() << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << exc.what() << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; } catch (...) { std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; + << "----------------------------------------------------" + << std::endl; std::cerr << "Unknown exception!" << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; return 1; }; -- 2.39.5