From: Wolfgang Bangerth Date: Tue, 23 May 2006 15:29:08 +0000 (+0000) Subject: Replace by and '' by X-Git-Tag: v8.0.0~11721 X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=57728b0c8de2f0b1ad6ebe251d6730b563b79136;p=dealii.git Replace by and '' by git-svn-id: https://svn.dealii.org/trunk@13131 0785d39b-7218-0410-832d-ea1e28bc413d --- diff --git a/deal.II/examples/step-1/step-1.cc b/deal.II/examples/step-1/step-1.cc index 2915e1ac35..624bb7099f 100644 --- a/deal.II/examples/step-1/step-1.cc +++ b/deal.II/examples/step-1/step-1.cc @@ -12,7 +12,7 @@ // @sect3{Include files} // The most fundamental class in the - // library is the ``Triangulation'' + // library is the Triangulation // class, which is declared here: #include // We need the following two includes @@ -79,7 +79,7 @@ void first_grid () // Now we want to write a graphical // representation of the mesh to an - // output file. The ``GridOut'' + // output file. The GridOut // class of deal.II can do that in // a number of different output // formats; here, we choose @@ -183,11 +183,11 @@ void second_grid () // marked for further // refinement, obviously). By // convention, we almost always - // use the names ``cell'' and - // ``endc'' for the iterator + // use the names cell and + // endc for the iterator // pointing to the present cell // and to the - // ``one-past-the-end'' + // one-past-the-end // iterator: Triangulation<2>::active_cell_iterator cell = triangulation.begin_active(), @@ -213,13 +213,13 @@ void second_grid () // by which we find out about // the number of vertices of // a cell. Using the - // ``GeometryInfo'' class, we + // GeometryInfo class, we // will later have an easier // time getting the program // to also run in 3d: we only // have to change all - // occurrences of ``@<2@>'' to - // ``@<3@>'', and do not have + // occurrences of @<2@> to + // @<3@>, and do not have // to audit our code for the // hidden appearance of magic // numbers like a 4 that diff --git a/deal.II/examples/step-10/step-10.cc b/deal.II/examples/step-10/step-10.cc index 72d4a3443c..f0ce11e6c2 100644 --- a/deal.II/examples/step-10/step-10.cc +++ b/deal.II/examples/step-10/step-10.cc @@ -29,7 +29,7 @@ #include // This is the only new one: in it, - // we declare the ``MappingQ'' class + // we declare the MappingQ class // which we will use for polynomial // mappings of arbitrary order: #include @@ -52,7 +52,7 @@ // range of the number of digits // which a double variable can hold, // we rather declare the reference - // value as a ``long double'' and + // value as a long double and // give it a number of extra digits: const long double pi = 3.141592653589793238462643; @@ -78,7 +78,7 @@ const long double pi = 3.141592653589793238462643; // generates a triangulation of a // circle (hyperball) and outputs the // Qp mapping of its cells for - // different values of ``p''. Then, + // different values of p. Then, // we refine the grid once and do so // again. template @@ -129,14 +129,14 @@ void gnuplot_output() // reasonable character sets // nowadays), but also assumes // that the increment - // ``refinement'' is less than + // refinement is less than // ten. This is therefore more // a quick hack if we know // exactly the values which the // increment can assume. A // better implementation would // use the - // ``std::istringstream'' + // std::istringstream // class to generate a name. 
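// As a self-contained aside on the string-stream approach suggested just
// above (nothing here is library code; the function name make_filename is
// invented for illustration): building a file name is a job for an output
// string stream, and it works for any number of digits in the increment.
#include <sstream>
#include <string>
#include <iostream>

std::string make_filename (const unsigned int refinement)
{
  std::ostringstream name;
  name << "ball_" << refinement;      // no single-digit restriction here
  return name.str ();
}

int main ()
{
  std::cout << make_filename (3) << std::endl;   // prints "ball_3"
}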
std::string filename_base = "ball"; filename_base += '0'+refinement; @@ -150,7 +150,7 @@ void gnuplot_output() // For this, first set up // an object describing the // mapping. This is done - // using the ``MappingQ'' + // using the MappingQ // class, which takes as // argument to the // constructor the @@ -161,20 +161,20 @@ void gnuplot_output() // fact: if you want a // piecewise linear // mapping, then you could - // give a value of ``1'' to + // give a value of 1 to // the // constructor. However, // for linear mappings, so // many things can be // generated simpler that // there is another class, - // called ``MappingQ1'' + // called MappingQ1 // which does exactly the // same is if you gave an - // degree of ``1'' to the - // ``MappingQ'' class, but + // degree of 1 to the + // MappingQ class, but // does so significantly - // faster. ``MappingQ1'' is + // faster. MappingQ1 is // also the class that is // implicitly used // throughout the library @@ -240,7 +240,7 @@ void gnuplot_output() // object. This argument // has a default value, and // if no value is given a - // simple ``MappingQ1'' + // simple MappingQ1 // object is taken, which // we briefly described // above. This would then @@ -281,8 +281,8 @@ void gnuplot_output() // provides the corresponding `JxW' // values of each cell. (Note that // `JxW' is meant to abbreviate - // ``Jacobian determinant times - // weight''; since in numerical + // Jacobian determinant times + // weight; since in numerical // quadrature the two factors always // occur at the same places, we only // offer the combined quantity, @@ -391,7 +391,7 @@ void compute_pi_by_area () // tells the FEValues object // that it needs not compute // other quantities upon - // calling the ``reinit'' + // calling the reinit // function, thus saving // computation time. // @@ -489,12 +489,12 @@ void compute_pi_by_area () // long double) function // implemented. Note that // this also concerns the - // second call as the ``fabs'' - // function in the ``std'' + // second call as the fabs + // function in the std // namespace is overloaded on // its argument types, so there // exists a version taking - // and returning a ``long double'', + // and returning a long double, // in contrast to the global // namespace where only one such // function is declared (which diff --git a/deal.II/examples/step-11/step-11.cc b/deal.II/examples/step-11/step-11.cc index 4f1c21305e..c57edd2efe 100644 --- a/deal.II/examples/step-11/step-11.cc +++ b/deal.II/examples/step-11/step-11.cc @@ -40,7 +40,7 @@ // Just this one is new: it declares // a class - // ``CompressedSparsityPattern'', + // CompressedSparsityPattern, // which we will use and explain // further down below. #include @@ -63,9 +63,9 @@ // class looks rather the same, with // the sole structural difference // that the functions - // ``assemble_system'' now calls - // ``solve'' itself, and is thus - // called ``assemble_and_solve'', and + // assemble_system now calls + // solve itself, and is thus + // called assemble_and_solve, and // that the output function was // dropped since the solution // function is so boring that it is @@ -114,7 +114,7 @@ class LaplaceProblem // Construct such an object, by // initializing the variables. Here, // we use linear finite elements (the - // argument to the ``fe'' variable + // argument to the fe variable // denotes the polynomial degree), // and mappings of given order. Print // to screen what we are about to do. 
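// To make the remark in the step-10 hunk above about std::fabs concrete
// (the std namespace provides an overload taking and returning long double,
// whereas the global fabs only handles double), here is a small stand-alone
// example with made-up numbers, assuming only the C++ standard library:
#include <cmath>
#include <iostream>

int main ()
{
  const long double pi     = 3.141592653589793238462643L;
  const long double approx = 3.14159L;
  // std::fabs selects the long double overload, so the difference is
  // computed in long double precision throughout:
  std::cout << std::fabs (pi - approx) << std::endl;
}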
@@ -135,7 +135,7 @@ LaplaceProblem::LaplaceProblem (const unsigned int mapping_degree) : // The first task is to set up the // variables for this problem. This // includes generating a valid - // ``DoFHandler'' object, as well as + // DoFHandler object, as well as // the sparsity patterns for the // matrix, and the object // representing the constraints that @@ -162,9 +162,9 @@ void LaplaceProblem::setup_system () // this, we first want a list of // those nodes which are actually // at the boundary. The - // ``DoFTools'' class has a + // DoFTools class has a // function that returns an array - // of boolean values where ``true'' + // of boolean values where true // indicates that the node is at // the boundary. The second // argument denotes a mask @@ -174,7 +174,7 @@ void LaplaceProblem::setup_system () // have a scalar finite element // anyway, this mask consists of // only one entry, and its value - // must be ``true''. + // must be true. std::vector boundary_dofs (dof_handler.n_dofs(), false); DoFTools::extract_boundary_dofs (dof_handler, std::vector(1,true), boundary_dofs); @@ -188,8 +188,8 @@ void LaplaceProblem::setup_system () // first pick out the first // boundary node from this list. We // do that by searching for the - // first ``true'' value in the - // array (note that ``std::find'' + // first true value in the + // array (note that std::find // returns an iterator to this // element), and computing its // distance to the overall first @@ -209,7 +209,7 @@ void LaplaceProblem::setup_system () // computation on a once coarser // grid), then add this one line // constraining the - // ``first_boundary_dof'' to the + // first_boundary_dof to the // sum of other boundary DoFs each // with weight -1. Finally, close // the constraints object, i.e. do @@ -228,7 +228,7 @@ void LaplaceProblem::setup_system () // sparsity pattern. This is indeed // a tricky task here. Usually, we // just call - // ``DoFTools::make_sparsity_pattern'' + // DoFTools::make_sparsity_pattern // and condense the result using // the hanging node constraints. We // have no hanging node constraints @@ -237,7 +237,7 @@ void LaplaceProblem::setup_system () // we have this global constraint // on the boundary. This poses one // severe problem in this context: - // the ``SparsityPattern'' class + // the SparsityPattern class // wants us to state beforehand the // maximal number of entries per // row, either for all rows or for @@ -246,7 +246,7 @@ void LaplaceProblem::setup_system () // can tell you this number in case // you just have hanging node // constraints (namely - // ``DoFHandler::max_coupling_between_dofs''), + // DoFHandler::max_coupling_between_dofs), // but how is this for the present // case? The difficulty arises // because the elimination of the @@ -264,7 +264,7 @@ void LaplaceProblem::setup_system () // given that allows allocation of // only a reasonable amount of // memory, there is a class - // ``CompressedSparsityPattern'', + // CompressedSparsityPattern, // that can help us out here. 
It // does not require that we know in // advance how many entries rows @@ -285,7 +285,7 @@ void LaplaceProblem::setup_system () // initializing it with the // dimensions of the matrix and // calling another function - // ``DoFTools::make_sparsity_pattern'' + // DoFTools::make_sparsity_pattern // to get the sparsity pattern due // to the differential operator, // then condense it with the @@ -302,34 +302,34 @@ void LaplaceProblem::setup_system () // Finally, once we have the full // pattern, we can initialize an // object of type - // ``SparsityPattern'' from it and + // SparsityPattern from it and // in turn initialize the matrix // with it. Note that this is // actually necessary, since the - // ``CompressedSparsityPattern'' is + // CompressedSparsityPattern is // so inefficient compared to the - // ``SparsityPattern'' class due to + // SparsityPattern class due to // the more flexible data // structures it has to use, that // we can impossibly base the // sparse matrix class on it, but // rather need an object of type - // ``SparsityPattern'', which we + // SparsityPattern, which we // generate by copying from the // intermediate object. // // As a further sidenote, you will // notice that we do not explicitly - // have to ``compress'' the + // have to compress the // sparsity pattern here. This, of // course, is due to the fact that - // the ``copy_from'' function + // the copy_from function // generates a compressed object // right from the start, to which // you cannot add new entries - // anymore. The ``compress'' call + // anymore. The compress call // is therefore implicit in the - // ``copy_from'' call. + // copy_from call. sparsity_pattern.copy_from (csp); system_matrix.reinit (sparsity_pattern); } @@ -378,7 +378,7 @@ void LaplaceProblem::assemble_and_solve () // of a right hand side vector from // body or boundary forces. They // take the mapping object, the - // ``DoFHandler'' object + // DoFHandler object // representing the degrees of // freedom and the finite element // in use, a quadrature formula to @@ -418,8 +418,8 @@ void LaplaceProblem::assemble_and_solve () // operator. For this reason, there // are quite a large number of // variants of these functions in - // the ``MatrixCreator'' and - // ``MatrixTools'' + // the MatrixCreator and + // MatrixTools // classes. Whenever you need a // slightly different version of // these functions than the ones @@ -447,7 +447,7 @@ void LaplaceProblem::assemble_and_solve () // mentioned above) has to be at // least 2, this makes up for the // formula above computing - // ``gauss_degree''. + // gauss_degree. // // Since the generation of the body // force contributions to the right @@ -479,9 +479,9 @@ void LaplaceProblem::assemble_and_solve () // later add them together. 
The // reason we had to do so is that // the - // ``VectorTools::create_right_hand_side'' + // VectorTools::create_right_hand_side // and - // ``VectorTools::create_boundary_right_hand_side'' + // VectorTools::create_boundary_right_hand_side // functions first clear the output // vector, rather than adding up // their results to previous @@ -521,7 +521,7 @@ void LaplaceProblem::assemble_and_solve () // a function in the library that // does this, although in a // slightly non-obvious way: the - // ``VectorTools::integrate_difference'' + // VectorTools::integrate_difference // function integrates the norm of // the difference between a finite // element function and a @@ -537,7 +537,7 @@ void LaplaceProblem::assemble_and_solve () // (which we make us of here), and // the one which we have used in // previous examples which - // implicitly uses ``MappingQ1''. + // implicitly uses MappingQ1. // Also note that we take a // quadrature formula of one degree // higher, in order to avoid @@ -662,11 +662,11 @@ int main () // mappings of linear through // cubic mappings. Note that // since we need the object of - // type ``LaplaceProblem@<2@>'' + // type LaplaceProblem@<2@> // only once, we do not even // name it, but create an // unnamed such object and call - // the ``run'' function of it, + // the run function of it, // subsequent to which it is // immediately destroyed again. for (unsigned int mapping_degree=1; mapping_degree<=3; ++mapping_degree) diff --git a/deal.II/examples/step-12/step-12.cc b/deal.II/examples/step-12/step-12.cc index 755dcbaeb5..4dc5df5ce5 100644 --- a/deal.II/examples/step-12/step-12.cc +++ b/deal.II/examples/step-12/step-12.cc @@ -32,13 +32,13 @@ #include // This is the first new file. It - // declares the ``MappingQ1'' class + // declares the MappingQ1 class // that gives the standard bilinear // mapping. For bilinear mappings use // an object of this class rather // than an object of the - // ``MappingQ(1)'' class, as the - // ``MappingQ1'' class is optimized + // MappingQ(1) class, as the + // MappingQ1 class is optimized // due to the pre-knowledge of the // actual polynomial degree 1. #include @@ -50,7 +50,7 @@ // programs -- there isn't much user // interaction with finite element // classes at all: the are passed to - // ``DoFHandler'' and ``FEValues'' + // DoFHandler and FEValues // objects, and that is about it. #include // We are going to use the simplest @@ -69,7 +69,7 @@ // refinement indicator. #include // Finally we do some time comparison - // using the ``Timer'' class. + // using the Timer class. #include // And this again is C++: @@ -81,10 +81,10 @@ // // First we define the classes // representing the equation-specific - // functions. Both classes, ``RHS'' - // and ``BoundaryValues'', are - // derived from the ``Function'' - // class. Only the ``value_list'' + // functions. Both classes, RHS + // and BoundaryValues, are + // derived from the Function + // class. Only the value_list // function are implemented because // only lists of function values are // computed rather than single @@ -109,18 +109,18 @@ class BoundaryValues: public Function }; - // The class ``Beta'' represents the + // The class Beta represents the // vector valued flow field of the // linear transport equation and is - // not derived from the ``Function'' + // not derived from the Function // class as we prefer to get function - // values of type ``Point'' rather + // values of type Point rather // than of type - // ``Vector@''. This, because + // Vector@. 
This, because // there exist scalar products - // between ``Point'' and ``Point'' as - // well as between ``Point'' and - // ``Tensor'', simplifying terms like + // between Point and Point as + // well as between Point and + // Tensor, simplifying terms like // $\beta\cdot n$ and // $\beta\cdot\nabla v$. // @@ -142,7 +142,7 @@ class Beta // The implementation of the - // ``value_list'' functions of these + // value_list functions of these // classes are rather simple. For // simplicity the right hand side is // set to be zero but will be @@ -216,14 +216,14 @@ void BoundaryValues::value_list(const std::vector > &points, // Next we define the // equation-dependent and // DG-method-dependent class - // ``DGTransportEquation''. Its + // DGTransportEquation. Its // member functions were already // mentioned in the Introduction and // will be explained // below. Furthermore it includes // objects of the previously defined - // ``Beta'', ``RHS'' and - // ``BoundaryValues'' function + // Beta, RHS and + // BoundaryValues function // classes. template class DGTransportEquation @@ -268,15 +268,15 @@ DGTransportEquation::DGTransportEquation () // @sect4{Function: assemble_cell_term} // - // The ``assemble_cell_term'' + // The assemble_cell_term // function assembles the cell terms // of the discretization. - // ``ui_vi_matrix'' is a cell matrix, + // ui_vi_matrix is a cell matrix, // i.e. for a DG method of degree 1, // it is of size 4 times 4, and - // ``cell_vector'' is of size 4. + // cell_vector is of size 4. // When this function is invoked, - // ``fe_v'' is already reinit'ed with the + // fe_v is already reinit'ed with the // current cell before and includes // all shape values needed. template @@ -285,12 +285,12 @@ void DGTransportEquation::assemble_cell_term( FullMatrix &ui_vi_matrix, Vector &cell_vector) const { - // First we ask ``fe_v'' for the + // First we ask fe_v for the // quadrature weights, const std::vector &JxW = fe_v.get_JxW_values (); // Then the flow field beta and the - // ``rhs_function'' are evaluated at + // rhs_function are evaluated at // the quadrature points, std::vector > beta (fe_v.n_quadrature_points); std::vector rhs (fe_v.n_quadrature_points); @@ -317,10 +317,10 @@ void DGTransportEquation::assemble_cell_term( // @sect4{Function: assemble_boundary_term} // - // The ``assemble_boundary_term'' + // The assemble_boundary_term // function assembles the face terms // at boundary faces. When this - // function is invoked, ``fe_v'' is + // function is invoked, fe_v is // already reinit'ed with the current // cell and current face. Hence it // provides the shape values on that @@ -333,7 +333,7 @@ void DGTransportEquation::assemble_boundary_term( { // Again, as in the previous // function, we ask the - // ``FEValues'' object for the + // FEValues object for the // quadrature weights const std::vector &JxW = fe_v.get_JxW_values (); // but here also for the normals. @@ -380,7 +380,7 @@ void DGTransportEquation::assemble_boundary_term( // @sect4{Function: assemble_face_term1} // - // The ``assemble_face_term1'' + // The assemble_face_term1 // function assembles the face terms // corresponding to the first version // of the DG method, cf. above. For @@ -389,7 +389,7 @@ void DGTransportEquation::assemble_boundary_term( // all cell boundaries. 
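// The boundary and face term functions of this DG scheme are built around
// one upwind decision: depending on the sign of beta*n, either the interior
// trace of u or the exterior (neighbor or boundary) value enters the face
// integral. A toy, library-free sketch of just that decision; the function
// name and numbers are invented for illustration only:
#include <iostream>

double upwind_value (const double beta_dot_n,
                     const double u_interior,
                     const double u_exterior)
{
  return (beta_dot_n > 0 ? u_interior : u_exterior);
}

int main ()
{
  std::cout << upwind_value (+0.5, 1., 0.) << ' '        // outflow face: 1
            << upwind_value (-0.5, 1., 0.) << std::endl; // inflow face:  0
}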
// // When this function is invoked, - // ``fe_v'' and ``fe_v_neighbor'' are + // fe_v and fe_v_neighbor are // already reinit'ed with the current // cell and the neighoring cell, // respectively, as well as with the @@ -398,15 +398,15 @@ void DGTransportEquation::assemble_boundary_term( // on the face. // // In addition to the cell matrix - // ``ui_vi_matrix'' this function + // ui_vi_matrix this function // gets a new argument - // ``ue_vi_matrix'', that stores + // ue_vi_matrix, that stores // contributions to the system matrix // that are based on exterior values // of $u$ and interior values of - // $v$. Here we note that ``ue'' is - // the short notation for ``u - // exterior'' and represents $u_h^-$, + // $v$. Here we note that ue is + // the short notation for u + // exterior and represents $u_h^-$, // see the introduction. template void DGTransportEquation::assemble_face_term1( @@ -462,7 +462,7 @@ void DGTransportEquation::assemble_face_term1( // @sect4{Function: assemble_face_term2} // // Now we look at the - // ``assemble_face_term2'' function + // assemble_face_term2 function // that assembles the face terms // corresponding to the second // version of the DG method, @@ -470,8 +470,8 @@ void DGTransportEquation::assemble_face_term1( // terms are given as a sum of // integrals over all faces. Here we // need two additional cell matrices - // ``ui_ve_matrix'' and - // ``ue_ve_matrix'' that will store + // ui_ve_matrix and + // ue_ve_matrix that will store // contributions due to terms // involving ui and ve as well as ue // and ve. @@ -548,7 +548,7 @@ void DGTransportEquation::assemble_face_term2( // After these preparations, we // proceed with the main part of this // program. The main class, here - // called ``DGMethod'' is basically + // called DGMethod is basically // the main class of step-6. One of // the differences is that there's no // ConstraintMatrix object. This is, @@ -601,8 +601,8 @@ class DGMethod // solutions to the problems // corresponding to the two // different assembling routines - // ``assemble_system1'' and - // ``assemble_system2''; + // assemble_system1 and + // assemble_system2; Vector solution1; Vector solution2; Vector right_hand_side; @@ -677,51 +677,51 @@ void DGMethod::setup_system () // @sect4{Function: assemble_system1} // // We proceed with the - // ``assemble_system1'' function that + // assemble_system1 function that // implements the DG discretization // in its first version. This // function repeatedly calls the - // ``assemble_cell_term'', - // ``assemble_boundary_term'' and - // ``assemble_face_term1'' functions - // of the ``DGTransportEquation'' + // assemble_cell_term, + // assemble_boundary_term and + // assemble_face_term1 functions + // of the DGTransportEquation // object. The - // ``assemble_boundary_term'' covers + // assemble_boundary_term covers // the first case mentioned in the // introduction. // // 1. face is at boundary // // This function takes a - // ``FEFaceValues'' object as + // FEFaceValues object as // argument. In contrast to that - // ``assemble_face_term1'' - // takes two ``FEFaceValuesBase'' + // assemble_face_term1 + // takes two FEFaceValuesBase // objects; one for the shape // functions on the current cell and // the other for shape functions on // the neighboring cell under // consideration. 
Both objects are - // either of class ``FEFaceValues'' - // or of class ``FESubfaceValues'' + // either of class FEFaceValues + // or of class FESubfaceValues // (both derived from - // ``FEFaceValuesBase'') according to + // FEFaceValuesBase) according to // the remaining cases mentioned // in the introduction: // // 2. neighboring cell is finer - // (current cell: ``FESubfaceValues'', - // neighboring cell: ``FEFaceValues''); + // (current cell: FESubfaceValues, + // neighboring cell: FEFaceValues); // // 3. neighboring cell is of the same // refinement level (both, current // and neighboring cell: - // ``FEFaceValues''); + // FEFaceValues); // // 4. neighboring cell is coarser - // (current cell: ``FEFaceValues'', + // (current cell: FEFaceValues, // neighboring cell: - // ``FESubfaceValues''). + // FESubfaceValues). // // If we considered globally refined // meshes then only case 3 would @@ -738,9 +738,9 @@ void DGMethod::assemble_system1 () std::vector dofs_neighbor (dofs_per_cell); // First we create the - // ``update_flags'' for the - // ``FEValues'' and the - // ``FEFaceValues'' objects. + // update_flags for the + // FEValues and the + // FEFaceValues objects. const UpdateFlags update_flags = update_values | update_gradients | update_q_points @@ -764,27 +764,27 @@ void DGMethod::assemble_system1 () // vectors of the current cell. const UpdateFlags neighbor_face_update_flags = update_values; - // Then we create the ``FEValues'' + // Then we create the FEValues // object. Note, that since version // 3.2.0 of deal.II the constructor // of this class takes a - // ``Mapping'' object as first + // Mapping object as first // argument. Although the - // constructor without ``Mapping'' + // constructor without Mapping // argument is still supported it // is recommended to use the new // constructor. This reduces the // effect of `hidden magic' (the // old constructor implicitely - // assumes a ``MappingQ1'' mapping) + // assumes a MappingQ1 mapping) // and makes it easier to change // the mapping object later. FEValues fe_v ( mapping, fe, quadrature, update_flags); // Similarly we create the - // ``FEFaceValues'' and - // ``FESubfaceValues'' objects for + // FEFaceValues and + // FESubfaceValues objects for // both, the current and the // neighboring cell. Within the // following nested loop over all @@ -805,7 +805,7 @@ void DGMethod::assemble_system1 () // and vectors. Here we need two // cell matrices, both for face // terms that include test - // functions ``vi'' (internal shape + // functions vi (internal shape // functions, i.e. shape functions // of the current cell). To be more // precise, the first matrix will @@ -832,26 +832,26 @@ void DGMethod::assemble_system1 () for (;cell!=endc; ++cell) { // In the - // ``assemble_face_term1'' + // assemble_face_term1 // function contributions to // the cell matrices and the // cell vector are only // ADDED. Therefore on each // cell we need to reset the - // ``ui_vi_matrix'' and - // ``cell_vector'' to zero, + // ui_vi_matrix and + // cell_vector to zero, // before assembling the cell terms. ui_vi_matrix = 0; cell_vector = 0; - // Now we reinit the ``FEValues'' + // Now we reinit the FEValues // object for the current cell fe_v.reinit (cell); // and call the function // that assembles the cell // terms. The first argument is - // the ``FEValues'' that was + // the FEValues that was // previously reinit'ed on the // current cell. 
dg.assemble_cell_term(fe_v, @@ -872,7 +872,7 @@ void DGMethod::assemble_system1 () typename DoFHandler::face_iterator face=cell->face(face_no); // and clear the - // ``ue_vi_matrix'' on each + // ue_vi_matrix on each // face. ue_vi_matrix = 0; @@ -885,7 +885,7 @@ void DGMethod::assemble_system1 () if (face->at_boundary()) { // We reinit the - // ``FEFaceValues'' + // FEFaceValues // object to the // current face fe_v_face.reinit (cell, face_no); @@ -931,7 +931,7 @@ void DGMethod::assemble_system1 () // note that the // following part of // code will not work - // for ``dim==1''. + // for dim==1. if (face->has_children()) { // First we store @@ -944,7 +944,7 @@ void DGMethod::assemble_system1 () // neighbor-@>neighbor(neighbor2) // equals the // current cell - // ``cell''. + // cell. const unsigned int neighbor2= cell->neighbor_of_neighbor(face_no); @@ -957,7 +957,7 @@ void DGMethod::assemble_system1 () // and set the // cell // iterator - // ``neighbor_child'' + // neighbor_child // to the cell // placed // `behind' the @@ -986,12 +986,12 @@ void DGMethod::assemble_system1 () // We need to // reset the - // ``ue_vi_matrix'' + // ue_vi_matrix // on each // subface // because on // each subface - // the ``un'' + // the un // belong to // different // neighboring @@ -1005,7 +1005,7 @@ void DGMethod::assemble_system1 () // case (case // 2) we employ // the - // ``FESubfaceValues'' + // FESubfaceValues // of the // current // cell (here @@ -1037,7 +1037,7 @@ void DGMethod::assemble_system1 () // and // distribute - // ``ue_vi_matrix'' + // ue_vi_matrix // to the // system_matrix for (unsigned int i=0; i::assemble_system1 () system_matrix.add(dofs[i], dofs_neighbor[k], ue_vi_matrix(i,k)); } - // End of ``if - // (face-@>has_children())'' + // End of if + // (face-@>has_children()) } else { @@ -1073,7 +1073,7 @@ void DGMethod::assemble_system1 () // We reinit // the - // ``FEFaceValues'' + // FEFaceValues // of the // current and // neighboring @@ -1090,10 +1090,10 @@ void DGMethod::assemble_system1 () fe_v_face_neighbor, ui_vi_matrix, ue_vi_matrix); - // End of ``if + // End of if // (neighbor-@>level() // == - // cell-@>level())'' + // cell-@>level()) } else { @@ -1134,7 +1134,7 @@ void DGMethod::assemble_system1 () // Reinit the // appropriate - // ``FEFaceValues'' + // FEFaceValues // and assemble // the face // terms. @@ -1151,24 +1151,24 @@ void DGMethod::assemble_system1 () // Now we get the // dof indices of // the - // ``neighbor_child'' + // neighbor_child // cell, neighbor->get_dof_indices (dofs_neighbor); // and distribute the - // ``ue_vi_matrix''. + // ue_vi_matrix. for (unsigned int i=0; iface not at boundary: } // End of loop over all faces: } // Finally we distribute the - // ``ui_vi_matrix'' + // ui_vi_matrix for (unsigned int i=0; i::assemble_system1 () // @sect4{Function: assemble_system2} // // We proceed with the - // ``assemble_system2'' function that + // assemble_system2 function that // implements the DG discretization // in its second version. This // function is very similar to the - // ``assemble_system1'' + // assemble_system1 // function. Therefore, here we only // discuss the differences between // the two functions. 
This function // repeatedly calls the - // ``assemble_face_term2'' function + // assemble_face_term2 function // of the DGTransportEquation object, // that assembles the face terms // written as a sum of integrals over @@ -1236,7 +1236,7 @@ void DGMethod::assemble_system2 () const UpdateFlags neighbor_face_update_flags = update_values; // Here we do not need - // ``fe_v_face_neighbor'' as case 4 + // fe_v_face_neighbor as case 4 // does not occur. FEValues fe_v ( mapping, fe, quadrature, update_flags); @@ -1254,7 +1254,7 @@ void DGMethod::assemble_system2 () // Additionally we need the // following two cell matrices, // both for face term that include - // test function ``ve'' (external + // test function ve (external // shape functions, i.e. shape // functions of the neighboring // cell). To be more precise, the @@ -1469,15 +1469,15 @@ void DGMethod::solve (Vector &solution) // difference quotients including the // cell under consideration and its // neighbors. This is done by the - // ``DerivativeApproximation'' class + // DerivativeApproximation class // that computes the approximate // gradients in a way similar to the - // ``GradientEstimation'' described + // GradientEstimation described // in step-9 of this tutorial. In // fact, the - // ``DerivativeApproximation'' class + // DerivativeApproximation class // was developed following the - // ``GradientEstimation'' class of + // GradientEstimation class of // step-9. Relating to the // discussion in step-9, here we // consider $h^{1+d/2}|\nabla_h @@ -1491,7 +1491,7 @@ void DGMethod::solve (Vector &solution) template void DGMethod::refine_grid () { - // The ``DerivativeApproximation'' + // The DerivativeApproximation // class computes the gradients to // float precision. This is // sufficient as they are @@ -1567,14 +1567,14 @@ void DGMethod::output_results (const unsigned int cycle) const } - // The following ``run'' function is + // The following run function is // similar to previous examples. The // only difference is that the // problem is assembled and solved // twice on each refinement step; - // first by ``assemble_system1'' that + // first by assemble_system1 that // implements the first version and - // then by ``assemble_system2'' that + // then by assemble_system2 that // implements the second version of // writing the DG // discretization. Furthermore the @@ -1660,7 +1660,7 @@ void DGMethod::run () } } - // The following ``main'' function is + // The following main function is // similar to previous examples and // need not to be commented on. int main () diff --git a/deal.II/examples/step-13/step-13.cc b/deal.II/examples/step-13/step-13.cc index 4dab4ca04e..671c214b27 100644 --- a/deal.II/examples/step-13/step-13.cc +++ b/deal.II/examples/step-13/step-13.cc @@ -16,8 +16,8 @@ // a list of include files from the // library, and as usual they are in // the standard order which is - // ``base'' -- ``lac'' -- ``grid'' -- - // ``dofs'' -- ``fe'' -- ``numerics'' + // base -- lac -- grid -- + // dofs -- fe -- numerics // (as each of these categories // roughly builds upon previous // ones), then C++ standard headers: @@ -62,7 +62,7 @@ // equation. In fact, they can // evaluate every kind of solution, // as long as it is described by a - // ``DoFHandler'' object, and a + // DoFHandler object, and a // solution vector. 
We define them // here first, even before the // classes that actually generate the @@ -74,16 +74,16 @@ // From an abstract point of view, we // declare a pure base class // that provides an evaluation - // operator ``operator()'' which will + // operator operator() which will // do the evaluation of the solution // (whatever derived classes might - // consider an ``evaluation''). Since + // consider an evaluation). Since // this is the only real function of // this base class (except for some // bookkeeping machinery), one // usually terms such a class that - // only has an ``operator()'' a - // ``functor'' in C++ terminology, + // only has an operator() a + // functor in C++ terminology, // since it is used just like a // function object. // @@ -147,8 +147,8 @@ namespace Evaluation // Now for the abstract base class // of evaluation classes: its main // purpose is to declare a pure - // virtual function ``operator()'' - // taking a ``DoFHandler'' object, + // virtual function operator() + // taking a DoFHandler object, // and the solution vector. In // order to be able to use pointers // to this base class only, it also @@ -203,7 +203,7 @@ namespace Evaluation // like to extract a point value // from the solution, so the first // class does this in its - // ``operator()''. The actual point + // operator(). The actual point // is given to this class through // the constructor, as well as a // table object into which it will @@ -233,7 +233,7 @@ namespace Evaluation // In the step-9 example program, // we have already seen how such an // exception class can be declared, - // using the ``DeclExceptionN'' + // using the DeclExceptionN // macros. We use this mechanism // here again. // @@ -380,10 +380,10 @@ namespace Evaluation // beginning of the // function, for example // by a statement like - // ``Assert + // Assert // (dof_handler.get_fe().dofs_per_vertex // @> 0, - // ExcNotImplemented())'', + // ExcNotImplemented()), // which should make it // quite clear what is // going wrong when the @@ -395,7 +395,7 @@ namespace Evaluation // that that does not // hurt here, since the // statement - // ``cell-@>vertex_dof_index(vertex,0)'' + // cell-@>vertex_dof_index(vertex,0) // would fail if we asked // it to give us the DoF // index of a vertex if @@ -428,7 +428,7 @@ namespace Evaluation // solution there and the rest of // the computations were useless // anyway. So make sure through - // the ``AssertThrow'' macro + // the AssertThrow macro // already used in the step-9 // program that we have indeed // found this point. If this is @@ -436,7 +436,7 @@ namespace Evaluation // exception of the type that is // given to it as second // argument, but compared to a - // straightforward ``throw'' + // straightforward throw // statement, it fills the // exception object with a set of // additional information, for @@ -444,7 +444,7 @@ namespace Evaluation // line number where the // exception was generated, and // the condition that failed. If - // you have a ``catch'' clause in + // you have a catch clause in // your main function (as this // program has), you will catch // all exceptions that are not @@ -457,10 +457,10 @@ namespace Evaluation AssertThrow (evaluation_point_found, ExcEvaluationPointNotFound(evaluation_point)); // Note that we have used the - // ``Assert'' macro in other + // Assert macro in other // example programs as well. 
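// The `functor' terminology used above can be boiled down to a few lines of
// plain C++: a class whose only real interface is operator(), so that its
// objects are used like functions. The class name below is invented for
// this sketch only:
#include <iostream>

class SquaredDistanceFromOrigin
{
  public:
    double operator() (const double x, const double y) const
    {
      return x*x + y*y;
    }
};

int main ()
{
  SquaredDistanceFromOrigin f;
  std::cout << f (3., 4.) << std::endl;   // prints 25, just like a function
}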
It // differed from the - // ``AssertThrow'' macro used + // AssertThrow macro used // here in that it simply aborts // the program, rather than // throwing an exception, and @@ -485,7 +485,7 @@ namespace Evaluation // program in debug mode, but // should be checked always, also // in production runs. Thus the - // use of the ``AssertThrow'' + // use of the AssertThrow // macro here. // Now, if we are sure that we @@ -502,12 +502,12 @@ namespace Evaluation // @sect4{Generating output} // A different, maybe slightly odd - // kind of ``evaluation'' of a + // kind of evaluation of a // solution is to output it to a // file in a graphical // format. Since in the evaluation // functions we are given a - // ``DoFHandler'' object and the + // DoFHandler object and the // solution vector, we have all we // need to do this, so we can do it // in an evaluation class. The @@ -532,9 +532,9 @@ namespace Evaluation // // Since this class which generates // the output is derived from the - // common ``EvaluationBase'' base + // common EvaluationBase base // class, its main interface is the - // ``operator()'' + // operator() // function. Furthermore, it has a // constructor taking a string that // will be used as the base part of @@ -555,21 +555,21 @@ namespace Evaluation // we write). // // Regarding the output format, the - // ``DataOutInterface'' class + // DataOutInterface class // (which is a base class of - // ``DataOut'' through which we + // DataOut through which we // will access its fields) provides // an enumeration field - // ``OutputFormat'', which lists + // OutputFormat, which lists // names for all supported output // formats. At the time of writing // of this program, the supported // graphics formats are represented - // by the enum values ``ucd'', - // ``gnuplot'', ``povray'', - // ``eps'', ``gmv'', ``tecplot'', - // ``tecplot_binary'', ``dx'', and - // ``vtk'', but this list will + // by the enum values ucd, + // gnuplot, povray, + // eps, gmv, tecplot, + // tecplot_binary, dx, and + // vtk, but this list will // certainly grow over time. Now, // within various functions of that // base class, you can use values @@ -578,10 +578,10 @@ namespace Evaluation // (for example the default suffix // used for files of each format), // and you can call a generic - // ``write'' function, which then + // write function, which then // branches to the - // ``write_gnuplot'', - // ``write_ucd'', etc functions + // write_gnuplot, + // write_ucd, etc functions // which we have used in previous // examples already, based on the // value of a second argument given @@ -636,14 +636,14 @@ namespace Evaluation // particularly interesting feature // over previous example programs // is the use of the - // ``DataOut::default_suffix'' + // DataOut::default_suffix // function, returning the usual // suffix for files of a given // format (e.g. ".eps" for // encapsulated postscript files, // ".gnuplot" for Gnuplot files), // and of the generic - // ``DataOut::write'' function with + // DataOut::write function with // a second argument, which // branches to the actual output // functions for the different @@ -652,14 +652,14 @@ namespace Evaluation // passed as second argument. // // Also note that we have to prefix - // ``this-@>'' to access a member + // this-@> to access a member // variable of the template // dependent base class. The reason // here, and further down in the // program is the same as the one // described in the step-7 example - // program (look for ``two-stage - // name lookup'' there). 
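// The need for the this-> prefix mentioned above (`two-stage name lookup')
// is easy to reproduce outside the library. In this stand-alone sketch (all
// names invented), the member lives in a base class that depends on the
// template parameter, so it is only found when accessed through `this':
#include <iostream>

template <typename T>
struct Base
{
  int value;
  Base () : value (42) {}
};

template <typename T>
struct Derived : Base<T>
{
  void print () const
  {
    // writing plain `value' here would not compile with a conforming
    // compiler, since non-dependent lookup ignores dependent base classes:
    std::cout << this->value << std::endl;
  }
};

int main ()
{
  Derived<double>().print ();
}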
+ // program (look for two-stage + // name lookup there). template void SolutionOutput::operator () (const DoFHandler &dof_handler, @@ -760,7 +760,7 @@ namespace LaplaceSolver // style used in Smalltalk or Java // programs, where all classes are // derived from entirely abstract - // classes ``Object'', even number + // classes Object, even number // representations. The author // admits that he does not // particularly like the use of @@ -805,7 +805,7 @@ namespace LaplaceSolver // have to make sure that the // triangulation exists until the // destructor exits. We do this by - // keeping a ``SmartPointer'' to + // keeping a SmartPointer to // this triangulation, which uses a // counter in the triangulation // class to denote the fact that @@ -826,10 +826,10 @@ namespace LaplaceSolver // by this we allow that derived // classes refine or coarsen the // triangulation within the - // ``refine_grid'' function. + // refine_grid function. // // Finally, we have a function - // ``n_dofs'' is only a tool for + // n_dofs is only a tool for // the driver functions to decide // whether we want to go on with // mesh refinement or not. It @@ -876,17 +876,17 @@ namespace LaplaceSolver // solving it, and calling the // postprocessor objects on the // solution. It implements the - // ``solve_problem'' and - // ``postprocess'' functions + // solve_problem and + // postprocess functions // declared in the base class. It // does not, however, implement the - // ``refine_grid'' method, as mesh + // refine_grid method, as mesh // refinement will be implemented // in a number of derived classes. // // It also declares a new abstract // virtual function, - // ``assemble_rhs'', that needs to + // assemble_rhs, that needs to // be overloaded in subclasses. The // reason is that we will implement // two different classes that will @@ -921,24 +921,24 @@ namespace LaplaceSolver // down to the base class's // constructor, or are stored and // used to generate a - // ``DoFHandler'' object + // DoFHandler object // later. Since finite elements and // quadrature formula should match, // it is also passed a quadrature // object. // - // The ``solve_problem'' sets up + // The solve_problem sets up // the data structures for the // actual solution, calls the // functions to assemble the linear // system, and solves it. // - // The ``postprocess'' function + // The postprocess function // finally takes an evaluation // object and applies it to the // computed solution. // - // The ``n_dofs'' function finally + // The n_dofs function finally // implements the pure virtual // function of the base class. template @@ -1045,7 +1045,7 @@ namespace LaplaceSolver // of the class. It does not do // much except store pointers to // the objects given, and generate - // ``DoFHandler'' object + // DoFHandler object // initialized with the given // pointer to a triangulation. This // causes the DoF handler to store @@ -1053,7 +1053,7 @@ namespace LaplaceSolver // already generate a finite // element numbering (we only ask // for that in the - // ``solve_problem'' function). + // solve_problem function). template Solver::Solver (Triangulation &triangulation, const FiniteElement &fe, @@ -1106,7 +1106,7 @@ namespace LaplaceSolver // As stated above, the - // ``postprocess'' function takes + // postprocess function takes // an evaluation object, and // applies it to the computed // solution. 
This function may be @@ -1122,7 +1122,7 @@ namespace LaplaceSolver } - // The ``n_dofs'' function should + // The n_dofs function should // be self-explanatory: template unsigned int @@ -1287,8 +1287,8 @@ namespace LaplaceSolver // overwriting their // respective // work. Previously, we have - // used the ``acquire'' and - // ``release'' functions of + // used the acquire and + // release functions of // the mutex to lock and // unlock the mutex, // respectively. While this @@ -1307,7 +1307,7 @@ namespace LaplaceSolver // in the middle of the // locked block, and forgets // that before we call - // ``return'', we also have + // return, we also have // to unlock the mutex. This // all is not be a problem // here, but we want to show @@ -1320,7 +1320,7 @@ namespace LaplaceSolver // mutex, and on running the // destructor unlocks it // again. This is called the - // ``scoped lock'' pattern + // scoped lock pattern // (apparently invented by // Doug Schmidt originally), // and it works because @@ -1328,7 +1328,7 @@ namespace LaplaceSolver // objects are also run when // we exit the function // either through a - // ``return'' statement, or + // return statement, or // when an exception is // raised. Thus, it is // guaranteed that the mutex @@ -1362,7 +1362,7 @@ namespace LaplaceSolver cell_matrix(i,j)); // Here, at the brace, the // current scope ends, so the - // ``lock'' variable goes out + // lock variable goes out // of existence and its // destructor the mutex is // unlocked. @@ -1391,7 +1391,7 @@ namespace LaplaceSolver // and do the second action in the // main thread. Since only one // thread is generated, we don't - // use the ``Threads::ThreadGroup'' + // use the Threads::ThreadGroup // class here, but rather use the // one created thread object // directly to wait for this @@ -1399,7 +1399,7 @@ namespace LaplaceSolver // // Note that taking up the address // of the - // ``DoFTools::make_hanging_node_constraints'' + // DoFTools::make_hanging_node_constraints // function is a little tricky, // since there are actually three // of them, one for each supported @@ -1407,7 +1407,7 @@ namespace LaplaceSolver // addresses of overloaded // functions is somewhat // complicated in C++, since the - // address-of operator ``&'' in + // address-of operator & in // that case returns more like a // set of values (the addresses of // all functions with that name), @@ -1428,9 +1428,9 @@ namespace LaplaceSolver // would like to have; for this, we // could use a cast, but for more // clarity, we assign it to a - // temporary ``mhnc_p'' (short for - // ``pointer to - // make_hanging_node_constraints'') + // temporary mhnc_p (short for + // pointer to + // make_hanging_node_constraints) // with the right type, and using // this pointer instead. template @@ -1453,7 +1453,7 @@ namespace LaplaceSolver DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); // Wait until the - // ``hanging_node_constraints'' + // hanging_node_constraints // object is fully set up, then // close it and use it to // condense the sparsity pattern: @@ -1523,13 +1523,13 @@ namespace LaplaceSolver // that denotes the right hand side // of the problem. A pointer to // this object is stored (again as - // a ``SmartPointer'', in order to + // a SmartPointer, in order to // make sure that the function // object is not deleted as long as // it is still used by this class). // // The only functional part of this - // class is the ``assemble_rhs'' + // class is the assemble_rhs // method that does what its name // suggests. 
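// The trick discussed above of assigning the address of an overloaded
// function to a pointer with an explicit type (the `mhnc_p' variable) can
// be shown in isolation; the overloads of `f' below are of course invented:
#include <iostream>

void f (int)    { std::cout << "f(int)"    << std::endl; }
void f (double) { std::cout << "f(double)" << std::endl; }

int main ()
{
  // `&f' by itself names a whole overload set; giving the pointer an
  // explicit type selects exactly one element of that set:
  void (*fp) (double) = &f;
  fp (3.14);                       // prints "f(double)"
}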
template @@ -1566,7 +1566,7 @@ namespace LaplaceSolver - // ... as does the ``assemble_rhs'' + // ... as does the assemble_rhs // function. Since this is // explained in several of the // previous example programs, we @@ -1614,22 +1614,22 @@ namespace LaplaceSolver // By now, all functions of the // abstract base class except for - // the ``refine_grid'' function + // the refine_grid function // have been implemented. We will // now have two classes that // implement this function for the - // ``PrimalSolver'' class, one + // PrimalSolver class, one // doing global refinement, one a // form of local refinement. // // The first, doing global // refinement, is rather simple: // its main function just calls - // ``triangulation-@>refine_global - // (1);'', which does all the work. + // triangulation-@>refine_global + // (1);, which does all the work. // - // Note that since the ``Base'' - // base class of the ``Solver'' + // Note that since the Base + // base class of the Solver // class is virtual, we have to // declare a constructor that // initializes the immediate base @@ -1770,8 +1770,8 @@ namespace LaplaceSolver // exact solution the function // $u(x,y)=exp(x+sin(10y+5x^2))$. In more // than two dimensions, simply repeat - // the sine-factor with ``y'' - // replaced by ``z'' and so on. Given + // the sine-factor with y + // replaced by z and so on. Given // this, the following two classes // are probably straightforward from // the previous examples. @@ -1887,10 +1887,10 @@ run_simulation (LaplaceSolver::Base &solver, // your program). for (unsigned int step=0; true; ++step) { - // Then give the ``alive'' + // Then give the alive // indication for this // iteration. Note that the - // ``std::flush'' is needed to + // std::flush is needed to // have the text actually // appear on the screen, rather // than only in some buffer diff --git a/deal.II/examples/step-14/step-14.cc b/deal.II/examples/step-14/step-14.cc index 0d2c5a7166..855df1fadc 100644 --- a/deal.II/examples/step-14/step-14.cc +++ b/deal.II/examples/step-14/step-14.cc @@ -175,7 +175,7 @@ namespace Evaluation // from all adjacent cells. // // Given the interface of the - // ``PointValueEvaluation'' class, + // PointValueEvaluation class, // the declaration of this class // provides little surprise, and // neither does the constructor: @@ -264,10 +264,10 @@ namespace Evaluation // element fields at // certain points is done // through the - // ``FEValues'' class, so + // FEValues class, so // we use that. The // question is: the - // ``FEValues'' object + // FEValues object // needs to be a given a // quadrature formula and // can then compute the @@ -293,7 +293,7 @@ namespace Evaluation // above. // // Thus: initialize the - // ``FEValues'' object on + // FEValues object on // this cell, fe_values.reinit (cell); // and extract the @@ -385,7 +385,7 @@ namespace Evaluation // the grids generated. This again // can be done with one such // class. Its structure is analog - // to the ``SolutionOutput'' class + // to the SolutionOutput class // of the previous example program, // so we do not discuss it here in // more detail. 
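// Regarding the std::flush remark in step-13's run_simulation function
// above: a tiny stand-alone demonstration of why the flush matters when
// progress dots are printed from inside a loop:
#include <iostream>

int main ()
{
  for (unsigned int step=0; step<5; ++step)
    {
      // without std::flush, the dot may linger in the output buffer and
      // only appear long after this iteration's work is done:
      std::cout << '.' << std::flush;
      // ... the real work of this iteration would go here ...
    }
  std::cout << std::endl;
}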
Furthermore, @@ -453,11 +453,11 @@ namespace LaplaceSolver // This class is almost unchanged, // with the exception that it // declares two more functions: - // ``output_solution'' will be used + // output_solution will be used // to generate output files from // the actual solutions computed by // derived classes, and the - // ``set_refinement_cycle'' + // set_refinement_cycle // function by which the testing // framework sets the number of the // refinement cycle to a local @@ -511,7 +511,7 @@ namespace LaplaceSolver // @sect4{The Laplace Solver class} - // Likewise, the ``Solver'' class + // Likewise, the Solver class // is entirely unchanged and will // thus not be discussed. template @@ -761,13 +761,13 @@ namespace LaplaceSolver // @sect4{The PrimalSolver class} - // The ``PrimalSolver'' class is + // The PrimalSolver class is // also mostly unchanged except for // overloading the functions - // ``solve_problem'', ``n_dofs'', - // and ``postprocess'' of the base + // solve_problem, n_dofs, + // and postprocess of the base // class, and implementing the - // ``output_solution'' + // output_solution // function. These overloaded // functions do nothing particular // besides calling the functions of @@ -777,33 +777,33 @@ namespace LaplaceSolver // requires us to write such // functions for the following // scenario: Besides the - // ``PrimalSolver'' class, we will - // have a ``DualSolver'', both - // derived from ``Solver''. We will + // PrimalSolver class, we will + // have a DualSolver, both + // derived from Solver. We will // then have a final classes which // derived from these two, which // will then have two instances of - // the ``Solver'' class as its base + // the Solver class as its base // classes. If we want, for // example, the number of degrees // of freedom of the primal solver, // we would have to indicate this // like so: - // ``PrimalSolver@::n_dofs()''. + // PrimalSolver@::n_dofs(). // However, the compiler does not - // accept this since the ``n_dofs'' + // accept this since the n_dofs // function is actually from a base - // class of the ``PrimalSolver'' + // class of the PrimalSolver // class, so we have to inject the // name from the base to the // derived class using these // additional functions. // // Regarding the implementation of - // the ``output_solution'' + // the output_solution // function, we keep the - // ``GlobalRefinement'' and - // ``RefinementKelly'' classes in + // GlobalRefinement and + // RefinementKelly classes in // this program, and they can then // rely on the default // implementation of this function @@ -1205,10 +1205,10 @@ namespace LaplaceSolver // everything that describes a test // case: here, these are two // subclasses, one called - // ``BoundaryValues'' for the + // BoundaryValues for the // boundary values of the exact // solution, and one called - // ``RightHandSide'', and then a way + // RightHandSide, and then a way // to generate the coarse grid. Since // the solution of the previous // example program looked like curved @@ -1245,13 +1245,13 @@ namespace LaplaceSolver // right hand side by simply giving // the name of the outer class as a // template argument to a class which - // we call here ``Data::SetUp'', and + // we call here Data::SetUp, and // it then creates objects for the // inner classes. 
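// The Data::SetUp idea just described reduces to a small amount of plain
// C++; every class and member name in this sketch is an invented stand-in,
// not library code. An outer `test case' class only carries inner classes,
// and a wrapper template instantiates whatever it is given:
#include <iostream>

struct CurvedRidgesLike
{
  struct BoundaryValues { double value (const double x) const { return x*x; } };
  struct RightHandSide  { double value (const double)   const { return 0.;  } };
};

template <typename TestCase>
struct SetUp
{
  typename TestCase::BoundaryValues boundary_values;
  typename TestCase::RightHandSide  right_hand_side;
};

int main ()
{
  // switching to another test case means changing this one template
  // argument, and nothing else:
  SetUp<CurvedRidgesLike> setup;
  std::cout << setup.boundary_values.value (1.5) << std::endl;
}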
In this case, to // get all that characterizes the // curved ridge solution, we would // simply generate an instance of - // ``Data::SetUp@'', + // Data::SetUp@, // and everything we need to know // about the solution would be static // member variables and functions of @@ -1266,15 +1266,15 @@ namespace LaplaceSolver // addition by material properties, // Neumann values, different boundary // descriptors, etc. In that case, - // the ``SetUp'' class might consist + // the SetUp class might consist // of a dozen or more objects, and // each descriptor class (like the - // ``CurvedRidges'' class below) + // CurvedRidges class below) // would have to provide them. Then, // you will be happy to be able to // change from one set of data to // another by only changing the - // template argument to the ``SetUp'' + // template argument to the SetUp // class at one place, rather than at // many. // @@ -1290,7 +1290,7 @@ namespace LaplaceSolver // obvious way, see below, with // virtual abstract functions. It // forces us to introduce a second - // template parameter ``dim'' which + // template parameter dim which // we need for the base class (which // could be avoided using some // template magic, but we omit that), @@ -1300,17 +1300,17 @@ namespace LaplaceSolver // simple, you don't have to touch // the framework classes, only a // structure like the - // ``CurvedRidges'' one is needed. + // CurvedRidges one is needed. namespace Data { // @sect4{The SetUpBase and SetUp classes} // Based on the above description, - // the ``SetUpBase'' class then + // the SetUpBase class then // looks as follows. To allow using - // the ``SmartPointer'' class with + // the SmartPointer class with // this class, we derived from the - // ``Subscriptor'' class. + // Subscriptor class. template struct SetUpBase : public Subscriptor { @@ -1397,8 +1397,8 @@ namespace Data // The class that is used to // describe the boundary values and - // right hand side of the ``curved - // ridge'' problem already used in + // right hand side of the curved + // ridge problem already used in // the step-13 example program is // then like so: template @@ -1699,19 +1699,19 @@ namespace Data // why we have not chosen to // implement the classes implementing // a certain setup (like the - // ``CurvedRidges'' class) directly + // CurvedRidges class) directly // as classes derived from - // ``Data::SetUpBase''. Indeed, we + // Data::SetUpBase. Indeed, we // could have done very well so. The // only reason is that then we would // have to have member variables for // the solution and right hand side - // classes in the ``CurvedRidges'' + // classes in the CurvedRidges // class, as well as member functions // overloading the abstract functions // of the base class giving access to // these member variables. The - // ``SetUp'' class has the sole + // SetUp class has the sole // reason to relieve us from the need // to reiterate these member // variables and functions that would @@ -1728,21 +1728,21 @@ namespace Data // However, there might be good // reasons to actually implement // classes derived from - // ``Data::SetUpBase'', for example + // Data::SetUpBase, for example // if the solution or right hand side // classes require constructors that // take arguments, which the - // ``Data::SetUpBase'' class cannot + // Data::SetUpBase class cannot // provide. In that case, subclassing // is a worthwhile strategy. 
Other // possibilities for special cases // are to derive from - // ``Data::SetUp@'' where - // ``SomeSetUp'' denotes a class, or + // Data::SetUp@ where + // SomeSetUp denotes a class, or // even to explicitly specialize - // ``Data::SetUp@''. The + // Data::SetUp@. The // latter allows to transparently use - // the way the ``SetUp'' class is + // the way the SetUp class is // used for other set-ups, but with // special actions taken for special // arguments. @@ -1757,11 +1757,11 @@ namespace Data // here) was small, and the number of // test cases was small as well. One // then starts out by handcoding them - // into a number of ``switch'' + // into a number of switch // statements. Over time, projects // grow, and so does the number of // test cases. The number of - // ``switch'' statements grows with + // switch statements grows with // that, and their length as well, // and one starts to find ways to // consider impossible examples where @@ -1993,7 +1993,7 @@ namespace DualFunctional // average over all cells that // surround this point. The // question which cells - // ``surrounds'' the evaluation + // surrounds the evaluation // point is made dependent on the // mesh width by including those // cells for which the distance of @@ -2021,7 +2021,7 @@ namespace DualFunctional // to zero: rhs.reinit (dof_handler.n_dofs()); - // Initialize a ``FEValues'' + // Initialize a FEValues // object with a quadrature // formula, have abbreviations // for the number of quadrature @@ -2065,7 +2065,7 @@ namespace DualFunctional { // If we have found such a // cell, then initialize - // the ``FEValues'' object + // the FEValues object // and integrate the // x-component of the // gradient of each shape @@ -2125,15 +2125,15 @@ namespace LaplaceSolver // @sect4{The DualSolver class} // In the same way as the - // ``PrimalSolver'' class above, we + // PrimalSolver class above, we // now implement a - // ``DualSolver''. It has all the + // DualSolver. It has all the // same features, the only // difference is that it does not // take a function object denoting // a right hand side object, but // now takes a - // ``DualFunctionalBase'' object + // DualFunctionalBase object // that will assemble the right // hand side vector of the dual // problem. The rest of the class @@ -2144,12 +2144,12 @@ namespace LaplaceSolver // triangulation, but different // discretizations, it now becomes // clear why we have made the - // ``Base'' class a virtual one: + // Base class a virtual one: // since the final class will be // derived from both - // ``PrimalSolver'' as well as - // ``DualSolver'', it would have - // two ``Base'' instances, would we + // PrimalSolver as well as + // DualSolver, it would have + // two Base instances, would we // not have marked the inheritance // as virtual. Since in many // applications the base class @@ -2303,11 +2303,11 @@ namespace LaplaceSolver // In the private section, we // have two functions that are // used to call the - // ``solve_problem'' functions + // solve_problem functions // of the primal and dual base // classes. These two functions // will be called in parallel - // by the ``solve_problem'' + // by the solve_problem // function of this class. 
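// The role of the virtual `Base' class discussed above, and the references
// to the primal and dual solver components used further down, can be seen
// in a bare-bones diamond; every name here is an illustrative stand-in:
#include <iostream>

struct BaseLike
{
  unsigned int refinement_cycle;
  BaseLike () : refinement_cycle (0) {}
};

// without `virtual' here, the most derived class would contain two separate
// BaseLike subobjects and access to refinement_cycle would be ambiguous:
struct PrimalSolverLike : virtual BaseLike { int primal_data; };
struct DualSolverLike   : virtual BaseLike { int dual_data;   };

struct WeightedResidualLike : PrimalSolverLike, DualSolverLike {};

int main ()
{
  WeightedResidualLike w;
  // references to the two base components give unambiguous access to
  // their respective members:
  PrimalSolverLike &primal = w;
  DualSolverLike   &dual   = w;
  primal.primal_data = 1;
  dual.dual_data     = 2;
  // and thanks to virtual inheritance there is only one shared BaseLike:
  std::cout << w.refinement_cycle << std::endl;
}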
void solve_primal_problem (); void solve_dual_problem (); @@ -2354,8 +2354,8 @@ namespace LaplaceSolver // error estimates on cells and // faces, we need a number of // helper objects, such as - // ``FEValues'' and - // ``FEFaceValues'' functions, + // FEValues and + // FEFaceValues functions, // but also temporary objects // storing the values and // gradients of primal and dual @@ -2526,9 +2526,9 @@ namespace LaplaceSolver // In the implementation of this // class, we first have the - // constructors of the ``CellData'' - // and ``FaceData'' member classes, - // and the ``WeightedResidual'' + // constructors of the CellData + // and FaceData member classes, + // and the WeightedResidual // constructor. They only // initialize fields to their // correct lengths, so we do not @@ -2662,7 +2662,7 @@ namespace LaplaceSolver // Now, it is becoming more - // interesting: the ``refine_grid'' + // interesting: the refine_grid // function asks the error // estimator to compute the // cell-wise error indicators, then @@ -2708,13 +2708,13 @@ namespace LaplaceSolver // Since we want to output both the // primal and the dual solution, we - // overload the ``output_solution'' + // overload the output_solution // function. The only interesting // feature of this function is that // the primal and dual solutions // are defined on different finite // element spaces, which is not the - // format the ``DataOut'' class + // format the DataOut class // expects. Thus, we have to // transfer them to a common finite // element space. Since we want the @@ -2725,7 +2725,7 @@ namespace LaplaceSolver // primal space. For the // interpolation, there is a // library function, that takes a - // ``ConstraintMatrix'' object + // ConstraintMatrix object // including the hanging node // constraints. The rest is // standard. @@ -2734,17 +2734,17 @@ namespace LaplaceSolver // work-around worth mentioning: in // this function, as in a couple of // following ones, we have to - // access the ``DoFHandler'' + // access the DoFHandler // objects and solutions of both // the primal as well as of the // dual solver. Since these are - // members of the ``Solver'' base + // members of the Solver base // class which exists twice in the // class hierarchy leading to the // present class (once as base - // class of the ``PrimalSolver'' + // class of the PrimalSolver // class, once as base class of the - // ``DualSolver'' class), we have + // DualSolver class), we have // to disambiguate accesses to them // by telling the compiler a member // of which of these two instances @@ -2754,11 +2754,11 @@ namespace LaplaceSolver // through the class hierarchy // which disambiguates the base // class, for example writing - // ``PrimalSolver::dof_handler'' to + // PrimalSolver::dof_handler to // denote the member variable - // ``dof_handler'' from the - // ``Solver'' base class of the - // ``PrimalSolver'' + // dof_handler from the + // Solver base class of the + // PrimalSolver // class. Unfortunately, this // confuses gcc's version 2.96 (a // version that was intended as a @@ -2771,21 +2771,21 @@ namespace LaplaceSolver // Thus, we have to work around // this problem. We do this by // introducing references to the - // ``PrimalSolver'' and - // ``DualSolver'' components of the - // ``WeightedResidual'' object at + // PrimalSolver and + // DualSolver components of the + // WeightedResidual object at // the beginning of the // function. 
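The disambiguation issue discussed here is plain C++ and independent of deal.II, so it can be demonstrated with heavily stripped-down stand-ins for the classes involved (the members are reduced to integers purely for illustration; the shared quantity mentioned in the text is the common triangulation):

#include <iostream>

// The common base class, holding what both solvers share. It is
// inherited virtually further down, so the final class contains it
// only once.
struct Base
{
  int triangulation;
  Base () : triangulation (0) {}
};

// The Solver class is *not* a virtual base: the final class therefore
// contains two independent Solver subobjects, one carrying the primal
// and one carrying the dual discretization.
struct Solver : virtual Base
{
  int dof_handler;   // stands in for the real DoFHandler member
  Solver (const int i) : dof_handler (i) {}
};

struct PrimalSolver : Solver
{
  PrimalSolver () : Solver (1) {}
};

struct DualSolver : Solver
{
  DualSolver () : Solver (2) {}
};

struct WeightedResidual : PrimalSolver, DualSolver
{
  void output_solution ()
  {
    // Rather than qualifying every access as PrimalSolver::dof_handler
    // (the construct that confused gcc 2.96), take references to the
    // two base-class parts once and work through them:
    const PrimalSolver &primal = *this;
    const DualSolver   &dual   = *this;
    std::cout << "primal: " << primal.dof_handler
              << ", dual: " << dual.dof_handler << std::endl;
  }
};

int main ()
{
  WeightedResidual wr;
  wr.output_solution ();
}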
Since each of these // has an unambiguous base class - // ``Solver'', we can access the + // Solver, we can access the // member variables we want through // these references. However, we // are now accessing protected // member variables of these // classes through a pointer other - // than the ``this'' pointer (in + // than the this pointer (in // fact, this is of course the - // ``this'' pointer, but not + // this pointer, but not // explicitly). This finally is the // reason why we had to declare the // present class a friend of the @@ -2813,7 +2813,7 @@ namespace LaplaceSolver // Add the data vectors for which // we want output. Add them both, - // the ``DataOut'' functions can + // the DataOut functions can // handle as many data vectors as // you wish to write to output: data_out.add_data_vector (primal_solver.solution, @@ -2881,7 +2881,7 @@ namespace LaplaceSolver // the finite element space in // which we have solved the dual // problem: But, again as in the - // ``WeightedResidual::output_solution'' + // WeightedResidual::output_solution // function we first need to // create a ConstraintMatrix // including the hanging node @@ -2905,7 +2905,7 @@ namespace LaplaceSolver // element space of the primal // solution and subtracting it // from z: use the - // ``interpolate_difference'' + // interpolate_difference // function, that gives (z-I_hz) // in the element space of the // dual solution. @@ -3125,7 +3125,7 @@ namespace LaplaceSolver // residual contributions of // this cell, and put them // into the - // ``error_indicators'' + // error_indicators // variable: integrate_over_cell (cell, cell_index, primal_solution, @@ -3340,7 +3340,7 @@ namespace LaplaceSolver // finite element field on the // present cell. For this, // initialize the - // ``FEFaceValues'' object + // FEFaceValues object // corresponding to this side of // the face, and extract the // gradients using that @@ -3371,19 +3371,19 @@ namespace LaplaceSolver // to find out with which face of // the neighboring cell we have // to work, i.e. the - // ``home-many''the neighbor the + // home-manythe neighbor the // present cell is of the cell // behind the present face. For // this, there is a function, and // we put the result into a // variable with the name - // ``neighbor_neighbor'': + // neighbor_neighbor: const unsigned int neighbor_neighbor = cell->neighbor_of_neighbor (face_no); // Then define an abbreviation // for the neigbor cell, // initialize the - // ``FEFaceValues'' object on + // FEFaceValues object on // that cell, and extract the // gradients on that cell: const active_cell_iterator neighbor = cell->neighbor(face_no); @@ -3509,7 +3509,7 @@ namespace LaplaceSolver // were not the case, then // there would be either a // bug in the - // ``neighbor_neighbor'' + // neighbor_neighbor // function called above, or // -- worse -- some function // in the library did not @@ -3702,7 +3702,7 @@ struct Framework // values, and subsequently a // variable of that type. It // will default to - // ``dual_weighted_error_estimator''. + // dual_weighted_error_estimator. enum RefinementCriterion { dual_weighted_error_estimator, global_refinement, @@ -3731,13 +3731,13 @@ struct Framework // Next to last, a function // that is used as a weight // to the - // ``RefinementWeightedKelly'' + // RefinementWeightedKelly // class. 
The default value // of this pointer is zero, // but you have to set it to // some other value if you // want to use the - // ``weighted_kelly_indicator'' + // weighted_kelly_indicator // refinement criterion. SmartPointer > kelly_weight; @@ -3948,9 +3948,9 @@ int main () descriptor.refinement_criterion = Framework::ProblemDescription::dual_weighted_error_estimator; // Here, we could as well have - // used ``global_refinement'' + // used global_refinement // or - // ``weighted_kelly_indicator''. Note + // weighted_kelly_indicator. Note // that the information given // about dual finite elements, // dual functional, etc is only @@ -3973,9 +3973,9 @@ int main () // hand side. These are // prepackaged in classes. We // take here the description of - // ``Exercise_2_3'', but you + // Exercise_2_3, but you // can also use - // ``CurvedRidges@'': + // CurvedRidges@: descriptor.data = new Data::SetUp,dim> (); // Next set first a dual @@ -3986,12 +3986,12 @@ int main () // value at an // evaluation point, // represented by the classes - // ``PointValueEvaluation'' + // PointValueEvaluation // in the namespaces of // evaluation and dual // functional classes. You can // also set the - // ``PointXDerivativeEvaluation'' + // PointXDerivativeEvaluation // classes for the x-derivative // instead of the value // at the evaluation point. diff --git a/deal.II/examples/step-15/step-15.cc b/deal.II/examples/step-15/step-15.cc index d602acd41e..912a0d8bae 100644 --- a/deal.II/examples/step-15/step-15.cc +++ b/deal.II/examples/step-15/step-15.cc @@ -54,7 +54,7 @@ // The first thing we have here is a helper // function that computes an even power $|v|^n$ // of a vector $v$, by evaluating - // $(v\cdot v)^{n/2}. We need this in the + // $(v\cdot v)^{n/2}$. We need this in the // computations below where we do not want to // dwell on the fact that the gradient of the // solution is actually a scalar in the 1d @@ -69,10 +69,10 @@ // is obvious, note the assertion at the // beginning of the function body, which // makes sure that the exponent is indeed an - // even number (here, we use that ``n/2'' is + // even number (here, we use that n/2 is // computed in integer arithmetic, i.e. any // remainder of the division is - // lost). ``ExcMessage'' is a pre-defined + // lost). ExcMessage is a pre-defined // exception class that takes a string // argument explaining what goes wrong. It is // a simpler way to declare exceptions than @@ -82,7 +82,7 @@ // exception class, we lose the ability to // attach additional information at run-time // to the exception message, such as the - // value of the variable ``n''. By following + // value of the variable n. By following // the way explained in above example // programs, adding this feature is simple, // though. @@ -129,13 +129,13 @@ class InitializationValues : public Function<1> // So here comes the function that implements - // the function object. The ``base'' value is - // $x^{1/3}$, while ``random'' is a random + // the function object. 
The base value is + // $x^{1/3}$, while random is a random // number between -1 and 1 (note that - // ``rand()'' returns a random integer value - // between zero and ``RAND_MAX''; to convert + // rand() returns a random integer value + // between zero and RAND_MAX; to convert // it to a floating point value between 0 and - // 2, we have to divide by ``RAND_MAX'' and + // 2, we have to divide by RAND_MAX and // multiply by two -- note that the first // multiplication has to happen in floating // point arithmetic, so that the division is @@ -161,7 +161,7 @@ double InitializationValues::value (const Point<1> &p, // class. As in most of the previous example // programs, the public interface of the // class consists only of a constructor and a - // ``run'' function that does the actual + // run function that does the actual // work. The constructor takes an additional // argument that indicates the number of the // run we are presently performing. This @@ -174,13 +174,13 @@ double InitializationValues::value (const Point<1> &p, // the computations, doing one nonlinear // step, refineming the mesh, doing a line // search for step length computations, - // etc. The ``energy'' function computes the + // etc. The energy function computes the // value of the optimization functional on an // arbitrary finite element function with - // nodal values given on the ``DoFHandler'' + // nodal values given on the DoFHandler // given as an argument. Since it does not // depend on the state of this object, we - // declare this function as ``static''. + // declare this function as static. // // The member variables of this class are // what we have seen before, and the @@ -262,7 +262,7 @@ void MinimizationProblem<1>::initialize_solution () // size to the vector, and use library // function that takes a function object, // and interpolates the given vector living - // on a ``DoFHandler'' to this function + // on a DoFHandler to this function // object: present_solution.reinit (dof_handler.n_dofs()); VectorTools::interpolate (dof_handler, @@ -272,7 +272,7 @@ void MinimizationProblem<1>::initialize_solution () // Then we still have to make sure that we // get the boundary values right. This // could have been done inside the - // ``InitializationValues'' class, but it + // InitializationValues class, but it // is instructive to see how it can also be // done, in particular since it is so // simple in 1d. First, start out with an @@ -305,7 +305,7 @@ void MinimizationProblem<1>::initialize_solution () // cell to zero. Note that the zeroth // vertex is the left one, and that zero is // the only valid second argument to the - // call to ``vertex_dof_index'', since we + // call to vertex_dof_index, since we // have a scalar finite element; thus, // there is only a single component. present_solution(cell->vertex_dof_index(0,0)) = 0; @@ -377,7 +377,7 @@ void MinimizationProblem::assemble_step () matrix.reinit (sparsity_pattern); residual.reinit (dof_handler.n_dofs()); - // Then we initialize a ``FEValues'' object + // Then we initialize a FEValues object // with a 4-point Gauss quadrature // formula. This object will be used to // compute the values and gradients of the @@ -387,17 +387,17 @@ void MinimizationProblem::assemble_step () // nonlinear step as outlined in the // introduction to this example program. 
In // order to compute values and gradients, - // we need to pass the ``update_values'' - // and ``update_gradients'' flags to the + // we need to pass the update_values + // and update_gradients flags to the // constructor, and the - // ``update_JxW_values'' flag for the + // update_JxW_values flag for the // Jacobian times the weight at a // quadrature point. In addition, we need // to have the coordinate values of each // quadrature point in real space for the // $x-u^3$ terms; to get these from the - // ``FEValues'' object, we need to pass it - // the ``update_q_points'' flag. + // FEValues object, we need to pass it + // the update_q_points flag. // // It is a simple calculation to figure out // that for linear elements, the integrals @@ -439,7 +439,7 @@ void MinimizationProblem::assemble_step () // therefore need to have the values and // gradients of the previous solution at // the quadrature points. We will get them - // from the ``FEValues'' object above, and + // from the FEValues object above, and // will put them into the following two // variables: std::vector local_solution_values (n_q_points); @@ -467,13 +467,13 @@ void MinimizationProblem::assemble_step () // the previous solution at the // quadrature points. To get them, we // don't actually have to do much, - // except for giving the ``FEValues'' + // except for giving the FEValues // object the global node vector from // which to compute this data, and a // reference to the objects into which // to put them. After the calls, the - // ``local_solution_values'' and - // ``local_solution_values'' variables + // local_solution_values and + // local_solution_values variables // will contain values and gradients // for each of the quadrature points on // this cell. @@ -605,7 +605,7 @@ void MinimizationProblem::assemble_step () // this program is ever going to be run in // higher dimensions, then we should only // evaluate for indicator zero, which is - // why we have placed the ``if'' statement + // why we have placed the if statement // in front of the second function call. // // Note that we need zero boundary @@ -690,17 +690,17 @@ MinimizationProblem::line_search (const Vector &update) const for (unsigned int step=0; step<5; ++step) { // At the present location, which is - // ``present_solution+alpha*update'', + // present_solution+alpha*update, // evaluate the energy tmp = present_solution; tmp.add (alpha, update); const double f_a = energy (dof_handler, tmp); // Then determine a finite difference - // step length ``dalpha'', and also + // step length dalpha, and also // evaluate the energy functional at - // positions ``alpha+dalpha'' and - // ``alpha-dalpha'' along the search + // positions alpha+dalpha and + // alpha-dalpha along the search // direction: const double dalpha = (alpha != 0 ? alpha/100 : 0.01); @@ -847,7 +847,7 @@ MinimizationProblem::output_results () const // works in 1d. However, to make later // extension to higher space dimensions // simpler, we define a constant integer - // ``dim'' at the beginning of the function; + // dim at the beginning of the function; // by using this constant as template // argument in all places, we are actually // able to write most of the code as if it @@ -886,7 +886,7 @@ void MinimizationProblem<1>::refine_grid () // need to evaluate the gradient on the // neighbor cells. 
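To make the FEValues workflow of the preceding paragraphs more concrete, here is a self-contained sketch that sets up a quadrature formula and an FEValues object with the update flags discussed above, then extracts values and gradients of a finite element field on each cell. It is written against a recent version of deal.II, so the include paths and a few names differ from the 2006 sources in this patch (for example get_function_gradients and update_quadrature_points instead of get_function_grads and update_q_points):

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/base/tensor.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria.h>
#include <deal.II/lac/vector.h>

#include <iostream>
#include <vector>

int main ()
{
  const unsigned int dim = 2;

  // A small mesh, a Q1 element, and a (here simply zero) finite
  // element function whose values and gradients we want.
  dealii::Triangulation<dim> triangulation;
  dealii::GridGenerator::hyper_cube (triangulation);
  triangulation.refine_global (3);

  const dealii::FE_Q<dim> fe (1);
  dealii::DoFHandler<dim> dof_handler (triangulation);
  dof_handler.distribute_dofs (fe);

  dealii::Vector<double> solution (dof_handler.n_dofs ());

  // Quadrature formula plus the update flags discussed in the text.
  const dealii::QGauss<dim> quadrature_formula (4);
  dealii::FEValues<dim> fe_values (fe, quadrature_formula,
                                   dealii::update_values |
                                   dealii::update_gradients |
                                   dealii::update_quadrature_points |
                                   dealii::update_JxW_values);

  const unsigned int n_q_points = quadrature_formula.size ();
  std::vector<double>                  local_values (n_q_points);
  std::vector<dealii::Tensor<1, dim> > local_gradients (n_q_points);

  double integral = 0;
  for (const auto &cell : dof_handler.active_cell_iterators ())
    {
      // Re-initialize on the present cell, then extract values and
      // gradients of the solution at the quadrature points.
      fe_values.reinit (cell);
      fe_values.get_function_values (solution, local_values);
      fe_values.get_function_gradients (solution, local_gradients);

      // Accumulate, for example, the square of the L2 norm of the
      // field: value squared times the Jacobian-times-weight factor.
      for (unsigned int q = 0; q < n_q_points; ++q)
        integral += local_values[q] * local_values[q] * fe_values.JxW (q);
    }

  std::cout << "integral = " << integral << std::endl;
}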
To avoid some of the // work needed to reinitialize a - // ``FEValues'' object on a cell, we define + // FEValues object on a cell, we define // another such object here that we will // only use for the neighbor cells. The // data we need from the side of the @@ -908,14 +908,14 @@ void MinimizationProblem<1>::refine_grid () // over all cells. Since we need to write // the result for each cell into // consecutive elements of a vector, we - // also keep a running index ``cell_index'' + // also keep a running index cell_index // that we increase with each cell treated. DoFHandler::active_cell_iterator cell = dof_handler.begin_active (), endc = dof_handler.end (); for (unsigned int cell_index = 0; cell!=endc; ++cell, ++cell_index) { - // After initializing the ``FEValues'' + // After initializing the FEValues // object on each cell, use it to // evaluate solution and first and // second derivatives of it at the @@ -962,7 +962,7 @@ void MinimizationProblem<1>::refine_grid () // The next step is to evaluate the // jump terms. To make computations // somewhat simpler (and to free up the - // ``local_*'' variables for use on + // local_* variables for use on // neighboring elements), we define // some convenience variables for the // positions of the left and right cell @@ -981,7 +981,7 @@ void MinimizationProblem<1>::refine_grid () // actually check for this. If this // would not be the case, an exception // of the (predefined) class - // ``ExcInternalError'' would be + // ExcInternalError would be // thrown. Of course, this does not // happen in this program, but it shows // a way of defensive coding: if you @@ -991,7 +991,7 @@ void MinimizationProblem<1>::refine_grid () // the library: the quadrature classes // do not promise any particular order // of their quadrature points, so the - // ``QTrapez'' class could in principle + // QTrapez class could in principle // change the order of its two // evaluation points. In that case, // your code would tell you that @@ -1007,12 +1007,12 @@ void MinimizationProblem<1>::refine_grid () // really does what it is hoped to do.) // // Given that we are now sure that - // ``x_left'' and ``x_right'', + // x_left and x_right, // extracted from the zeroth and first // quadrature point, are indeed the // left and right vertex of the cell, // we can also be sure that the values - // we extract for ``u_left'' et al. are + // we extract for u_left et al. are // the ones we expect them to be, since // the order of these values must of // course match the order of the @@ -1064,25 +1064,25 @@ void MinimizationProblem<1>::refine_grid () // cells may have totally // independent refinement // levels. Thus, we really need the - // ``while'' loop, not only an - // ``if'' clause. + // while loop, not only an + // if clause. DoFHandler::cell_iterator left_neighbor = cell->neighbor(0); while (left_neighbor->has_children()) left_neighbor = left_neighbor->child(1); // With the so-found neighbor, // initialize the second - // ``FEValues'' object to it, + // FEValues object to it, // extract the gradients of the // solution there, and from this // get the gradient at the // interface (this is the first - // element of ``local_gradients'', + // element of local_gradients, // since the right end point of the // neighbor cell has index 1) as a // scalar value (this is the zeroth // component of - // ``local_gradients[1]''. + // local_gradients[1]. 
neighbor_fe_values.reinit (left_neighbor); neighbor_fe_values.get_function_grads (present_solution, local_gradients); @@ -1126,7 +1126,7 @@ void MinimizationProblem<1>::refine_grid () // examples, however, we would like to // transfer the solution vector from the // old to the new grid. This is what the - // ``SolutionTransfer'' class is good for, + // SolutionTransfer class is good for, // but it requires some preliminary // work. First, we need to tag the cells // that we want to refine or coarsen, as @@ -1148,7 +1148,7 @@ void MinimizationProblem<1>::refine_grid () // situations, the library will silently // also have to refine the neighbor cell // once. It does so by calling the - // ``Triangulation@::prepare_coarsening_and_refinement'' + // Triangulation@::prepare_coarsening_and_refinement // function before actually doing the // refinement and coarsening. This function // flags a set of additional cells for @@ -1159,15 +1159,15 @@ void MinimizationProblem<1>::refine_grid () // this function are exactly the ones that // will actually be refined or // coarsened. Since the - // ``SolutionTransfer'' class needs this + // SolutionTransfer class needs this // information in order to store the data // from the old mesh and transfer to the // new one. triangulation.prepare_coarsening_and_refinement(); // With this out of the way, we initialize - // a ``SolutionTransfer'' object with the - // present ``DoFHandler'' and attach the + // a SolutionTransfer object with the + // present DoFHandler and attach the // solution vector to it: SolutionTransfer solution_transfer(dof_handler); solution_transfer.prepare_for_coarsening_and_refinement (present_solution); @@ -1180,7 +1180,7 @@ void MinimizationProblem<1>::refine_grid () // Finally, we retrieve the old solution // interpolated to the new mesh. Since the - // ``SolutionTransfer'' function does not + // SolutionTransfer function does not // actually store the values of the old // solution, but rather indices, we need to // preserve the old solution vector until @@ -1197,7 +1197,7 @@ void MinimizationProblem<1>::refine_grid () // actually unnecessary in 1d, but // necessary for higher space dimensions, // so we show it anyway: the result of what - // the ``SolutionTransfer'' class provides + // the SolutionTransfer class provides // is a vector that is interpolated from // the old to the new mesh. Unfortunately, // it does not necessarily have the right @@ -1211,9 +1211,9 @@ void MinimizationProblem<1>::refine_grid () hanging_node_constraints.close (); hanging_node_constraints.distribute (present_solution); // This is wasteful, since we create a - // ``ConstraintMatrix'' object that will be + // ConstraintMatrix object that will be // recreated again in the next call to - // ``setup_system_on_mesh'' immediately + // setup_system_on_mesh immediately // afterwards. A more efficient // implementation would make sure that it // is created only once. We don't care so @@ -1232,7 +1232,7 @@ void MinimizationProblem<1>::refine_grid () // computes the energy of a nodal vector in // the functional considered in this example // program. 
Its idea is simple: take a nodal - // vector and the ``DoFHandler'' object it is + // vector and the DoFHandler object it is // living on, then loop over all cells and // add up the local contributions to the // energy: @@ -1242,7 +1242,7 @@ MinimizationProblem::energy (const DoFHandler &dof_handler, const Vector &function) { // First define the quadrature formula and - // a ``FEValues'' object with which to + // a FEValues object with which to // compute the values of the input function // at the quadrature points. Note again // that the integrand is a polynomial of @@ -1273,7 +1273,7 @@ MinimizationProblem::energy (const DoFHandler &dof_handler, for (; cell!=endc; ++cell) { // On each cell, initialize the - // ``FEValues'' object, and extract + // FEValues object, and extract // values and gradients of the given // function: fe_values.reinit (cell); @@ -1302,7 +1302,7 @@ MinimizationProblem::energy (const DoFHandler &dof_handler, // So here is the driver function, - // ``run()''. It generate a coarse mesh, + // run(). It generate a coarse mesh, // refines it a couple of times, and // initializes the starting values. It then // goes into a loop in which we first set up @@ -1350,7 +1350,7 @@ void MinimizationProblem::run () } - // Finally: ``main()''. This function does + // Finally: main(). This function does // what all its counterparts in previous // examples already did, i.e. create an // object of the main class, and hand off diff --git a/deal.II/examples/step-17/step-17.cc b/deal.II/examples/step-17/step-17.cc index b16e099399..65969866ca 100644 --- a/deal.II/examples/step-17/step-17.cc +++ b/deal.II/examples/step-17/step-17.cc @@ -42,8 +42,8 @@ // need particularly for this example // program and that weren't in // step-8. First, we replace the - // standard output ``std::cout'' by a - // new stream ``pcout'' which is used + // standard output std::cout by a + // new stream pcout which is used // in parallel computations for // generating output only on one of // the MPI processes. @@ -83,9 +83,9 @@ // for partitioning our meshes so that they // can be efficiently distributed across an // MPI network. The partitioning algorithm is - // implemented in the ``GridTools'' class, + // implemented in the GridTools class, // and we need an additional include file for - // a function in ``DoFRenumbering'' that + // a function in DoFRenumbering that // allows to sort the indices associated with // degrees of freedom so that they are // numbered according to the subdomain they @@ -106,12 +106,12 @@ // copied verbatim from step-8, so we only // comment on the few things that are // different. There is one (cosmetic) change - // in that we let ``solve'' return a value, + // in that we let solve return a value, // namely the number of iterations it took to // converge, so that we can output this to // the screen at the appropriate place. In // addition, we introduce a stream-like - // variable ``pcout'', explained below: + // variable pcout, explained below: template class ElasticProblem { @@ -135,12 +135,12 @@ class ElasticProblem // to only have one process output // everything once, for example the one // with process number - // zero. ``ConditionalOStream'' does + // zero. ConditionalOStream does // exactly this: it acts as if it were a // stream, but only forwards to a real, // underlying stream if a flag is set. 
By // setting this condition to - // ``this_mpi_process==0'', we make sure + // this_mpi_process==0, we make sure // that output is only generated from the // first process and that we don't get // the same lines of output over and over @@ -148,8 +148,8 @@ class ElasticProblem // // With this simple trick, we make sure // that we don't have to guard each and - // every write to ``std::cout'' by a - // prefixed ``if(this_mpi_process==0)''. + // every write to std::cout by a + // prefixed if(this_mpi_process==0). ConditionalOStream pcout; // The next few variables are taken @@ -171,10 +171,10 @@ class ElasticProblem // fact that we use the parallel versions // is denoted the fact that we use the // classes from the - // ``PETScWrappers::MPI'' namespace; + // PETScWrappers::MPI namespace; // sequential versions of these classes - // are in the ``PETScWrappers'' - // namespace, i.e. without the ``MPI'' + // are in the PETScWrappers + // namespace, i.e. without the MPI // part). Note also that we do not use a // separate sparsity pattern, since PETSc // manages that as part of its matrix @@ -191,7 +191,7 @@ class ElasticProblem // computations. Note that if this is a // sequential job without support by MPI, // then PETSc provides some dummy type - // for ``MPI_Comm'', so we do not have to + // for MPI_Comm, so we do not have to // care here whether the job is really a // parallel one: MPI_Comm mpi_communicator; @@ -199,19 +199,19 @@ class ElasticProblem // Then we have two variables that tell // us where in the parallel world we // are. The first of the following - // variables, ``n_mpi_processes'' tells + // variables, n_mpi_processes tells // us how many MPI processes there exist // in total, while the second one, - // ``this_mpi_process'', indicates which + // this_mpi_process, indicates which // is the number of the present process // within this space of processes. The // latter variable will have a unique // value for each process between zero // and (less than) - // ``n_mpi_processes''. If this program + // n_mpi_processes. If this program // is run on a single machine without MPI - // support, then their values are ``1'' - // and ``0'', respectively. + // support, then their values are 1 + // and 0, respectively. const unsigned int n_mpi_processes; const unsigned int this_mpi_process; }; @@ -335,9 +335,9 @@ void ElasticProblem::setup_system () // system, there is one thing to do for a // parallel program: we need to assign // cells to each of the processes. We do - // this by splitting (``partitioning'') the + // this by splitting (partitioning) the // mesh cells into as many chunks - // (``subdomains'') as there are processes + // (subdomains) as there are processes // in this MPI job (if this is a sequential // job, then there is only one job and all // cells will get a zero as subdomain @@ -393,13 +393,13 @@ void ElasticProblem::setup_system () // to the number of degrees of freedom), // and also how many rows out of this // global size are to be stored locally - // (``n_local_dofs''). In addition, PETSc + // (n_local_dofs). 
In addition, PETSc // needs to know how to partition the // columns in the chunk of the matrix that // is stored locally; for square matrices, // the columns should be partitioned in the // same way as the rows (indicated by the - // second ``n_local_dofs'' in the call) but + // second n_local_dofs in the call) but // in the case of rectangular matrices one // has to partition the columns in the same // way as vectors are partitioned with @@ -445,7 +445,7 @@ void ElasticProblem::setup_system () // freedom are split in a way such that all // DoFs in the interior of cells and between // cells belonging to the same subdomain - // belong to the process that ``owns'' the + // belong to the process that owns the // cell. However, even then we sometimes need // to assemble on a cell with a neighbor that // belongs to a different process, and in @@ -457,7 +457,7 @@ void ElasticProblem::setup_system () // this by hand, PETSc does all this for us // by caching these elements locally, and // sending them to the other processes as - // necessary when we call the ``compress()'' + // necessary when we call the compress() // functions on the matrix and vector at the // end of this function. // @@ -491,7 +491,7 @@ void ElasticProblem::setup_system () // to do this first part: instead of // copying elements by hand into the // global matrix, we use the - // ``distribute_local_to_global'' + // distribute_local_to_global // functions below to take care of // hanging nodes at the same // time. The second step, elimination @@ -547,12 +547,12 @@ void ElasticProblem::assemble_system () // generality, the subdomain id is used to // split a domain into several parts (we do // this above, at the beginning of - // ``setup_system''), and which allows to + // setup_system), and which allows to // identify which subdomain a cell is // living on. In this application, we have // each process handle exactly one // subdomain, so we identify the terms - // ``subdomain'' and ``MPI process'' with + // subdomain and MPI process with // each other. // // Apart from this, assembling the local @@ -691,7 +691,7 @@ void ElasticProblem::assemble_system () // whether we should also delete the // column corresponding to a boundary // node, or keep it (and passing - // ``true'' as above means: yes, do + // true as above means: yes, do // eliminate the column). If we do, // then the resulting matrix will be // symmetric again if it was before; @@ -1067,7 +1067,7 @@ void ElasticProblem::refine_grid () // Now all processes have computed error // indicators for their own cells and // stored them in the respective elements - // of the ``local_error_per_cell'' + // of the local_error_per_cell // vector. The elements of this vector for // cells not on the present process are // zero. However, since all processes have @@ -1109,7 +1109,7 @@ void ElasticProblem::refine_grid () // process. They will subsequently have to // be copied into another process's memory // space then, an operation that PETSc does - // for us when we call the ``compress'' + // for us when we call the compress // function. This inefficiency could be // avoided with some more code, but we // refrain from it since it is not a major @@ -1158,8 +1158,8 @@ void ElasticProblem::refine_grid () // Lastly, here is the driver function. It is // almost unchanged from step-8, with the - // exception that we replace ``std::cout'' by - // the ``pcout'' stream. Apart from this, the + // exception that we replace std::cout by + // the pcout stream. 
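Condensed to its essentials, the pcout idiom looks as follows. The sketch is written against a recent version of deal.II and uses the Utilities::MPI helpers for initialization and for querying the process number; the 2006 sources in this patch set these things up somewhat differently, but the ConditionalOStream usage is the same:

#include <deal.II/base/conditional_ostream.h>
#include <deal.II/base/mpi.h>

#include <iostream>

int main (int argc, char **argv)
{
  dealii::Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);

  const unsigned int this_mpi_process
    = dealii::Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);

  // Forward to std::cout only on process zero; on every other process
  // the stream silently discards whatever is written to it.
  dealii::ConditionalOStream pcout (std::cout, this_mpi_process == 0);

  pcout << "This line appears exactly once, not once per MPI process."
        << std::endl;
}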
Apart from this, the // only other cosmetic change is that we // output how many degrees of freedom there // are per process, and how many iterations @@ -1206,10 +1206,10 @@ void ElasticProblem::run () } - // So that's it, almost. ``main()'' works the + // So that's it, almost. main() works the // same way as most of the main functions in // the other example programs, i.e. it - // delegates work to the ``run'' function of + // delegates work to the run function of // a master object, and only wraps everything // into some code to catch exceptions: int main (int argc, char **argv) @@ -1220,17 +1220,17 @@ int main (int argc, char **argv) // PETSc requires that we initialize it // at the beginning of the program, and // un-initialize it at the end. So we - // call ``PetscInitialize'' and - // ``PetscFinalize''. The original code + // call PetscInitialize and + // PetscFinalize. The original code // sits in between, enclosed in braces // to make sure that the - // ``elastic_problem'' variable goes + // elastic_problem variable goes // out of scope (and is destroyed) // before we call - // ``PetscFinalize''. (If we wouldn't + // PetscFinalize. (If we wouldn't // use braces, the destructor of - // ``elastic_problem'' would run after - // ``PetscFinalize''; since the + // elastic_problem would run after + // PetscFinalize; since the // destructor involves calls to PETSc // functions, we would get strange // error messages from PETSc.) diff --git a/deal.II/examples/step-18/step-18.cc b/deal.II/examples/step-18/step-18.cc index 52f81117b8..8302e1d6b7 100644 --- a/deal.II/examples/step-18/step-18.cc +++ b/deal.II/examples/step-18/step-18.cc @@ -77,7 +77,7 @@ namespace QuasiStaticElasticity { - // @sect3{The ``PointHistory'' class} + // @sect3{The PointHistory class} // As was mentioned in the introduction, we // have to store the old stress in @@ -101,8 +101,8 @@ namespace QuasiStaticElasticity // constructors, destructors, or other // member functions. In such cases of // `dumb' classes, we usually opt to - // declare them as ``struct'' rather than - // ``class'', to indicate that they are + // declare them as struct rather than + // class, to indicate that they are // closer to C-style structures than // C++-style classes. template @@ -177,12 +177,12 @@ namespace QuasiStaticElasticity // need as tools. These are small // functions that are called in // inner loops, so we mark them as - // ``inline''. + // inline. // // The first one computes the // symmetric strain tensor for - // shape function ``shape_func'' at - // quadrature point ``q_point'' by + // shape function shape_func at + // quadrature point q_point by // forming the symmetric gradient // of this shape function. We need // that when we want to form the @@ -198,12 +198,12 @@ namespace QuasiStaticElasticity // avoided to compute any terms // that we could prove were zero // anyway. 
For this, we used the - // ``fe.system_to_component_index'' + // fe.system_to_component_index // function that returns in which // component a shape function was // zero, and also that the - // ``fe_values.shape_value'' and - // ``fe_values.shape_grad'' + // fe_values.shape_value and + // fe_values.shape_grad // functions only returned the // value and gradient of the single // non-zero component of a shape @@ -214,27 +214,27 @@ namespace QuasiStaticElasticity // it isn't terribly time critical, // we can get away with a simpler // technique: just ask the - // ``fe_values'' for the value or + // fe_values for the value or // gradient of a given component of // a given shape function at a // given quadrature point. This is // what the - // ``fe_values.shape_grad_component(shape_func,q_point,i)'' + // fe_values.shape_grad_component(shape_func,q_point,i) // call does: return the full - // gradient of the ``i''th + // gradient of the ith // component of shape function - // ``shape_func'' at quadrature - // point ``q_point''. If a certain + // shape_func at quadrature + // point q_point. If a certain // component of a certain shape // function is always zero, then // this will simply always return // zero. // // As mentioned, using - // ``fe_values.shape_grad_component'' + // fe_values.shape_grad_component // instead of the combination of - // ``fe.system_to_component_index'' - // and ``fe_values.shape_grad'' may + // fe.system_to_component_index + // and fe_values.shape_grad may // be less efficient, but its // implementation is optimized for // such cases and shouldn't be a @@ -255,8 +255,8 @@ namespace QuasiStaticElasticity // First, fill diagonal terms // which are simply the - // derivatives in direction ``i'' - // of the ``i'' component of the + // derivatives in direction i + // of the i component of the // vector-valued shape // function: for (unsigned int i=0; iSymmetricTensor class // makes sure that at least to // the outside the symmetric // entries are also filled (in @@ -297,7 +297,7 @@ namespace QuasiStaticElasticity // of a vector-valued field. If you // already have a solution field, // the - // ``fe_values.get_function_grads'' + // fe_values.get_function_grads // function allows you to extract // the gradients of each component // of your solution field at a @@ -315,7 +315,7 @@ namespace QuasiStaticElasticity // by filling first the diagonal // and then only one half of the // symmetric tensor (the - // ``SymmetricTensor'' class makes + // SymmetricTensor class makes // sure that it is sufficient to // write only one of the two // symmetric components). @@ -323,11 +323,11 @@ namespace QuasiStaticElasticity // Before we do this, though, we // make sure that the input has the // kind of structure we expect: - // that is that there are ``dim'' + // that is that there are dim // vector components, i.e. one // displacement component for each // coordinate direction. We test - // this with the ``Assert'' macro + // this with the Assert macro // that will simply abort our // program if the condition is not // met. @@ -430,7 +430,7 @@ namespace QuasiStaticElasticity // // The reason why we stress that is that // in this case we have that - // ``tan_angle==0''. Further down, we + // tan_angle==0. 
Further down, we // need to divide by that number in the // computation of the axis of rotation, // and we would get into trouble when @@ -477,7 +477,7 @@ namespace QuasiStaticElasticity - // @sect3{The ``TopLevel'' class} + // @sect3{The TopLevel class} // This is the main class of the // program. Since the namespace already @@ -498,7 +498,7 @@ namespace QuasiStaticElasticity // The external interface of the class, // however, is unchanged: it has a public // constructor and desctructor, and it has - // a ``run'' function that initiated all + // a run function that initiated all // the work. template class TopLevel @@ -633,9 +633,9 @@ namespace QuasiStaticElasticity std::vector > quadrature_point_history; // The way this object is accessed is - // through a ``user pointer'' that each + // through a user pointer that each // cell, face, or edge holds: it is a - // ``void*'' pointer that can be used + // void* pointer that can be used // by application programs to associate // arbitrary data to cells, faces, or // edges. What the program actually @@ -690,10 +690,10 @@ namespace QuasiStaticElasticity // for all, and instead get rid of the // distributed copy immediately. Thus, // note that the declaration of - // ``inremental_displacement'' does not + // inremental_displacement does not // denote a distribute vector as would // be indicated by the middle namespace - // ``MPI'': + // MPI: PETScWrappers::MPI::SparseMatrix system_matrix; PETScWrappers::MPI::Vector system_rhs; @@ -742,7 +742,7 @@ namespace QuasiStaticElasticity // Next, how many degrees of freedom // the present processor stores. This // is, of course, an abbreviation to - // ``local_dofs_per_process[this_mpi_process]''. + // local_dofs_per_process[this_mpi_process]. unsigned int n_local_dofs; // In the same direction, also @@ -753,7 +753,7 @@ namespace QuasiStaticElasticity // necessarily contiguously // numbered (when iterating // over them using - // ``active_cell_iterator''). + // active_cell_iterator). unsigned int n_local_cells; // Finally, we have a @@ -773,7 +773,7 @@ namespace QuasiStaticElasticity }; - // @sect3{The ``BodyForce'' class} + // @sect3{The BodyForce class} // Before we go on to the main // functionality of this program, we have @@ -796,11 +796,11 @@ namespace QuasiStaticElasticity // not electrically conducting or that // there are no significant electromagnetic // fields around. In that case, the body - // forces are simply ``rho g'', where - // ``rho'' is the material density and - // ``g'' is a vector in negative + // forces are simply rho g, where + // rho is the material density and + // g is a vector in negative // z-direction with magnitude 9.81 m/s^2. - // Both the density and ``g'' are defined + // Both the density and g are defined // in the function, and we take as the // density 7700 kg/m^3, a value commonly // assumed for steel. @@ -808,10 +808,10 @@ namespace QuasiStaticElasticity // To be a little more general and to be // able to do computations in 2d as well, // we realize that the body force is always - // a function returning a ``dim'' + // a function returning a dim // dimensional vector. We assume that // gravity acts along the negative - // direction of the last, i.e. ``dim-1''th + // direction of the last, i.e. dim-1th // coordinate. 
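A sketch of such a vector-valued gravity function, using the density and gravitational acceleration quoted above, might look like this (modern include paths assumed; the BodyForce class in this file has essentially this shape, but treat the snippet as an illustration rather than a verbatim copy):

#include <deal.II/base/exceptions.h>
#include <deal.II/base/function.h>
#include <deal.II/base/point.h>
#include <deal.II/lac/vector.h>

#include <iostream>

template <int dim>
class BodyForce : public dealii::Function<dim>
{
public:
  // A vector-valued function with dim components.
  BodyForce () : dealii::Function<dim> (dim) {}

  virtual void vector_value (const dealii::Point<dim> & /*p*/,
                             dealii::Vector<double> &values) const override
  {
    Assert (values.size () == dim,
            dealii::ExcDimensionMismatch (values.size (), dim));

    const double g   = 9.81;    // m/s^2
    const double rho = 7700.;   // kg/m^3, a value typical for steel

    // Zero in all components except the last one, which points in
    // negative z-direction (negative last-coordinate direction).
    values           = 0;
    values (dim - 1) = -rho * g;
  }
};

int main ()
{
  BodyForce<3> body_force;
  dealii::Vector<double> gravity (3);
  body_force.vector_value (dealii::Point<3> (), gravity);
  std::cout << "body force = (" << gravity (0) << ", "
            << gravity (1) << ", " << gravity (2) << ")" << std::endl;
}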
The rest of the // implementation of this function should // be mostly self-explanatory given similar @@ -821,7 +821,7 @@ namespace QuasiStaticElasticity // compiler warnings about unused function // arguments, we therefore comment out the // name of the first argument of the - // ``vector_value'' function: + // vector_value function: template class BodyForce : public Function { @@ -882,7 +882,7 @@ namespace QuasiStaticElasticity - // @sect3{The ``IncrementalBoundaryValue'' class} + // @sect3{The IncrementalBoundaryValue class} // In addition to body forces, movement can // be induced by boundary forces and forced @@ -923,7 +923,7 @@ namespace QuasiStaticElasticity // rest of the boundary is either // going to be fixed (and is then // described using an object of - // type ``ZeroFunction'') or free + // type ZeroFunction) or free // (Neumann-type, in which case // nothing special has to be done). // The implementation of the @@ -1001,7 +1001,7 @@ namespace QuasiStaticElasticity - // @sect3{Implementation of the ``TopLevel'' class} + // @sect3{Implementation of the TopLevel class} // Now for the implementation of the main // class. First, we initialize the @@ -1024,7 +1024,7 @@ namespace QuasiStaticElasticity // constructors and descructors. There are // no surprises here: we choose linear and // continuous finite elements for each of - // the ``dim'' vector components of the + // the dim vector components of the // solution, and a Gaussian quadrature // formula with 2 points in each coordinate // direction. The destructor should be @@ -1053,7 +1053,7 @@ namespace QuasiStaticElasticity // The last of the public functions is the // one that directs all the work, - // ``run()''. It initializes the variables + // run(). It initializes the variables // that describe where in time we presently // are, then runs the first time step, then // loops over all the other time @@ -1266,8 +1266,8 @@ namespace QuasiStaticElasticity // this happens is mostly a matter // of taste; here, we chose to do // it when grids are created since - // in the ``do_initial_timestep'' - // and ``do_timestep'' functions we + // in the do_initial_timestep + // and do_timestep functions we // want to output the number of // cells on each processor at a // point where we haven't called @@ -1344,17 +1344,17 @@ namespace QuasiStaticElasticity DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); hanging_node_constraints.condense (sparsity_pattern); // Note that we have used the - // ``CompressedSparsityPattern'' class + // CompressedSparsityPattern class // here that was already introduced in // step-11, rather than the - // ``SparsityPattern'' class that we have + // SparsityPattern class that we have // used in all other cases. The reason // for this is that for the latter class // to work we have to give an initial // upper bound for the number of entries // in each row, a task that is // traditionally done by - // ``DoFHandler::max_couplings_between_dofs()''. However, + // DoFHandler::max_couplings_between_dofs(). However, // this function suffers from a serious // problem: it has to compute an upper // bound to the number of nonzero entries @@ -1363,11 +1363,11 @@ namespace QuasiStaticElasticity // 3d. In effect, while it is quite // accurate in 2d, it often comes up with // much too large a number in 3d, and in - // that case the ``SparsityPattern'' + // that case the SparsityPattern // allocates much too much memory at // first, often several 100 MBs. 
This is // later corrected when - // ``DoFTools::make_sparsity_pattern'' is + // DoFTools::make_sparsity_pattern is // called and we realize that we don't // need all that much memory, but at time // it is already too late: for large @@ -1376,7 +1376,7 @@ namespace QuasiStaticElasticity // out-of-memory situations. // // In order to avoid this, we resort to - // the ``CompressedSparsityPattern'' + // the CompressedSparsityPattern // class that is slower but does not // require any up-front estimate on the // number of nonzero entries per row. It @@ -1423,7 +1423,7 @@ namespace QuasiStaticElasticity // After this point, no further explicit // knowledge of the sparsity pattern is // required any more and we can let the - // ``sparsity_pattern'' variable go out + // sparsity_pattern variable go out // of scope without any problem. // The last task in this function @@ -1458,7 +1458,7 @@ namespace QuasiStaticElasticity // stresses. In addition, // assembling the matrix is made // significantly more transparent - // by using the ``SymmetricTensor'' + // by using the SymmetricTensor // class: note the elegance of // forming the scalar products of // symmetric tensors of rank 2 and @@ -1514,10 +1514,10 @@ namespace QuasiStaticElasticity // symmetric gradients (strains) of // the shape functions at a given // quadrature point from the - // ``FEValues'' object, and the + // FEValues object, and the // elegance with which we form the - // triple contraction ``eps_phi_i : - // C : eps_phi_j''; the latter + // triple contraction eps_phi_i : + // C : eps_phi_j; the latter // needs to be compared to the // clumsy computations needed in // step-17, both in the @@ -1608,7 +1608,7 @@ namespace QuasiStaticElasticity // already did in previous // programs. A slight // complication is that the - // ``apply_boundary_values'' + // apply_boundary_values // function wants to have a // solution vector compatible // with the matrix and right hand @@ -1674,10 +1674,10 @@ namespace QuasiStaticElasticity // direction. For the boundary // with indicator 1 (top // surface), we use the - // ``IncrementalBoundaryValues'' + // IncrementalBoundaryValues // class, but we specify an // additional argument to the - // ``VectorTools::interpolate_boundary_values'' + // VectorTools::interpolate_boundary_values // function denoting which vector // components it should apply to; // this is a vector of bools for @@ -1754,9 +1754,9 @@ namespace QuasiStaticElasticity // vector and initialize it with // the contents of the local // variable (remember that the - // ``apply_boundary_values'' + // apply_boundary_values // function called in - // ``assemble_system'' preset the + // assemble_system preset the // values of boundary nodes in this // vector), solve with it, and at // the end of the function copy it @@ -1810,25 +1810,25 @@ namespace QuasiStaticElasticity // the introduction. // // The crucial part of this function is to - // give the ``DataOut'' class a way to only + // give the DataOut class a way to only // work on the cells that the present // process owns. This class is already // well-equipped for that: it has two - // virtual functions ``first_cell'' and - // ``next_cell'' that return the first cell + // virtual functions first_cell and + // next_cell that return the first cell // to be worked on, and given one cell // return the next cell to be worked on. By // default, these functions return the // first active cell (i.e. the first one // that has no children) and the next // active cell. 
What we have to do here is - // derive a class from ``DataOut'' that + // derive a class from DataOut that // overloads these two functions to only // iterate over those cells with the right // subdomain indicator. // // We do this at the beginning of this - // function. The ``first_cell'' function + // function. The first_cell function // just starts with the first active cell, // and then iterates to the next cells // while the cell presently under @@ -1838,7 +1838,7 @@ namespace QuasiStaticElasticity // we don't try to keep iterating when we // have hit the end iterator. // - // The ``next_cell'' function could be + // The next_cell function could be // implemented in a similar way. However, // we use this occasion as a pretext to // introduce one more thing that the @@ -1965,7 +1965,7 @@ namespace QuasiStaticElasticity // other words, it seems as if we can't // compute the average stresses for all // cells. However, remember that our - // class derived from ``DataOut'' only + // class derived from DataOut only // iterates over those cells that // actually do belong to the present // processor, i.e. we don't have to @@ -2053,7 +2053,7 @@ namespace QuasiStaticElasticity // determine the name of the file // we will want to write it // to. We compose it of the - // prefix ``solution-'', followed + // prefix solution-, followed // by a representation of the // present time written as a // fixed point number so that @@ -2082,8 +2082,8 @@ namespace QuasiStaticElasticity // would overflow if there were // 1000 processes or more. Note // that we choose to use - // ``AssertThrow'' rather than - // ``Assert'' since the number of + // AssertThrow rather than + // Assert since the number of // processes is a variable that // depends on input files or the // way the process is started, @@ -2091,7 +2091,7 @@ namespace QuasiStaticElasticity // in the program // code. Therefore, it is // inappropriate to use - // ``Assert'' that is optimized + // Assert that is optimized // away in optimized mode, // whereas here we actually can // assume that users will run the @@ -2152,7 +2152,7 @@ namespace QuasiStaticElasticity // output to the console to update the // person watching the screen on what is // going on. As in step-17, the use of - // ``pcout'' instead of ``std::cout'' makes + // pcout instead of std::cout makes // sure that only one of the parallel // processes is actually writing to the // console, without having to explicitly @@ -2323,9 +2323,9 @@ namespace QuasiStaticElasticity // function. First, how we get the // displacement field at a given vertex // using the - // ``cell-@>vertex_dof_index(v,d)'' function - // that returns the index of the ``d''th - // degree of freedom at vertex ``v'' of the + // cell-@>vertex_dof_index(v,d) function + // that returns the index of the dth + // degree of freedom at vertex v of the // given cell. In the present case, // displacement in the k-th coordinate // direction corresonds to the kth @@ -2333,7 +2333,7 @@ namespace QuasiStaticElasticity // function like this bears a certain risk, // because it uses knowledge of the order // of elements that we have taken together - // for this program in the ``FESystem'' + // for this program in the FESystem // element. If we decided to add an // additional variable, for example a // pressure variable for stabilization, and @@ -2347,7 +2347,7 @@ namespace QuasiStaticElasticity // associated with vertices. 
This is indeed // the case for the present Q1 element, as // would be for all Qp elements of - // polynomial order ``p''. However, it + // polynomial order p. However, it // would not hold for discontinuous // elements, or elements for mixed // formulations. Secondly, it also rests on @@ -2371,23 +2371,23 @@ namespace QuasiStaticElasticity // be. For general finite elements, the way // to go would be to take a quadrature // formula with the quadrature points in - // the vertices of a cell. The ``QTrapez'' + // the vertices of a cell. The QTrapez // formula for the trapezoidal rule does // exactly this. With this quadrature // formula, we would then initialize an - // ``FEValues'' object in each cell, and + // FEValues object in each cell, and // use the - // ``FEValues::get_function_values'' + // FEValues::get_function_values // function to obtain the values of the // solution function in the quadrature // points, i.e. the vertices of the // cell. These are the only values that we // really need, i.e. we are not at all // interested in the weights (or the - // ``JxW'' values) associated with this + // JxW values) associated with this // particular quadrature formula, and this // can be specified as the last argument in - // the constructor to ``FEValues''. The + // the constructor to FEValues. The // only point of minor inconvenience in // this scheme is that we have to figure // out which quadrature point corresponds @@ -2399,7 +2399,7 @@ namespace QuasiStaticElasticity // this short function is the way in which // the triangulation class exports // information about its vertices: through - // the ``Triangulation::n_vertices'' + // the Triangulation::n_vertices // function, it advertises how many // vertices there are in the // triangulation. Not all of them are @@ -2410,9 +2410,9 @@ namespace QuasiStaticElasticity // the number of a vertex once it has come // into existence, even if vertices with // lower number go away. Secondly, the - // location returned by ``cell-@>vertex(v)'' + // location returned by cell-@>vertex(v) // is not only a read-only object of type - // ``Point@'', but in fact a reference + // Point@, but in fact a reference // that can be written to. This allows to // move around the nodes of a mesh with // relative ease, but it is worth pointing @@ -2460,7 +2460,7 @@ namespace QuasiStaticElasticity // history variables, such as the existing // stresses in the material, that we store // in each quadrature point. As mentioned - // above, we use the ``user_pointer'' for + // above, we use the user_pointer for // this that is available in each cell. // // To put this into larger perspective, we @@ -2513,7 +2513,7 @@ namespace QuasiStaticElasticity // Next, allocate as many quadrature // objects as we need. Since the - // ``resize'' function does not actually + // resize function does not actually // shrink the amount of allocated memory // if the requested new size is smaller // than the old size, we resort to a @@ -2523,7 +2523,7 @@ namespace QuasiStaticElasticity // and then swap the contents of the old // vector and this temporary // variable. This makes sure that the - // ``quadrature_point_history'' is now + // quadrature_point_history is now // really empty, and we can let the // temporary variable that now holds the // previous contents of the vector go out @@ -2531,7 +2531,7 @@ namespace QuasiStaticElasticity // step. 
we can then re-allocate as many // elements as we need, with the vector // default-initializing the - // ``PointHistory'' objects, which + // PointHistory objects, which // includes setting the stress variables // to zero. { @@ -2569,7 +2569,7 @@ namespace QuasiStaticElasticity // this function that forget to update // all uses of a variable at the same // time. Recall that constructs using the - // ``Assert'' macro are optimized away in + // Assert macro are optimized away in // optimized mode, so do not affect the // run time of optimized runs: Assert (history_index == quadrature_point_history.size(), @@ -2674,7 +2674,7 @@ namespace QuasiStaticElasticity template void TopLevel::update_quadrature_point_history () { - // First, set up an ``FEValues'' object + // First, set up an FEValues object // by which we will evaluate the // incremental displacements and the // gradients thereof at the quadrature @@ -2709,7 +2709,7 @@ namespace QuasiStaticElasticity &quadrature_point_history.back(), ExcInternalError()); - // Then initialize the ``FEValues'' + // Then initialize the FEValues // object on the present cell, and // extract the gradients of the // displacement at the quadrature @@ -2762,7 +2762,7 @@ namespace QuasiStaticElasticity // tensor by contraction from // the left and right, after we // expand the symmetric tensor - // ``new_stress'' into a full + // new_stress into a full // tensor: const SymmetricTensor<2,dim> rotated_new_stress = symmetrize(transpose(rotation) * @@ -2783,7 +2783,7 @@ namespace QuasiStaticElasticity // result. When // assigning the result // to a - // ``SymmetricTensor'', + // SymmetricTensor, // the constuctor of // that class checks // the symmetry and @@ -2809,9 +2809,9 @@ namespace QuasiStaticElasticity // This ends the project specific // namespace - // ``QuasiStaticElasticity''. The + // QuasiStaticElasticity. The // rest is as usual and as already - // shown in step-17: A ``main()'' + // shown in step-17: A main() // function that initializes and // terminates PETSc, calls the // classes that do the actual work, diff --git a/deal.II/examples/step-19/step-19.cc b/deal.II/examples/step-19/step-19.cc index 6382e2685e..9b5d30a731 100644 --- a/deal.II/examples/step-19/step-19.cc +++ b/deal.II/examples/step-19/step-19.cc @@ -66,7 +66,7 @@ std::string output_format; // prints a general message, and then goes on // to list the parameters that are allowed in // the parameter file (the - // ``ParameterHandler'' class has a function + // ParameterHandler class has a function // to do exactly this; see the results // section for what it prints): void @@ -105,7 +105,7 @@ print_usage_message () // format, we nevertheless want to show how // to work with parameter files. // - // In short, the ``ParameterHandler'' class + // In short, the ParameterHandler class // works as follows: one declares the entries // of parameters that can be given in input // files together, and later on one can read @@ -115,11 +115,11 @@ print_usage_message () // value specified in the declaration of that // parameter is used. After that, the program // can query the values assigned to certain - // parameters from the ``ParameterHandler'' + // parameters from the ParameterHandler // object. // // Declaring parameters can be done using the - // ``ParameterHandler::declare_entry'' + // ParameterHandler::declare_entry // function. 
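A minimal sketch of the declare/read/query cycle follows; the entry names are invented for illustration, the parameter file is assumed to exist, and the snippet is written against a recent version of the library (which spells the reading function parse_input):

#include <deal.II/base/parameter_handler.h>

#include <iostream>

int main ()
{
  dealii::ParameterHandler prm;

  // Declare entries with a default value, a pattern constraining the
  // admissible values, and a documentation string.
  prm.declare_entry ("Output file", "output.gpl",
                     dealii::Patterns::Anything (),
                     "Name of the file to which output is written");
  prm.declare_entry ("Refinement steps", "3",
                     dealii::Patterns::Integer (0, 10),
                     "How often the mesh is refined globally");

  // Read the run-time parameters from a file (assumed to be present);
  // entries the file does not set keep their default values.
  prm.parse_input ("parameters.prm");

  // Query the values, either as strings or already converted.
  const std::string  output_file = prm.get ("Output file");
  const unsigned int n_steps     = prm.get_integer ("Refinement steps");

  std::cout << "writing to " << output_file
            << " after " << n_steps << " refinement steps" << std::endl;
}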
It's arguments are the name of a // parameter, a default value (given as a // string, even if the parameter is numeric @@ -127,26 +127,26 @@ print_usage_message () // describes constraints on values that may // be passed to this parameter. In the // example below, we use an object of type - // ``Patterns::Anything'' to denote that + // Patterns::Anything to denote that // there are no constraints on file names // (this is, of course, not true -- the // operating system does have constraints, // but from an application standpoint, almost // all names are valid). In other cases, one // may, for example, use - // ``Patterns::Integer'' to make sure that + // Patterns::Integer to make sure that // only parameters are accepted that can be // interpreted as integer values (it is also // possible to specify bounds for integer // values, and all values outside this range - // are rejected), ``Patterns::Double'' for + // are rejected), Patterns::Double for // floating point values, classes that make // sure that the given parameter value is a // comma separated list of things, etc. Take - // a look at the ``Patterns'' namespace to + // a look at the Patterns namespace to // see what is possible. // - // The fourth argument to ``declare_entry'' + // The fourth argument to declare_entry // is a help string that can be printed to // document what this parameter is meant to // be used for and other information you may @@ -155,7 +155,7 @@ print_usage_message () // fourth argument is the empty string. // // I always wanted to have an example program - // describing the ``ParameterHandler'' class, + // describing the ParameterHandler class, // because it is so particularly useful. It // would have been useful in a number of // previous example programs (for example, in @@ -174,13 +174,13 @@ print_usage_message () // out: declaring and querying these // parameters was already done centralized in // one place of the libray, namely the - // ``DataOutInterface'' class that handles + // DataOutInterface class that handles // exactly this -- managing parameters for // input and output. // // So the second function call in this // function is to let the - // ``DataOutInterface'' declare a good number + // DataOutInterface declare a good number // of parameters that control everything from // the output format to what kind of output // should be generated if output is written @@ -194,14 +194,14 @@ print_usage_message () // options up front, when output is // generated, rather than playing around with // them later on. The call to - // ``DataOutInterface::declare_parameters'' + // DataOutInterface::declare_parameters // declares entries that allow to specify // them in the parameter input file during // run-time. If the parameter file does not // contain entries for them, defaults are // taken. // - // As a final note: ``DataOutInterface'' is a + // As a final note: DataOutInterface is a // template, because it is usually used to // write output for a specific space // dimension. However, this program is @@ -212,7 +212,7 @@ print_usage_message () // parameter. Fortunately, declaring // parameters is something that is space // dimension independent, so we can just pick - // one arbitrarily. We pick ``1'', but it + // one arbitrarily. We pick 1, but it // could have been any other number as well. void declare_parameters () { @@ -301,7 +301,7 @@ void declare_parameters () // the list of input files can't, so at least // one parameter needs to be there. 
Together // with the name of the program (the zeroth - // parameter), ``argc'' must therefore be at + // parameter), argc must therefore be at // least 2. If this is not the case, we print // an error message and exit: void @@ -316,7 +316,7 @@ parse_command_line (const int argc, // Next, collect all parameters in a list // that will be somewhat simpler to handle - // than the ``argc''/``argv'' mechanism. We + // than the argc/argv mechanism. We // omit the name of the executable at the // zeroth index: std::list args; @@ -325,12 +325,12 @@ parse_command_line (const int argc, // Then process all these // parameters. If the parameter is - // ``-p'', then there must be a + // -p, then there must be a // parameter file following (which // we should then read), in case of - // ``-x'' it is the name of an + // -x it is the name of an // output format. Finally, for - // ``-o'' it is the name of the + // -o it is the name of the // output file. In all cases, once // we've treated a parameter, we // remove it from the list of @@ -378,10 +378,10 @@ parse_command_line (const int argc, // the dummy subsection, we would // write something like this to // extract the value of the boolean - // flag (the ``prm.get'' function + // flag (the prm.get function // returns the value of a parameter // as a string, whereas the - // ``prm.get_X'' functions return a + // prm.get_X functions return a // value already converted to a // different type): prm.enter_subsection ("Dummy subsection"); @@ -467,11 +467,11 @@ parse_command_line (const int argc, // from all the input file, and read in the // first file through a stream. Note that // every time we open a file, we use the - // ``AssertThrow'' macro to check whether the + // AssertThrow macro to check whether the // file is really readable -- if it isn't // then this will trigger an exception and // corresponding output will be generated - // from the exception handler in ``main()'': + // from the exception handler in main(): template void do_convert () { @@ -502,7 +502,7 @@ void do_convert () // stream, and parse what we got as the // name of the output format into an // identifier. Fortunately, the - // ``DataOutBase'' class has a function + // DataOutBase class has a function // that does this parsing for us, i.e. it // knows about all the presently supported // output formats and makes sure that they @@ -548,16 +548,16 @@ void do_convert () // // So here is what we do: from the first // input file, we determine (using a function - // in ``DataOutBase'' that exists for this + // in DataOutBase that exists for this // purpose) these dimensions. We then have a // series of switches that dispatch, - // statically, to the ``do_convert'' + // statically, to the do_convert // functions with different template // arguments. Not pretty, but works. Apart // from this, the function does nothing -- // except making sure that it covered the // dimensions for which it was called, using - // the ``AssertThrow'' macro at places in the + // the AssertThrow macro at places in the // code that shouldn't be reached: void convert () { diff --git a/deal.II/examples/step-2/step-2.cc b/deal.II/examples/step-2/step-2.cc index 0eb5c4a5e3..3e54ec731a 100644 --- a/deal.II/examples/step-2/step-2.cc +++ b/deal.II/examples/step-2/step-2.cc @@ -141,7 +141,7 @@ void make_grid (Triangulation<2> &triangulation) // field on the triangulation. // // This function shows how to do this. The - // object to consider is the ``DoFHandler'' + // object to consider is the DoFHandler // class template. 
Before we do so, however, // we first need something that describes how // many degrees of freedom are to be @@ -150,7 +150,7 @@ void make_grid (Triangulation<2> &triangulation) // finite element space, the finite element // base class stores this information. In the // present context, we therefore create an - // object of the derived class ``FE_Q'' that + // object of the derived class FE_Q that // describes Lagrange elements. Its // constructor takes one argument that states // the polynomial degree of the element, @@ -162,7 +162,7 @@ void make_grid (Triangulation<2> &triangulation) // given to the constructor would instead // give us a bi-cubic element with one degree // of freedom per vertex, two per line, and - // four inside the cell. In general, ``FE_Q'' + // four inside the cell. In general, FE_Q // denotes the family of continuous elements // with complete polynomials // (i.e. tensor-product polynomials) up to @@ -170,14 +170,14 @@ void make_grid (Triangulation<2> &triangulation) // // We first need to create an object of this // class and then pass it on to the - // ``DoFHandler'' object to allocate storage + // DoFHandler object to allocate storage // for the degrees of freedom (in deal.II - // lingo: we ``distribute degrees of - // freedom''). Note that the DoFHandler + // lingo: we distribute degrees of + // freedom). Note that the DoFHandler // object will store a reference to this // finite element object, so we need have to // make sure its lifetime is at least as long - // as that of the ``DoFHandler''; one way to + // as that of the DoFHandler; one way to // make sure this is so is to make it static // as well, in order to prevent its // preemptive destruction. (However, the @@ -273,18 +273,18 @@ void distribute_dofs (DoFHandler<2> &dof_handler) sparsity_pattern.compress (); // What actually happens in this call is // the following: upon creation of a - // ``SparsityPattern'' object, memory is + // SparsityPattern object, memory is // allocated for a maximum number of // entries per row (20 in our case). The // call to - // ``DoFTools::make_sparsity_pattern'' then + // DoFTools::make_sparsity_pattern then // actually allocates entries as necessary, // but the number of nonzero entries in any // given row may be less than the 20 we // have allocated memory for. To save // memory and to simplify some other // operations, one then needs to - // ``compress'' the sparsity pattern before + // compress the sparsity pattern before // anything else. // With this, we can now write the results @@ -301,7 +301,7 @@ void distribute_dofs (DoFHandler<2> &dof_handler) // the sparsity pattern is symmetric. This // should not come as a surprise, since we // have not given the - // ``DoFTools::make_sparsity_pattern'' any + // DoFTools::make_sparsity_pattern any // information that would indicate that our // bilinear form may couple shape functions // in a non-symmetric way. You will also @@ -363,7 +363,7 @@ void distribute_dofs (DoFHandler<2> &dof_handler) // sparsity pattern is more localized around // the diagonal. The only interesting part of // the function is the first call to - // ``DoFRenumbering::Cuthill_McKee'', the + // DoFRenumbering::Cuthill_McKee, the // rest is essentially as before: void renumber_dofs (DoFHandler<2> &dof_handler) { @@ -390,7 +390,7 @@ void renumber_dofs (DoFHandler<2> &dof_handler) // 100,000s). 
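Put together, the workflow walked through above boils down to only a handful of calls. The following sketch is not the tutorial's verbatim code; it assumes a Triangulation<2> named triangulation already exists and only strings together the interfaces named in the comments:

    FE_Q<2>       fe (1);
    DoFHandler<2> dof_handler (triangulation);
    dof_handler.distribute_dofs (fe);

    // Build and compress the sparsity pattern for the bilinear form:
    SparsityPattern sparsity_pattern (dof_handler.n_dofs(),
                                      dof_handler.n_dofs(),
                                      dof_handler.max_couplings_between_dofs());
    DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern);
    sparsity_pattern.compress ();

    // Renumbering changes the DoF indices; the sparsity pattern has to be
    // rebuilt afterwards to see the reduced bandwidth:
    DoFRenumbering::Cuthill_McKee (dof_handler);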
// It is worth noting that the - // ``DoFRenumbering'' class offers a number + // DoFRenumbering class offers a number // of other algorithms as well to renumber // degrees of freedom. For example, it would // of course be ideal if all couplings were @@ -404,7 +404,7 @@ void renumber_dofs (DoFHandler<2> &dof_handler) // possible by enumerating degrees of freedom // from the inflow boundary along streamlines // to the outflow boundary. Not surprisingly, - // ``DoFRenumbering'' also has algorithms for + // DoFRenumbering also has algorithms for // this. @@ -413,7 +413,7 @@ void renumber_dofs (DoFHandler<2> &dof_handler) // Finally, this is the main program. The // only thing it does is to allocate and // create the triangulation, then create a - // ``DoFHandler'' object and associate it to + // DoFHandler object and associate it to // the triangulation, and finally call above // two functions on it: int main () diff --git a/deal.II/examples/step-20/step-20.cc b/deal.II/examples/step-20/step-20.cc index d9f151f570..b6f0137641 100644 --- a/deal.II/examples/step-20/step-20.cc +++ b/deal.II/examples/step-20/step-20.cc @@ -59,12 +59,12 @@ // spatial dependence, we consider it // a tensor-valued function. The // following include file provides - // the ``TensorFunction'' class that + // the TensorFunction class that // offers such functionality: #include - // @sect3{The ``MixedLaplaceProblem'' class template} + // @sect3{The MixedLaplaceProblem class template} // Again, since this is an adaptation // of step-6, the main class is @@ -77,7 +77,7 @@ // argument (and that there is a // corresponding member variable to // store this value) and the addition - // of the ``compute_error'' function + // of the compute_error function // in which, no surprise, we will // compute the difference between the // exact and the numerical solution @@ -137,10 +137,10 @@ class MixedLaplaceProblem // exact solution for later // computations of the error. Note // that these functions have one, - // one, and ``dim+1'' components, + // one, and dim+1 components, // respectively, and that we pass the // number of components down to the - // ``Function@'' base class. For + // Function@ base class. For // the exact solution, we only // declare the function that actually // returns the entire solution vector @@ -234,7 +234,7 @@ ExactSolution::vector_value (const Point &p, // because this is all that appears // in the weak form -- the inverse of // the permeability tensor, - // ``KInverse''. For the purpose of + // KInverse. For the purpose of // verifying the exactness of the // solution and determining // convergence orders, this tensor is @@ -253,22 +253,22 @@ ExactSolution::vector_value (const Point &p, // Possibly unsurprising, deal.II // also has a base class not only for // scalar and generally vector-valued - // functions (the ``Function'' base + // functions (the Function base // class) but also for functions that // return tensors of fixed dimension - // and rank, the ``TensorFunction'' + // and rank, the TensorFunction // template. Here, the function under // consideration returns a dim-by-dim // matrix, i.e. a tensor of rank 2 - // and dimension ``dim''. We then + // and dimension dim. We then // choose the template arguments of // the base class appropriately. // // The interface that the - // ``TensorFunction'' class provides + // TensorFunction class provides // is essentially equivalent to the - // ``Function'' class. In particular, - // there exists a ``value_list'' + // Function class. 
In particular, + // there exists a value_list // function that takes a list of // points at which to evaluate the // function, and returns the values @@ -384,8 +384,8 @@ double extract_p (const FEValuesBase &fe_values, // // The only thing worth describing // here is the constructor call of - // the ``fe'' variable. The - // ``FESystem'' class to which this + // the fe variable. The + // FESystem class to which this // variable belongs has a number of // different constructors that all // refer to binding simpler elements @@ -394,20 +394,20 @@ double extract_p (const FEValuesBase &fe_values, // want to couple a single RT(degree) // element with a single DQ(degree) // element. The constructor to - // ``FESystem'' that does this + // FESystem that does this // requires us to specity first the // first base element (the - // ``FE_RaviartThomas'' object of + // FE_RaviartThomas object of // given degree) and then the number // of copies for this base element, // and then similarly the kind and - // number of ``FE_DGQ'' + // number of FE_DGQ // elements. Note that the Raviart - // Thomas element already has ``dim'' + // Thomas element already has dim // vector components, so that the // coupled element will have - // ``dim+1'' vector components, the - // first ``dim'' of which correspond + // dim+1 vector components, the + // first dim of which correspond // to the velocity variable whereas the // last one corresponds to the // pressure. @@ -416,10 +416,10 @@ double extract_p (const FEValuesBase &fe_values, // we constructed this element from // its base elements, with the way we // have done so in step-8: there, we - // have built it as ``fe - // (FE_Q@(1), dim)'', i.e. we - // have simply used ``dim'' copies of - // the ``FE_Q(1)'' element, one copy + // have built it as fe + // (FE_Q@(1), dim), i.e. we + // have simply used dim copies of + // the FE_Q(1) element, one copy // for the displacement in each // coordinate direction. template @@ -475,11 +475,11 @@ void MixedLaplaceProblem::make_grid_and_dofs () // blocks, so that we can allocate // an appropriate amount of // space. To this end, we call the - // ``DoFTools::count_dofs_per_component'' + // DoFTools::count_dofs_per_component // function that counts how many // shape functions are non-zero for // a particular vector - // component. We have ``dim+1'' + // component. We have dim+1 // vector components, and we have // to use the knowledge that for // Raviart-Thomas elements all @@ -527,8 +527,8 @@ void MixedLaplaceProblem::make_grid_and_dofs () // step, we allocate a 2x2 block // pattern and then reinitialize // each of the blocks to its - // correct size using the ``n_u'' - // and ``n_p'' variables defined + // correct size using the n_u + // and n_p variables defined // above that hold the number of // velocity and pressure // variables. In this second step, @@ -540,7 +540,7 @@ void MixedLaplaceProblem::make_grid_and_dofs () // its knowledge about the sizes of // the blocks it manages; this // happens with the - // ``sparsity_pattern.collect_sizes()'' + // sparsity_pattern.collect_sizes() // call: const unsigned int n_couplings = dof_handler.max_couplings_between_dofs(); @@ -589,7 +589,7 @@ void MixedLaplaceProblem::make_grid_and_dofs () // are all the usual steps, with the // addition that we do not only // allocate quadrature and - // ``FEValues'' objects for the cell + // FEValues objects for the cell // terms, but also for face // terms. 
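The allocation of these objects usually looks like the following sketch, where fe, degree and dim refer to the member variable and template parameter of the enclosing class; the choice of a Gauss rule with degree+2 points and the exact set of update flags are assumptions for illustration, not necessarily what this tutorial uses:

    QGauss<dim>   quadrature_formula (degree+2);
    QGauss<dim-1> face_quadrature_formula (degree+2);

    // One object for the cell terms, one for the face terms:
    FEValues<dim>     fe_values (fe, quadrature_formula,
                                 update_values   | update_gradients |
                                 update_q_points | update_JxW_values);
    FEFaceValues<dim> fe_face_values (fe, face_quadrature_formula,
                                      update_values   | update_normal_vectors |
                                      update_q_points | update_JxW_values);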
After that, we define the // usual abbreviations for variables, @@ -753,13 +753,13 @@ void MixedLaplaceProblem::assemble_system () // rather only comment on // implementational aspects. - // @sect4{The ``InverseMatrix'' class template} + // @sect4{The InverseMatrix class template} // The first component of our linear // solver scheme was the creation of // a class that acts like the inverse // of a matrix, i.e. which has a - // ``vmult'' function that multiplies + // vmult function that multiplies // a vector with an inverse matrix by // solving a linear system. // @@ -768,19 +768,19 @@ void MixedLaplaceProblem::assemble_system () // purpose of this class, two // comments are in order. First, the // class is derived from the - // ``Subscriptor'' class so that we - // can use the ``SmartPointer'' class + // Subscriptor class so that we + // can use the SmartPointer class // with inverse matrix objects. The - // use of the ``Subscriptor'' class + // use of the Subscriptor class // has been explained before in // step-7 and step-20. The present // class also sits on the receiving // end of this - // ``Subscriptor''/``SmartPointer'' + // Subscriptor/SmartPointer // pair: it holds its pointer to the // matrix it is supposed to be the // inverse of through a - // ``SmartPointer'' to make sure that + // SmartPointer to make sure that // this matrix is not destroyed while // we still have a pointer to it. // @@ -795,7 +795,7 @@ void MixedLaplaceProblem::assemble_system () // vectors that it will release again // at the end of its operation. What // this means is that through - // repeated calls to the ``vmult'' + // repeated calls to the vmult // function of this class we have to // allocate and release vectors over // and over again. @@ -806,20 +806,20 @@ void MixedLaplaceProblem::assemble_system () // only once? In fact, deal.II offers // a way to do exactly this. What all // the linear solvers do is not to - // allocate memory using ``new'' and - // ``delete'', but rather to allocate + // allocate memory using new and + // delete, but rather to allocate // them from an object derived from - // the ``VectorMemory'' class (see + // the VectorMemory class (see // the module on Vector memory // management in the API reference // manual). By default, the linear // solvers use a derived class - // ``PrimitiveVectorMemory'' that, + // PrimitiveVectorMemory that, // ever time a vector is requested, - // allocates one using ``new'', and - // calls ``delete'' on it again once + // allocates one using new, and + // calls delete on it again once // the solver returns it to the - // ``PrimitiveVectorMemory'' + // PrimitiveVectorMemory // object. This is the appropriate // thing to do if we do not // anticipate that the vectors may be @@ -834,7 +834,7 @@ void MixedLaplaceProblem::assemble_system () // vector memory object holds on to // them for later requests by linear // solvers. The - // ``GrowingVectorMemory'' class does + // GrowingVectorMemory class does // exactly this: when asked by a // linear solver for a vector, it // first looks whether it has unused @@ -843,19 +843,19 @@ void MixedLaplaceProblem::assemble_system () // simply grows its pool. Vectors are // only returned to the C++ runtime // memory system once the - // ``GrowingVectorMemory'' object is + // GrowingVectorMemory object is // destroyed itself. 
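In code, handing such a pool to a solver amounts to little more than the following sketch; matrix, dst and src are assumed to exist, and the iteration count and tolerance are placeholders:

    // One pool, reused across many solves; temporaries are recycled rather
    // than allocated with new/delete on every call:
    GrowingVectorMemory<Vector<double> > vector_memory;

    SolverControl control (1000, 1e-12);
    SolverCG<>    cg (control, vector_memory);   // the solver draws its temporaries from the pool
    cg.solve (matrix, dst, src, PreconditionIdentity());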
// // What we therefore need to do is // have the present matrix have an // object of type - // ``GrowingVectorMemory'' as a + // GrowingVectorMemory as a // member variable and use it // whenever we create a linear solver // object. There is a slight // complication here: Since the - // ``vmult'' function is marked as - // ``const'' (it doesn't change the + // vmult function is marked as + // const (it doesn't change the // state of the object, after all, // and simply operates on its // arguments), it can only pass an @@ -869,10 +869,10 @@ void MixedLaplaceProblem::assemble_system () // such attempt as an error, if we // didn't make use of a rarely used // feature of C++: we mark the - // variable as ``mutable''. What this + // variable as mutable. What this // does is to allow us to change a // member variable even from a - // ``const'' member function. + // const member function. template class InverseMatrix : public Subscriptor { @@ -922,7 +922,7 @@ void InverseMatrix::vmult (Vector &dst, } - // @sect4{The ``SchurComplement'' class template} + // @sect4{The SchurComplement class template} // The next class is the Schur // complement class. Its rationale @@ -930,26 +930,26 @@ void InverseMatrix::vmult (Vector &dst, // in the introduction. The only // things we would like to note is // that the class, too, is derived - // from the ``Subscriptor'' class and + // from the Subscriptor class and // that as mentioned above it stores // pointers to the entire block // matrix and the inverse of the mass // matrix block using - // ``SmartPointer'' objects. + // SmartPointer objects. // - // The ``vmult'' function requires + // The vmult function requires // two temporary vectors that we do // not want to re-allocate and free // every time we call this // function. Since here, we have full // control over the use of these // vectors (unlike above, where a - // class called by the ``vmult'' + // class called by the vmult // function required these vectors, - // not the ``vmult'' function + // not the vmult function // itself), we allocate them // directly, rather than going - // through the ``VectorMemory'' + // through the VectorMemory // mechanism. However, again, these // member variables do not carry any // state between successive calls to @@ -957,14 +957,14 @@ void InverseMatrix::vmult (Vector &dst, // (i.e., we never care what values // they were set to the last time a // member function was called), we - // mark these vectors as ``mutable''. + // mark these vectors as mutable. // // The rest of the (short) // implementation of this class is // straightforward if you know the // order of matrix-vector // multiplications performed by the - // ``vmult'' function: + // vmult function: class SchurComplement : public Subscriptor { public: @@ -1001,20 +1001,20 @@ void SchurComplement::vmult (Vector &dst, } - // @sect4{The ``ApproximateSchurComplement'' class template} + // @sect4{The ApproximateSchurComplement class template} // The third component of our solver // and preconditioner system is the // class that approximates the Schur // complement so we can form a - // ``InverseMatrix@'' + // InverseMatrix@ // object that approximates the // inverse of the Schur // complement. It follows the same // pattern as the Schur complement // class, with the only exception // that we do not multiply with the - // inverse mass matrix in ``vmult'', + // inverse mass matrix in vmult, // but rather just do a single Jacobi // step. 
Consequently, the class also // does not have to store a pointer @@ -1153,7 +1153,7 @@ void MixedLaplaceProblem::solve () // // To compute errors in the solution, // we have already introduced the - // ``VectorTools::integrate_difference'' + // VectorTools::integrate_difference // function in step-7 and // step-11. However, there we only // dealt with scalar solutions, @@ -1170,7 +1170,7 @@ void MixedLaplaceProblem::solve () // have to do is to `mask' the // components that we are interested // in. This is easily done: the - // ``VectorTools::integrate_difference'' + // VectorTools::integrate_difference // function takes as its last // argument a pointer to a weight // function (the parameter defaults @@ -1184,19 +1184,19 @@ void MixedLaplaceProblem::solve () // should pass a function that // represents the constant vector // with a unit value in component - // ``dim'', whereas for the velocity + // dim, whereas for the velocity // the constant vector should be one - // in the first ``dim'' components, + // in the first dim components, // and zero in the location of the // pressure. // // In deal.II, the - // ``ComponentSelectFunction'' does + // ComponentSelectFunction does // exactly this: it wants to know how // many vector components the // function it is to represent should // have (in our case this would be - // ``dim+1'', for the joint + // dim+1, for the joint // velocity-pressure space) and which // individual or range of components // should be equal to one. We @@ -1207,7 +1207,7 @@ void MixedLaplaceProblem::solve () // and a vector in which we will // store the cellwise errors as // computed by - // ``integrate_difference'': + // integrate_difference: template void MixedLaplaceProblem::compute_errors () const { @@ -1227,7 +1227,7 @@ void MixedLaplaceProblem::compute_errors () const // quadrature. This actually // presents a slight twist here: if // we naively chose an object of - // type ``QGauss@(degree+1)'' + // type QGauss@(degree+1) // as one may be inclined to do // (this is what we used for // integrating the linear system), @@ -1247,7 +1247,7 @@ void MixedLaplaceProblem::compute_errors () const // ingration. To avoid this // problem, we simply use a // trapezoidal rule and iterate it - // ``degree+2'' times in each + // degree+2 times in each // coordinate direction (again as // explained in step-7): QTrapez<1> q_trapez; @@ -1349,7 +1349,7 @@ void MixedLaplaceProblem::run () } - // @sect3{The ``main'' function} + // @sect3{The main function} // The main function we stole from // step-6 instead of step-4. It is diff --git a/deal.II/examples/step-21/step-21.cc b/deal.II/examples/step-21/step-21.cc index e4dafe6f06..42c0571bb4 100644 --- a/deal.II/examples/step-21/step-21.cc +++ b/deal.II/examples/step-21/step-21.cc @@ -55,7 +55,7 @@ // refinement indicator. #include // Finally we do some time comparison - // using the ``Timer'' class. + // using the Timer class. #include // And this again is C++: @@ -100,10 +100,10 @@ // // First we define the classes // representing the equation-specific - // functions. Both classes, ``RHS'' - // and ``BoundaryValues'', are - // derived from the ``Function'' - // class. Only the ``value_list'' + // functions. Both classes, RHS + // and BoundaryValues, are + // derived from the Function + // class. 
Only the value_list // function are implemented because // only lists of function values are // computed rather than single @@ -128,18 +128,18 @@ class BoundaryValues: public Function }; - // The class ``Beta'' represents the + // The class Beta represents the // vector valued flow field of the // linear transport equation and is - // not derived from the ``Function'' + // not derived from the Function // class as we prefer to get function - // values of type ``Point'' rather + // values of type Point rather // than of type - // ``Vector@''. This, because + // Vector@. This, because // there exist scalar products - // between ``Point'' and ``Point'' as - // well as between ``Point'' and - // ``Tensor'', simplifying terms like + // between Point and Point as + // well as between Point and + // Tensor, simplifying terms like // $\beta\cdot n$ and // $\beta\cdot\nabla v$. // @@ -161,7 +161,7 @@ class Beta // The implementation of the - // ``value_list'' functions of these + // value_list functions of these // classes are rather simple. For // simplicity the right hand side is // set to be zero but will be @@ -235,14 +235,14 @@ void BoundaryValues::value_list(const std::vector > &points, // Next we define the // equation-dependent and // DG-method-dependent class - // ``DGTransportEquation''. Its + // DGTransportEquation. Its // member functions were already // mentioned in the Introduction and // will be explained // below. Furthermore it includes // objects of the previously defined - // ``Beta'', ``RHS'' and - // ``BoundaryValues'' function + // Beta, RHS and + // BoundaryValues function // classes. template class DGTransportEquation @@ -287,15 +287,15 @@ DGTransportEquation::DGTransportEquation () // @sect4{Function: assemble_cell_term} // - // The ``assemble_cell_term'' + // The assemble_cell_term // function assembles the cell terms // of the discretization. - // ``u_v_matrix'' is a cell matrix, + // u_v_matrix is a cell matrix, // i.e. for a DG method of degree 1, // it is of size 4 times 4, and - // ``cell_vector'' is of size 4. + // cell_vector is of size 4. // When this function is invoked, - // ``fe_v'' is already reinit'ed with the + // fe_v is already reinit'ed with the // current cell before and includes // all shape values needed. template @@ -304,13 +304,13 @@ void DGTransportEquation::assemble_cell_term( FullMatrix &u_v_matrix, Vector &cell_vector) const { - // First we ask ``fe_v'' for the + // First we ask fe_v for the // shape gradients, shape values and // quadrature weights, const std::vector &JxW = fe_v.get_JxW_values (); // Then the flow field beta and the - // ``rhs_function'' are evaluated at + // rhs_function are evaluated at // the quadrature points, std::vector > beta (fe_v.n_quadrature_points); std::vector rhs (fe_v.n_quadrature_points); @@ -337,10 +337,10 @@ void DGTransportEquation::assemble_cell_term( // @sect4{Function: assemble_boundary_term} // - // The ``assemble_boundary_term'' + // The assemble_boundary_term // function assembles the face terms // at boundary faces. When this - // function is invoked, ``fe_v'' is + // function is invoked, fe_v is // already reinit'ed with the current // cell and current face. 
Hence it // provides the shape values on that @@ -352,7 +352,7 @@ void DGTransportEquation::assemble_boundary_term( Vector &cell_vector) const { // Again, as in the previous - // function, we ask the ``FEValues'' + // function, we ask the FEValues // object for the shape values and // the quadrature weights const std::vector &JxW = fe_v.get_JxW_values (); @@ -400,7 +400,7 @@ void DGTransportEquation::assemble_boundary_term( // @sect4{Function: assemble_face_term1} // - // The ``assemble_face_term1'' + // The assemble_face_term1 // function assembles the face terms // corresponding to the first version // of the DG method, cf. above. For @@ -409,7 +409,7 @@ void DGTransportEquation::assemble_boundary_term( // all cell boundaries. // // When this function is invoked, - // ``fe_v'' and ``fe_v_neighbor'' are + // fe_v and fe_v_neighbor are // already reinit'ed with the current // cell and the neighoring cell, // respectively, as well as with the @@ -418,16 +418,16 @@ void DGTransportEquation::assemble_boundary_term( // on the face. // // In addition to the cell matrix - // ``u_v_matrix'' this function has + // u_v_matrix this function has // got a new argument - // ``un_v_matrix'', that stores + // un_v_matrix, that stores // contributions to the system matrix // that are based on outer values of // u, see $\hat u_h$ in the // introduction, and inner values of // v, see $v_h$. Here we note that - // ``un'' is the short notation for - // ``u_neighbor'' and represents + // un is the short notation for + // u_neighbor and represents // $\hat u_h$. template void DGTransportEquation::assemble_face_term1( @@ -484,7 +484,7 @@ void DGTransportEquation::assemble_face_term1( // @sect4{Function: assemble_face_term2} // // Now we look at the - // ``assemble_face_term2'' function + // assemble_face_term2 function // that assembles the face terms // corresponding to the second // version of the DG method, @@ -492,8 +492,8 @@ void DGTransportEquation::assemble_face_term1( // terms are given as a sum of // integrals over all faces. Here we // need two additional cell matrices - // ``u_vn_matrix'' and - // ``un_vn_matrix'' that will store + // u_vn_matrix and + // un_vn_matrix that will store // contributions due to terms // involving u and vn as well as un // and vn. @@ -569,7 +569,7 @@ void DGTransportEquation::assemble_face_term2( // After these preparations, we // proceed with the main part of this // program. The main class, here - // called ``DGMethod'' is basically + // called DGMethod is basically // the main class of step 6. One of // the differences is that there's no // ConstraintMatrix object. This is, @@ -628,8 +628,8 @@ class DGMethod // solutions to the problems // corresponding to the two // different assembling routines - // ``assemble_system1'' and - // ``assemble_system2''; + // assemble_system1 and + // assemble_system2; Vector solution1; Vector solution2; Vector right_hand_side; @@ -714,51 +714,51 @@ void DGMethod::setup_system () // @sect4{Function: assemble_system1} // // We proceed with the - // ``assemble_system1'' function that + // assemble_system1 function that // implements the DG discretization // in its first version. This // function repeatedly calls the - // ``assemble_cell_term'', - // ``assemble_boundary_term'' and - // ``assemble_face_term1'' functions - // of the ``DGTransportEquation'' + // assemble_cell_term, + // assemble_boundary_term and + // assemble_face_term1 functions + // of the DGTransportEquation // object. 
The - // ``assemble_boundary_term'' covers + // assemble_boundary_term covers // the first case mentioned in the // introduction. // // 1. face is at boundary // // This function takes a - // ``FEFaceValues'' object as + // FEFaceValues object as // argument. In contrast to that - // ``assemble_face_term1'' - // takes two ``FEFaceValuesBase'' + // assemble_face_term1 + // takes two FEFaceValuesBase // objects; one for the shape // functions on the current cell and // the other for shape functions on // the neighboring cell under // consideration. Both objects are - // either of class ``FEFaceValues'' - // or of class ``FESubfaceValues'' + // either of class FEFaceValues + // or of class FESubfaceValues // (both derived from - // ``FEFaceValuesBase'') according to + // FEFaceValuesBase) according to // the remaining cases mentioned // in the introduction: // // 2. neighboring cell is finer - // (current cell: ``FESubfaceValues'', - // neighboring cell: ``FEFaceValues''); + // (current cell: FESubfaceValues, + // neighboring cell: FEFaceValues); // // 3. neighboring cell is of the same // refinement level (both, current // and neighboring cell: - // ``FEFaceValues''); + // FEFaceValues); // // 4. neighboring cell is coarser - // (current cell: ``FEFaceValues'', + // (current cell: FEFaceValues, // neighboring cell: - // ``FESubfaceValues''). + // FESubfaceValues). // // If we considered globally refined // meshes then only case 3 would @@ -771,9 +771,9 @@ template void DGMethod::assemble_system1 () { // First we create the - // ``UpdateFlags'' for the - // ``FEValues'' and the - // ``FEFaceValues'' objects. + // UpdateFlags for the + // FEValues and the + // FEFaceValues objects. const UpdateFlags update_flags = update_values | update_gradients | update_q_points @@ -797,7 +797,7 @@ void DGMethod::assemble_system1 () // vectors of the current cell. const UpdateFlags neighbor_face_update_flags = update_values; - // Then we create the ``FEValues'' + // Then we create the FEValues // object. Here, we use the default // MappingQ1. different mapping // create a MappingCollection first @@ -806,8 +806,8 @@ void DGMethod::assemble_system1 () hp::FEValues fe_v_x (fe_collection, quadratures, update_flags); // Similarly we create the - // ``FEFaceValues'' and - // ``FESubfaceValues'' objects for + // FEFaceValues and + // FESubfaceValues objects for // both, the current and the // neighboring cell. Within the // following nested loop over all @@ -828,7 +828,7 @@ void DGMethod::assemble_system1 () // and vectors. Here we need two // cell matrices, both for face // terms that include test - // functions ``v'' (shape functions + // functions v (shape functions // of the current cell). To be more // precise, the first matrix will // include the `u and v terms' and @@ -859,26 +859,26 @@ void DGMethod::assemble_system1 () std::vector dofs_neighbor; // In the - // ``assemble_face_term1'' + // assemble_face_term1 // function contributions to // the cell matrices and the // cell vector are only // ADDED. Therefore on each // cell we need to reset the - // ``u_v_matrix'' and - // ``cell_vector'' to zero, + // u_v_matrix and + // cell_vector to zero, // before assembling the cell terms. u_v_matrix = 0; cell_vector = 0; - // Now we reinit the ``FEValues'' + // Now we reinit the FEValues // object for the current cell fe_v_x.reinit (cell); // and call the function // that assembles the cell // terms. 
The first argument is - // the ``FEValues'' that was + // the FEValues that was // previously reinit'ed on the // current cell. dg.assemble_cell_term(fe_v_x.get_present_fe_values (), @@ -900,7 +900,7 @@ void DGMethod::assemble_system1 () typename hp::DoFHandler::face_iterator face=cell->face(face_no); // and clear the - // ``un_v_matrix'' on each + // un_v_matrix on each // face. un_v_matrix = 0; @@ -913,7 +913,7 @@ void DGMethod::assemble_system1 () if (face->at_boundary()) { // We reinit the - // ``FEFaceValues'' + // FEFaceValues // object to the // current face fe_v_face_x.reinit (cell, face_no); @@ -959,7 +959,7 @@ void DGMethod::assemble_system1 () // note that the // following part of // code will not work - // for ``dim==1''. + // for dim==1. if (face->has_children()) { // First we store @@ -972,7 +972,7 @@ void DGMethod::assemble_system1 () // neighbor-@>neighbor(neighbor2) // equals the // current cell - // ``cell''. + // cell. const unsigned int neighbor2= cell->neighbor_of_neighbor(face_no); @@ -985,7 +985,7 @@ void DGMethod::assemble_system1 () // and set the // cell // iterator - // ``neighbor_child'' + // neighbor_child // to the cell // placed // `behind' the @@ -1029,12 +1029,12 @@ void DGMethod::assemble_system1 () // We need to // reset the - // ``un_v_matrix'' + // un_v_matrix // on each // subface // because on // each subface - // the ``un'' + // the un // belong to // different // neighboring @@ -1048,7 +1048,7 @@ void DGMethod::assemble_system1 () // case (case // 2) we employ // the - // ``FESubfaceValues'' + // FESubfaceValues // of the // current // cell (here @@ -1081,7 +1081,7 @@ void DGMethod::assemble_system1 () // and // distribute - // ``un_v_matrix'' + // un_v_matrix // to the // system_matrix for (unsigned int i=0; iget_fe().dofs_per_cell; ++i) @@ -1089,8 +1089,8 @@ void DGMethod::assemble_system1 () system_matrix.add(dofs[i], dofs_neighbor[k], un_v_matrix(i,k)); } - // End of ``if - // (face-@>has_children())'' + // End of if + // (face-@>has_children()) } else { @@ -1125,7 +1125,7 @@ void DGMethod::assemble_system1 () // We reinit // the - // ``FEFaceValues'' + // FEFaceValues // of the // current and // neighboring @@ -1142,10 +1142,10 @@ void DGMethod::assemble_system1 () fe_v_face_neighbor_x.get_present_fe_values (), u_v_matrix, un_v_matrix); - // End of ``if + // End of if // (neighbor-@>level() // == - // cell-@>level())'' + // cell-@>level()) } else { @@ -1194,7 +1194,7 @@ void DGMethod::assemble_system1 () // Reinit the // appropriate - // ``FEFaceValues'' + // FEFaceValues // and assemble // the face // terms. @@ -1211,25 +1211,25 @@ void DGMethod::assemble_system1 () // Now we get the // dof indices of // the - // ``neighbor_child'' + // neighbor_child // cell, dofs_neighbor.resize (neighbor->get_fe().dofs_per_cell); neighbor->get_dof_indices (dofs_neighbor); // and distribute the - // ``un_v_matrix''. + // un_v_matrix. for (unsigned int i=0; iget_fe().dofs_per_cell; ++i) for (unsigned int k=0; kget_fe().dofs_per_cell; ++k) system_matrix.add(dofs[i], dofs_neighbor[k], un_v_matrix(i,k)); } - // End of ``face not at boundary'': + // End of face not at boundary: } // End of loop over all faces: } // Finally we distribute the - // ``u_v_matrix'' + // u_v_matrix for (unsigned int i=0; i::assemble_system1 () // @sect4{Function: assemble_system2} // // We proceed with the - // ``assemble_system2'' function that + // assemble_system2 function that // implements the DG discretization // in its second version. 
This // function is very similar to the - // ``assemble_system1'' + // assemble_system1 // function. Therefore, here we only // discuss the differences between // the two functions. This function // repeatedly calls the - // ``assemble_face_term2'' function + // assemble_face_term2 function // of the DGTransportEquation object, // that assembles the face terms // written as a sum of integrals over @@ -1293,7 +1293,7 @@ void DGMethod::assemble_system2 () const UpdateFlags neighbor_face_update_flags = update_values; // Here we do not need - // ``fe_v_face_neighbor'' as case 4 + // fe_v_face_neighbor as case 4 // does not occur. hp::FEValues fe_v_x ( fe_collection, quadratures, update_flags); @@ -1312,7 +1312,7 @@ void DGMethod::assemble_system2 () // Additionally we need the // following two cell matrices, // both for face term that include - // test function ``vn'' (shape + // test function vn (shape // functions of the neighboring // cell). To be more precise, the // first matrix will include the `u @@ -1548,15 +1548,15 @@ void DGMethod::solve (Vector &solution) // difference quotients including the // cell under consideration and its // neighbors. This is done by the - // ``DerivativeApproximation'' class + // DerivativeApproximation class // that computes the approximate // gradients in a way similar to the - // ``GradientEstimation'' described + // GradientEstimation described // in Step 9 of this tutorial. In // fact, the - // ``DerivativeApproximation'' class + // DerivativeApproximation class // was developed following the - // ``GradientEstimation'' class of + // GradientEstimation class of // Step 9. Relating to the // discussion in Step 9, here we // consider $h^{1+d/2}|\nabla_h @@ -1570,7 +1570,7 @@ void DGMethod::solve (Vector &solution) template void DGMethod::refine_grid () { - // The ``DerivativeApproximation'' + // The DerivativeApproximation // class computes the gradients to // float precision. This is // sufficient as they are @@ -1672,14 +1672,14 @@ void DGMethod::output_results (const unsigned int cycle) const } - // The following ``run'' function is + // The following run function is // similar to previous examples. The // only difference is that the // problem is assembled and solved // twice on each refinement step; - // first by ``assemble_system1'' that + // first by assemble_system1 that // implements the first version and - // then by ``assemble_system2'' that + // then by assemble_system2 that // implements the second version of // writing the DG // discretization. Furthermore the @@ -1767,7 +1767,7 @@ void DGMethod::run () } } - // The following ``main'' function is + // The following main function is // similar to previous examples and // need not to be commented on. int main () diff --git a/deal.II/examples/step-3/step-3.cc b/deal.II/examples/step-3/step-3.cc index a00e322675..821b0e044b 100644 --- a/deal.II/examples/step-3/step-3.cc +++ b/deal.II/examples/step-3/step-3.cc @@ -91,7 +91,7 @@ #include - // @sect3{The ``LaplaceProblem'' class} + // @sect3{The LaplaceProblem class} // Instead of the procedural programming of // previous examples, we encapsulate @@ -238,7 +238,7 @@ void LaplaceProblem::make_grid_and_dofs () // freedom. This is done by using the // distribute_dofs function, as we have // seen in the step-2 example. Since we use - // the ``FE_Q'' class with a polynomial + // the FE_Q class with a polynomial // degree of 1, i.e. bilinear elements, // this associates one degree of freedom // with each vertex. 
While we're at @@ -262,7 +262,7 @@ void LaplaceProblem::make_grid_and_dofs () // instead of giving a magically obtained // maximal number of nonzero entries per // row, we now use a function in the - // ``DoFHandler'' class that can compute + // DoFHandler class that can compute // this number for us: sparsity_pattern.reinit (dof_handler.n_dofs(), dof_handler.n_dofs(), @@ -357,7 +357,7 @@ void LaplaceProblem::make_grid_and_dofs () // objects. That's too much, so there is one // type of class that orchestrates // information exchange between these three: - // the ``FEValues'' class. If given one + // the FEValues class. If given one // instance of each three of these objects, // it will be able to provide you with // information about values and gradients of @@ -405,22 +405,22 @@ void LaplaceProblem::assemble_system () // actually need is given as a bitwise // connection of flags as the third // argument to the constructor of - // ``FEValues''. Since these values have to + // FEValues. Since these values have to // be recomputed, or updated, every time we // go to a new cell, all of these flags - // start with the prefix ``update_'' and + // start with the prefix update_ and // then indicate what it actually is that // we want updated. The flag to give if we // want the values of the shape functions - // computed is ``update_values''; for the + // computed is update_values; for the // gradients it is - // ``update_gradients''. The determinants + // update_gradients. The determinants // of the Jacobians and the quadrature // weights are always used together, so // only the products (Jacobians times - // weights, or short ``JxW'') are computed; + // weights, or short JxW) are computed; // since we need them, we have to list - // ``update_JxW_values'' as well: + // update_JxW_values as well: FEValues<2> fe_values (fe, quadrature_formula, update_values | update_gradients | update_JxW_values); // The advantage of this proceeding is that @@ -567,7 +567,7 @@ void LaplaceProblem::assemble_system () // determinant and the quadrature point // weight (that one gets together by // the call to - // ``fe_values.JxW''). Finally, this is + // fe_values.JxW). Finally, this is // repeated for all shape functions // phi_i and phi_j: for (unsigned int i=0; iVectorTools::interpolate_boundary_values. Its // parameters are (omitting parameters for // which default values exist and that we // don't care about): the DoFHandler object @@ -694,9 +694,9 @@ void LaplaceProblem::assemble_system () // the boundary. // // The function describing the boundary - // values is an object of type ``Function'' + // values is an object of type Function // or of a derived class. One of the - // derived classes is ``ZeroFunction'', + // derived classes is ZeroFunction, // which describes (not unexpectedly) a // function which is zero everywhere. We // create such an object in-place and pass @@ -712,7 +712,7 @@ void LaplaceProblem::assemble_system () // here for all entries). This // mapping of DoF numbers to // boundary values is done by the - // ``std::map'' class. + // std::map class. std::map boundary_values; VectorTools::interpolate_boundary_values (dof_handler, 0, @@ -755,7 +755,7 @@ void LaplaceProblem::solve () // First, we need to have an object that // knows how to tell the CG algorithm when // to stop. 
This is done by using a - // ``SolverControl'' object, and as + // SolverControl object, and as // stopping criterion we say: stop after a // maximum of 1000 iterations (which is far // more than is needed for 1089 variables; @@ -766,13 +766,13 @@ void LaplaceProblem::solve () // the one which stops the iteration: SolverControl solver_control (1000, 1e-12); // Then we need the solver itself. The - // template parameters to the ``SolverCG'' + // template parameters to the SolverCG // class are the matrix type and the type // of the vectors, but the empty angle // brackets indicate that we simply take // the default arguments (which are - // ``SparseMatrix@'' and - // ``Vector@''): + // SparseMatrix@ and + // Vector@): SolverCG<> cg (solver_control); // Now solve the system of equations. The @@ -807,13 +807,13 @@ void LaplaceProblem::output_results () const // To write the output to a file, // we need an object which knows // about output formats and the - // like. This is the ``DataOut'' class, + // like. This is the DataOut class, // and we need an object of that // type: DataOut<2> data_out; // Now we have to tell it where to take the // values from which it shall write. We - // tell it which ``DoFHandler'' object to + // tell it which DoFHandler object to // use, and the solution vector (and // the name by which the solution variable // shall appear in the output file). If @@ -830,7 +830,7 @@ void LaplaceProblem::output_results () const // handle. The reason is that we // have separated the frontend // (which knows about how to treat - // ``DoFHandler'' objects and data + // DoFHandler objects and data // vectors) from the back end (which // knows many different output formats) // and use an intermediate data @@ -857,7 +857,7 @@ void LaplaceProblem::output_results () const // Finally, the last function of this class // is the main function which calls all the - // other functions of the ``LaplaceProblem'' + // other functions of the LaplaceProblem // class. The order in which this is done // resembles the order in which most finite // element programs work. Since the names are @@ -872,7 +872,7 @@ void LaplaceProblem::run () } - // @sect3{The ``main'' function} + // @sect3{The main function} // This is the main function of the // program. Since the concept of a diff --git a/deal.II/examples/step-4/step-4.cc b/deal.II/examples/step-4/step-4.cc index 067c93ef13..d6ff4ec32a 100644 --- a/deal.II/examples/step-4/step-4.cc +++ b/deal.II/examples/step-4/step-4.cc @@ -50,10 +50,10 @@ #include - // @sect3{The ``LaplaceProblem'' class template} + // @sect3{The LaplaceProblem class template} // This is again the same - // ``LaplaceProblem'' class as in the + // LaplaceProblem class as in the // previous example. The only // difference is that we have now // declared it as a class with a @@ -183,9 +183,9 @@ class BoundaryValues : public Function // knows the size of the loop at compile time // (remember that at the time when you define // the template, the compiler doesn't know - // the value of ``dim'', but when it later + // the value of dim, but when it later // encounters a statement or declaration - // ``RightHandSide@<2@>'', it will take the + // RightHandSide@<2@>, it will take the // template, replace all occurrences of dim // by 2 and compile the resulting function); // in other words, at the time of compiling @@ -197,7 +197,7 @@ class BoundaryValues : public Function // above right away. 
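A dimension-independent function body of the kind alluded to above therefore simply loops over the dim components; the following sketch of such a right hand side uses a formula chosen purely for illustration:

    template <int dim>
    double RightHandSide<dim>::value (const Point<dim> &p,
                                      const unsigned int /*component*/) const
    {
      double return_value = 0;
      // 'dim' is a compile-time constant here, so the compiler can fully unroll this loop:
      for (unsigned int i=0; i<dim; ++i)
        return_value += 4 * p(i) * p(i) * p(i) * p(i);   // e.g. 4*(x^4 + y^4 [+ z^4])

      return return_value;
    }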
// // The last thing to note is that a - // ``Point@'' denotes a point in + // Point@ denotes a point in // dim-dimensionsal space, and its individual // components (i.e. `x', `y', // ... coordinates) can be accessed using the @@ -232,35 +232,35 @@ double BoundaryValues::value (const Point &p, - // @sect3{Implementation of the ``LaplaceProblem'' class} + // @sect3{Implementation of the LaplaceProblem class} // Next for the implementation of the class // template that makes use of the functions // above. As before, we will write everything // as templates that have a formal parameter - // ``dim'' that we assume unknown at the time + // dim that we assume unknown at the time // we define the template functions. Only // later, the compiler will find a - // declaration of ``LaplaceProblem@<2@>'' (in - // the ``main'' function, actually) and - // compile the entire class with ``dim'' + // declaration of LaplaceProblem@<2@> (in + // the main function, actually) and + // compile the entire class with dim // replaced by 2, a process referred to as // `instantiation of a template'. When doing // so, it will also replace instances of - // ``RightHandSide@'' by - // ``RightHandSide@<2@>'' and instantiate the + // RightHandSide@ by + // RightHandSide@<2@> and instantiate the // latter class from the class template. // // In fact, the compiler will also find a - // declaration ``LaplaceProblem@<3@>'' in - // ``main()''. This will cause it to again go + // declaration LaplaceProblem@<3@> in + // main(). This will cause it to again go // back to the general - // ``LaplaceProblem@'' template, replace - // all occurrences of ``dim'', this time by + // LaplaceProblem@ template, replace + // all occurrences of dim, this time by // 3, and compile the class a second // time. Note that the two instantiations - // ``LaplaceProblem@<2@>'' and - // ``LaplaceProblem@<3@>'' are completely + // LaplaceProblem@<2@> and + // LaplaceProblem@<3@> are completely // independent classes; their only common // feature is that they are both instantiated // from the same general template, but they @@ -273,7 +273,7 @@ double BoundaryValues::value (const Point &p, // @sect4{LaplaceProblem::LaplaceProblem} // After this introduction, here is the - // constructor of the ``LaplaceProblem'' + // constructor of the LaplaceProblem // class. It specifies the desired polynomial // degree of the finite elements and // associates the DoFHandler to the @@ -298,7 +298,7 @@ LaplaceProblem::LaplaceProblem () : // square [-1,1]x[-1,1] in 2D, or on // the cube [-1,1]x[-1,1]x[-1,1] in // 3D; both can be termed - // ``hyper_cube'', so we may use the + // hyper_cube, so we may use the // same function in whatever // dimension we are. Of course, the // functions that create a hypercube @@ -315,7 +315,7 @@ LaplaceProblem::LaplaceProblem () : // either. This function therefore looks // exactly like in the previous example, // although it performs actions that in their - // details are quite different if ``dim'' + // details are quite different if dim // happens to be 3. The only significant // difference from a user's perspective is // the number of cells resulting, which is @@ -405,11 +405,11 @@ void LaplaceProblem::assemble_system () // quadrature points on the cell we are // presently on (previously, we only // required values and gradients of the - // shape function from the ``FEValues'' + // shape function from the FEValues // object, as well as the quadrature - // weights, ``JxW''). 
We can tell the - // ``FEValues'' object to do for us by also - // giving it the ``update_q_points'' flag: + // weights, JxW). We can tell the + // FEValues object to do for us by also + // giving it the update_q_points flag: FEValues fe_values (fe, quadrature_formula, update_values | update_gradients | update_q_points | update_JxW_values); @@ -435,7 +435,7 @@ void LaplaceProblem::assemble_system () // Note, that a cell is a quadrilateral in // two space dimensions, but a hexahedron // in 3D. In fact, the - // ``active_cell_iterator'' data type is + // active_cell_iterator data type is // something different, depending on the // dimension we are in, but to the outside // world they look alike and you will @@ -491,22 +491,22 @@ void LaplaceProblem::assemble_system () // and j at point q_point and multiply // it with the scalar weights JxW. This // is actually what happens: - // ``fe_values.shape_grad(i,q_point)'' - // returns a ``dim'' dimensional + // fe_values.shape_grad(i,q_point) + // returns a dim dimensional // vector, represented by a - // ``Tensor@<1,dim@>'' object, and the + // Tensor@<1,dim@> object, and the // operator* that multiplies it with // the result of - // ``fe_values.shape_grad(j,q_point)'' - // makes sure that the ``dim'' + // fe_values.shape_grad(j,q_point) + // makes sure that the dim // components of the two vectors are // properly contracted, and the result // is a scalar floating point number // that then is multiplied with the // weights. Internally, this operator* // makes sure that this happens - // correctly for all ``dim'' components - // of the vectors, whether ``dim'' be + // correctly for all dim components + // of the vectors, whether dim be // 2, 3, or any other space dimension; // from a user's perspective, this is // not something worth bothering with, @@ -537,10 +537,10 @@ void LaplaceProblem::assemble_system () // values in this example, contrary to the // one before. This is a simple task, we // only have to replace the - // ``ZeroFunction'' used there by an object + // ZeroFunction used there by an object // of the class which describes the // boundary values we would like to use - // (i.e. the ``BoundaryValues'' class + // (i.e. the BoundaryValues class // declared above): std::map boundary_values; VectorTools::interpolate_boundary_values (dof_handler, @@ -599,8 +599,8 @@ void LaplaceProblem::solve () // obtain it see the ReadMe file of // deal.II). To write data in this format, we // simply replace the - // ``data_out.write_gnuplot'' call by - // ``data_out.write_gmv''. + // data_out.write_gnuplot call by + // data_out.write_gmv. // // Since the program will run both 2d and 3d // versions of the laplace solver, we use the @@ -647,15 +647,15 @@ void LaplaceProblem::run () } - // @sect3{The ``main'' function} + // @sect3{The main function} // And this is the main function. It also // looks mostly like in step-3, but if you // look at the code below, note how we first // create a variable of type - // ``LaplaceProblem@<2@>'' (forcing the + // LaplaceProblem@<2@> (forcing the // compiler to compile the class template - // with ``dim'' replaced by ``2'') and run a + // with dim replaced by 2) and run a // 2d simulation, and then we do the whole // thing over in 3d. 
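The body of main itself is unchanged by this patch, but for orientation it presumably amounts to roughly the following sketch; the two braced blocks and the deallog call are exactly what the next comments explain:

    int main ()
    {
      deallog.depth_console (0);   // suppress solver output, see the discussion below

      {
        LaplaceProblem<2> laplace_problem_2d;
        laplace_problem_2d.run ();
      }

      {
        LaplaceProblem<3> laplace_problem_3d;
        laplace_problem_3d.run ();
      }

      return 0;
    }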
// @@ -680,12 +680,12 @@ void LaplaceProblem::run () // // Each of the two blocks is enclosed in // braces to make sure that the - // ``laplace_problem_2d'' variable goes out + // laplace_problem_2d variable goes out // of scope (and releases the memory it // holds) before we move on to allocate // memory for the 3d case. Without the // additional braces, the - // ``laplace_problem_2d'' variable would only + // laplace_problem_2d variable would only // be destroyed at the end of the function, // i.e. after running the 3d problem, and // would needlessly hog memory while the 3d @@ -698,7 +698,7 @@ void LaplaceProblem::run () // starting residual and the number of the // iteration where convergence was // detected. This can be suppressed through - // the ``deallog.depth_console(0)'' call. + // the deallog.depth_console(0) call. // // The rationale here is the following: the // deallog (i.e. deal-log, not de-allog) diff --git a/deal.II/examples/step-5/step-5.cc b/deal.II/examples/step-5/step-5.cc index c87b12618b..0caaae616b 100644 --- a/deal.II/examples/step-5/step-5.cc +++ b/deal.II/examples/step-5/step-5.cc @@ -52,21 +52,21 @@ // ... and this is too: We will // convert integers to strings using // the C++ stringstream class - // ``ostringstream'': + // ostringstream: #include - // @sect3{The ``LaplaceProblem'' class template} + // @sect3{The LaplaceProblem class template} // The main class is mostly as in the // previous example. The most visible // change is that the function - // ``make_grid_and_dofs'' has been + // make_grid_and_dofs has been // removed, since creating the grid - // is now done in the ``run'' + // is now done in the run // function and the rest of its // functionality is now in - // ``setup_system''. Apart from this, + // setup_system. Apart from this, // everything is as before. template class LaplaceProblem @@ -93,7 +93,7 @@ class LaplaceProblem }; - // @sect3{Nonconstant coefficients, using ``Assert''} + // @sect3{Nonconstant coefficients, using Assert} // In step-4, we showed how to use // non-constant boundary values and @@ -101,18 +101,18 @@ class LaplaceProblem // we want to use a variable // coefficient in the elliptic // operator instead. Of course, the - // suitable object is a ``Function'', + // suitable object is a Function, // as we have used for the right hand // side and boundary values in the // last example. We will use it // again, but we implement another - // function ``value_list'' which + // function value_list which // takes a list of points and returns // the values of the function at // these points as a list. The reason // why such a function is reasonable // although we can get all the - // information from the ``value'' + // information from the value // function as well will be explained // below when assembling the matrix. // @@ -166,7 +166,7 @@ double Coefficient::value (const Point &p, // points at once. Of course, we need // to make sure that the values are // the same as if we would ask the - // ``value'' function for each point + // value function for each point // individually. // // This method takes three @@ -177,9 +177,9 @@ double Coefficient::value (const Point &p, // component that should be zero here // since we only have a single scalar // function. Now, of course the size - // of the output array (``values'') + // of the output array (values) // must be the same as that of the - // input array (``points''), and we + // input array (points), and we // could simply assume that. 
However, // in practice, it turns out that // more than 90 per cent of @@ -188,7 +188,7 @@ double Coefficient::value (const Point &p, // invalid array sizes, etc, so we // should try to make sure that the // parameters are valid. For this, - // the ``Assert'' macro is a good means, + // the Assert macro is a good means, // since it makes sure that the // condition which is given as first // argument is valid, and if not @@ -209,7 +209,7 @@ double Coefficient::value (const Point &p, // should not slow down the program // too much if you want to do large // computations. To this end, the - // ``Assert'' macro is only used in + // Assert macro is only used in // debug mode and expands to nothing // if in optimized mode. Therefore, // while you test your program on @@ -240,10 +240,10 @@ double Coefficient::value (const Point &p, // mode to optimized mode is to go // edit the Makefile in this // directory. It should have a line - // ``debug-mode = on''; simply - // replace it by ``debug-mode = off'' + // debug-mode = on; simply + // replace it by debug-mode = off // and recompile your program. The - // output of the ``make'' program + // output of the make program // should already indicate to you // that the program is now compiled // in optimized mode, and it will @@ -259,7 +259,7 @@ double Coefficient::value (const Point &p, // two arrays is one of the most // frequent checks, which is why // there is already an exception - // class ``ExcDimensionMismatch'' + // class ExcDimensionMismatch // that takes the sizes of two // vectors and prints some output in // case the condition is violated: @@ -277,7 +277,7 @@ void Coefficient::value_list (const std::vector > &points, // trigger this exception at the // end of the main program, and // what output results from this - // (see the ``Results'' section of + // (see the Results section of // this example program). You will // certainly notice that the output // is quite well suited to quickly @@ -295,7 +295,7 @@ void Coefficient::value_list (const std::vector > &points, // While we're at it, we can do // another check: the coefficient // is a scalar, but the - // ``Function'' class also + // Function class also // represents vector-valued // function. A scalar function must // therefore be considered as a @@ -305,9 +305,9 @@ void Coefficient::value_list (const std::vector > &points, // ask is zero (we always count // from zero). The following // assertion checks this. If the - // condition in the ``Assert'' call + // condition in the Assert call // is violated, an exception of - // type ``ExcRange'' will be + // type ExcRange will be // triggered; that class takes the // violating index as first // argument, and the second and @@ -320,11 +320,11 @@ void Coefficient::value_list (const std::vector > &points, // zero, of course. (The interval // is half open since we also want // to write exceptions like - // ``ExcRange(i,0,v.size())'', + // ExcRange(i,0,v.size()), // where an index must be between // zero but less than the size of // an array. To save us the effort - // of writing ``v.size()-1'' in + // of writing v.size()-1 in // many places, the range is // defined as half-open.) 
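As a sketch of the size check just described (the corresponding source lines are elided from the diff context above; the signature shown here follows the one used in this program, but treat the details as indicative only):

    template <int dim>
    void Coefficient<dim>::value_list (const std::vector<Point<dim> > &points,
                                       std::vector<double>            &values,
                                       const unsigned int              component) const
    {
      // In debug mode this aborts with a descriptive message if the two
      // arrays differ in size; in optimized mode it expands to nothing:
      Assert (values.size() == points.size(),
              ExcDimensionMismatch (values.size(), points.size()));

      // ...the component check and the actual evaluation follow, as in the
      // program text below...
    }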
Assert (component == 0, @@ -332,7 +332,7 @@ void Coefficient::value_list (const std::vector > &points, // The rest of the function is // uneventful: we define - // ``n_q_points'' as an + // n_q_points as an // abbreviation for the number of // points for which function values // are requested, and then simply @@ -349,7 +349,7 @@ void Coefficient::value_list (const std::vector > &points, } - // @sect3{The ``LaplaceProblem'' class implementation} + // @sect3{The LaplaceProblem class implementation} // @sect4{LaplaceProblem::LaplaceProblem} @@ -365,7 +365,7 @@ LaplaceProblem::LaplaceProblem () : // @sect4{LaplaceProblem::setup_system} // This is the function - // ``make_grid_and_dofs'' from the + // make_grid_and_dofs from the // previous example, minus the // generation of the grid. Everything // else is unchanged: @@ -455,7 +455,7 @@ void LaplaceProblem::assemble_system () // here. // // Then, below, we will ask the - // ``coefficient'' function object + // coefficient function object // to compute the values of the // coefficient at all quadrature // points on one cell at once. The @@ -513,7 +513,7 @@ void LaplaceProblem::assemble_system () // use it in the computation of the // local contributions. This is // what we do in the call to - // ``coefficient.value_list'' in + // coefficient.value_list in // the fourth line of the loop. // // The second change is how we make @@ -593,7 +593,7 @@ void LaplaceProblem::assemble_system () // (symmetric successive // overrelaxation), with a relaxation // factor of 1.2. For this purpose, - // the ``SparseMatrix'' class has a + // the SparseMatrix class has a // function which does one SSOR step, // and we need to package the address // of this function together with the @@ -601,12 +601,12 @@ void LaplaceProblem::assemble_system () // (which is the matrix to be // inverted) and the relaxation // factor into one object. The - // ``PreconditionSSOR'' class does - // this for us. (``PreconditionSSOR'' + // PreconditionSSOR class does + // this for us. (PreconditionSSOR // class takes a template argument // denoting the matrix type it is // supposed to work on. The default - // value is ``SparseMatrix@'', + // value is SparseMatrix@, // which is exactly what we need // here, so we simply stick with the // default and do not specify @@ -614,7 +614,7 @@ void LaplaceProblem::assemble_system () // // With this, the rest of the // function is trivial: instead of - // the ``PreconditionIdentity'' + // the PreconditionIdentity // object we have created before, we // now use the preconditioner we have // declared, and the CG solver will @@ -738,12 +738,12 @@ void LaplaceProblem::output_results (const unsigned int cycle) const // which the results are to be // written. We would like to have // it of the form - // ``solution-N.eps'', where N is + // solution-N.eps, where N is // the number of the refinement // cycle. Thus, we have to convert // an integer to a part of a // string; this can be done using - // the ``sprintf'' function, but in + // the sprintf function, but in // C++ there is a more elegant way: // write everything into a special // stream (just like writing into a @@ -753,7 +753,7 @@ void LaplaceProblem::output_results (const unsigned int cycle) const // conversions from integer to // strings, and one could as well // use stream modifiers such as - // ``setw'', ``setprecision'', and + // setw, setprecision, and // so on. 
In C++, you can do this // by using the so-called stringstream // classes: @@ -770,9 +770,9 @@ void LaplaceProblem::output_results (const unsigned int cycle) const << ".eps"; // We can get whatever we wrote to the - // stream using the ``str()'' function. The + // stream using the str() function. The // result is a string which we have to - // convert to a char* using the ``c_str()'' + // convert to a char* using the c_str() // function. Use that as filename for the // output stream and then write the data to // the file: @@ -787,7 +787,7 @@ void LaplaceProblem::output_results (const unsigned int cycle) const // The second to last thing in this // program is the definition of the - // ``run()'' function. In contrast to + // run() function. In contrast to // the previous programs, we will // compute on a sequence of meshes // that after each iteration is @@ -819,7 +819,7 @@ void LaplaceProblem::run () // previous examples, we // have already used some // of the functions from - // the ``GridGenerator'' + // the GridGenerator // class. Here we would // like to read a grid from // a file where the cells @@ -872,7 +872,7 @@ void LaplaceProblem::run () // particular in this case // one would of course try // to do something else if - // ``dim'' is not equal to + // dim is not equal to // two, e.g. create a grid // using library // functions. Aborting a @@ -900,8 +900,8 @@ void LaplaceProblem::run () // grid. It is in UCD // (unstructured cell data) // format (but the ending - // of the ``UCD''-file is - // ``inp''), as supported + // of the UCD-file is + // inp), as supported // as input format by the // AVS Explorer (a // visualization program), @@ -910,10 +910,10 @@ void LaplaceProblem::run () // If you like to use // another input format, // you have to use an other - // ``grid_in.read_xxx'' + // grid_in.read_xxx // function. (See the // documentation of the - // ``GridIn'' class to find + // GridIn class to find // out what input formats // are presently // supported.) @@ -963,7 +963,7 @@ void LaplaceProblem::run () } - // @sect3{The ``main'' function} + // @sect3{The main function} // The main function looks mostly // like the one in the previous @@ -978,10 +978,10 @@ int main () // Finally, we have promised to // trigger an exception in the - // ``Coefficient'' class through - // the ``Assert'' macro we have + // Coefficient class through + // the Assert macro we have // introduced there. For this, we - // have to call its ``value_list'' + // have to call its value_list // function with two arrays of // different size (the number in // parentheses behind the diff --git a/deal.II/examples/step-6/step-6.cc b/deal.II/examples/step-6/step-6.cc index 98bc5bea55..354055da98 100644 --- a/deal.II/examples/step-6/step-6.cc +++ b/deal.II/examples/step-6/step-6.cc @@ -45,7 +45,7 @@ // will import the declaration of // H1-conforming finite element shape // functions. This family of finite - // elements is called ``FE_Q'', and + // elements is called FE_Q, and // was used in all examples before // already to define the usual bi- or // tri-linear elements, but we will @@ -60,13 +60,13 @@ // refined grids (just the grid, not // the solution) in each step, so we // need the following include file - // instead of ``grid_in.h'': + // instead of grid_in.h: #include // When using locally refined grids, - // we will get so-called ``hanging - // nodes''. However, the standard + // we will get so-called hanging + // nodes. 
However, the standard // finite element methods assumes // that the discrete solution spaces // be continuous, so we need to make @@ -102,12 +102,12 @@ - // @sect3{The ``LaplaceProblem'' class template} + // @sect3{The LaplaceProblem class template} // The main class is again almost // unchanged. Two additions, however, // are made: we have added the - // ``refine_grid'' function, which is + // refine_grid function, which is // used to adaptively refine the grid // (instead of the global refinement // in the previous examples), and a @@ -211,7 +211,7 @@ void Coefficient::value_list (const std::vector > &points, } - // @sect3{The ``LaplaceProblem'' class implementation} + // @sect3{The LaplaceProblem class implementation} // @sect4{LaplaceProblem::LaplaceProblem} @@ -221,9 +221,9 @@ void Coefficient::value_list (const std::vector > &points, // quadratic element. To do so, we // only have to replace the // constructor argument (which was - // ``1'' in all previous examples) by + // 1 in all previous examples) by // the desired polynomial degree - // (here ``2''): + // (here 2): template LaplaceProblem::LaplaceProblem () : dof_handler (triangulation), @@ -238,9 +238,9 @@ LaplaceProblem::LaplaceProblem () : // to add it is a subtle change in // the order of data elements in the // class as compared to all previous - // examples: the ``dof_handler'' + // examples: the dof_handler // object was defined before and not - // after the ``fe'' object. Of course + // after the fe object. Of course // we could have left this order // unchanged, but we would like to // show what happens if the order is @@ -254,25 +254,25 @@ LaplaceProblem::LaplaceProblem () : // following: when we distribute the // degrees of freedom using the // function call - // ``dof_handler.distribute_dofs()'', - // the ``dof_handler'' also stores a + // dof_handler.distribute_dofs(), + // the dof_handler also stores a // pointer to the finite element in // use. Since this pointer is used // every now and then until either // the degrees of freedom are // re-distributed using another // finite element object or until the - // ``dof_handler'' object is + // dof_handler object is // destroyed, it would be unwise if // we would allow the finite element // object to be deleted before the - // ``dof_handler'' object. To + // dof_handler object. To // disallow this, the DoF handler // increases a counter inside the // finite element object which counts // how many objects use that finite // element (this is what the - // ``Subscriptor''/``SmartPointer'' + // Subscriptor/SmartPointer // class pair is used for, in case // you want something like this for // your own programs; see step-7 for @@ -321,7 +321,7 @@ LaplaceProblem::LaplaceProblem () : // exactly the behavior sketched // above. The reason is that member // variables of the - // ``LaplaceProblem'' class are + // LaplaceProblem class are // destructed bottom-up (i.e. in // reverse order of their declaration // in the class), as always in @@ -331,19 +331,19 @@ LaplaceProblem::LaplaceProblem () : // declaration is below the one of // the DoF handler. This triggers the // situation above, and an exception - // will be raised when the ``fe'' + // will be raised when the fe // object is destructed. What needs // to be done is to tell the - // ``dof_handler'' object to release + // dof_handler object to release // its lock to the finite element. 
Of - // course, the ``dof_handler'' will + // course, the dof_handler will // only release its lock if it really // does not need the finite element // any more, i.e. when all finite // element related data is deleted // from it. For this purpose, the - // ``DoFHandler'' class has a - // function ``clear'' which deletes + // DoFHandler class has a + // function clear which deletes // all degrees of freedom, and // releases its lock to the finite // element. After this, you can @@ -398,7 +398,7 @@ LaplaceProblem::~LaplaceProblem () // there are now 9 degrees of freedom // per cell, not only four, that can // couple with each other. The - // ``dof_Handler.max_couplings_between_dofs()'' + // dof_Handler.max_couplings_between_dofs() // call will take care of this, // however: template @@ -423,7 +423,7 @@ void LaplaceProblem::setup_system () // hanging nodes. In the class // desclaration, we have already // allocated space for an object - // ``hanging_node_constraints'' + // hanging_node_constraints // that will hold a list of these // constraints (they form a matrix, // which is reflected in the name @@ -441,17 +441,17 @@ void LaplaceProblem::setup_system () DoFTools::make_hanging_node_constraints (dof_handler, hanging_node_constraints); - // The next step is ``closing'' + // The next step is closing // this object. For this note that, // in principle, the - // ``ConstraintMatrix'' class can + // ConstraintMatrix class can // hold other constraints as well, // i.e. constraints that do not // stem from hanging // nodes. Sometimes, it is useful // to use such constraints, in // which case they may be added to - // the ``ConstraintMatrix'' object + // the ConstraintMatrix object // after the hanging node // constraints were computed. After // all constraints have been added, @@ -459,7 +459,7 @@ void LaplaceProblem::setup_system () // rearranged to perform some // actions more efficiently. This // postprocessing is done using the - // ``close()'' function, after which + // close() function, after which // no further constraints may be // added any more: hanging_node_constraints.close (); @@ -474,9 +474,9 @@ void LaplaceProblem::setup_system () // some space for them here. Since // the process of elimination of // these constrained nodes is - // called ``condensation'', the + // called condensation, the // functions that eliminate them - // are called ``condense'' for both + // are called condense for both // the system matrix and right hand // side, as well as for the // sparsity pattern. @@ -510,7 +510,7 @@ void LaplaceProblem::setup_system () // polynomial degree in the finite // element shape functions. This is // easy to change: the constructor of - // the ``QGauss'' class takes the + // the QGauss class takes the // number of quadrature points in // each space direction. Previously, // we had two points for bilinear @@ -524,15 +524,15 @@ void LaplaceProblem::setup_system () // worth noting, however, that under // the hood several things are // different than before. First, the - // variables ``dofs_per_cell'' and - // ``n_q_points'' now are 9 each, + // variables dofs_per_cell and + // n_q_points now are 9 each, // where they were 4 // before. Introducing such variables // as abbreviations is a good // strategy to make code work with // different elements without having // to change too much code. 
Secondly, - // the ``fe_values'' object of course + // the fe_values object of course // needs to do other things as well, // since the shape functions are now // quadratic, rather than linear, in @@ -620,7 +620,7 @@ void LaplaceProblem::assemble_system () // of the matrix and all other // entries for this line are set to // zero) but the computed values - // are invalid (the ``condense'' + // are invalid (the condense // function modifies the system so // that the values in the solution // corresponding to constrained @@ -628,7 +628,7 @@ void LaplaceProblem::assemble_system () // system still has a well-defined // solution; we compute the correct // values for these nodes at the - // end of the ``solve'' function). + // end of the solve function). // As almost all the stuff before, // the interpolation of boundary @@ -676,7 +676,7 @@ void LaplaceProblem::assemble_system () // have to do is to use the // constraints to assign to them the // values that they should have. This - // process, called ``distributing'' + // process, called distributing // hanging nodes, computes the values // of constrained nodes from the // values of the unconstrained ones, @@ -705,7 +705,7 @@ void LaplaceProblem::solve () // Instead of global refinement, we // now use a slightly more elaborate // scheme. We will use the - // ``KellyErrorEstimator'' class + // KellyErrorEstimator class // which implements an error // estimator for the Laplace // equation; it can in principle @@ -745,7 +745,7 @@ void LaplaceProblem::solve () // way to test an adaptive program. // // The way the estimator works is to - // take a ``DoFHandler'' object + // take a DoFHandler object // describing the degrees of freedom // and a vector of values for each // degree of freedom as input and @@ -753,12 +753,12 @@ void LaplaceProblem::solve () // for each active cell of the // triangulation (i.e. one value for // each of the - // ``triangulation.n_active_cells()'' + // triangulation.n_active_cells() // cells). To do so, it needs two // additional pieces of information: // a quadrature formula on the faces // (i.e. quadrature formula on - // ``dim-1'' dimensional objects. We + // dim-1 dimensional objects. We // use a 3-point Gauss rule again, a // pick that is consistent and // appropriate with the choice @@ -790,7 +790,7 @@ void LaplaceProblem::solve () // corresponding Neumann values. This // information is represented by an // object of type - // ``FunctionMap@::type'' that is + // FunctionMap@::type that is // essentially a map from boundary // indicators to function objects // describing Neumann boundary values @@ -829,7 +829,7 @@ void LaplaceProblem::refine_grid () // The above function returned one // error indicator value for each // cell in the - // ``estimated_error_per_cell'' + // estimated_error_per_cell // array. Refinement is now done as // follows: refine those 30 per // cent of the cells with the @@ -948,9 +948,9 @@ void LaplaceProblem::refine_grid () // hack with an explicit assertion at // the beginning of the function. If // this assertion is triggered, - // i.e. when ``cycle'' is larger than + // i.e. 
when cycle is larger than // or equal to 10, an exception of - // type ``ExcNotImplemented'' is + // type ExcNotImplemented is // raised, indicating that some // functionality is not implemented // for this case (the functionality @@ -977,8 +977,8 @@ void LaplaceProblem::output_results (const unsigned int cycle) const // @sect4{LaplaceProblem::run} // The final function before - // ``main()'' is again the main - // driver of the class, ``run()''. It + // main() is again the main + // driver of the class, run(). It // is similar to the one of step-5, // except that we generate a file in // the program again instead of @@ -1087,7 +1087,7 @@ void LaplaceProblem::run () } - // @sect3{The ``main'' function} + // @sect3{The main function} // The main function is unaltered in // its functionality from the @@ -1107,11 +1107,11 @@ void LaplaceProblem::run () // for all, this kind of exceptions // is not switched off in optimized // mode, in contrast to the - // ``Assert'' macro which we have + // Assert macro which we have // used to test against programming // errors. If uncaught, these // exceptions propagate the call tree - // up to the ``main'' function, and + // up to the main function, and // if they are not caught there // either, the program is aborted. In // many cases, like if there is not @@ -1124,7 +1124,7 @@ void LaplaceProblem::run () // useful to write any larger program // in this way, and you can do so by // more or less copying this function - // except for the ``try'' block that + // except for the try block that // actually encodes the functionality // particular to the present // application. @@ -1148,8 +1148,8 @@ int main () // exception that was thrown is an // object of a class that is // derived from the C++ standard - // class ``exception'', then we can - // use the ``what'' member function + // class exception, then we can + // use the what member function // to get a string which describes // the reason why the exception was // thrown. @@ -1157,12 +1157,12 @@ int main () // The deal.II exception classes // are all derived from the // standard class, and in - // particular, the ``exc.what()'' + // particular, the exc.what() // function will return // approximately the same string as // would be generated if the // exception was thrown using the - // ``Assert'' macro. You have seen + // Assert macro. You have seen // the output of such an exception // in the previous example, and you // then know that it contains the @@ -1176,7 +1176,7 @@ int main () // much that we can do except // exiting the program with an // error code (this is what the - // ``return 1;'' does): + // return 1; does): catch (std::exception &exc) { std::cerr << std::endl << std::endl @@ -1193,7 +1193,7 @@ int main () // If the exception that was thrown // somewhere was not an object of a // class derived from the standard - // ``exception'' class, then we + // exception class, then we // can't do anything at all. We // then simply print an error // message and exit. diff --git a/deal.II/examples/step-7/step-7.cc b/deal.II/examples/step-7/step-7.cc index 9fe8898ac5..6c8afa4768 100644 --- a/deal.II/examples/step-7/step-7.cc +++ b/deal.II/examples/step-7/step-7.cc @@ -42,7 +42,7 @@ // In this example, we will not use the // numeration scheme which is used per - // default by the ``DoFHandler'' class, but + // default by the DoFHandler class, but // will renumber them using the Cuthill-McKee // algorithm. 
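(Returning for a moment to the step-6 main() function discussed above: its exception-handling layout reduces to the following skeleton. The catch bodies are abbreviated relative to the actual program, and the error messages are placeholders.)

    #include <iostream>
    #include <exception>

    int main ()
    {
      try
        {
          // ...the actual work of the program goes here...
          LaplaceProblem<2> laplace_problem_2d;
          laplace_problem_2d.run ();
        }
      catch (std::exception &exc)
        {
          // deal.II exceptions derive from std::exception, so what() returns
          // roughly the text an Assert failure would have printed:
          std::cerr << "Exception on processing: " << exc.what() << std::endl;
          return 1;
        }
      catch (...)
        {
          // not derived from std::exception: nothing detailed we can report
          std::cerr << "Unknown exception!" << std::endl;
          return 1;
        }

      return 0;
    }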
As has already been explained // in step-2, the necessary functions are @@ -52,23 +52,23 @@ // how we can make sure that objects // are not deleted while they are // still in use. For this purpose, - // deal.II has the ``SmartPointer'' + // deal.II has the SmartPointer // helper class, which is declared in // this file: #include // Next, we will want to use the - // ``integrate_difference'' function + // integrate_difference function // mentioned in the introduction, and we are - // going to use a ``ConvergenceTable'' that + // going to use a ConvergenceTable that // collects all important data during a run // and prints it at the end as a table. These // comes from the following two files: #include #include // And finally, we need to use the - // ``FEFaceValues'' class, which is + // FEFaceValues class, which is // declared in the same file as the - // ``FEValues'' class: + // FEValues class: #include // We need one more include from standard @@ -141,20 +141,20 @@ class SolutionBase // First we assign values to the centers for // the 1d case, where we place the centers // equidistantly at -1/3, 0, and 1/3. The - // ``template @<@>'' header for this definition + // template @<@> header for this definition // indicates an explicit specialization. This // means, that the variable belongs to a // template, but that instead of providing // the compiler with a template from which it // can specialize a concrete variable by - // substituting ``dim'' with some concrete + // substituting dim with some concrete // value, we provide a specialization - // ourselves, in this case for ``dim=1''. If + // ourselves, in this case for dim=1. If // the compiler then sees a reference to this // variable in a place where the template // argument equals one, it knows that it // doesn't have to generate the variable from - // a template by substituting ``dim'', but + // a template by substituting dim, but // can immediately use the following // definition: template <> @@ -165,7 +165,7 @@ SolutionBase<1>::source_centers[SolutionBase<1>::n_source_centers] Point<1>(+1.0 / 3.0) }; // Likewise, we can provide an explicit - // specialization for ``dim=2''. We place the + // specialization for dim=2. We place the // centers for the 2d case as follows: template <> const Point<2> @@ -180,7 +180,7 @@ SolutionBase<2>::source_centers[SolutionBase<2>::n_source_centers] // dimensions. In this case, we simply // provide the compiler with a template from // which it can generate a concrete - // instantiation by substituting ``dim'' with + // instantiation by substituting dim with // a concrete value: template const double SolutionBase::width = 1./3.; @@ -193,10 +193,10 @@ const double SolutionBase::width = 1./3.; // the classes representing these // two. They both represent // continuous functions, so they are - // derived from the ``Function@'' + // derived from the Function@ // base class, and they also inherit // the characteristics defined in the - // ``SolutionBase'' class. + // SolutionBase class. // // The actual classes are declared in the // following. Note that in order to compute @@ -207,15 +207,15 @@ const double SolutionBase::width = 1./3.; // more than we have done in previous // examples, where all we provided was the // value at one or a list of - // points. Fortunately, the ``Function'' + // points. Fortunately, the Function // class also has virtual functions for the // gradient, so we can simply overload the // respective virtual member functions in the - // ``Function'' base class. 
Note that the - // gradient of a function in ``dim'' space - // dimensions is a vector of size ``dim'', + // Function base class. Note that the + // gradient of a function in dim space + // dimensions is a vector of size dim, // i.e. a tensor of rank 1 and dimension - // ``dim''. As for so many other things, the + // dim. As for so many other things, the // library provides a suitable class for // this. // @@ -249,10 +249,10 @@ class Solution : public Function, // elements of a base class that is // template dependent (in this case // the elements of - // ``SolutionBase@''), then the + // SolutionBase@), then the // C++ language forces us to write - // ``this-@>n_source_centers'' (for - // example). Note that the ``this-@>'' + // this-@>n_source_centers (for + // example). Note that the this-@> // qualification is not necessary if // the base class is not template // dependent, and also that the gcc @@ -262,8 +262,8 @@ class Solution : public Function, // is necessary is complicated; some // books on C++ may explain it, so if // you are interested you can look it - // up under the phrase ``two-stage - // (name) lookup''. + // up under the phrase two-stage + // (name) lookup. template double Solution::value (const Point &p, const unsigned int) const @@ -284,22 +284,22 @@ double Solution::value (const Point &p, // gradient of the solution. In order to // accumulate the gradient from the // contributions of the exponentials, we - // allocate an object ``return_value'' that + // allocate an object return_value that // denotes the mathematical quantity of a - // tensor of rank ``1'' and dimension - // ``dim''. Its default constructor sets it + // tensor of rank 1 and dimension + // dim. Its default constructor sets it // to the vector containing only zeroes, so // we need not explicitly care for its // initialization. // // Note that we could as well have taken the - // type of the object to be ``Point@'' - // instead of ``Tensor@<1,dim@>''. Tensors of + // type of the object to be Point@ + // instead of Tensor@<1,dim@>. Tensors of // rank 1 and points are almost exchangeable, // and have only very slightly different // mathematical meanings. In fact, the - // ``Point@'' class is derived from the - // ``Tensor@<1,dim@>'' class, which makes up + // Point@ class is derived from the + // Tensor@<1,dim@> class, which makes up // for their mutual exchange ability. Their // main difference is in what they logically // mean: points are points in space, such as @@ -420,7 +420,7 @@ double RightHandSide::value (const Point &p, // mode as arguments. // // The rest of the member functions are as - // before except for the ``process_solution'' + // before except for the process_solution // function: After the solution has been // computed, we perform some analysis on it, // such as computing the error in various @@ -476,7 +476,7 @@ class HelmholtzProblem // a triangulation object, and we // have a finite element object, // and we also have an object of - // type ``DoFHandler'' that uses + // type DoFHandler that uses // both of the first two. These // three objects all have a // lifetime that is rather long @@ -487,10 +487,10 @@ class HelmholtzProblem // they are destroyed at the very // end. The question is: can we // guarantee that the two objects - // which the ``DoFHandler'' uses, + // which the DoFHandler uses, // live at least as long as they // are in use? 
This means that - // the ``DoFHandler'' must have some + // the DoFHandler must have some // kind of lock on the // destruction of the other // objects, and it can only @@ -518,12 +518,12 @@ class HelmholtzProblem // to such potentially dangerous // pointers are derived from a // class called - // ``Subscriptor''. For example, - // the ``Triangulation'', - // ``DoFHandler'', and a base - // class of the ``FiniteElement'' + // Subscriptor. For example, + // the Triangulation, + // DoFHandler, and a base + // class of the FiniteElement // class are derived from - // ``Subscriptor''. This latter + // Subscriptor. This latter // class does not offer much // functionality, but it has a // built-in counter which we can @@ -541,9 +541,9 @@ class HelmholtzProblem // // On the other hand, if an object of a // class that is derived from the - // ``Subscriptor'' class is destroyed, it + // Subscriptor class is destroyed, it // also has to call the destructor of the - // ``Subscriptor'' class. In this + // Subscriptor class. In this // destructor, there // will then be a check whether the // counter is really zero. If @@ -608,32 +608,32 @@ class HelmholtzProblem // for the programmer to do so // herself. The class that // actually does all this is - // called ``SmartPointer'' and + // called SmartPointer and // takes as template parameter // the data type of the object // which it shall point to. The // latter type may be any class, // as long as it is derived from - // the ``Subscriptor'' class. + // the Subscriptor class. // // In the present example program, we // want to protect the finite element // object from the situation that for // some reason the finite element pointed // to is destroyed while still in use. We - // therefore use a ``SmartPointer'' to + // therefore use a SmartPointer to // the finite element object; since the // finite element object is actually // never changed in our computations, we - // pass a ``const FiniteElement@'' as + // pass a const FiniteElement@ as // template argument to the - // ``SmartPointer'' class. Note that the + // SmartPointer class. Note that the // pointer so declared is assigned at // construction time of the solve object, // and destroyed upon destruction, so the // lock on the destruction of the finite // element object extends throughout the - // lifetime of this ``HelmholtzProblem'' + // lifetime of this HelmholtzProblem // object. Triangulation triangulation; DoFHandler dof_handler; @@ -665,20 +665,20 @@ class HelmholtzProblem // (like the number of cells, or the L2 // error of the numerical solution) will // be generated and later printed. The - // ``TableHandler'' can be used to + // TableHandler can be used to // collect all this data and to output it // at the end of the run as a table in a // simple text or in LaTeX // format. Here we don't only use the - // ``TableHandler'' but we use the - // derived class ``ConvergenceTable'' + // TableHandler but we use the + // derived class ConvergenceTable // that additionally evaluates rates of // convergence: ConvergenceTable convergence_table; }; - // @sect3{The ``HelmholtzProblem'' class implementation} + // @sect3{The HelmholtzProblem class implementation} // @sect4{HelmholtzProblem::HelmholtzProblem} @@ -798,7 +798,7 @@ void HelmholtzProblem::setup_system () // bi-quadratic elements and therefore have // to use sufficiently accurate quadrature // formula. In addition, we need to compute - // integrals over faces, i.e. ``dim-1'' + // integrals over faces, i.e. dim-1 // dimensional objects. 
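(As an aside to the SmartPointer discussion above: the declaration and initialization described there amount to roughly the following sketch. The header paths are assumptions about the library layout of this era, and the actual constructor of this program may take further arguments.)

    #include <base/smartpointer.h>
    #include <fe/fe.h>

    template <int dim>
    class HelmholtzProblem
    {
      public:
        HelmholtzProblem (const FiniteElement<dim> &fe);
        // ...further members as in the program text...
      private:
        // While this pointer subscribes to the finite element object,
        // destroying that object triggers an exception:
        SmartPointer<const FiniteElement<dim> > fe;
    };

    template <int dim>
    HelmholtzProblem<dim>::HelmholtzProblem (const FiniteElement<dim> &fe)
                    :
                    fe (&fe)   // store the address; the SmartPointer subscribes to the object
    {}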
The declaration of a // face quadrature formula is then // straightforward: @@ -850,7 +850,7 @@ void HelmholtzProblem::assemble_system () // cell) to evaluate the right hand // side function. The object we use // to get at this information is - // the ``FEValues'' class discussed + // the FEValues class discussed // previously. // // For the face integrals, we only @@ -863,7 +863,7 @@ void HelmholtzProblem::assemble_system () // from the exact solution object // (see below). The class that gives // us this information is called - // ``FEFaceValues'': + // FEFaceValues: FEValues fe_values (*fe, quadrature_formula, update_values | update_gradients | update_q_points | update_JxW_values); @@ -886,7 +886,7 @@ void HelmholtzProblem::assemble_system () // the right hand side object are only // querying data, never changing the // object. We can therefore declare it - // ``const'': + // const: const RightHandSide right_hand_side; std::vector rhs_values (n_q_points); @@ -958,17 +958,17 @@ void HelmholtzProblem::assemble_system () // nonzero. To this end, we // loop over all faces and // check whether its boundary - // indicator equals ``1'', + // indicator equals 1, // which is the value that we // have assigned to that // portions of the boundary // composing Gamma2 in the - // ``run()'' function further + // run() function further // below. (The // default value of boundary - // indicators is ``0'', so faces + // indicators is 0, so faces // can only have an indicator - // equal to ``1'' if we have + // equal to 1 if we have // explicitly set it.) for (unsigned int face=0; face::faces_per_cell; ++face) if (cell->face(face)->at_boundary() @@ -988,9 +988,9 @@ void HelmholtzProblem::assemble_system () // computation of the // contour integral. This // is done using the - // ``reinit'' function + // reinit function // which we already know - // from the ``FEValue'' + // from the FEValue // class: fe_face_values.reinit (cell, face); @@ -1008,7 +1008,7 @@ void HelmholtzProblem::assemble_system () // vector to the face at the // present quadrature point // obtained from the - // ``fe_face_values'' + // fe_face_values // object. This is then used to // compute the additional // contribution of this face to @@ -1053,7 +1053,7 @@ void HelmholtzProblem::assemble_system () // we interpolate boundary values // (denoted by the second parameter // to - // ``interpolate_boundary_values'') + // interpolate_boundary_values) // does not represent the whole // boundary any more. Rather, it is // that portion of the boundary @@ -1135,8 +1135,8 @@ void HelmholtzProblem::solve () // // At the end of the switch, we have a // default case that looks slightly strange: - // an ``Assert'' statement with a ``false'' - // condition. Since the ``Assert'' macro + // an Assert statement with a false + // condition. 
Since the Assert macro // raises an error whenever the condition is // false, this means that whenever we hit // this statement the program will be @@ -1217,7 +1217,7 @@ void HelmholtzProblem::process_solution (const unsigned int cycle) // the difference between computed // numerical solution and the // continuous solution (described - // by the ``Solution'' class + // by the Solution class // defined at the top of this // file), we first need a vector // that will hold the norm of the @@ -1225,8 +1225,8 @@ void HelmholtzProblem::process_solution (const unsigned int cycle) // accuracy with 16 digits is not // so important for these // quantities, we save some memory - // by using ``float'' instead of - // ``double'' values. + // by using float instead of + // double values. // // The next step is to use a function // from the library which computes the @@ -1253,7 +1253,7 @@ void HelmholtzProblem::process_solution (const unsigned int cycle) // cell, and taking the square root // of that value. This is // equivalent to taking the l2 - // (lower case ``l'') norm of the + // (lower case l) norm of the // vector of norms on each cell: Vector difference_per_cell (triangulation.n_active_cells()); VectorTools::integrate_difference (dof_handler, @@ -1266,9 +1266,9 @@ void HelmholtzProblem::process_solution (const unsigned int cycle) // By same procedure we get the H1 // semi-norm. We re-use the - // ``difference_per_cell'' vector since it + // difference_per_cell vector since it // is no longer used after computing the - // ``L2_error'' variable above. + // L2_error variable above. VectorTools::integrate_difference (dof_handler, solution, Solution(), @@ -1292,7 +1292,7 @@ void HelmholtzProblem::process_solution (const unsigned int cycle) // by iterating the trapezoidal // rule five times in each space // direction. Note that the - // constructor of the ``QIterated'' + // constructor of the QIterated // class takes a one-dimensional // quadrature rule and a number // that tells it how often it shall @@ -1308,7 +1308,7 @@ void HelmholtzProblem::process_solution (const unsigned int cycle) // maximum value over all cell-wise // entries, an operation that is // conveniently done using the - // ``Vector@::linfty'' function: + // Vector@::linfty function: const QTrapez<1> q_trapez; const QIterated q_iterated (q_trapez, 5); VectorTools::integrate_difference (dof_handler, @@ -1323,7 +1323,7 @@ void HelmholtzProblem::process_solution (const unsigned int cycle) // computed, we finally write some // output. In addition, we add the // important data to the - // ``TableHandler'' by specifying + // TableHandler by specifying // the key of the column and the value. // Note that it is not necessary to // define column keys beforehand -- it is @@ -1356,7 +1356,7 @@ void HelmholtzProblem::process_solution (const unsigned int cycle) // @sect4{HelmholtzProblem::run} // As in previous example programs, - // the ``run'' function controls the + // the run function controls the // flow of execution. The basic // layout is as in previous examples: // an outer loop over successively @@ -1376,10 +1376,10 @@ void HelmholtzProblem::process_solution (const unsigned int cycle) // For this, we will use the // following convention: Faces // belonging to Gamma1 will have the - // boundary indicator ``0'' (which is + // boundary indicator 0 (which is // the default, so we don't have to // set it explicitely), and faces - // belonging to Gamma2 will use ``1'' + // belonging to Gamma2 will use 1 // as boundary indicator. 
To set // these values, we loop over all // cells, then over all faces of a @@ -1387,7 +1387,7 @@ void HelmholtzProblem::process_solution (const unsigned int cycle) // part of the boundary that we want // to denote by Gamma2, and if so set // its boundary indicator to - // ``1''. For the present program, we + // 1. For the present program, we // consider the left and bottom // boundaries as Gamma2. We determine // whether a face is part of that @@ -1476,7 +1476,7 @@ void HelmholtzProblem::run () // statements which we have already // discussed in previous examples. The // first step is to generate a suitable - // filename (called ``gmv_filename'' here, + // filename (called gmv_filename here, // since we want to output data in GMV // format; we add the prefix to distinguish // the filename from that used for other @@ -1506,18 +1506,18 @@ void HelmholtzProblem::run () // end, the finite element base class // stores the maximal polynomial degree of // shape functions in each coordinate - // variable as a variable ``degree'', and + // variable as a variable degree, and // we use for the switch statement (note // that the polynomial degree of bilinear // shape functions is really 2, since they - // contain the term ``x*y''; however, the + // contain the term x*y; however, the // polynomial degree in each coordinate // variable is still only 1). We again use // the same defensive programming technique // to safeguard against the case that the // polynomial degree has an unexpected - // value, using the ``Assert (false, - // ExcNotImplemented())'' idiom in the + // value, using the Assert (false, + // ExcNotImplemented()) idiom in the // default branch of the switch statement: switch (fe->degree) { @@ -1578,9 +1578,9 @@ void HelmholtzProblem::run () // // In order to allow writing more // than one sub-cell per actual - // cell, the ``build_patches'' + // cell, the build_patches // function accepts a parameter - // (the default is ``1'', which is + // (the default is 1, which is // why you haven't seen this // parameter in previous // examples). This parameter @@ -1588,15 +1588,15 @@ void HelmholtzProblem::run () // per space direction each cell // shall be subdivided for // output. For example, if you give - // ``2'', this leads to 4 cells in + // 2, this leads to 4 cells in // 2D and 8 cells in 3D. For // quadratic elements, two // sub-cells per space direction is // obviously the right choice, so // this is what we choose. In // general, for elements of - // polynomial order ``q'', we use - // ``q'' subdivisions, and the + // polynomial order q, we use + // q subdivisions, and the // order of the elements is // determined in the same way as // above. @@ -1613,7 +1613,7 @@ void HelmholtzProblem::run () // After graphical output, we would also // like to generate tables from the error // computations we have done in - // ``process_solution''. There, we have + // process_solution. There, we have // filled a table object with the number of // cells for each refinement step as well // as the errors in different norms. @@ -1627,8 +1627,8 @@ void HelmholtzProblem::run () // fixed point notation. However, for // columns one would like to see in // scientific notation another function - // call sets the ``scientific_flag'' to - // ``true'', leading to floating point + // call sets the scientific_flag to + // true, leading to floating point // representation of numbers. 
convergence_table.set_precision("L2", 3); convergence_table.set_precision("H1", 3); @@ -1641,7 +1641,7 @@ void HelmholtzProblem::run () // For the output of a table into a LaTeX // file, the default captions of the // columns are the keys given as argument - // to the ``add_value'' functions. To have + // to the add_value functions. To have // TeX captions that differ from the // default ones you can specify them by the // following function calls. @@ -1665,7 +1665,7 @@ void HelmholtzProblem::run () // After this, we can finally write the // table to the standard output stream - // ``std::cout'' (after one extra empty + // std::cout (after one extra empty // line, to make things look // prettier). Note, that the output in text // format is quite simple and that @@ -1722,9 +1722,9 @@ void HelmholtzProblem::run () // output the convergence // rates. This may be done by the // functionality the - // ``ConvergenceTable'' offers over + // ConvergenceTable offers over // the regular - // ``TableHandler''. However, we do + // TableHandler. However, we do // it only for global refinement, // since for adaptive refinement // the determination of something @@ -1767,14 +1767,14 @@ void HelmholtzProblem::run () convergence_table.set_column_order (new_order); // For everything that happened - // to the ``ConvergenceTable'' + // to the ConvergenceTable // until this point, it would // have been sufficient to use // a simple - // ``TableHandler''. Indeed, the - // ``ConvergenceTable'' is + // TableHandler. Indeed, the + // ConvergenceTable is // derived from the - // ``TableHandler'' but it offers + // TableHandler but it offers // the additional functionality // of automatically evaluating // convergence rates. For @@ -1873,7 +1873,7 @@ int main () // order to destroy the // respective objects (i.e. the // finite element and the - // ``HelmholtzProblem'' object) + // HelmholtzProblem object) // at the end of the block and // before we go to the next // run. This avoids conflicts @@ -1882,7 +1882,7 @@ int main () // is released immediately // after one of the three runs // has finished, and not only - // at the end of the ``try'' + // at the end of the try // block. { std::cout << "Solving with Q1 elements, adaptive refinement" << std::endl diff --git a/deal.II/examples/step-8/step-8.cc b/deal.II/examples/step-8/step-8.cc index a77bb2ba69..6d14741c89 100644 --- a/deal.II/examples/step-8/step-8.cc +++ b/deal.II/examples/step-8/step-8.cc @@ -56,19 +56,19 @@ #include - // @sect3{The ``ElasticProblem'' class template} + // @sect3{The ElasticProblem class template} // The main class is, except for its // name, almost unchanged with // respect to the step-6 example. // // The only change is the use of a - // different class for the ``fe'' + // different class for the fe // variable: Instead of a concrete // finite element class such as - // ``FE_Q'', we now use a more - // generic one, ``FESystem''. In - // fact, ``FESystem'' is not really a + // FE_Q, we now use a more + // generic one, FESystem. In + // fact, FESystem is not really a // finite element itself in that it // does not implement shape functions // of its own. Rather, it is a class @@ -77,7 +77,7 @@ // one vector-valued finite // element. In our case, we will // compose the vector-valued element - // of ``FE_Q(1)'' objects, as shown + // of FE_Q(1) objects, as shown // below in the constructor of this // class. 
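The composition just described is a single constructor call. As a sketch, in 2d and with the bilinear degree-1 element used here (the actual constructor of the class appears further down in this diff):

    #include <fe/fe_q.h>
    #include <fe/fe_system.h>

    FESystem<2> fe (FE_Q<2>(1),   // the scalar element to be repeated...
                    2);           // ...and the number of copies, i.e. vector components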
template @@ -142,9 +142,9 @@ class RightHandSide : public Function // The next change is that we // want a replacement for the - // ``value'' function of the + // value function of the // previous examples. There, a - // second parameter ``component'' + // second parameter component // was given, which denoted which // component was requested. Here, // we implement a function that @@ -153,12 +153,12 @@ class RightHandSide : public Function // once, in the second argument // of the function. The obvious // name for such a replacement - // function is ``vector_value''. + // function is vector_value. // // Secondly, in analogy to the - // ``value_list'' function, there + // value_list function, there // is a function - // ``vector_value_list'', which + // vector_value_list, which // returns the values of the // vector-valued function at // several points at once: @@ -174,9 +174,9 @@ class RightHandSide : public Function // right hand side class. As said // above, it only passes down to the // base class the number of - // components, which is ``dim'' in + // components, which is dim in // the present case (one force - // component in each of the ``dim'' + // component in each of the dim // space directions). // // Some people would have moved the @@ -201,7 +201,7 @@ RightHandSide::RightHandSide () // Next the function that returns // the whole vector of values at the - // point ``p'' at once. + // point p at once. // // To prevent cases where the return // vector has not previously been set @@ -228,7 +228,7 @@ RightHandSide::RightHandSide () // not be removed if we can't rely on // the assumption that the vector // already has the correct size; this - // is in contract to the ``Assert'' + // is in contract to the Assert // call that is completely removed if // the program is compiled in // optimized mode. @@ -268,13 +268,13 @@ void RightHandSide::vector_value (const Point &p, // two objects that denote the // centers of these areas. Note // that upon construction of the - // ``Point'' objects, all + // Point objects, all // components are set to zero. Point point_1, point_2; point_1(0) = 0.5; point_2(0) = -0.5; - // If now the point ``p'' is in a + // If now the point p is in a // circle (sphere) of radius 0.2 // around one of these points, then // set the force in x-direction to @@ -285,7 +285,7 @@ void RightHandSide::vector_value (const Point &p, else values(0) = 0; - // Likewise, if ``p'' is in the + // Likewise, if p is in the // vicinity of the origin, then set // the y-force to 1, otherwise to // zero: @@ -323,7 +323,7 @@ void RightHandSide::vector_value_list (const std::vector > &poin // points. In one of the previous // examples, we have explained why // the - // ``value_list''/``vector_value_list'' + // value_list/vector_value_list // function had been introduced: to // prevent us from calling virtual // functions too frequently. On the @@ -336,7 +336,7 @@ void RightHandSide::vector_value_list (const std::vector > &poin // // We can prevent this situation by // calling - // ``RightHandSide@::vector_valued'' + // RightHandSide@::vector_valued // on each point in the input // list. Note that by giving the // full name of the function, @@ -346,7 +346,7 @@ void RightHandSide::vector_value_list (const std::vector > &poin // and not to use the virtual // function call mechanism that // would be used if we had just - // called ``vector_value''. This is + // called vector_value. 
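A sketch of the forwarding function along the lines just described (parameter names follow those of this program; treat them as indicative):

    template <int dim>
    void RightHandSide<dim>::vector_value_list (const std::vector<Point<dim> > &points,
                                                std::vector<Vector<double> >   &value_list) const
    {
      const unsigned int n_points = points.size();

      for (unsigned int p=0; p<n_points; ++p)
        // fully qualified call: resolved at compile time, no virtual
        // dispatch, and therefore a candidate for inlining
        RightHandSide<dim>::vector_value (points[p],
                                          value_list[p]);
    }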
This is // important, since the compiler // generally can't make any // assumptions which function is @@ -363,8 +363,8 @@ void RightHandSide::vector_value_list (const std::vector > &poin // inline above function into the // present location. (Note that we // have declared the - // ``vector_value'' function above - // ``inline'', though modern + // vector_value function above + // inline, though modern // compilers are also able to // inline functions even if they // have not been declared as @@ -382,9 +382,9 @@ void RightHandSide::vector_value_list (const std::vector > &poin // functions in the same way. Using // this forwarding mechanism, we // only have to change a single - // place (the ``vector_value'' + // place (the vector_value // function), and the second place - // (the ``vector_value_list'' + // (the vector_value_list // function) will always be // consistent with it. At the same // time, using virtual function @@ -399,7 +399,7 @@ void RightHandSide::vector_value_list (const std::vector > &poin - // @sect3{The ``ElasticProblem'' class implementation} + // @sect3{The ElasticProblem class implementation} // @sect4{ElasticProblem::ElasticProblem} @@ -417,9 +417,9 @@ void RightHandSide::vector_value_list (const std::vector > &poin // would like to stack together // equals the number of components // the solution function has, which - // is ``dim'' since we consider + // is dim since we consider // displacement in each space - // direction. The ``FESystem'' class + // direction. The FESystem class // can handle this: we pass it the // finite element of which we would // like to compose the system of, and @@ -431,7 +431,7 @@ ElasticProblem::ElasticProblem () dof_handler (triangulation), fe (FE_Q(1), dim) {} - // In fact, the ``FESystem'' class + // In fact, the FESystem class // has several more constructors // which can perform more complex // operations than just stacking @@ -458,7 +458,7 @@ ElasticProblem::~ElasticProblem () // Setting up the system of equations // is identitical to the function // used in the step-6 example. The - // ``DoFHandler'' class and all other + // DoFHandler class and all other // classes used here are fully aware // that the finite element we want to // use is vector-valued, and take @@ -514,19 +514,19 @@ void ElasticProblem::setup_system () // are the same as before, however: // setting up a suitable quadrature // formula, initializing an - // ``FEValues'' object for the + // FEValues object for the // (vector-valued) finite element we // use as well as the quadrature // object, and declaring a number of // auxiliary arrays. In addition, we // declare the ever same two - // abbreviations: ``n_q_points'' and - // ``dofs_per_cell''. The number of + // abbreviations: n_q_points and + // dofs_per_cell. The number of // degrees of freedom per cell we now // obviously ask from the composed // finite element rather than from // the underlying scalar Q1 - // element. Here, it is ``dim'' times + // element. Here, it is dim times // the number of degrees of freedom // per cell of the Q1 element, though // this is not explicit knowledge we @@ -579,11 +579,11 @@ void ElasticProblem::assemble_system () // we now have a vector-valued // right hand side, which is why // the data type of the - // ``rhs_values'' array is + // rhs_values array is // changed. We initialize it by - // ``n_q_points'' elements, each of - // which is a ``Vector@'' - // with ``dim'' elements. + // n_q_points elements, each of + // which is a Vector@ + // with dim elements. 
RightHandSide right_hand_side; std::vector > rhs_values (n_q_points, Vector(dim)); @@ -620,35 +620,35 @@ void ElasticProblem::assemble_system () // example. One of the few // comments in place is that we // can compute the number - // ``comp(i)'', i.e. the index + // comp(i), i.e. the index // of the only nonzero vector // component of shape function - // ``i'' using the - // ``fe.system_to_component_index(i).first'' + // i using the + // fe.system_to_component_index(i).first // function call below. // // (By accessing the - // ``first'' variable of + // first variable of // the return value of the - // ``system_to_component_index'' + // system_to_component_index // function, you might // already have guessed // that there is more in // it. In fact, the // function returns a - // ``std::pair@'', of + // std::pair@, of // which the first element - // is ``comp(i)'' and the + // is comp(i) and the // second is the value - // ``base(i)'' also noted + // base(i) also noted // in the introduction, i.e. // the index // of this shape function // within all the shape // functions that are nonzero // in this component, - // i.e. ``base(i)'' in the + // i.e. base(i) in the // diction of the // introduction. This is not a // number that we are usually @@ -678,7 +678,7 @@ void ElasticProblem::assemble_system () // + (mu d_i u_j, // d_j v_i). // Note that - // ``shape_grad(i,q_point)'' + // shape_grad(i,q_point) // returns the // gradient of // the only @@ -690,7 +690,7 @@ void ElasticProblem::assemble_system () // point // q_point. The // component - // ``comp(i)'' of + // comp(i) of // the gradient, // which is the // derivative of @@ -813,7 +813,7 @@ void ElasticProblem::assemble_system () // modification: since the solution // function is vector-valued, so // need to be the boundary - // values. The ``ZeroFunction'' + // values. The ZeroFunction // constructor accepts a parameter // that tells it that it shall // represent a vector valued, @@ -821,11 +821,11 @@ void ElasticProblem::assemble_system () // many components. By default, // this parameter is equal to one, // in which case the - // ``ZeroFunction'' object would + // ZeroFunction object would // represent a scalar // function. Since the solution - // vector has ``dim'' components, - // we need to pass ``dim'' as + // vector has dim components, + // we need to pass dim as // number of components to the zero // function as well. std::map boundary_values; @@ -919,7 +919,7 @@ void ElasticProblem::refine_grid () // been shown in previous examples // already. The only difference is // that the solution function is - // vector valued. The ``DataOut'' + // vector valued. The DataOut // class takes care of this // automatically, but we have to give // each component of the solution @@ -948,7 +948,7 @@ void ElasticProblem::output_results (const unsigned int cycle) const // number of components is the same // as the number of dimensions we // are working in, the following - // ``switch'' statement is used. + // switch statement is used. // // We note that some graphics // programs have restriction as to @@ -971,11 +971,11 @@ void ElasticProblem::output_results (const unsigned int cycle) const // the program die if we run upon a // case which we did not // consider. Remember that the - // ``Assert'' macro generates an + // Assert macro generates an // exception if the condition in // the first parameter is not // satisfied. 
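                                   // As a sketch of what is meant here
                                   // (the component names below are
                                   // merely one possible choice), such
                                   // a switch statement could look
                                   // like this:
  std::vector<std::string> solution_names;
  switch (dim)
    {
      case 1:
            solution_names.push_back ("displacement");
            break;
      case 2:
            solution_names.push_back ("x_displacement");
            solution_names.push_back ("y_displacement");
            break;
      case 3:
            solution_names.push_back ("x_displacement");
            solution_names.push_back ("y_displacement");
            solution_names.push_back ("z_displacement");
            break;
      default:
            Assert (false, ExcInternalError());
    }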
Of course, the - // condition ``false'' can never be + // condition false can never be // satisfied, so the program will // always abort whenever it gets to // the default statement: @@ -1025,7 +1025,7 @@ void ElasticProblem::output_results (const unsigned int cycle) const // @sect4{ElasticProblem::run} - // The ``run'' function does the same + // The run function does the same // things as in step-6, for // example. This time, we use the // square [-1,1]^d as domain, and we @@ -1033,7 +1033,7 @@ void ElasticProblem::output_results (const unsigned int cycle) const // starting the first iteration. // // The reason is the following: we - // use the ``Gauss'' quadrature + // use the Gauss quadrature // formula with two points in each // direction for integration of the // right hand side; that means that @@ -1063,11 +1063,11 @@ void ElasticProblem::output_results (const unsigned int cycle) const // The unfortunate thing is that if // the discrete solution is constant, // then the error indicators computed - // by the ``KellyErrorEstimator'' + // by the KellyErrorEstimator // class are zero for each cell as // well, and the call to - // ``refine_and_coarsen_fixed_number'' - // on the ``triangulation'' object + // refine_and_coarsen_fixed_number + // on the triangulation object // will not flag any cells for // refinement (why should it if the // indicated error is zero for each @@ -1087,10 +1087,10 @@ void ElasticProblem::output_results (const unsigned int cycle) const // needs to be able to see the right // hand side. Thus, we refine twice // globally. (Note that the - // ``refine_global'' function is not - // part of the ``GridRefinement'' + // refine_global function is not + // part of the GridRefinement // class in which - // ``refine_and_coarsen_fixed_number'' + // refine_and_coarsen_fixed_number // is declared, for example. The // reason is first that it is not an // algorithm that computed refinement @@ -1098,7 +1098,7 @@ void ElasticProblem::output_results (const unsigned int cycle) const // importantly that it actually // performs the refinement, in // contrast to the functions in - // ``GridRefinement'' that only flag + // GridRefinement that only flag // cells without actually refining // the grid.) template @@ -1132,7 +1132,7 @@ void ElasticProblem::run () } } - // @sect3{The ``main'' function} + // @sect3{The main function} // The main function is again exactly // like in step-6 (apart from the diff --git a/deal.II/examples/step-9/step-9.cc b/deal.II/examples/step-9/step-9.cc index 3c4d0fe1eb..51e8b177e7 100644 --- a/deal.II/examples/step-9/step-9.cc +++ b/deal.II/examples/step-9/step-9.cc @@ -47,11 +47,11 @@ // functions are declared which we // need to start new threads and to // wait for threads to return - // (i.e. the ``Thread'' class - // and the ``spawn'' functions). The + // (i.e. the Thread class + // and the spawn functions). 
The // second file has a class - // ``MultithreadInfo'' (and a global - // object ``multithread_info'' of + // MultithreadInfo (and a global + // object multithread_info of // that type) which can be used to // query the number of processors in // your system, which is often useful @@ -61,8 +61,8 @@ #include // The next new include file declares - // a base class ``TensorFunction'' - // not unlike the ``Function'' class, + // a base class TensorFunction + // not unlike the Function class, // but with the difference that the // return value is tensor-valued // rather than scalar of @@ -159,7 +159,7 @@ class AdvectionProblem // local contributions of a cell // to the global matrix at the // same time. This is done using - // a ``Mutex'', which is an + // a Mutex, which is an // object that can be owned by // only one thread at a time. If // a thread wants to write to the @@ -172,12 +172,12 @@ class AdvectionProblem // was not compiled to support // multithreading (which you have // to specify at the time you - // call the ``./configure'' + // call the ./configure // script in the top-level // directory), then a dummy the // actual data type of the // typedef - // ``Threads::ThreadMutex'' is a + // Threads::ThreadMutex is a // class that provides all the // functions needed for a mutex, // but does nothing when they are @@ -200,14 +200,14 @@ class AdvectionProblem // vector field with as many compents // as there are space dimensions. One // could now use a class derived from - // the ``Function'' base class, as we + // the Function base class, as we // have done for boundary values and // coefficients in previous examples, // but there is another possibility // in the library, namely a base // class that describes tensor valued // functions. In contrast to the - // usual ``Function'' objects, we + // usual Function objects, we // provide the compiler with // knowledge on the size of the // objects of the return type. This @@ -216,7 +216,7 @@ class AdvectionProblem // simple for usual vector-valued // functions where memory has to be // allocated on the heap (thus, the - // ``Function::vector_value'' + // Function::vector_value // function has to be given the // address of an object into which // the result is to be written, in @@ -230,9 +230,9 @@ class AdvectionProblem // applications, to be honest... // // The interface of the - // ``TensorFunction'' class is + // TensorFunction class is // relatively close to that of the - // ``Function'' class, so there is + // Function class, so there is // probably no need to comment in // detail the following declaration: template @@ -262,8 +262,8 @@ class AdvectionField : public TensorFunction<1,dim> // reasonable. The format is // basically as follows: use the // name of one of the macros - // ``DeclExceptionN'', where - // ``N'' denotes the number of + // DeclExceptionN, where + // N denotes the number of // additional parameters which // the exception object shall // take. In this case, as we want @@ -271,27 +271,27 @@ class AdvectionField : public TensorFunction<1,dim> // the sizes of two vectors // differ, we need two arguments, // so we use - // ``DeclException2''. The first + // DeclException2. The first // parameter then describes the // name of the exception, while // the following declare the data // types of the parameters. 
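                                     // For illustration, such a
                                     // declaration might read as
                                     // follows (the message text is
                                     // made up):
    DeclException2 (ExcDimensionMismatch,
                    unsigned int, unsigned int,
                    << "Dimension " << arg1 << " not equal to " << arg2);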
The // last argument is a sequence of // output directives that will be - // piped into the ``std::cerr'' + // piped into the std::cerr // object, thus the strange - // format with the leading ``@<@<'' + // format with the leading @<@< // operator and the like. Note // that we can access the // parameters which are passed to // the exception upon // construction (i.e. within the - // ``Assert'' call) by using the - // names ``arg1'' through - // ``argN'', where ``N'' is the + // Assert call) by using the + // names arg1 through + // argN, where N is the // number of arguments as defined // by the use of the respective - // macro ``DeclExceptionN''. + // macro DeclExceptionN. // // To learn how the preprocessor // expands this macro into actual @@ -359,7 +359,7 @@ AdvectionField::value_list (const std::vector > &points, // Besides the advection field, we // need two functions describing the - // source terms (``right hand side'') + // source terms (right hand side) // and the boundary values. First for // the right hand side, which follows // the same pattern as in previous @@ -368,7 +368,7 @@ AdvectionField::value_list (const std::vector > &points, // constant function in the vicinity // of a source point, which we denote // by the constant static variable - // ``center_point''. We set the + // center_point. We set the // values of this center using the // same template tricks as we have // shown in the step-7 example @@ -376,7 +376,7 @@ AdvectionField::value_list (const std::vector > &points, // has been shown previously, // including the way to avoid virtual // function calls in the - // ``value_list'' function. + // value_list function. template class RightHandSide : public Function { @@ -408,13 +408,13 @@ const Point<3> RightHandSide<3>::center_point = Point<3> (-0.75, -0.75, -0.75); // The only new thing here is that we // check for the value of the - // ``component'' parameter. As this + // component parameter. As this // is a scalar function, it is // obvious that it only makes sense // if the desired component has the // index zero, so we assert that this // is indeed the - // case. ``ExcIndexRange'' is a + // case. ExcIndexRange is a // global predefined exception // (probably the one most often used, // we therefore made it global @@ -457,7 +457,7 @@ RightHandSide::value_list (const std::vector > &points, // Finally for the boundary values, // which is just another class - // derived from the ``Function'' base + // derived from the Function base // class: template class BoundaryValues : public Function @@ -513,7 +513,7 @@ BoundaryValues::value_list (const std::vector > &points, // power of the mesh size, as // described in the introduction. // This class is a simple version of - // the ``DerivativeApproximation'' + // the DerivativeApproximation // class in the library, that uses // similar techniques to obtain // finite difference approximations @@ -523,7 +523,7 @@ BoundaryValues::value_list (const std::vector > &points, // // The // class has one public static - // function ``estimate'' that is + // function estimate that is // called to compute a vector of // error indicators, and one private // function that does the actual work @@ -568,7 +568,7 @@ BoundaryValues::value_list (const std::vector > &points, // functions or variables, so this is // not really a class, but rather // serves the purpose of a - // ``namespace'' in C++. The reason + // namespace in C++. 
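                                 // In condensed form, and with
                                 // argument lists that are only
                                 // indicative, such a
                                 // class-used-as-namespace looks
                                 // roughly like this:
class GradientEstimation
{
  public:
    template <int dim>
    static void estimate (const DoFHandler<dim> &dof,
                          const Vector<double>  &solution,
                          Vector<float>         &error_per_cell);

  private:
    template <int dim>
    static void estimate_interval (/* ... */);
};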
The reason // that we chose a class over a // namespace is that this way we can // declare functions that are @@ -601,7 +601,7 @@ BoundaryValues::value_list (const std::vector > &points, // argument. // // Finally note that the - // ``IndexInterval'' typedef is + // IndexInterval typedef is // introduced as a convenient // abbreviation for an otherwise // lengthy type name. @@ -637,7 +637,7 @@ class GradientEstimation // Now for the implementation of the // main class. Constructor, // destructor and the function - // ``setup_system'' follow the same + // setup_system follow the same // pattern that was used previously, // so we need not comment on these // three function: @@ -707,7 +707,7 @@ void AdvectionProblem::assemble_system () // we were to use this information, // we could use the value of the // global variable - // ``multithread_info.n_cpus'', + // multithread_info.n_cpus, // which is determined at start-up // time of your program // automatically. (Note that if the @@ -730,10 +730,10 @@ void AdvectionProblem::assemble_system () // systems assign roughly the same // CPU ressources to all threads // presently running. For this - // reason, the ``MultithreadInfo'' + // reason, the MultithreadInfo // class contains a read-write - // variable ``n_default_threads'' - // which is set to ``n_cpus'' by + // variable n_default_threads + // which is set to n_cpus by // default, but can be set to // another value. This variable is // also queried by functions inside @@ -744,29 +744,29 @@ void AdvectionProblem::assemble_system () // capable of keeping track of the // threads we created, and allows // us to wait until they all have - // finished (to ``join'' them in + // finished (to join them in // the language of threads). The - // ``Threads::ThreadGroup'' class + // Threads::ThreadGroup class // does this, which is basically // just a container for objects of - // type ``Threads::Thread'' that + // type Threads::Thread that // represent a single thread; - // ``Threads::Thread'' is what the - // ``spawn'' function below will + // Threads::Thread is what the + // spawn function below will // return when we start a new // thread. // - // Note that both ``ThreadGroup'' - // and ``Thread'' have a template + // Note that both ThreadGroup + // and Thread have a template // argument that represents the // return type of the function // being called on a separate // thread. Since most of the // functions that we will call on // different threads have return - // type ``void'', the template + // type void, the template // argument has a default value - // ``void'', so that in that case + // void, so that in that case // it can be omitted. (However, you // still need to write the angle // brackets, even if they are @@ -774,16 +774,16 @@ void AdvectionProblem::assemble_system () // // If you did not configure for // multi-threading, then the - // ``spawn'' function that is + // spawn function that is // supposed to start a new thread // in parallel only executes the // function which should be run in // parallel, waits for it to return // (i.e. the function is executed // sequentially), and puts the - // return value into the ``Thread'' + // return value into the Thread // object. Likewise, the function - // ``join'' that is supposed to + // join that is supposed to // wait for all spawned threads to // return, returns immediately, as // there can't be threads running. 
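                                   // Schematically, and assuming a
                                   // variable n_threads as well as a
                                   // vector thread_ranges of iterator
                                   // pairs (its construction using
                                   // Threads::split_range is discussed
                                   // below), the pattern is roughly
                                   // this:
  Threads::ThreadGroup<> threads;
  for (unsigned int thread=0; thread<n_threads; ++thread)
    threads += Threads::spawn (*this, &AdvectionProblem<dim>::assemble_system_interval)
               (thread_ranges[thread].first,
                thread_ranges[thread].second);
  threads.join_all ();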
@@ -800,13 +800,13 @@ void AdvectionProblem::assemble_system () // splitting a range of cells is a // rather common task when using // multi-threading, there is a - // function in the ``Threads'' + // function in the Threads // namespace that does exactly // this. In fact, it does this not // only for a range of cell // iterators, but for iterators in // general, so you could use it for - // ``std::vector@::iterator'' or + // std::vector@::iterator or // usual pointers as well. // // The function returns a vector of @@ -828,10 +828,10 @@ void AdvectionProblem::assemble_system () // present case, however, the data // types of the two first // parameters differ - // (``begin_active'' returns an - // ``active_iterator'', while - // ``end'' returns a - // ``raw_iterator''), and in this + // (begin_active returns an + // active_iterator, while + // end returns a + // raw_iterator), and in this // case the C++ language requires // us to specify the template type // explicitely. For brevity, we @@ -863,21 +863,21 @@ void AdvectionProblem::assemble_system () // which is available online as // well. Suffice it to say that we // spawn a new thread that calls - // the ``assemble_system_interval'' + // the assemble_system_interval // function on the present object - // (the ``this'' pointer), with the + // (the this pointer), with the // arguments following in the // second set of parentheses passed - // as parameters. The ``spawn'' + // as parameters. The spawn // function return an object of - // type ``Threads::Thread'', which - // we put into the ``threads'' + // type Threads::Thread, which + // we put into the threads // container. If a thread exits, // the return value of the function // being called is put into a place // such that the thread objects can // access it using their - // ``return_value'' function; since + // return_value function; since // the function we call doesn't // have a return value, this does // not apply here. Note that you @@ -895,10 +895,10 @@ void AdvectionProblem::assemble_system () // right hand side are // assemblesd. Waiting for all the // threads to finish can be done - // using the ``joint_all'' function - // in the ``ThreadGroup'' + // using the joint_all function + // in the ThreadGroup // container, which just calls - // ``join'' on each of the thread + // join on each of the thread // objects it stores. // // Again, if the library was not @@ -933,7 +933,7 @@ void AdvectionProblem::assemble_system () // Now, this is the function that // does the actual work. It is not // very different from the - // ``assemble_system'' functions of + // assemble_system functions of // previous example programs, so we // will again only comment on the // differences. The mathematical @@ -972,8 +972,8 @@ assemble_system_interval (const typename DoFHandler::active_cell_iterator & QGauss face_quadrature_formula(2); // Finally, we need objects of type - // ``FEValues'' and - // ``FEFaceValues''. For the cell + // FEValues and + // FEFaceValues. For the cell // terms we need the values and // gradients of the shape // functions, the quadrature points @@ -1034,7 +1034,7 @@ assemble_system_interval (const typename DoFHandler::active_cell_iterator & cell_rhs = 0; // ... then initialize - // the ``FEValues'' object... + // the FEValues object... fe_values.reinit (cell); // ... obtain the values of @@ -1090,7 +1090,7 @@ assemble_system_interval (const typename DoFHandler::active_cell_iterator & // as well. 
Of course, the // bilinear form only contains // contributions from the - // ``inflow'' part of the + // inflow part of the // boundary, but to find out // whether a certain part of a // face of the present cell is @@ -1239,9 +1239,9 @@ assemble_system_interval (const typename DoFHandler::active_cell_iterator & // thread operates on these // objects at a time, we have // to lock it. This is done - // using a ``Mutex'', which is - // short for ``mutually - // exclusive'': a thread that + // using a Mutex, which is + // short for mutually + // exclusive: a thread that // wants to write to the global // objects acquires this lock, // but has to wait if it is @@ -1282,7 +1282,7 @@ assemble_system_interval (const typename DoFHandler::active_cell_iterator & // can't be parallel threads // and there is no need to // synchronize. Thus, the - // ``lock'' and ``release'' + // lock and release // functions are no-ops, // i.e. they return without // doing anything. @@ -1301,9 +1301,9 @@ assemble_system_interval (const typename DoFHandler::active_cell_iterator & // all threads execute member // functions of the same // object, they have the same - // ``this'' pointer and + // this pointer and // therefore also operate on - // the same ``lock''. + // the same lock. }; } @@ -1342,7 +1342,7 @@ void AdvectionProblem::solve () // described in the introduction. The // respective computations are made // in the class - // ``GradientEstimation''. The only + // GradientEstimation. The only // difference to previous examples is // that we refine a little more // aggressively (0.5 instead of 0.3 @@ -1432,7 +1432,7 @@ void AdvectionProblem::run () // @sect3{GradientEstimation class implementation} // Now for the implementation of the - // ``GradientEstimation'' class. The + // GradientEstimation class. The // first function does not much // except for delegating work to the // other function: @@ -1465,7 +1465,7 @@ GradientEstimation::estimate (const DoFHandler &dof_handler, // cells into chunks of equal // size. Just as we have used the // function - // ``Threads::split_range'' when + // Threads::split_range when // assembling above, there is a // function that computes intervals // of roughly equal size from a @@ -1477,15 +1477,15 @@ GradientEstimation::estimate (const DoFHandler &dof_handler, n_threads); // In the same way as before, we - // use a ``Threads::ThreadGroup'' + // use a Threads::ThreadGroup // object to collect the descriptor // objects of different // threads. Note that as the // function called is not a member // function, but rather a static // function, we need not (and can - // not) pass a ``this'' pointer to - // the ``spawn'' function in this + // not) pass a this pointer to + // the spawn function in this // case. // // Taking pointers to templated @@ -1498,11 +1498,11 @@ GradientEstimation::estimate (const DoFHandler &dof_handler, // quite frequently that we can't // directly insert taking the // address of a function in the - // call to ``encapsulate'' for one + // call to encapsulate for one // or the other compiler, but have // to take a temporary variable for // that purpose. 
Here, in this - // case, Compaq's ``cxx'' compiler + // case, Compaq's cxx compiler // choked on the code so we use // this workaround with the // function pointer: @@ -1522,13 +1522,13 @@ GradientEstimation::estimate (const DoFHandler &dof_handler, threads.join_all (); // Note that if the value of the // variable - // ``multithread_info.n_default_threads'' + // multithread_info.n_default_threads // was one, or if the library was // not configured to use threads, // then the sequence of commands // above reduced to a complicated // way to simply call the - // ``estimate_interval'' function + // estimate_interval function // with the whole range of cells to // work on. However, using the way // above, we are able to write the @@ -1580,24 +1580,24 @@ GradientEstimation::estimate_interval (const DoFHandler &dof_handler, // of the cells. As usual with // values of finite element // functions, we use an object of - // type ``FEValues'', and we use + // type FEValues, and we use // (or mis-use in this case) the // midpoint quadrature rule to get // at the values at the // center. Note that the - // ``FEValues'' object only needs + // FEValues object only needs // to compute the values at the // centers, and the location of the // quadrature points in real space // in order to get at the vectors - // ``y''. + // y. QMidpoint midpoint_rule; FEValues fe_midpoint_value (dof_handler.get_fe(), midpoint_rule, update_values | update_q_points); // Then we need space foe the - // tensor ``Y'', which is the sum + // tensor Y, which is the sum // of outer products of the // y-vectors. Tensor<2,dim> Y; @@ -1612,7 +1612,7 @@ GradientEstimation::estimate_interval (const DoFHandler &dof_handler, // cell and advancing them using // the given start and end // index. Note that we can use the - // ``advance'' function of the + // advance function of the // standard C++ library, but that // we have to cast the distance by // which the iterator is to be @@ -1658,8 +1658,8 @@ GradientEstimation::estimate_interval (const DoFHandler &dof_handler, for (; cell!=endc; ++cell, ++error_on_this_cell) { // First initialize the - // ``FEValues'' object, as well - // as the ``Y'' tensor: + // FEValues object, as well + // as the Y tensor: fe_midpoint_value.reinit (cell); Y.clear (); @@ -1806,8 +1806,8 @@ GradientEstimation::estimate_interval (const DoFHandler &dof_handler, // to // involuntarily // exchange - // ``n==1'' for - // ``n==0'' or + // n==1 for + // n==0 or // the like) and // in the library // (the @@ -1908,7 +1908,7 @@ GradientEstimation::estimate_interval (const DoFHandler &dof_handler, // Now loop over all active neighbors // and collect the data we // need. Allocate a vector just like - // ``this_midpoint_value'' which we + // this_midpoint_value which we // will use to store the value of the // solution in the midpoint of the // neighbor cell. We allocate it here @@ -1936,7 +1936,7 @@ GradientEstimation::estimate_interval (const DoFHandler &dof_handler, // thereon. Note that for // this information we // have to reinitialize the - // ``FEValues'' object for + // FEValues object for // the neighbor cell. fe_midpoint_value.reinit (neighbor); const Point neighbor_center = fe_midpoint_value.quadrature_point(0); @@ -1944,12 +1944,12 @@ GradientEstimation::estimate_interval (const DoFHandler &dof_handler, fe_midpoint_value.get_function_values (solution, neighbor_midpoint_value); - // Compute the vector ``y'' + // Compute the vector y // connecting the centers // of the two cells. 
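                                           // In code, and assuming the
                                           // center of the present cell
                                           // has already been stored in
                                           // a variable this_center,
                                           // this might read:
          Point<dim> y = neighbor_center - this_center;
          const double distance = std::sqrt (y.square());
          y /= distance;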
Note // that as opposed to the // introduction, we denote - // by ``y'' the normalized + // by y the normalized // difference vector, as // this is the quantity // used everywhere in the @@ -1979,7 +1979,7 @@ GradientEstimation::estimate_interval (const DoFHandler &dof_handler, // an approximation of the // gradient for the present // cell, then we need to have - // passed over vectors ``y'' + // passed over vectors y // which span the whole space, // otherwise we would not have // all components of the @@ -2003,17 +2003,17 @@ GradientEstimation::estimate_interval (const DoFHandler &dof_handler, // reasonable to try to catch // this error also in optimized // mode. For this case, there - // is the ``AssertThrow'' + // is the AssertThrow // macro: it checks the // condition like the - // ``Assert'' macro, but not + // Assert macro, but not // only in debug mode; it then // outputs an error message, // but instead of terminating // the program as in the case - // of the ``Assert'' macro, the + // of the Assert macro, the // exception is thrown using - // the ``throw'' command of + // the throw command of // C++. This way, one has the // possibility to catch this // error and take reasonable @@ -2050,7 +2050,7 @@ GradientEstimation::estimate_interval (const DoFHandler &dof_handler, // @sect3{Main function} - // The ``main'' function is exactly + // The main function is exactly // like in previous examples, with // the only difference in the name of // the main class that actually does