From f7dcde9c2c862d64bd2467759589c9130be7baa1 Mon Sep 17 00:00:00 2001 From: heltai Date: Wed, 4 Sep 2013 12:52:20 +0000 Subject: [PATCH] Merged from trunk git-svn-id: https://svn.dealii.org/branches/branch_manifold_id@30589 0785d39b-7218-0410-832d-ea1e28bc413d --- deal.II/cmake/config/CMakeLists.txt | 2 +- deal.II/cmake/config/Make.global_options.in | 9 + deal.II/cmake/modules/FindP4EST.cmake | 11 +- deal.II/doc/authors.html | 6 +- deal.II/doc/development/cmake.html | 27 ++ deal.II/doc/development/testsuite.html | 6 +- deal.II/doc/external-libs/p4est.html | 2 +- deal.II/doc/news/changes.h | 18 +- deal.II/examples/step-10/step-10.cc | 2 +- deal.II/examples/step-11/doc/intro.dox | 4 +- deal.II/examples/step-11/doc/results.dox | 2 +- deal.II/examples/step-11/step-11.cc | 2 +- deal.II/examples/step-13/doc/intro.dox | 6 +- deal.II/examples/step-13/step-13.cc | 6 +- deal.II/examples/step-14/doc/results.dox | 2 +- deal.II/examples/step-14/step-14.cc | 14 +- deal.II/examples/step-15/doc/intro.dox | 6 +- deal.II/examples/step-15/doc/results.dox | 2 +- deal.II/examples/step-15/step-15.cc | 4 +- deal.II/examples/step-16/step-16.cc | 10 +- deal.II/examples/step-17/step-17.cc | 4 +- deal.II/examples/step-18/doc/intro.dox | 2 +- deal.II/examples/step-18/step-18.cc | 20 +- deal.II/examples/step-19/doc/results.dox | 4 +- deal.II/examples/step-19/step-19.cc | 6 +- deal.II/examples/step-2/doc/intro.dox | 4 +- deal.II/examples/step-2/doc/results.dox | 2 +- deal.II/examples/step-2/step-2.cc | 2 +- deal.II/examples/step-20/doc/results.dox | 8 +- deal.II/examples/step-20/step-20.cc | 8 +- deal.II/examples/step-21/doc/intro.dox | 8 +- deal.II/examples/step-22/doc/intro.dox | 8 +- deal.II/examples/step-22/doc/results.dox | 14 +- deal.II/examples/step-22/step-22.cc | 12 +- deal.II/examples/step-23/doc/intro.dox | 4 +- deal.II/examples/step-23/step-23.cc | 2 +- deal.II/examples/step-24/doc/intro.dox | 4 +- deal.II/examples/step-24/doc/results.dox | 4 +- deal.II/examples/step-25/doc/intro.dox | 6 +- deal.II/examples/step-25/step-25.cc | 6 +- deal.II/examples/step-26/step-26.cc | 2 +- deal.II/examples/step-28/doc/results.dox | 4 +- deal.II/examples/step-28/step-28.cc | 6 +- deal.II/examples/step-29/doc/intro.dox | 2 +- deal.II/examples/step-29/doc/results.dox | 2 +- deal.II/examples/step-29/step-29.cc | 4 +- deal.II/examples/step-3/doc/intro.dox | 6 +- deal.II/examples/step-3/doc/results.dox | 2 +- deal.II/examples/step-30/doc/intro.dox | 4 +- deal.II/examples/step-30/doc/results.dox | 2 +- deal.II/examples/step-30/step-30.cc | 26 +- deal.II/examples/step-31/doc/intro.dox | 10 +- deal.II/examples/step-31/doc/results.dox | 2 +- deal.II/examples/step-31/step-31.cc | 28 +- deal.II/examples/step-32/doc/intro.dox | 18 +- deal.II/examples/step-32/doc/results.dox | 2 +- deal.II/examples/step-32/step-32.cc | 8 +- deal.II/examples/step-33/doc/intro.dox | 4 +- deal.II/examples/step-33/step-33.cc | 18 +- deal.II/examples/step-34/doc/intro.dox | 4 +- deal.II/examples/step-34/doc/results.dox | 2 +- deal.II/examples/step-34/step-34.cc | 8 +- deal.II/examples/step-35/step-35.cc | 2 +- deal.II/examples/step-36/doc/intro.dox | 4 +- deal.II/examples/step-36/doc/results.dox | 2 +- deal.II/examples/step-37/doc/intro.dox | 6 +- deal.II/examples/step-37/step-37.cc | 6 +- deal.II/examples/step-38/doc/intro.dox | 2 +- deal.II/examples/step-38/doc/results.dox | 4 +- deal.II/examples/step-38/step-38.cc | 2 +- deal.II/examples/step-39/doc/results.dox | 2 +- deal.II/examples/step-39/step-39.cc | 12 +- deal.II/examples/step-4/doc/intro.dox | 2 
+- deal.II/examples/step-4/doc/results.dox | 2 +- deal.II/examples/step-4/step-4.cc | 4 +- deal.II/examples/step-40/doc/results.dox | 2 +- deal.II/examples/step-40/step-40.cc | 8 +- deal.II/examples/step-41/doc/intro.dox | 8 +- deal.II/examples/step-41/step-41.cc | 4 +- .../examples/step-42/doc/intro-step-42.tex | 36 +- deal.II/examples/step-42/doc/intro.dox | 32 +- deal.II/examples/step-42/doc/results.dox | 8 +- deal.II/examples/step-42/step-42.cc | 4 +- deal.II/examples/step-43/doc/intro.dox | 2 +- deal.II/examples/step-43/step-43.cc | 6 +- deal.II/examples/step-44/doc/intro.dox | 4 +- deal.II/examples/step-44/doc/results.dox | 2 +- deal.II/examples/step-44/step-44.cc | 16 +- deal.II/examples/step-46/doc/intro.dox | 2 +- deal.II/examples/step-46/step-46.cc | 4 +- deal.II/examples/step-47/step-47.cc | 4 +- deal.II/examples/step-48/doc/intro.dox | 2 +- deal.II/examples/step-48/doc/results.dox | 4 +- deal.II/examples/step-48/step-48.cc | 2 +- deal.II/examples/step-49/doc/intro.dox | 2 +- deal.II/examples/step-49/doc/results.dox | 2 +- deal.II/examples/step-50/step-50.cc | 16 +- deal.II/examples/step-51/step-51.cc | 4 +- deal.II/examples/step-6/step-6.cc | 6 +- deal.II/examples/step-8/doc/intro.dox | 2 +- deal.II/examples/step-8/step-8.cc | 2 +- deal.II/examples/step-9/step-9.cc | 18 +- deal.II/include/deal.II/base/config.h.in | 25 ++ deal.II/include/deal.II/distributed/tria.h | 4 +- .../deal.II/dofs/dof_accessor.templates.h | 129 +++--- deal.II/include/deal.II/fe/fe_values.h | 17 + deal.II/include/deal.II/grid/grid_tools.h | 17 +- deal.II/include/deal.II/hp/dof_levels.h | 347 ++++++++++---- deal.II/include/deal.II/hp/dof_objects.h | 424 +++++++----------- .../include/deal.II/lac/block_vector_base.h | 50 +++ deal.II/include/deal.II/lac/parallel_vector.h | 50 +++ .../include/deal.II/lac/petsc_vector_base.h | 135 ++++++ .../deal.II/lac/trilinos_sparse_matrix.h | 2 +- .../deal.II/lac/trilinos_vector_base.h | 46 ++ deal.II/include/deal.II/lac/vector.h | 50 +++ deal.II/source/distributed/tria.cc | 2 +- deal.II/source/dofs/dof_renumbering.cc | 6 +- deal.II/source/hp/dof_handler.cc | 55 +-- deal.II/source/hp/dof_levels.cc | 5 +- deal.II/source/numerics/data_out.inst.in | 9 - 120 files changed, 1268 insertions(+), 790 deletions(-) diff --git a/deal.II/cmake/config/CMakeLists.txt b/deal.II/cmake/config/CMakeLists.txt index 8cea9494f3..9bcda3e751 100644 --- a/deal.II/cmake/config/CMakeLists.txt +++ b/deal.II/cmake/config/CMakeLists.txt @@ -251,7 +251,7 @@ IF(DEAL_II_COMPONENT_COMPAT_FILES) # Boilerplate: The Make.global_options expects variables to be set to # yes, as is common for Makefiles. 
# - COND_SET_TO_YES(DEAL_II_WITH_TBB MAKEFILE_enablethreads) + COND_SET_TO_YES(DEAL_II_WITH_THREADS MAKEFILE_enablethreads) COND_SET_TO_YES(DEAL_II_WITH_FUNCTIONPARSER MAKEFILE_enableparser) COND_SET_TO_YES(BUILD_SHARED_LIBS MAKEFILE_enableshared) diff --git a/deal.II/cmake/config/Make.global_options.in b/deal.II/cmake/config/Make.global_options.in index a32109a464..cc00835e74 100644 --- a/deal.II/cmake/config/Make.global_options.in +++ b/deal.II/cmake/config/Make.global_options.in @@ -66,6 +66,15 @@ DEAL_II_USE_MPI = @MAKEFILE_MPI@ OBJEXT = o EXEEXT = +shared-lib-suffix = .so +static-lib-suffix = .a + +ifeq ($(enable-shared),yes) + lib-suffix = $(shared-lib-suffix) +else + lib-suffix = $(static-lib-suffix) +endif + # set paths to all the libraries we need: lib-deal2.o = @MAKEFILE_TARGETS_RELEASE@ diff --git a/deal.II/cmake/modules/FindP4EST.cmake b/deal.II/cmake/modules/FindP4EST.cmake index 32a6f08497..c7cab6ed1f 100644 --- a/deal.II/cmake/modules/FindP4EST.cmake +++ b/deal.II/cmake/modules/FindP4EST.cmake @@ -179,9 +179,18 @@ IF(P4EST_FOUND) STRING(REGEX REPLACE "^[0-9]+\\.[0-9]+\\.([0-9]+).*$" "\\1" P4EST_VERSION_SUBMINOR "${P4EST_VERSION}") + + # Now for the patch number such as in 0.3.4.1. If there + # is no patch number, then the REGEX REPLACE will fail, + # setting P4EST_VERSION_PATCH to P4EST_VERSION. If that + # is the case, then set the patch number to zero STRING(REGEX REPLACE - "^[0-9]+\\.[0-9]+\\.[0-9]+\\.([0-9]+).*$" "\\1" + "^[0-9]+\\.[0-9]+\\.[0-9]+\\.([0-9]+)?.*$" "\\1" P4EST_VERSION_PATCH "${P4EST_VERSION}") + IF(${P4EST_VERSION_PATCH} STREQUAL "${P4EST_VERSION}") + SET(P4EST_VERSION_PATCH "0") + ENDIF() + MARK_AS_ADVANCED(P4EST_DIR) ELSE() diff --git a/deal.II/doc/authors.html b/deal.II/doc/authors.html index 494df11f73..03aae2a2e1 100644 --- a/deal.II/doc/authors.html +++ b/deal.II/doc/authors.html @@ -191,7 +191,8 @@ 2.5 format.
  • Matthias Maier: - CMake build system. Periodic boundary conditions. Enhancements throughout the library. + CMake build system for the library and the testsuite. Periodic + boundary conditions. Enhancements throughout the library.
  • Cataldo Manigrasso: Work on the codimension-one meshes, DoFHandler, and finite @@ -280,7 +281,8 @@ Packaging and configuration issues.
  • Bruno Turcksin: - Extending deal.ii for 64-bit integer support, various other changes. + Extending deal.ii for 64-bit integer support. Converting the + testsuite to CMake. Various other changes.
  • Kainan Wang: Extending deal.ii for 64-bit integer support. diff --git a/deal.II/doc/development/cmake.html b/deal.II/doc/development/cmake.html index 2683cc1d7e..86c663bfb9 100644 --- a/deal.II/doc/development/cmake.html +++ b/deal.II/doc/development/cmake.html @@ -411,6 +411,33 @@ -NOTFOUND and may be set by hand.

    +

    Library conflicts

    +

Caveat: if you have a set of standard libraries in the default + location, say /usr/lib, and a set of + private versions of the same libraries in your own library directory + (for instance because you sometimes need different revisions), + you may receive an error message of the form:

    + +
    +CMake Warning at source/CMakeLists.txt:65 (ADD_LIBRARY):
    +  Cannot generate a safe runtime search path for target deal_II.g because
    +  files in some directories may conflict with libraries in implicit
    +  directories:
    +
    +    runtime library [libtbb.so.2] in /usr/lib may be hidden by files in:
    +      /my/private/lib
    +
    +  Some of these libraries may not be found correctly.
    +
    + +

    This is not a problem of CMake or deal.II, but rather a general + Linux problem. In order to fix this, you have two options:

    +
      +
    1. Choose all libraries either from your private directory or from + the standard one.
    2. +
    3. Install all your private library versions in different directories.
    4. +
    +

    Manual override

    Warning: Do not do this unless absolutely necessary!

    diff --git a/deal.II/doc/development/testsuite.html b/deal.II/doc/development/testsuite.html index e101dcf7ed..f99a72dbe7 100644 --- a/deal.II/doc/development/testsuite.html +++ b/deal.II/doc/development/testsuite.html @@ -99,8 +99,8 @@

    The regression tests

    - deal.II has a testsuite that, at the time this article is written (mid-2011), - has some 2300 small programs (growing by roughly one per day) that we run + deal.II has a testsuite that, at the time this article is written (mid-2013), + has some 2,900 small programs (growing by roughly one per day) that we run every time we make a change to make sure that no existing functionality is broken. The expected output is also stored in our subversion archive, and when you @@ -110,7 +110,7 @@ in both cases to make sure that future changes do not break what we have just checked in. In addition, some machines run the tests every night and send the results back home; this is then converted - into a webpage showing the status of our regression tests.

    diff --git a/deal.II/doc/external-libs/p4est.html b/deal.II/doc/external-libs/p4est.html index 67fea34c44..ab17ff2a35 100644 --- a/deal.II/doc/external-libs/p4est.html +++ b/deal.II/doc/external-libs/p4est.html @@ -50,7 +50,7 @@ line like
     
    -    cmake -DP4EST_DIR=/path/to/installation/FAST -DDEAL_II_WITH_P4EST=ON -DDEAL_II_WITH_MPI=ON <...>
    +    cmake -DP4EST_DIR=/path/to/installation -DDEAL_II_WITH_P4EST=ON -DDEAL_II_WITH_MPI=ON <...>
           
    if the p4est library isn't picked up automatically. Note the presence of /FAST at the end of the path necessary when using diff --git a/deal.II/doc/news/changes.h b/deal.II/doc/news/changes.h index e4ff71cee2..c918de3ea1 100644 --- a/deal.II/doc/news/changes.h +++ b/deal.II/doc/news/changes.h @@ -69,7 +69,23 @@ inconvenience this causes.
    1. - Fixed: The ParameterHandler class can now deal with including one parameter + New: All vector classes now have functions extract_subvector_to() + that allow extracting not just a single value but a whole set. +
      + (Fahad Alrasched, 2013/09/02) +
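For illustration, a minimal sketch (not part of this patch) of how the new extract_subvector_to() functions might be used; it assumes an overload taking a list of indices and a pre-sized output vector, as the entry above suggests — the exact overloads may differ:
@code
#include <deal.II/lac/vector.h>

#include <iostream>
#include <vector>

int main()
{
  dealii::Vector<double> v(8);
  for (unsigned int i = 0; i < v.size(); ++i)
    v(i) = 10.0 * i;

  // Indices of the entries we want to read, plus a destination buffer of
  // matching size.
  const std::vector<dealii::Vector<double>::size_type> indices = {1, 3, 6};
  std::vector<double> values(indices.size());

  // Copy the selected entries in one call instead of reading them one by one.
  v.extract_subvector_to(indices, values);

  for (const double x : values)
    std::cout << x << ' ';    // expected output: 10 30 60
}
@endcode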
    2. + +
3. + Fixed: common/Make.global_options now exports enable-threads + correctly. Furthermore, lib-suffix, shared-lib-suffix + and static-lib-suffix are now exported as well for better legacy + support. +
      + (Matthias Maier, 2013/08/30) +
    4. + +
    5. + New: The ParameterHandler class can now deal with including one parameter file from another.
      (Wolfgang Bangerth, 2013/08/25) diff --git a/deal.II/examples/step-10/step-10.cc b/deal.II/examples/step-10/step-10.cc index ba42848228..17838b3c76 100644 --- a/deal.II/examples/step-10/step-10.cc +++ b/deal.II/examples/step-10/step-10.cc @@ -134,7 +134,7 @@ namespace Step10 // will generate Gnuplot output, which consists of a set of lines // describing the mapped triangulation. By default, only one line // is drawn for each face of the triangulation, but since we want - // to explicitely see the effect of the mapping, we want to have + // to explicitly see the effect of the mapping, we want to have // the faces in more detail. This can be done by passing the // output object a structure which contains some flags. In the // present case, since Gnuplot can only draw straight lines, we diff --git a/deal.II/examples/step-11/doc/intro.dox b/deal.II/examples/step-11/doc/intro.dox index d4671fcffd..50f8b60653 100644 --- a/deal.II/examples/step-11/doc/intro.dox +++ b/deal.II/examples/step-11/doc/intro.dox @@ -31,7 +31,7 @@ For this, there are various possibilities: solutions of Laplace's equation are only in $H^1$, which does not allow for the definition of point values because it is not a subset of the continuous functions. Therefore, even though fixing one node is allowed for - discretitized functions, it is not for continuous functions, and one can + discretized functions, it is not for continuous functions, and one can often see this in a resulting error spike at this point in the numerical solution. @@ -57,7 +57,7 @@ various places where we use it; in almost all conceivable cases, you will only consider the objects describing mappings as a black box which you need not worry about, because their only uses seem to be to be passed to places deep inside the library where functions know how to handle them (i.e. in the -FEValues classes and their descendents). +FEValues classes and their descendants). The tricky point in this program is the use of the mean value constraint. Fortunately, there is a class in the library which knows how to diff --git a/deal.II/examples/step-11/doc/results.dox b/deal.II/examples/step-11/doc/results.dox index c8b0c67829..d76412f842 100644 --- a/deal.II/examples/step-11/doc/results.dox +++ b/deal.II/examples/step-11/doc/results.dox @@ -40,5 +40,5 @@ higher order mappings; it is therefore clearly advantageous in this case to use a higher order mapping, not because it improves the order of convergence but just to reduce the constant before the convergence order. On the other hand, using a cubic mapping only improves the -result further insignicantly, except for the case of very coarse +result further insignificantly, except for the case of very coarse grids. diff --git a/deal.II/examples/step-11/step-11.cc b/deal.II/examples/step-11/step-11.cc index 28775f4d2d..223219e3a7 100644 --- a/deal.II/examples/step-11/step-11.cc +++ b/deal.II/examples/step-11/step-11.cc @@ -140,7 +140,7 @@ namespace Step11 // the mean value of the degrees of freedom on the boundary shall be // zero. For this, we first want a list of those nodes which are actually // at the boundary. The DoFTools class has a function that - // returns an array of boolean values where true indicates + // returns an array of Boolean values where true indicates // that the node is at the boundary. The second argument denotes a mask // selecting which components of vector valued finite elements we want to // be considered. 
This sort of information is encoded using the diff --git a/deal.II/examples/step-13/doc/intro.dox b/deal.II/examples/step-13/doc/intro.dox index 7362602221..de67cb0faf 100644 --- a/deal.II/examples/step-13/doc/intro.dox +++ b/deal.II/examples/step-13/doc/intro.dox @@ -43,7 +43,7 @@ a different module if they were not cleanly separated. In previous examples, you have seen how the library itself is broken -up into several complexes each building atop the underying ones, but +up into several complexes each building atop the underlying ones, but relatively independent of the other ones:
      1. the triangulation class complex, with associated iterator classes; @@ -133,7 +133,7 @@ applications, there would of course be comments and class documentation, which would bring that to maybe 1200 lines. Yet, compared to the applications listed above, this is still small, as they are 20 to 25 times as large. For programs as large, a proper design right from -the start is thus indispensible. Otherwise, it will have to be +the start is thus indispensable. Otherwise, it will have to be redesigned at one point in its life, once it becomes too large to be manageable. @@ -149,7 +149,7 @@ dependent application, the major concern is when to store data to disk and when to reload it again; if this is not done in an organized fashion, then you end up with data released too early, loaded too late, or not released at all). Although the present example program -thus draws from sevelar years of experience, it is certainly not +thus draws from several years of experience, it is certainly not without flaws in its design, and in particular might not be suited for an application where the objective is different. It should serve as an inspiration for writing your own application in a modular way, to diff --git a/deal.II/examples/step-13/step-13.cc b/deal.II/examples/step-13/step-13.cc index 24ab2a6d5d..8e88ee8ad8 100644 --- a/deal.II/examples/step-13/step-13.cc +++ b/deal.II/examples/step-13/step-13.cc @@ -800,7 +800,7 @@ namespace Step13 // The second of this pair of functions takes a range of cell iterators, // and assembles the system matrix on this part of the domain. Since it's // actions have all been explained in previous programs, we do not comment - // on it any more, except for one pointe below. + // on it any more, except for one point below. template void Solver::assemble_matrix (LinearSystem &linear_system, @@ -861,7 +861,7 @@ namespace Step13 // will always be unlocked when we exit this part of the program, // whether the operation completed successfully or not, whether the // exit path was something we implemented willfully or whether the - // function was exited by an exception that we did not forsee. + // function was exited by an exception that we did not foresee. // // deal.II implements the scoped locking pattern in the // Treads::Mutex::ScopedLock class: it takes the mutex in the @@ -1120,7 +1120,7 @@ namespace Step13 // @sect4{Local refinement by the Kelly error indicator} // The second class implementing refinement strategies uses the Kelly - // refinemet indicator used in various example programs before. Since this + // refinement indicator used in various example programs before. Since this // indicator is already implemented in a class of its own inside the // deal.II library, there is not much t do here except cal the function // computing the indicator, then using it to select a number of cells for diff --git a/deal.II/examples/step-14/doc/results.dox b/deal.II/examples/step-14/doc/results.dox index b572963139..16af19d8e7 100644 --- a/deal.II/examples/step-14/doc/results.dox +++ b/deal.II/examples/step-14/doc/results.dox @@ -286,7 +286,7 @@ like this: -Note the assymetry of the grids compared with those we obtained for +Note the asymmetry of the grids compared with those we obtained for the point evaluation, which is due to the directionality of the x-derivative for which we tailored the refinement criterion. 
diff --git a/deal.II/examples/step-14/step-14.cc b/deal.II/examples/step-14/step-14.cc index d2d982b129..32d5710ce1 100644 --- a/deal.II/examples/step-14/step-14.cc +++ b/deal.II/examples/step-14/step-14.cc @@ -214,7 +214,7 @@ namespace Step14 // afterwards... double point_derivative = 0; - // ...then have some objects of which the meaning wil become clear + // ...then have some objects of which the meaning will become clear // below... QTrapez vertex_quadrature; FEValues fe_values (dof_handler.get_fe(), @@ -1465,7 +1465,7 @@ namespace Step14 // We will then implement two such classes, for the evaluation of a point // value and of the derivative of the solution at that point. For these // functionals we already have the corresponding evaluation objects, so they - // are comlementary. + // are complementary. namespace DualFunctional { // @sect4{The DualFunctionalBase class} @@ -2371,7 +2371,7 @@ namespace Step14 // Next we have the function that is called to estimate the error on a // subset of cells. The function may be called multiply if the library was - // configured to use multi-threading. Here it goes: + // configured to use multithreading. Here it goes: template void WeightedResidual:: @@ -2400,7 +2400,7 @@ namespace Step14 // Then calculate the start cell for this thread. We let the different // threads run on interleaved cells, i.e. for example if we have 4 - // threads, then the first thread treates cells 0, 4, 8, etc, while the + // threads, then the first thread treats cells 0, 4, 8, etc, while the // second threads works on cells 1, 5, 9, and so on. The reason is that // it takes vastly more time to work on cells with hanging nodes than on // regular cells, but such cells are not evenly distributed across the @@ -2582,7 +2582,7 @@ namespace Step14 // element solution at the quadrature points on the other side of the // face, i.e. from the neighboring cell. // - // For this, do a sanity check before: make sure that the neigbor + // For this, do a sanity check before: make sure that the neighbor // actually exists (yes, we should not have come here if the neighbor // did not exist, but in complicated software there are bugs, so better // check this), and if this is not the case throw an error. @@ -2595,7 +2595,7 @@ namespace Step14 // the name neighbor_neighbor: const unsigned int neighbor_neighbor = cell->neighbor_of_neighbor (face_no); - // Then define an abbreviation for the neigbor cell, initialize the + // Then define an abbreviation for the neighbor cell, initialize the // FEFaceValues object on that cell, and extract the // gradients on that cell: const active_cell_iterator neighbor = cell->neighbor(face_no); @@ -2673,7 +2673,7 @@ namespace Step14 // Then find out which neighbor the present cell is of the adjacent // cell. Note that we will operate on the children of this adjacent // cell, but that their orientation is the same as that of their mother, - // i.e. the neigbor direction is the same. + // i.e. the neighbor direction is the same. const unsigned int neighbor_neighbor = cell->neighbor_of_neighbor (face_no); diff --git a/deal.II/examples/step-15/doc/intro.dox b/deal.II/examples/step-15/doc/intro.dox index 344cd54f5f..2cebf0f9f1 100644 --- a/deal.II/examples/step-15/doc/intro.dox +++ b/deal.II/examples/step-15/doc/intro.dox @@ -84,7 +84,7 @@ boundary condition of this problem. 
Assuming that $u^{n}$ already has the right boundary values, the Newton update $\delta u^{n}$ should have zero boundary conditions, in order to have the right boundary condition after adding both. In the first Newton step, we are starting with the solution -$u^{0}\equiv 0$, the Newton update still has to deliever the right boundary +$u^{0}\equiv 0$, the Newton update still has to deliver the right boundary condition to the solution $u^{1}$. @@ -113,7 +113,7 @@ Reducing this space to a finite dimensional space with basis $\left\{ @f] Using the basis functions as test functions and defining $a_{n}:=\frac{1} -{\sqrt{1+|\nabla u^{n}|^{2}}}$, we can rewrite the weak formualtion: +{\sqrt{1+|\nabla u^{n}|^{2}}}$, we can rewrite the weak formulation: @f[ \sum_{j=0}^{N-1}\left[ \left( \nabla \varphi_{i} , a_{n} \nabla \varphi_{j} \right) - @@ -122,7 +122,7 @@ Using the basis functions as test functions and defining $a_{n}:=\frac{1} \nabla u^{n}\right) \qquad \forall i=0,\dots ,N-1 @f] -where the solution $\delta u^{n}$ is given by the coefficents $\delta U^{n}_{j}$. +where the solution $\delta u^{n}$ is given by the coefficients $\delta U^{n}_{j}$. This linear system of equations can be rewritten as: @f[ diff --git a/deal.II/examples/step-15/doc/results.dox b/deal.II/examples/step-15/doc/results.dox index 59c7ba414d..75e78a1104 100644 --- a/deal.II/examples/step-15/doc/results.dox +++ b/deal.II/examples/step-15/doc/results.dox @@ -72,7 +72,7 @@ solution. This yields the following set of images: It is clearly visible, that the solution minimizes the surface after each refinement. The solution converges to a picture one -would imagine a soapbubble to be that is located inside a wire loop +would imagine a soap bubble to be that is located inside a wire loop that is bent like the boundary. Also it is visible, how the boundary is smoothed out after each refinement. On the coarse mesh, diff --git a/deal.II/examples/step-15/step-15.cc b/deal.II/examples/step-15/step-15.cc index 024f831d84..6dcc5c4c24 100644 --- a/deal.II/examples/step-15/step-15.cc +++ b/deal.II/examples/step-15/step-15.cc @@ -59,14 +59,14 @@ #include #include -// We will use adaptive mesh refinement between Newton interations. To do so, +// We will use adaptive mesh refinement between Newton iterations. To do so, // we need to be able to work with a solution on the new mesh, although it was // computed on the old one. The SolutionTransfer class transfers the solution // from the old to the new mesh: #include -// We then open a namepsace for this program and import everything from the +// We then open a namespace for this program and import everything from the // dealii namespace into it, as in previous programs: namespace Step15 { diff --git a/deal.II/examples/step-16/step-16.cc b/deal.II/examples/step-16/step-16.cc index 86a8e4e6ba..33ddf4d578 100644 --- a/deal.II/examples/step-16/step-16.cc +++ b/deal.II/examples/step-16/step-16.cc @@ -62,7 +62,7 @@ #include #include -// These, now, are the include necessary for the multi-level methods. The +// These, now, are the include necessary for the multilevel methods. The // first two declare classes that allow us to enumerate degrees of freedom not // only on the finest mesh level, but also on intermediate levels (that's what // the MGDoFHandler class does) as well as allow to access this information @@ -303,7 +303,7 @@ namespace Step16 // Now for the things that concern the multigrid data structures. 
First, - // we resize the multi-level objects to hold matrices and sparsity + // we resize the multilevel objects to hold matrices and sparsity // patterns for every level. The coarse level is zero (this is mandatory // right now but may change in a future revision). Note that these // functions take a complete, inclusive range here (not a starting index @@ -424,7 +424,7 @@ namespace Step16 // integration core is the same as above, but the loop below will go over // all existing cells instead of just the active ones, and the results must // be entered into the correct matrix. Note also that since we only do - // multi-level preconditioning, no right-hand side needs to be assembled + // multilevel preconditioning, no right-hand side needs to be assembled // here. // // Before we go there, however, we have to take care of a significant amount @@ -460,7 +460,7 @@ namespace Step16 // that are located on interfaces between adaptively refined levels, and // those that lie on the interface but also on the exterior boundary of // the domain. As in many other parts of the library, we do this by using - // boolean masks, i.e. vectors of booleans each element of which indicates + // Boolean masks, i.e. vectors of Booleans each element of which indicates // whether the corresponding degree of freedom index is an interface DoF // or not. The MGConstraints already computed the information // for us when we called initialize in setup_system(). @@ -615,7 +615,7 @@ namespace Step16 MGTransferPrebuilt > mg_transfer(hanging_node_constraints, mg_constrained_dofs); // Now the prolongation matrix has to be built. This matrix needs to take // the boundary values on each level into account and needs to know about - // the indices at the refinement egdes. The MGConstraints + // the indices at the refinement edges. The MGConstraints // knows about that so pass it as an argument. mg_transfer.build_matrices(mg_dof_handler); diff --git a/deal.II/examples/step-17/step-17.cc b/deal.II/examples/step-17/step-17.cc index e6b5c907d9..b5ddcbb130 100644 --- a/deal.II/examples/step-17/step-17.cc +++ b/deal.II/examples/step-17/step-17.cc @@ -750,7 +750,7 @@ namespace Step17 // to the present process, but then we need to distribute the refinement // indicators into a distributed vector so that all processes have the // values of the refinement indicator for all cells. But then, in order for - // each process to refine its copy of the mesh, they need to have acces to + // each process to refine its copy of the mesh, they need to have access to // all refinement indicators locally, so they have to copy the global vector // back into a local one. That's a little convoluted, but thinking about it // quite straightforward nevertheless. So here's how we do it: @@ -788,7 +788,7 @@ namespace Step17 // local_error_per_cell vector. The elements of this vector // for cells not on the present process are zero. However, since all // processes have a copy of a copy of the entire triangulation and need to - // keep these copies in synch, they need the values of refinement + // keep these copies in sync, they need the values of refinement // indicators for all cells of the triangulation. Thus, we need to // distribute our results. 
We do this by creating a distributed vector // where each process has its share, and sets the elements it has diff --git a/deal.II/examples/step-18/doc/intro.dox b/deal.II/examples/step-18/doc/intro.dox index 81ff567e44..2d54bda4a9 100644 --- a/deal.II/examples/step-18/doc/intro.dox +++ b/deal.II/examples/step-18/doc/intro.dox @@ -299,7 +299,7 @@ simply translated along. The dilational or compressional change produces a corresponding stress update. However, the rotational component does not necessarily induce a nonzero stress update (think, in 2d, for example of the situation where $\Delta\mathbf{u}=(y, -x)^T$, with which $\varepsilon(\Delta -\mathbf{u})=0$). Nevertheless, if the the material was pre-stressed in a certain +\mathbf{u})=0$). Nevertheless, if the the material was prestressed in a certain direction, then this direction will be rotated along with the material. To this end, we have to define a rotation matrix $R(\Delta \mathbf{u}^n)$ that describes, in each point the rotation due to the displacement increments. It diff --git a/deal.II/examples/step-18/step-18.cc b/deal.II/examples/step-18/step-18.cc index bcd2add729..200d61b0c6 100644 --- a/deal.II/examples/step-18/step-18.cc +++ b/deal.II/examples/step-18/step-18.cc @@ -383,7 +383,7 @@ namespace Step18 // the system, direct what has to be solved in each time step, a function // that solves the linear system that arises in each timestep (and returns // the number of iterations it took), and finally output the solution - // vector on the currect mesh: + // vector on the correct mesh: void create_coarse_grid (); void setup_system (); @@ -743,7 +743,7 @@ namespace Step18 // @sect4{The public interface} - // The next step is the definition of constructors and descructors. There + // The next step is the definition of constructors and destructors. There // are no surprises here: we choose linear and continuous finite elements // for each of the dim vector components of the solution, and a // Gaussian quadrature formula with 2 points in each coordinate @@ -910,7 +910,7 @@ namespace Step18 // The next function is the one that sets up the data structures for a given // mesh. This is done in most the same way as in step-17: distribute the // degrees of freedom, then sort these degrees of freedom in such a way that - // each processor gets a contiguous chunk of them. Note that subdivions into + // each processor gets a contiguous chunk of them. Note that subdivisions into // chunks for each processor is handled in the functions that create or // refine grids, unlike in the previous example program (the point where // this happens is mostly a matter of taste; here, we chose to do it when @@ -1009,7 +1009,7 @@ namespace Step18 // adaptively refined). // // With this data structure, we can then go to the PETSc sparse matrix and - // tell it to pre-allocate all the entries we will later want to write to: + // tell it to preallocate all the entries we will later want to write to: system_matrix.reinit (mpi_communicator, sparsity_pattern, local_dofs_per_process, @@ -1680,13 +1680,13 @@ namespace Step18 // cell-@>vertex_dof_index(v,d) function that returns the index // of the dth degree of freedom at vertex v of the // given cell. In the present case, displacement in the k-th coordinate - // direction corresonds to the kth component of the finite element. Using a + // direction corresponds to the k-th component of the finite element. 
Using a // function like this bears a certain risk, because it uses knowledge of the // order of elements that we have taken together for this program in the // FESystem element. If we decided to add an additional // variable, for example a pressure variable for stabilization, and happened // to insert it as the first variable of the element, then the computation - // below will start to produce non-sensical results. In addition, this + // below will start to produce nonsensical results. In addition, this // computation rests on other assumptions: first, that the element we use // has, indeed, degrees of freedom that are associated with vertices. This // is indeed the case for the present Q1 element, as would be for all Qp @@ -1773,7 +1773,7 @@ namespace Step18 // To put this into larger perspective, we note that if we had previously // available stresses in our model (which we assume do not exist for the // purpose of this program), then we would need to interpolate the field of - // pre-existing stresses to the quadrature points. Likewise, if we were to + // preexisting stresses to the quadrature points. Likewise, if we were to // simulate elasto-plastic materials with hardening/softening, then we would // have to store additional history variables like the present yield stress // of the accumulated plastic strains in each quadrature @@ -1856,9 +1856,9 @@ namespace Step18 // displacement update so that the material in its new configuration // accommodates for the difference between the external body and boundary // forces applied during this time step minus the forces exerted through - // pre-existing internal stresses. In order to have the pre-existing + // preexisting internal stresses. In order to have the preexisting // stresses available at the next time step, we therefore have to update the - // pre-existing stresses with the stresses due to the incremental + // preexisting stresses with the stresses due to the incremental // displacement computed during the present time step. Ideally, the // resulting sum of internal stresses would exactly counter all external // forces. Indeed, a simple experiment can make sure that this is so: if we @@ -1969,7 +1969,7 @@ namespace Step18 // three matrices should be symmetric, it is not due to floating // point round off: we get an asymmetry on the order of 1e-16 of // the off-diagonal elements of the result. When assigning the - // result to a SymmetricTensor, the constuctor of + // result to a SymmetricTensor, the constructor of // that class checks the symmetry and realizes that it isn't // exactly symmetric; it will then raise an exception. 
To avoid // that, we explicitly symmetrize the result to make it exactly diff --git a/deal.II/examples/step-19/doc/results.dox b/deal.II/examples/step-19/doc/results.dox index 233fbff3b1..39fe3634b4 100644 --- a/deal.II/examples/step-19/doc/results.dox +++ b/deal.II/examples/step-19/doc/results.dox @@ -33,7 +33,7 @@ set Output format = gnuplot subsection DX output parameters - # A boolean field indicating whether neighborship information between cells + # A Boolean field indicating whether neighborship information between cells # is to be written to the OpenDX output file set Write neighbors = true end @@ -93,7 +93,7 @@ subsection Eps output parameters end subsection Povray output parameters - # Whether camera and lightling information should be put into an external + # Whether camera and lighting information should be put into an external # file "data.inc" or into the POVRAY input file set Include external file = true diff --git a/deal.II/examples/step-19/step-19.cc b/deal.II/examples/step-19/step-19.cc index 17bec2c4ab..3a61b279ae 100644 --- a/deal.II/examples/step-19/step-19.cc +++ b/deal.II/examples/step-19/step-19.cc @@ -142,7 +142,7 @@ namespace Step19 // this program, I realized that there aren't all that many parameters this // program can usefully ask for, or better, it turned out: declaring and // querying these parameters was already done centralized in one place of - // the libray, namely the DataOutInterface class that handles + // the library, namely the DataOutInterface class that handles // exactly this -- managing parameters for input and output. // // So the second function call in this function is to let the @@ -196,7 +196,7 @@ namespace Step19 // with the subsection name, I like to use curly braces to force my editor // to indent everything that goes into this sub-section by one level of // indentation. In this sub-section, we shall have two entries, one that - // takes a boolean parameter and one that takes a selection list of + // takes a Boolean parameter and one that takes a selection list of // values, separated by the '|' character: prm.enter_subsection ("Dummy subsection"); { @@ -288,7 +288,7 @@ namespace Step19 // Finally, let us note that if we were interested in the values // of the parameters declared above in the dummy subsection, we // would write something like this to extract the value of the - // boolean flag (the prm.get function returns the + // Boolean flag (the prm.get function returns the // value of a parameter as a string, whereas the // prm.get_X functions return a value already // converted to a different type): diff --git a/deal.II/examples/step-2/doc/intro.dox b/deal.II/examples/step-2/doc/intro.dox index bef310a19a..3c2412609a 100644 --- a/deal.II/examples/step-2/doc/intro.dox +++ b/deal.II/examples/step-2/doc/intro.dox @@ -13,7 +13,7 @@ The term "degree of freedom" is commonly used in the finite element community to indicate two slightly different, but related things. The first is that we'd like to represent the finite element solution as a linear combination of shape function, in the form $u_h(\mathbf x) = \sum_{j=0}^{N-1} U_j \varphi_j(\mathbf -x)$. Here, $U_j$ is a vector of expension coefficients. Because we don't know +x)$. Here, $U_j$ is a vector of expansion coefficients. Because we don't know their values yet (we will compute them as the solution of a linear or nonlinear system), they are called "unknowns" or "degrees of freedom". 
The second meaning of the term con be explained as follows: A mathematical @@ -62,7 +62,7 @@ approximation of the Laplace equation leads to a sparse matrix since the number of nonzero entries per row is five, and therefore independent of the total size of the matrix.) Sparsity is one of the distinguishing feature of the finite element method compared to, say, approximating the solution of a -partial differential equation using a Taylor expension and matching +partial differential equation using a Taylor expansion and matching coefficients, or using a Fourier basis. In practical terms, it is the sparsity of matrices that enables us to solve diff --git a/deal.II/examples/step-2/doc/results.dox b/deal.II/examples/step-2/doc/results.dox index af9891d631..d07d5bc643 100644 --- a/deal.II/examples/step-2/doc/results.dox +++ b/deal.II/examples/step-2/doc/results.dox @@ -4,7 +4,7 @@ The program has, after having been run, produced two sparsity patterns. We can visualize them using GNUPLOT (one of the simpler visualization programs; maybe not the easiest to use since it is command line driven, but -also universally available on all linux and other unix-like systems): +also universally available on all Linux and other Unix-like systems): @code examples/\step-2> gnuplot diff --git a/deal.II/examples/step-2/step-2.cc b/deal.II/examples/step-2/step-2.cc index 173841e1f7..fed915ca51 100644 --- a/deal.II/examples/step-2/step-2.cc +++ b/deal.II/examples/step-2/step-2.cc @@ -151,7 +151,7 @@ void make_grid (Triangulation<2> &triangulation) // least as long as that of the DoFHandler; one way to make sure // this is so is to make it static as well, in order to prevent its preemptive // destruction. (However, the library would warn us if we forgot about this -// and abort the program if that occured. You can check this, if you want, by +// and abort the program if that occurred. You can check this, if you want, by // removing the 'static' declaration.) void distribute_dofs (DoFHandler<2> &dof_handler) { diff --git a/deal.II/examples/step-20/doc/results.dox b/deal.II/examples/step-20/doc/results.dox index 29842cdfee..20ece4a3e2 100644 --- a/deal.II/examples/step-20/doc/results.dox +++ b/deal.II/examples/step-20/doc/results.dox @@ -40,7 +40,7 @@ flow first channels towards the center and then outward again. Consequently, the x-velocity has to increase to get the flow through the narrow part, something that can easily be seen in the left image. The middle image represents inward flow in y-direction at the left end of the domain, and -outward flow in y-directino at the right end of the domain. +outward flow in y-direction at the right end of the domain. @@ -180,11 +180,11 @@ The result concerning the convergence order is the same here. Realistic flow computations for ground water or oil reservoir simulations will not use a constant permeability. Here's a first, rather simple way to change this situation: we use a permeability that decays very rapidly away from a -central flowline until it hits a background value of 0.001. This is to mimick +central flowline until it hits a background value of 0.001. This is to mimic the behavior of fluids in sandstone: in most of the domain, the sandstone is -homogenous and, while permeably to fluids, not overly so; on the other stone, +homogeneous and, while permeable to fluids, not overly so; on the other stone, the stone has cracked, or faulted, along one line, and the fluids flow much -easier along this large crask. 
Here is how we could implement something like +easier along this large crack. Here is how we could implement something like this: @code template diff --git a/deal.II/examples/step-20/step-20.cc b/deal.II/examples/step-20/step-20.cc index a0e6c251d8..dc5e35de97 100644 --- a/deal.II/examples/step-20/step-20.cc +++ b/deal.II/examples/step-20/step-20.cc @@ -280,10 +280,10 @@ namespace Step20 // binding simpler elements together into one larger element. In the present // case, we want to couple a single RT(degree) element with a single // DQ(degree) element. The constructor to FESystem that does - // this requires us to specity first the first base element (the + // this requires us to specify first the first base element (the // FE_RaviartThomas object of given degree) and then the number // of copies for this base element, and then similarly the kind and number - // of FE_DGQ elements. Note that the Raviart Thomas element + // of FE_DGQ elements. Note that the Raviart-Thomas element // already has dim vector components, so that the coupled // element will have dim+1 vector components, the first // dim of which correspond to the velocity variable whereas the @@ -806,7 +806,7 @@ namespace Step20 // used here, the Gauss points happen to be superconvergence points in // which the pointwise error is much smaller (and converges with higher // order) than anywhere else. These are therefore not particularly good - // points for ingration. To avoid this problem, we simply use a + // points for integration. To avoid this problem, we simply use a // trapezoidal rule and iterate it degree+2 times in each // coordinate direction (again as explained in step-7): QTrapez<1> q_trapez; @@ -913,7 +913,7 @@ namespace Step20 // The main function we stole from step-6 instead of step-4. It is almost // equal to the one in step-6 (apart from the changed class names, of course), // the only exception is that we pass the degree of the finite element space -// to the constructor of the mixed laplace problem (here, we use zero-th order +// to the constructor of the mixed Laplace problem (here, we use zero-th order // elements). int main () { diff --git a/deal.II/examples/step-21/doc/intro.dox b/deal.II/examples/step-21/doc/intro.dox index 85ce8f313a..2ee80de1b5 100644 --- a/deal.II/examples/step-21/doc/intro.dox +++ b/deal.II/examples/step-21/doc/intro.dox @@ -114,7 +114,7 @@ In summary, what we get are the following two equations: Here, $p=p(\mathbf x, t), S=S(\mathbf x, t)$ are now time dependent functions: while at every time instant the flow field is in equilibrium with the pressure (i.e. we neglect dynamic -accellerations), the saturation is transported along with the flow and +accelerations), the saturation is transported along with the flow and therefore changes over time, in turn affected the flow field again through the dependence of the first equation on $S$. @@ -411,8 +411,8 @@ one is harder to justify: on a microscopic level, most rocks are isotropic, because they consist of a network of interconnected pores. However, this microscopic scale is out of the range of today's computer simulations, and we have to be content with simulating things on the scale of meters. On that -scale, however, fluid transport typically happens through a network of crasks -in the rock, rather than throuh pores. However, cracks often result from +scale, however, fluid transport typically happens through a network of cracks +in the rock, rather than through pores. 
However, cracks often result from external stress fields in the rock layer (for example from tectonic faulting) and the cracks are therefore roughly aligned. This leads to a situation where the permeability is often orders of magnitude larger in the direction parallel @@ -499,7 +499,7 @@ functions introduced at the end of the results section of @ref step_20 "step-20":
        • A function that models a single, winding crack that snakes through the - domain. In analgy to step-20, but taking care of the slightly + domain. In analogy to step-20, but taking care of the slightly different geometry we have here, we describe this by the following function: @f[ k(\mathbf x) diff --git a/deal.II/examples/step-22/doc/intro.dox b/deal.II/examples/step-22/doc/intro.dox index b57dbed1ba..ee94320e65 100644 --- a/deal.II/examples/step-22/doc/intro.dox +++ b/deal.II/examples/step-22/doc/intro.dox @@ -302,7 +302,7 @@ possibilities for imposing boundary conditions:
      Despite this wealth of possibilities, we will only use Dirichlet and -(homogenous) Neumann boundary conditions in this tutorial program. +(homogeneous) Neumann boundary conditions in this tutorial program.

      Discretization

      @@ -539,7 +539,7 @@ matrix. It is therefore an operation that can efficiently also be computed in represent an exact inverse of the matrix $A$. Consequently, preconditioning with the ILU will still require more than one iteration, unlike preconditioning with the sparse direct solver. The inner solver will therefore -take more time when multiplying with the Schur complement, a tradeoff +take more time when multiplying with the Schur complement, a trade-off unavoidable.
    @@ -678,7 +678,7 @@ conditions. Frequently, a sparse matrix contains a substantial amount of elements that -acutally are zero when we are about to start a linear solve. Such elements are +actually are zero when we are about to start a linear solve. Such elements are introduced when we eliminate constraints or implement Dirichlet conditions, where we usually delete all entries in constrained rows and columns, i.e., we set them to zero. The fraction of elements that are present in the sparsity @@ -751,7 +751,7 @@ removed this bottleneck at the price of a slightly higher memory consumption. Likewise, the implementation of the decomposition step in the SparseILU class was very inefficient and has been replaced by one that is about 10 times faster. Even the vmult function of the SparseILU has been -improved to save about twenty precent of time. Small improvements were +improved to save about twenty percent of time. Small improvements were applied here and there. Moreover, the ConstraintMatrix object has been used to eliminate a lot of entries in the sparse matrix that are eventually going to be zero, see the section on using advanced diff --git a/deal.II/examples/step-22/doc/results.dox b/deal.II/examples/step-22/doc/results.dox index 5263299127..d544b2f21e 100644 --- a/deal.II/examples/step-22/doc/results.dox +++ b/deal.II/examples/step-22/doc/results.dox @@ -73,7 +73,7 @@ In the images below, we show the grids for the first six refinement steps in the program. Observe how the grid is refined in regions where the solution rapidly changes: On the upper boundary, we have Dirichlet boundary conditions that are -1 in the left half of the line -and 1 in the right one, so there is an aprupt change at $x=0$. Likewise, +and 1 in the right one, so there is an abrupt change at $x=0$. Likewise, there are changes from Dirichlet to Neumann data in the two upper corners, so there is need for refinement there as well: @@ -226,7 +226,7 @@ interesting graph: -The isocountours shown here as well are those of the pressure +The isocontours shown here as well are those of the pressure variable, showing the singularity at the point of discontinuous velocity boundary conditions. @@ -315,8 +315,8 @@ the number of iterations for applying the inverse pressure mass matrix is always around five, both in two and three dimensions.) To summarize, most work is spent on solving linear systems with the same matrix $A$ over and over again. What makes this look even worse is the fact that we -actually invert a matrix that is about 95 precent the size of the total system -matrix and stands for 85 precent of the non-zero entries in the sparsity +actually invert a matrix that is about 95 percent the size of the total system +matrix and stands for 85 percent of the non-zero entries in the sparsity pattern. Hence, the natural question is whether it is reasonable to solve a linear system with matrix $A$ for about 15 times when calculating the solution to the block system. @@ -357,7 +357,7 @@ unknowns. Another idea to improve the situation even more would be to choose a preconditioner that makes CG for the (0,0) matrix $A$ converge in a mesh-independent number of iterations, say 10 to 30. We have seen such a -canditate in step-16: multigrid. +candidate in step-16: multigrid.
    Block Schur complement preconditioner
    Even with a good preconditioner for $A$, we still @@ -492,7 +492,7 @@ are problems where one of the two candidates clearly outperforms the other, and vice versa.
    Wikipedia's article on the GMRES method gives a comparative presentation. -A more comprehensive and well-founded comparsion can be read e.g. in the book by +A more comprehensive and well-founded comparison can be read e.g. in the book by J.W. Demmel (Applied Numerical Linear Algebra, SIAM, 1997, section 6.6.6). For our specific problem with the ILU preconditioner for $A$, we certainly need @@ -756,7 +756,7 @@ example mid-ocean ridges). Of course, in such places, the geometry is more complicated than the examples shown above, but it is not hard to accommodate for that. -For example, by using the folllowing modification of the boundary values +For example, by using the following modification of the boundary values function @code template diff --git a/deal.II/examples/step-22/step-22.cc b/deal.II/examples/step-22/step-22.cc index 7703cc0b28..ee118bbb5c 100644 --- a/deal.II/examples/step-22/step-22.cc +++ b/deal.II/examples/step-22/step-22.cc @@ -156,7 +156,7 @@ namespace Step22 // As in step-20 and most other example programs, the next task is to define // the data for the PDE: For the Stokes problem, we are going to use natural - // boundary values on parts of the boundary (i.e. homogenous Neumann-type) + // boundary values on parts of the boundary (i.e. homogeneous Neumann-type) // for which we won't have to do anything special (the homogeneity implies // that the corresponding terms in the weak form are simply zero), and // boundary conditions on the velocity (Dirichlet-type) on the rest of the @@ -379,7 +379,7 @@ namespace Step22 // Note that we initialize the triangulation with a MeshSmoothing argument, // which ensures that the refinement of cells is done in a way that the // approximation of the PDE solution remains well-behaved (problems arise if - // grids are too unstructered), see the documentation of + // grids are too unstructured), see the documentation of // Triangulation::MeshSmoothing for details. template StokesProblem::StokesProblem (const unsigned int degree) @@ -518,7 +518,7 @@ namespace Step22 // require allocating more memory than necessary but isn't suitable for // use as a basis of SparseMatrix or BlockSparseMatrix objects; in a // second step we then copy this object into an object of - // BlockSparsityPattern. This is entirely analgous to what we already did + // BlockSparsityPattern. This is entirely analogous to what we already did // in step-11 and step-18. // // There is one snag again here, though: it turns out that using the @@ -735,7 +735,7 @@ namespace Step22 // After the discussion in the introduction and the definition of the // respective classes above, the implementation of the solve - // function is rather straigt-forward and done in a similar way as in + // function is rather straight-forward and done in a similar way as in // step-20. To start with, we need an object of the // InverseMatrix class that represents the inverse of the // matrix A. As described in the introduction, the inverse is generated with @@ -790,7 +790,7 @@ namespace Step22 // An alternative that is cheaper to build, but needs more iterations // afterwards, would be to choose a SSOR preconditioner with factor // 1.2. It needs about twice the number of iterations, but the costs for - // its generation are almost neglible. + // its generation are almost negligible. 
SparseILU preconditioner; preconditioner.initialize (system_matrix.block(1,1), SparseILU::AdditionalData()); @@ -820,7 +820,7 @@ namespace Step22 // objects we already know - so we simply multiply $p$ by $B^T$, subtract // the right hand side and multiply by the inverse of $A$. At the end, we // need to distribute the constraints from hanging nodes in order to - // obtain a constistent flow field: + // obtain a consistent flow field: { system_matrix.block(0,1).vmult (tmp, solution.block(1)); tmp *= -1; diff --git a/deal.II/examples/step-23/doc/intro.dox b/deal.II/examples/step-23/doc/intro.dox index 1139efd2bd..90becf9b9b 100644 --- a/deal.II/examples/step-23/doc/intro.dox +++ b/deal.II/examples/step-23/doc/intro.dox @@ -63,7 +63,7 @@ one of the usual ODE solvers (this is called the method of lines). Both of these methods have advantages and disadvantages. Traditionally, people have preferred the method of lines, since it allows to use the very well developed machinery of high-order ODE -solvers avaiable for the rather stiff ODEs resulting from this +solvers available for the rather stiff ODEs resulting from this approach, including step length control and estimation of the temporal error. @@ -388,7 +388,7 @@ $[-1,1]^2$ seven times uniformly, giving a mesh size of $h=\frac 1{64}$, which is what we set the time step to. The fact that we set the time step and mesh size individually in two different places is error prone: it is too easy to refine the mesh once more but forget to also adjust the time step. @ref -step_24 "step-24" shows a better way how to keep these things in synch. +step_24 "step-24" shows a better way how to keep these things in sync.

    The test case

    diff --git a/deal.II/examples/step-23/step-23.cc b/deal.II/examples/step-23/step-23.cc index 3fb30e3476..b0a50390d3 100644 --- a/deal.II/examples/step-23/step-23.cc +++ b/deal.II/examples/step-23/step-23.cc @@ -340,7 +340,7 @@ namespace Step23 sparsity_pattern.compress(); // Then comes a block where we have to initialize the 3 matrices we need - // in the course of the program: the mass matrix, the laplace matrix, and + // in the course of the program: the mass matrix, the Laplace matrix, and // the matrix $M+k^2\theta^2A$ used when solving for $U^n$ in each time // step. // diff --git a/deal.II/examples/step-24/doc/intro.dox b/deal.II/examples/step-24/doc/intro.dox index c3a23332cc..cbaf189e27 100644 --- a/deal.II/examples/step-24/doc/intro.dox +++ b/deal.II/examples/step-24/doc/intro.dox @@ -41,7 +41,7 @@ Let us assume that tissues have heterogeneous dielectric properties but homogeneous acoustic properties. The basic acoustic generation equation in an acoustically homogeneous medium can be described as follows: if $u$ is the vector-valued displacement, then tissue certainly reacts to changes in -pressure by accelleration: +pressure by acceleration: @f[ \rho \frac{\partial^2}{\partial t^2}u(t,\mathbf r) = -\nabla p(t,\mathbf r). @@ -168,7 +168,7 @@ boundary mass matrix results from the use of absorbing boundary conditions. Above two equations can be rewritten in a matrix form with the pressure and its derivative as -an unknown vecotor: +an unknown vector: @f[ \left(\begin{array}{cc} M & -k\theta M \\ diff --git a/deal.II/examples/step-24/doc/results.dox b/deal.II/examples/step-24/doc/results.dox index d3139965bc..67101997f1 100644 --- a/deal.II/examples/step-24/doc/results.dox +++ b/deal.II/examples/step-24/doc/results.dox @@ -4,7 +4,7 @@ The program writes both graphical data for each time step as well as the values evaluated at each detector location to disk. We then draw them in plots. Experimental data were also collected for comparison. Currently our experiments have only been done in two dimensions by -circually scanning a single detector. The tissue sample here is a thin slice +circularly scanning a single detector. The tissue sample here is a thin slice in the X-Y plane (Z=0), and we assume that signals from other Z directions won't contribute to the data. Consequently, we only have to compare our experimental data with two dimensional simulated data. @@ -136,7 +136,7 @@ plots), but are bulged out in certain directions. To make things worse, the circular mesh we use (see for example step-6 for a view of the coarse mesh) is not isotropic either. The net result is that the signal fronts are not sinusoidal unless the mesh is sufficiently fine. The right image is a -lot better in this respect, though artificts in the form of trailing spurious +lot better in this respect, though artifacts in the form of trailing spurious waves can still be seen. diff --git a/deal.II/examples/step-25/doc/intro.dox b/deal.II/examples/step-25/doc/intro.dox index 96f269cc3a..41a133cc8c 100644 --- a/deal.II/examples/step-25/doc/intro.dox +++ b/deal.II/examples/step-25/doc/intro.dox @@ -103,7 +103,7 @@ To this end, we can use Newton's method. Given the nonlinear equation $F(u^n) = \f} The iteration can be initialized with the old time step, i.e. $u^n_0 = u^{n-1}$, and eventually it will produce a solution to the first equation of -the split formulation (see above). For the time discretizaion of the +the split formulation (see above). 
 sine-Gordon equation under consideration here, we have that
 \f{eqnarray*}
   F(u^n_l) &=& \left[ 1-k^2\theta^2\Delta \right] u^n_l -
@@ -147,7 +147,7 @@ terms of the nodal basis. Henceforth, we shall denote by a capital letter
 the vector of coefficients (in the nodal basis) of a function denoted by the
 same letter in lower case; e.g., $u^n = \sum_{i=1}^N U^n_i \varphi_i$ where
 $U^n \in {R}^N$ and $u^n \in
-H^1(\Omega)$. Thus, the finite-dimensional version of the variational fomulation requires that we solve the following matrix equations at each time step:
+H^1(\Omega)$. Thus, the finite-dimensional version of the variational formulation requires that we solve the following matrix equations at each time step:
 @f{eqnarray*}
   F_h'(U^{n,l})\delta U^{n,l} &=& -F_h(U^{n,l}), \qquad
   U^{n,l+1} = U^{n,l} + \delta U^{n,l}, \qquad U^{n,0} = U^{n-1}; \\
@@ -162,7 +162,7 @@ Above, the matrix $F_h'(\cdot)$ and the vector $F_h(\cdot)$ denote the discrete
   - k^2\theta^2N(u^n_l,u^{n-1})
 \f}
 Again, note that the first matrix equation above is, in fact, the
-defition of an iterative procedure, so it is solved multiple times
+definition of an iterative procedure, so it is solved multiple times
 until a stopping criterion is met. Moreover, $M$ is the mass matrix,
 i.e. $M_{ij} = \left( \varphi_i,\varphi_j \right)_{\Omega}$, $A$ is the
 Laplace matrix, i.e. $A_{ij} = \left( \nabla \varphi_i, \nabla
diff --git a/deal.II/examples/step-25/step-25.cc b/deal.II/examples/step-25/step-25.cc
index 794e09f870..c3ad5131f1 100644
--- a/deal.II/examples/step-25/step-25.cc
+++ b/deal.II/examples/step-25/step-25.cc
@@ -416,7 +416,7 @@ namespace Step25
   // integrate these terms exactly. It is usually sufficient to just make sure
   // that the right hand side is integrated up to the same order of accuracy
   // as the discretization scheme is, but it may be possible to improve on the
-  // constant in the asympotitic statement of convergence by choosing a more
+  // constant in the asymptotic statement of convergence by choosing a more
   // accurate quadrature formula.
   template <int dim>
   void SineGordonProblem<dim>::compute_nl_term (const Vector<double> &old_data,
@@ -477,7 +477,7 @@ namespace Step25
   // @sect4{SineGordonProblem::compute_nl_matrix}
 
   // This is the second function dealing with the nonlinear scheme. It
-  // computes the matrix $N(\cdot,\cdot)$, whicih appears in the nonlinear
+  // computes the matrix $N(\cdot,\cdot)$, which appears in the nonlinear
   // term in the Jacobian of $F(\cdot)$. Just as compute_nl_term,
   // we must allow this function to receive as input an "old" and a "new"
   // solution, which we again call $w_{\mathrm{old}}$ and $w_{\mathrm{new}}$
@@ -610,7 +610,7 @@ namespace Step25
   {
     make_grid_and_dofs ();
 
-    // To aknowledge the initial condition, we must use the function $u_0(x)$
+    // To acknowledge the initial condition, we must use the function $u_0(x)$
     // to compute $U^0$. To this end, below we will create an object of type
     // InitialValues; note that when we create this object (which
     // is derived from the Function class), we set its internal
diff --git a/deal.II/examples/step-26/step-26.cc b/deal.II/examples/step-26/step-26.cc
index 2e16d32ad2..d047fc7b5c 100644
--- a/deal.II/examples/step-26/step-26.cc
+++ b/deal.II/examples/step-26/step-26.cc
@@ -330,7 +330,7 @@ namespace Step26
     // SolutionTransfer class and we have to prepare the solution vectors that
     // should be transferred to the new grid (we will lose the old grid once
     // we have done the refinement so the transfer has to happen concurrently
-    // with refinement). What we definetely need are the current and the old
+    // with refinement). What we definitely need are the current and the old
     // temperature (BDF-2 time stepping requires two old solutions). Since the
     // SolutionTransfer objects only support to transfer one object per dof
     // handler, we need to collect the two temperature solutions in one data
diff --git a/deal.II/examples/step-28/doc/results.dox b/deal.II/examples/step-28/doc/results.dox
index 1a48da48b7..edbacad443 100644
--- a/deal.II/examples/step-28/doc/results.dox
+++ b/deal.II/examples/step-28/doc/results.dox
@@ -61,7 +61,7 @@
 in following figure.
 
-We see that the grid of thermal group is much finner than the one of fast group.
+We see that the grid of thermal group is much finer than the one of fast group.
 The solutions on these grids are, (Note: flux are normalized with total fission
 source equal to 1)
 
@@ -77,4 +77,4 @@ The estimated ``exact'' k-effective = 0.906834721253 which is simply from last
 mesh iteration of polynomial order 3 minus 2e-10. We see that h-adaptive
 calculations deliver an algebraic convergence. And the higher polynomial order
 is, the faster mesh iteration converges. In our problem, we need smaller
 number of DoFs to achieve same
-accuracy with higher polynoimal order.
+accuracy with higher polynomial order.
diff --git a/deal.II/examples/step-28/step-28.cc b/deal.II/examples/step-28/step-28.cc
index 475af6d47f..d2d542941f 100644
--- a/deal.II/examples/step-28/step-28.cc
+++ b/deal.II/examples/step-28/step-28.cc
@@ -152,7 +152,7 @@ namespace Step28
   // arrays. It takes the number of energy groups as an argument (an throws an
   // error if that value is not equal to two, since at presently only data for
   // two energy groups is implemented; however, using this, the function
-  // remains flexible and extendible into the future). In the member
+  // remains flexible and extendable into the future). In the member
   // initialization part at the beginning, it also resizes the arrays to their
   // correct sizes.
   //
@@ -506,7 +506,7 @@ namespace Step28
     ConstraintMatrix hanging_node_constraints;
 
 
-    // @sect5{Private member functionss}
+    // @sect5{Private member functions}
     //
     // There is one private member function in this class. It recursively
     // walks over cells of two meshes to compute the cross-group right hand
@@ -800,7 +800,7 @@ namespace Step28
   // groups. First we call get_finest_common_cells to obtain this
   // list of pairs of common cells from both meshes. Both cells in a pair may
   // not be active but at least one of them is. We then hand each of these
-  // cell pairs off to a function tha computes the right hand side terms
+  // cell pairs off to a function that computes the right hand side terms
   // recursively.
   //
   // Note that ingroup coupling is handled already before, so we exit the
diff --git a/deal.II/examples/step-29/doc/intro.dox b/deal.II/examples/step-29/doc/intro.dox
index 03f347e791..32c62b84e3 100644
--- a/deal.II/examples/step-29/doc/intro.dox
+++ b/deal.II/examples/step-29/doc/intro.dox
@@ -96,7 +96,7 @@ boundary condition will yield partial reflections, i.e. only parts of the
 wave will pass through the boundary as if it wasn't here whereas the
 remaining fraction of the wave will be reflected back into the domain.
 
-If we are willing to accept this as a suffient approximation to an absorbing boundary we finally arrive at the following problem for $u$:
+If we are willing to accept this as a sufficient approximation to an absorbing boundary we finally arrive at the following problem for $u$:
 @f{eqnarray*}
 -\omega^2 u - c^2\Delta u &=& 0, \qquad x\in\Omega,\\
 c (n\cdot\nabla u) + i\,\omega\,u &=&0, \qquad x\in\Gamma_2,\\
diff --git a/deal.II/examples/step-29/doc/results.dox b/deal.II/examples/step-29/doc/results.dox
index cf9a8790dc..68c64e64bb 100644
--- a/deal.II/examples/step-29/doc/results.dox
+++ b/deal.II/examples/step-29/doc/results.dox
@@ -160,7 +160,7 @@ and the effort to solve a banded linear system using LU decomposition is
 ${\cal O}(BN)$.
 
 This also explains why the program does run in 3d as well (after changing
 the dimension on the UltrasoundProblem object), but scales very badly and
-takes extraordinate patience before it finishes solving the linear
+takes extraordinary patience before it finishes solving the linear
 system on a mesh with appreciable resolution, even though all the
 other parts of the program scale very nicely.
diff --git a/deal.II/examples/step-29/step-29.cc b/deal.II/examples/step-29/step-29.cc
index 5278c7a1c2..e581ff9685 100644
--- a/deal.II/examples/step-29/step-29.cc
+++ b/deal.II/examples/step-29/step-29.cc
@@ -330,7 +330,7 @@ namespace Step29
   {}
 
 
-  // The actual prostprocessing happens in the following function. Its inputs
+  // The actual postprocessing happens in the following function. Its inputs
   // are a vector representing values of the function (which is here
   // vector-valued) representing the data vector given to
   // DataOut::add_data_vector, evaluated at all evaluation points where we
@@ -634,7 +634,7 @@ namespace Step29
           // At this point, it is important to keep in mind that we are
           // dealing with a finite element system with two
           // components. Due to the way we constructed this FESystem,
-          // namely as the cartesian product of two scalar finite
+          // namely as the Cartesian product of two scalar finite
           // element fields, each shape function has only a single
           // nonzero component (they are, in deal.II lingo, @ref
           // GlossPrimitive "primitive"). Hence, each shape function
diff --git a/deal.II/examples/step-3/doc/intro.dox b/deal.II/examples/step-3/doc/intro.dox
index 6286abe7bc..741e2e2d85 100644
--- a/deal.II/examples/step-3/doc/intro.dox
+++ b/deal.II/examples/step-3/doc/intro.dox
@@ -264,18 +264,18 @@
 Secondly, let's look at the member functions. These, as well, already form
 the common structure that almost all following tutorial programs will use: