From f7dcde9c2c862d64bd2467759589c9130be7baa1 Mon Sep 17 00:00:00 2001
From: heltai
Date: Wed, 4 Sep 2013 12:52:20 +0000
Subject: [PATCH] Merged from trunk
git-svn-id: https://svn.dealii.org/branches/branch_manifold_id@30589 0785d39b-7218-0410-832d-ea1e28bc413d
---
deal.II/cmake/config/CMakeLists.txt | 2 +-
deal.II/cmake/config/Make.global_options.in | 9 +
deal.II/cmake/modules/FindP4EST.cmake | 11 +-
deal.II/doc/authors.html | 6 +-
deal.II/doc/development/cmake.html | 27 ++
deal.II/doc/development/testsuite.html | 6 +-
deal.II/doc/external-libs/p4est.html | 2 +-
deal.II/doc/news/changes.h | 18 +-
deal.II/examples/step-10/step-10.cc | 2 +-
deal.II/examples/step-11/doc/intro.dox | 4 +-
deal.II/examples/step-11/doc/results.dox | 2 +-
deal.II/examples/step-11/step-11.cc | 2 +-
deal.II/examples/step-13/doc/intro.dox | 6 +-
deal.II/examples/step-13/step-13.cc | 6 +-
deal.II/examples/step-14/doc/results.dox | 2 +-
deal.II/examples/step-14/step-14.cc | 14 +-
deal.II/examples/step-15/doc/intro.dox | 6 +-
deal.II/examples/step-15/doc/results.dox | 2 +-
deal.II/examples/step-15/step-15.cc | 4 +-
deal.II/examples/step-16/step-16.cc | 10 +-
deal.II/examples/step-17/step-17.cc | 4 +-
deal.II/examples/step-18/doc/intro.dox | 2 +-
deal.II/examples/step-18/step-18.cc | 20 +-
deal.II/examples/step-19/doc/results.dox | 4 +-
deal.II/examples/step-19/step-19.cc | 6 +-
deal.II/examples/step-2/doc/intro.dox | 4 +-
deal.II/examples/step-2/doc/results.dox | 2 +-
deal.II/examples/step-2/step-2.cc | 2 +-
deal.II/examples/step-20/doc/results.dox | 8 +-
deal.II/examples/step-20/step-20.cc | 8 +-
deal.II/examples/step-21/doc/intro.dox | 8 +-
deal.II/examples/step-22/doc/intro.dox | 8 +-
deal.II/examples/step-22/doc/results.dox | 14 +-
deal.II/examples/step-22/step-22.cc | 12 +-
deal.II/examples/step-23/doc/intro.dox | 4 +-
deal.II/examples/step-23/step-23.cc | 2 +-
deal.II/examples/step-24/doc/intro.dox | 4 +-
deal.II/examples/step-24/doc/results.dox | 4 +-
deal.II/examples/step-25/doc/intro.dox | 6 +-
deal.II/examples/step-25/step-25.cc | 6 +-
deal.II/examples/step-26/step-26.cc | 2 +-
deal.II/examples/step-28/doc/results.dox | 4 +-
deal.II/examples/step-28/step-28.cc | 6 +-
deal.II/examples/step-29/doc/intro.dox | 2 +-
deal.II/examples/step-29/doc/results.dox | 2 +-
deal.II/examples/step-29/step-29.cc | 4 +-
deal.II/examples/step-3/doc/intro.dox | 6 +-
deal.II/examples/step-3/doc/results.dox | 2 +-
deal.II/examples/step-30/doc/intro.dox | 4 +-
deal.II/examples/step-30/doc/results.dox | 2 +-
deal.II/examples/step-30/step-30.cc | 26 +-
deal.II/examples/step-31/doc/intro.dox | 10 +-
deal.II/examples/step-31/doc/results.dox | 2 +-
deal.II/examples/step-31/step-31.cc | 28 +-
deal.II/examples/step-32/doc/intro.dox | 18 +-
deal.II/examples/step-32/doc/results.dox | 2 +-
deal.II/examples/step-32/step-32.cc | 8 +-
deal.II/examples/step-33/doc/intro.dox | 4 +-
deal.II/examples/step-33/step-33.cc | 18 +-
deal.II/examples/step-34/doc/intro.dox | 4 +-
deal.II/examples/step-34/doc/results.dox | 2 +-
deal.II/examples/step-34/step-34.cc | 8 +-
deal.II/examples/step-35/step-35.cc | 2 +-
deal.II/examples/step-36/doc/intro.dox | 4 +-
deal.II/examples/step-36/doc/results.dox | 2 +-
deal.II/examples/step-37/doc/intro.dox | 6 +-
deal.II/examples/step-37/step-37.cc | 6 +-
deal.II/examples/step-38/doc/intro.dox | 2 +-
deal.II/examples/step-38/doc/results.dox | 4 +-
deal.II/examples/step-38/step-38.cc | 2 +-
deal.II/examples/step-39/doc/results.dox | 2 +-
deal.II/examples/step-39/step-39.cc | 12 +-
deal.II/examples/step-4/doc/intro.dox | 2 +-
deal.II/examples/step-4/doc/results.dox | 2 +-
deal.II/examples/step-4/step-4.cc | 4 +-
deal.II/examples/step-40/doc/results.dox | 2 +-
deal.II/examples/step-40/step-40.cc | 8 +-
deal.II/examples/step-41/doc/intro.dox | 8 +-
deal.II/examples/step-41/step-41.cc | 4 +-
.../examples/step-42/doc/intro-step-42.tex | 36 +-
deal.II/examples/step-42/doc/intro.dox | 32 +-
deal.II/examples/step-42/doc/results.dox | 8 +-
deal.II/examples/step-42/step-42.cc | 4 +-
deal.II/examples/step-43/doc/intro.dox | 2 +-
deal.II/examples/step-43/step-43.cc | 6 +-
deal.II/examples/step-44/doc/intro.dox | 4 +-
deal.II/examples/step-44/doc/results.dox | 2 +-
deal.II/examples/step-44/step-44.cc | 16 +-
deal.II/examples/step-46/doc/intro.dox | 2 +-
deal.II/examples/step-46/step-46.cc | 4 +-
deal.II/examples/step-47/step-47.cc | 4 +-
deal.II/examples/step-48/doc/intro.dox | 2 +-
deal.II/examples/step-48/doc/results.dox | 4 +-
deal.II/examples/step-48/step-48.cc | 2 +-
deal.II/examples/step-49/doc/intro.dox | 2 +-
deal.II/examples/step-49/doc/results.dox | 2 +-
deal.II/examples/step-50/step-50.cc | 16 +-
deal.II/examples/step-51/step-51.cc | 4 +-
deal.II/examples/step-6/step-6.cc | 6 +-
deal.II/examples/step-8/doc/intro.dox | 2 +-
deal.II/examples/step-8/step-8.cc | 2 +-
deal.II/examples/step-9/step-9.cc | 18 +-
deal.II/include/deal.II/base/config.h.in | 25 ++
deal.II/include/deal.II/distributed/tria.h | 4 +-
.../deal.II/dofs/dof_accessor.templates.h | 129 +++---
deal.II/include/deal.II/fe/fe_values.h | 17 +
deal.II/include/deal.II/grid/grid_tools.h | 17 +-
deal.II/include/deal.II/hp/dof_levels.h | 347 ++++++++++----
deal.II/include/deal.II/hp/dof_objects.h | 424 +++++++-----------
.../include/deal.II/lac/block_vector_base.h | 50 +++
deal.II/include/deal.II/lac/parallel_vector.h | 50 +++
.../include/deal.II/lac/petsc_vector_base.h | 135 ++++++
.../deal.II/lac/trilinos_sparse_matrix.h | 2 +-
.../deal.II/lac/trilinos_vector_base.h | 46 ++
deal.II/include/deal.II/lac/vector.h | 50 +++
deal.II/source/distributed/tria.cc | 2 +-
deal.II/source/dofs/dof_renumbering.cc | 6 +-
deal.II/source/hp/dof_handler.cc | 55 +--
deal.II/source/hp/dof_levels.cc | 5 +-
deal.II/source/numerics/data_out.inst.in | 9 -
120 files changed, 1268 insertions(+), 790 deletions(-)
diff --git a/deal.II/cmake/config/CMakeLists.txt b/deal.II/cmake/config/CMakeLists.txt
index 8cea9494f3..9bcda3e751 100644
--- a/deal.II/cmake/config/CMakeLists.txt
+++ b/deal.II/cmake/config/CMakeLists.txt
@@ -251,7 +251,7 @@ IF(DEAL_II_COMPONENT_COMPAT_FILES)
# Boilerplate: The Make.global_options expects variables to be set to
# yes, as is common for Makefiles.
#
- COND_SET_TO_YES(DEAL_II_WITH_TBB MAKEFILE_enablethreads)
+ COND_SET_TO_YES(DEAL_II_WITH_THREADS MAKEFILE_enablethreads)
COND_SET_TO_YES(DEAL_II_WITH_FUNCTIONPARSER MAKEFILE_enableparser)
COND_SET_TO_YES(BUILD_SHARED_LIBS MAKEFILE_enableshared)
diff --git a/deal.II/cmake/config/Make.global_options.in b/deal.II/cmake/config/Make.global_options.in
index a32109a464..cc00835e74 100644
--- a/deal.II/cmake/config/Make.global_options.in
+++ b/deal.II/cmake/config/Make.global_options.in
@@ -66,6 +66,15 @@ DEAL_II_USE_MPI = @MAKEFILE_MPI@
OBJEXT = o
EXEEXT =
+shared-lib-suffix = .so
+static-lib-suffix = .a
+
+ifeq ($(enable-shared),yes)
+ lib-suffix = $(shared-lib-suffix)
+else
+ lib-suffix = $(static-lib-suffix)
+endif
+
# set paths to all the libraries we need:
lib-deal2.o = @MAKEFILE_TARGETS_RELEASE@
diff --git a/deal.II/cmake/modules/FindP4EST.cmake b/deal.II/cmake/modules/FindP4EST.cmake
index 32a6f08497..c7cab6ed1f 100644
--- a/deal.II/cmake/modules/FindP4EST.cmake
+++ b/deal.II/cmake/modules/FindP4EST.cmake
@@ -179,9 +179,18 @@ IF(P4EST_FOUND)
STRING(REGEX REPLACE
"^[0-9]+\\.[0-9]+\\.([0-9]+).*$" "\\1"
P4EST_VERSION_SUBMINOR "${P4EST_VERSION}")
+
+ # Now for the patch number such as in 0.3.4.1. If there
+ # is no patch number, then the REGEX REPLACE will fail,
+ # setting P4EST_VERSION_PATCH to P4EST_VERSION. If that
+ # is the case, then set the patch number to zero
STRING(REGEX REPLACE
- "^[0-9]+\\.[0-9]+\\.[0-9]+\\.([0-9]+).*$" "\\1"
+ "^[0-9]+\\.[0-9]+\\.[0-9]+\\.([0-9]+)?.*$" "\\1"
P4EST_VERSION_PATCH "${P4EST_VERSION}")
+ IF(${P4EST_VERSION_PATCH} STREQUAL "${P4EST_VERSION}")
+ SET(P4EST_VERSION_PATCH "0")
+ ENDIF()
+
MARK_AS_ADVANCED(P4EST_DIR)
ELSE()
diff --git a/deal.II/doc/authors.html b/deal.II/doc/authors.html
index 494df11f73..03aae2a2e1 100644
--- a/deal.II/doc/authors.html
+++ b/deal.II/doc/authors.html
@@ -191,7 +191,8 @@
2.5 format.
Matthias Maier:
- CMake build system. Periodic boundary conditions. Enhancements throughout the library.
+ CMake build system for the library and the testsuite. Periodic
+ boundary conditions. Enhancements throughout the library.
Cataldo Manigrasso:
Work on the codimension-one meshes, DoFHandler, and finite
@@ -280,7 +281,8 @@
Packaging and configuration issues.
Bruno Turcksin:
- Extending deal.ii for 64-bit integer support, various other changes.
+ Extending deal.ii for 64-bit integer support. Converting the
+ testsuite to CMake. Various other changes.
Kainan Wang:
Extending deal.ii for 64-bit integer support.
diff --git a/deal.II/doc/development/cmake.html b/deal.II/doc/development/cmake.html
index 2683cc1d7e..86c663bfb9 100644
--- a/deal.II/doc/development/cmake.html
+++ b/deal.II/doc/development/cmake.html
@@ -411,6 +411,33 @@
-NOTFOUND
and may be set by hand.
+ Library conflicts
+ Caveat: if you have a set of standard libraries in the default
+ location, say /usr/lib and a set of
+ private versions of the same libraries, for instance because you
+ need different revisions sometimes, in your own library directory,
+ you may receive an error message of the form:
+
+
+CMake Warning at source/CMakeLists.txt:65 (ADD_LIBRARY):
+ Cannot generate a safe runtime search path for target deal_II.g because
+ files in some directories may conflict with libraries in implicit
+ directories:
+
+ runtime library [libtbb.so.2] in /usr/lib may be hidden by files in:
+ /my/private/lib
+
+ Some of these libraries may not be found correctly.
+
+
+This is not a problem of CMake or deal.II, but rather a general
+ Linux problem. In order to fix this, you have two options:
+
+- Choose all libraries either from your private directory or from
+ the standard one.
+- Install all your private library versions in different directories.
+
+
Manual override
Warning: Do not do this unless absolutely necessary!
diff --git a/deal.II/doc/development/testsuite.html b/deal.II/doc/development/testsuite.html
index e101dcf7ed..f99a72dbe7 100644
--- a/deal.II/doc/development/testsuite.html
+++ b/deal.II/doc/development/testsuite.html
@@ -99,8 +99,8 @@
The regression tests
- deal.II has a testsuite that, at the time this article is written (mid-2011),
- has some 2300 small programs (growing by roughly one per day) that we run
+ deal.II has a testsuite that, at the time this article is written (mid-2013),
+ has some 2,900 small programs (growing by roughly one per day) that we run
every time we make a
change to make sure that no existing functionality is broken. The
expected output is also stored in our subversion archive, and when you
@@ -110,7 +110,7 @@
in both cases to make sure that future changes do not break what
we have just checked in. In addition, some machines run the tests
every night and send the results back home; this is then converted
- into a webpage showing the status of our regression
tests.
diff --git a/deal.II/doc/external-libs/p4est.html b/deal.II/doc/external-libs/p4est.html
index 67fea34c44..ab17ff2a35 100644
--- a/deal.II/doc/external-libs/p4est.html
+++ b/deal.II/doc/external-libs/p4est.html
@@ -50,7 +50,7 @@
line like
- cmake -DP4EST_DIR=/path/to/installation/FAST -DDEAL_II_WITH_P4EST=ON -DDEAL_II_WITH_MPI=ON <...>
+ cmake -DP4EST_DIR=/path/to/installation -DDEAL_II_WITH_P4EST=ON -DDEAL_II_WITH_MPI=ON <...>
if the p4est library isn't picked up automatically. Note the presence
of /FAST at the end of the path necessary when using
diff --git a/deal.II/doc/news/changes.h b/deal.II/doc/news/changes.h
index e4ff71cee2..c918de3ea1 100644
--- a/deal.II/doc/news/changes.h
+++ b/deal.II/doc/news/changes.h
@@ -69,7 +69,23 @@ inconvenience this causes.
-
- Fixed: The ParameterHandler class can now deal with including one parameter
+ New: All vector classes now have functions extract_subvector_to()
+ that allow extracting not just a single value but a whole set.
+
+ (Fahad Alrasched, 2013/09/02)
+
+
+ -
+ Fixed: common/Make.global_options now exports enable-threads
+ correctly, furthermore, lib-suffix, shared-lib-suffix
+ and static-lib-suffix are now exported as well for better legacy
+ support.
+
+ (Matthias Maier, 2013/08/30)
+
+
+ -
+ New: The ParameterHandler class can now deal with including one parameter
file from another.
(Wolfgang Bangerth, 2013/08/25)
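As a minimal sketch of the extract_subvector_to() functions announced in the
changes.h entry above (the iterator-based signature shown here is an
assumption; the patch itself does not show the exact interface):
@code
#include <deal.II/lac/vector.h>

#include <vector>

void gather_values (const dealii::Vector<double> &v)
{
  // Indices of the entries we want to read in one call, instead of
  // looping over v(i) element by element.
  std::vector<unsigned int> indices;
  indices.push_back (0);
  indices.push_back (2);
  indices.push_back (5);

  std::vector<double> values (indices.size());
  v.extract_subvector_to (indices.begin(), indices.end(), values.begin());
}
@endcode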
diff --git a/deal.II/examples/step-10/step-10.cc b/deal.II/examples/step-10/step-10.cc
index ba42848228..17838b3c76 100644
--- a/deal.II/examples/step-10/step-10.cc
+++ b/deal.II/examples/step-10/step-10.cc
@@ -134,7 +134,7 @@ namespace Step10
// will generate Gnuplot output, which consists of a set of lines
// describing the mapped triangulation. By default, only one line
// is drawn for each face of the triangulation, but since we want
- // to explicitely see the effect of the mapping, we want to have
+ // to explicitly see the effect of the mapping, we want to have
// the faces in more detail. This can be done by passing the
// output object a structure which contains some flags. In the
// present case, since Gnuplot can only draw straight lines, we
diff --git a/deal.II/examples/step-11/doc/intro.dox b/deal.II/examples/step-11/doc/intro.dox
index d4671fcffd..50f8b60653 100644
--- a/deal.II/examples/step-11/doc/intro.dox
+++ b/deal.II/examples/step-11/doc/intro.dox
@@ -31,7 +31,7 @@ For this, there are various possibilities:
solutions of Laplace's equation are only in $H^1$, which does not allow for
the definition of point values because it is not a subset of the continuous
functions. Therefore, even though fixing one node is allowed for
- discretitized functions, it is not for continuous functions, and one can
+ discretized functions, it is not for continuous functions, and one can
often see this in a resulting error spike at this point in the numerical
solution.
@@ -57,7 +57,7 @@ various places where we use it; in almost all conceivable cases, you will only
consider the objects describing mappings as a black box which you need not
worry about, because their only uses seem to be to be passed to places deep
inside the library where functions know how to handle them (i.e. in the
-FEValues classes and their descendents).
+FEValues classes and their descendants).
The tricky point in this program is the use of the mean value
constraint. Fortunately, there is a class in the library which knows how to
diff --git a/deal.II/examples/step-11/doc/results.dox b/deal.II/examples/step-11/doc/results.dox
index c8b0c67829..d76412f842 100644
--- a/deal.II/examples/step-11/doc/results.dox
+++ b/deal.II/examples/step-11/doc/results.dox
@@ -40,5 +40,5 @@ higher order mappings; it is therefore clearly advantageous in this
case to use a higher order mapping, not because it improves the order
of convergence but just to reduce the constant before the convergence
order. On the other hand, using a cubic mapping only improves the
-result further insignicantly, except for the case of very coarse
+result further insignificantly, except for the case of very coarse
grids.
diff --git a/deal.II/examples/step-11/step-11.cc b/deal.II/examples/step-11/step-11.cc
index 28775f4d2d..223219e3a7 100644
--- a/deal.II/examples/step-11/step-11.cc
+++ b/deal.II/examples/step-11/step-11.cc
@@ -140,7 +140,7 @@ namespace Step11
// the mean value of the degrees of freedom on the boundary shall be
// zero. For this, we first want a list of those nodes which are actually
// at the boundary. The DoFTools class has a function that
- // returns an array of boolean values where true indicates
+ // returns an array of Boolean values where true indicates
// that the node is at the boundary. The second argument denotes a mask
// selecting which components of vector valued finite elements we want to
// be considered. This sort of information is encoded using the
diff --git a/deal.II/examples/step-13/doc/intro.dox b/deal.II/examples/step-13/doc/intro.dox
index 7362602221..de67cb0faf 100644
--- a/deal.II/examples/step-13/doc/intro.dox
+++ b/deal.II/examples/step-13/doc/intro.dox
@@ -43,7 +43,7 @@ a different module if they were not cleanly separated.
In previous examples, you have seen how the library itself is broken
-up into several complexes each building atop the underying ones, but
+up into several complexes each building atop the underlying ones, but
relatively independent of the other ones:
- the triangulation class complex, with associated iterator classes;
@@ -133,7 +133,7 @@ applications, there would of course be comments and class
documentation, which would bring that to maybe 1200 lines. Yet, compared to
the applications listed above, this is still small, as they are 20 to
25 times as large. For programs as large, a proper design right from
-the start is thus indispensible. Otherwise, it will have to be
+the start is thus indispensable. Otherwise, it will have to be
redesigned at one point in its life, once it becomes too large to be
manageable.
@@ -149,7 +149,7 @@ dependent application, the major concern is when to store data to disk
and when to reload it again; if this is not done in an organized
fashion, then you end up with data released too early, loaded too
late, or not released at all). Although the present example program
-thus draws from sevelar years of experience, it is certainly not
+thus draws from several years of experience, it is certainly not
without flaws in its design, and in particular might not be suited for
an application where the objective is different. It should serve as an
inspiration for writing your own application in a modular way, to
diff --git a/deal.II/examples/step-13/step-13.cc b/deal.II/examples/step-13/step-13.cc
index 24ab2a6d5d..8e88ee8ad8 100644
--- a/deal.II/examples/step-13/step-13.cc
+++ b/deal.II/examples/step-13/step-13.cc
@@ -800,7 +800,7 @@ namespace Step13
// The second of this pair of functions takes a range of cell iterators,
// and assembles the system matrix on this part of the domain. Since it's
// actions have all been explained in previous programs, we do not comment
- // on it any more, except for one pointe below.
+ // on it any more, except for one point below.
template
void
Solver::assemble_matrix (LinearSystem &linear_system,
@@ -861,7 +861,7 @@ namespace Step13
// will always be unlocked when we exit this part of the program,
// whether the operation completed successfully or not, whether the
// exit path was something we implemented willfully or whether the
- // function was exited by an exception that we did not forsee.
+ // function was exited by an exception that we did not foresee.
//
// deal.II implements the scoped locking pattern in the
// Treads::Mutex::ScopedLock class: it takes the mutex in the
@@ -1120,7 +1120,7 @@ namespace Step13
// @sect4{Local refinement by the Kelly error indicator}
// The second class implementing refinement strategies uses the Kelly
- // refinemet indicator used in various example programs before. Since this
+ // refinement indicator used in various example programs before. Since this
// indicator is already implemented in a class of its own inside the
// deal.II library, there is not much t do here except cal the function
// computing the indicator, then using it to select a number of cells for
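The scoped-locking idiom mentioned in the step-13 comments above can be
sketched as follows (a minimal example assuming a Threads::Mutex protecting a
shared accumulation; the function and variable names are illustrative only):
@code
#include <deal.II/base/thread_management.h>

dealii::Threads::Mutex assembly_mutex;
double                 global_sum = 0;

void add_local_contribution (const double local_value)
{
  // The mutex is acquired here and released automatically when lock
  // goes out of scope, no matter how this function is exited.
  dealii::Threads::Mutex::ScopedLock lock (assembly_mutex);
  global_sum += local_value;
}
@endcode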
diff --git a/deal.II/examples/step-14/doc/results.dox b/deal.II/examples/step-14/doc/results.dox
index b572963139..16af19d8e7 100644
--- a/deal.II/examples/step-14/doc/results.dox
+++ b/deal.II/examples/step-14/doc/results.dox
@@ -286,7 +286,7 @@ like this:
-Note the assymetry of the grids compared with those we obtained for
+Note the asymmetry of the grids compared with those we obtained for
the point evaluation, which is due to the directionality of the
x-derivative for which we tailored the refinement criterion.
diff --git a/deal.II/examples/step-14/step-14.cc b/deal.II/examples/step-14/step-14.cc
index d2d982b129..32d5710ce1 100644
--- a/deal.II/examples/step-14/step-14.cc
+++ b/deal.II/examples/step-14/step-14.cc
@@ -214,7 +214,7 @@ namespace Step14
// afterwards...
double point_derivative = 0;
- // ...then have some objects of which the meaning wil become clear
+ // ...then have some objects of which the meaning will become clear
// below...
QTrapez vertex_quadrature;
FEValues fe_values (dof_handler.get_fe(),
@@ -1465,7 +1465,7 @@ namespace Step14
// We will then implement two such classes, for the evaluation of a point
// value and of the derivative of the solution at that point. For these
// functionals we already have the corresponding evaluation objects, so they
- // are comlementary.
+ // are complementary.
namespace DualFunctional
{
// @sect4{The DualFunctionalBase class}
@@ -2371,7 +2371,7 @@ namespace Step14
// Next we have the function that is called to estimate the error on a
// subset of cells. The function may be called multiply if the library was
- // configured to use multi-threading. Here it goes:
+ // configured to use multithreading. Here it goes:
template
void
WeightedResidual::
@@ -2400,7 +2400,7 @@ namespace Step14
// Then calculate the start cell for this thread. We let the different
// threads run on interleaved cells, i.e. for example if we have 4
- // threads, then the first thread treates cells 0, 4, 8, etc, while the
+ // threads, then the first thread treats cells 0, 4, 8, etc, while the
// second threads works on cells 1, 5, 9, and so on. The reason is that
// it takes vastly more time to work on cells with hanging nodes than on
// regular cells, but such cells are not evenly distributed across the
@@ -2582,7 +2582,7 @@ namespace Step14
// element solution at the quadrature points on the other side of the
// face, i.e. from the neighboring cell.
//
- // For this, do a sanity check before: make sure that the neigbor
+ // For this, do a sanity check before: make sure that the neighbor
// actually exists (yes, we should not have come here if the neighbor
// did not exist, but in complicated software there are bugs, so better
// check this), and if this is not the case throw an error.
@@ -2595,7 +2595,7 @@ namespace Step14
// the name neighbor_neighbor:
const unsigned int
neighbor_neighbor = cell->neighbor_of_neighbor (face_no);
- // Then define an abbreviation for the neigbor cell, initialize the
+ // Then define an abbreviation for the neighbor cell, initialize the
// FEFaceValues object on that cell, and extract the
// gradients on that cell:
const active_cell_iterator neighbor = cell->neighbor(face_no);
@@ -2673,7 +2673,7 @@ namespace Step14
// Then find out which neighbor the present cell is of the adjacent
// cell. Note that we will operate on the children of this adjacent
// cell, but that their orientation is the same as that of their mother,
- // i.e. the neigbor direction is the same.
+ // i.e. the neighbor direction is the same.
const unsigned int
neighbor_neighbor = cell->neighbor_of_neighbor (face_no);
diff --git a/deal.II/examples/step-15/doc/intro.dox b/deal.II/examples/step-15/doc/intro.dox
index 344cd54f5f..2cebf0f9f1 100644
--- a/deal.II/examples/step-15/doc/intro.dox
+++ b/deal.II/examples/step-15/doc/intro.dox
@@ -84,7 +84,7 @@ boundary condition of this problem. Assuming that $u^{n}$ already has the
right boundary values, the Newton update $\delta u^{n}$ should have zero
boundary conditions, in order to have the right boundary condition after
adding both. In the first Newton step, we are starting with the solution
-$u^{0}\equiv 0$, the Newton update still has to deliever the right boundary
+$u^{0}\equiv 0$, the Newton update still has to deliver the right boundary
condition to the solution $u^{1}$.
@@ -113,7 +113,7 @@ Reducing this space to a finite dimensional space with basis $\left\{
@f]
Using the basis functions as test functions and defining $a_{n}:=\frac{1}
-{\sqrt{1+|\nabla u^{n}|^{2}}}$, we can rewrite the weak formualtion:
+{\sqrt{1+|\nabla u^{n}|^{2}}}$, we can rewrite the weak formulation:
@f[
\sum_{j=0}^{N-1}\left[ \left( \nabla \varphi_{i} , a_{n} \nabla \varphi_{j} \right) -
@@ -122,7 +122,7 @@ Using the basis functions as test functions and defining $a_{n}:=\frac{1}
\nabla u^{n}\right) \qquad \forall i=0,\dots ,N-1
@f]
-where the solution $\delta u^{n}$ is given by the coefficents $\delta U^{n}_{j}$.
+where the solution $\delta u^{n}$ is given by the coefficients $\delta U^{n}_{j}$.
This linear system of equations can be rewritten as:
@f[
diff --git a/deal.II/examples/step-15/doc/results.dox b/deal.II/examples/step-15/doc/results.dox
index 59c7ba414d..75e78a1104 100644
--- a/deal.II/examples/step-15/doc/results.dox
+++ b/deal.II/examples/step-15/doc/results.dox
@@ -72,7 +72,7 @@ solution. This yields the following set of images:
It is clearly visible, that the solution minimizes the surface
after each refinement. The solution converges to a picture one
-would imagine a soapbubble to be that is located inside a wire loop
+would imagine a soap bubble to be that is located inside a wire loop
that is bent like
the boundary. Also it is visible, how the boundary
is smoothed out after each refinement. On the coarse mesh,
diff --git a/deal.II/examples/step-15/step-15.cc b/deal.II/examples/step-15/step-15.cc
index 024f831d84..6dcc5c4c24 100644
--- a/deal.II/examples/step-15/step-15.cc
+++ b/deal.II/examples/step-15/step-15.cc
@@ -59,14 +59,14 @@
#include
#include
-// We will use adaptive mesh refinement between Newton interations. To do so,
+// We will use adaptive mesh refinement between Newton iterations. To do so,
// we need to be able to work with a solution on the new mesh, although it was
// computed on the old one. The SolutionTransfer class transfers the solution
// from the old to the new mesh:
#include
-// We then open a namepsace for this program and import everything from the
+// We then open a namespace for this program and import everything from the
// dealii namespace into it, as in previous programs:
namespace Step15
{
diff --git a/deal.II/examples/step-16/step-16.cc b/deal.II/examples/step-16/step-16.cc
index 86a8e4e6ba..33ddf4d578 100644
--- a/deal.II/examples/step-16/step-16.cc
+++ b/deal.II/examples/step-16/step-16.cc
@@ -62,7 +62,7 @@
#include
#include
-// These, now, are the include necessary for the multi-level methods. The
+// These, now, are the include necessary for the multilevel methods. The
// first two declare classes that allow us to enumerate degrees of freedom not
// only on the finest mesh level, but also on intermediate levels (that's what
// the MGDoFHandler class does) as well as allow to access this information
@@ -303,7 +303,7 @@ namespace Step16
// Now for the things that concern the multigrid data structures. First,
- // we resize the multi-level objects to hold matrices and sparsity
+ // we resize the multilevel objects to hold matrices and sparsity
// patterns for every level. The coarse level is zero (this is mandatory
// right now but may change in a future revision). Note that these
// functions take a complete, inclusive range here (not a starting index
@@ -424,7 +424,7 @@ namespace Step16
// integration core is the same as above, but the loop below will go over
// all existing cells instead of just the active ones, and the results must
// be entered into the correct matrix. Note also that since we only do
- // multi-level preconditioning, no right-hand side needs to be assembled
+ // multilevel preconditioning, no right-hand side needs to be assembled
// here.
//
// Before we go there, however, we have to take care of a significant amount
@@ -460,7 +460,7 @@ namespace Step16
// that are located on interfaces between adaptively refined levels, and
// those that lie on the interface but also on the exterior boundary of
// the domain. As in many other parts of the library, we do this by using
- // boolean masks, i.e. vectors of booleans each element of which indicates
+ // Boolean masks, i.e. vectors of Booleans each element of which indicates
// whether the corresponding degree of freedom index is an interface DoF
// or not. The MGConstraints already computed the information
// for us when we called initialize in setup_system().
@@ -615,7 +615,7 @@ namespace Step16
MGTransferPrebuilt > mg_transfer(hanging_node_constraints, mg_constrained_dofs);
// Now the prolongation matrix has to be built. This matrix needs to take
// the boundary values on each level into account and needs to know about
- // the indices at the refinement egdes. The MGConstraints
+ // the indices at the refinement edges. The MGConstraints
// knows about that so pass it as an argument.
mg_transfer.build_matrices(mg_dof_handler);
diff --git a/deal.II/examples/step-17/step-17.cc b/deal.II/examples/step-17/step-17.cc
index e6b5c907d9..b5ddcbb130 100644
--- a/deal.II/examples/step-17/step-17.cc
+++ b/deal.II/examples/step-17/step-17.cc
@@ -750,7 +750,7 @@ namespace Step17
// to the present process, but then we need to distribute the refinement
// indicators into a distributed vector so that all processes have the
// values of the refinement indicator for all cells. But then, in order for
- // each process to refine its copy of the mesh, they need to have acces to
+ // each process to refine its copy of the mesh, they need to have access to
// all refinement indicators locally, so they have to copy the global vector
// back into a local one. That's a little convoluted, but thinking about it
// quite straightforward nevertheless. So here's how we do it:
@@ -788,7 +788,7 @@ namespace Step17
// local_error_per_cell vector. The elements of this vector
// for cells not on the present process are zero. However, since all
// processes have a copy of a copy of the entire triangulation and need to
- // keep these copies in synch, they need the values of refinement
+ // keep these copies in sync, they need the values of refinement
// indicators for all cells of the triangulation. Thus, we need to
// distribute our results. We do this by creating a distributed vector
// where each process has its share, and sets the elements it has
diff --git a/deal.II/examples/step-18/doc/intro.dox b/deal.II/examples/step-18/doc/intro.dox
index 81ff567e44..2d54bda4a9 100644
--- a/deal.II/examples/step-18/doc/intro.dox
+++ b/deal.II/examples/step-18/doc/intro.dox
@@ -299,7 +299,7 @@ simply translated along. The dilational or compressional change produces a
corresponding stress update. However, the rotational component does not
necessarily induce a nonzero stress update (think, in 2d, for example of the
situation where $\Delta\mathbf{u}=(y, -x)^T$, with which $\varepsilon(\Delta
-\mathbf{u})=0$). Nevertheless, if the the material was pre-stressed in a certain
+\mathbf{u})=0$). Nevertheless, if the the material was prestressed in a certain
direction, then this direction will be rotated along with the material. To
this end, we have to define a rotation matrix $R(\Delta \mathbf{u}^n)$ that
describes, in each point the rotation due to the displacement increments. It
diff --git a/deal.II/examples/step-18/step-18.cc b/deal.II/examples/step-18/step-18.cc
index bcd2add729..200d61b0c6 100644
--- a/deal.II/examples/step-18/step-18.cc
+++ b/deal.II/examples/step-18/step-18.cc
@@ -383,7 +383,7 @@ namespace Step18
// the system, direct what has to be solved in each time step, a function
// that solves the linear system that arises in each timestep (and returns
// the number of iterations it took), and finally output the solution
- // vector on the currect mesh:
+ // vector on the correct mesh:
void create_coarse_grid ();
void setup_system ();
@@ -743,7 +743,7 @@ namespace Step18
// @sect4{The public interface}
- // The next step is the definition of constructors and descructors. There
+ // The next step is the definition of constructors and destructors. There
// are no surprises here: we choose linear and continuous finite elements
// for each of the dim vector components of the solution, and a
// Gaussian quadrature formula with 2 points in each coordinate
@@ -910,7 +910,7 @@ namespace Step18
// The next function is the one that sets up the data structures for a given
// mesh. This is done in most the same way as in step-17: distribute the
// degrees of freedom, then sort these degrees of freedom in such a way that
- // each processor gets a contiguous chunk of them. Note that subdivions into
+ // each processor gets a contiguous chunk of them. Note that subdivisions into
// chunks for each processor is handled in the functions that create or
// refine grids, unlike in the previous example program (the point where
// this happens is mostly a matter of taste; here, we chose to do it when
@@ -1009,7 +1009,7 @@ namespace Step18
// adaptively refined).
//
// With this data structure, we can then go to the PETSc sparse matrix and
- // tell it to pre-allocate all the entries we will later want to write to:
+ // tell it to preallocate all the entries we will later want to write to:
system_matrix.reinit (mpi_communicator,
sparsity_pattern,
local_dofs_per_process,
@@ -1680,13 +1680,13 @@ namespace Step18
// cell-@>vertex_dof_index(v,d) function that returns the index
// of the dth degree of freedom at vertex v of the
// given cell. In the present case, displacement in the k-th coordinate
- // direction corresonds to the kth component of the finite element. Using a
+ // direction corresponds to the k-th component of the finite element. Using a
// function like this bears a certain risk, because it uses knowledge of the
// order of elements that we have taken together for this program in the
// FESystem element. If we decided to add an additional
// variable, for example a pressure variable for stabilization, and happened
// to insert it as the first variable of the element, then the computation
- // below will start to produce non-sensical results. In addition, this
+ // below will start to produce nonsensical results. In addition, this
// computation rests on other assumptions: first, that the element we use
// has, indeed, degrees of freedom that are associated with vertices. This
// is indeed the case for the present Q1 element, as would be for all Qp
@@ -1773,7 +1773,7 @@ namespace Step18
// To put this into larger perspective, we note that if we had previously
// available stresses in our model (which we assume do not exist for the
// purpose of this program), then we would need to interpolate the field of
- // pre-existing stresses to the quadrature points. Likewise, if we were to
+ // preexisting stresses to the quadrature points. Likewise, if we were to
// simulate elasto-plastic materials with hardening/softening, then we would
// have to store additional history variables like the present yield stress
// of the accumulated plastic strains in each quadrature
@@ -1856,9 +1856,9 @@ namespace Step18
// displacement update so that the material in its new configuration
// accommodates for the difference between the external body and boundary
// forces applied during this time step minus the forces exerted through
- // pre-existing internal stresses. In order to have the pre-existing
+ // preexisting internal stresses. In order to have the preexisting
// stresses available at the next time step, we therefore have to update the
- // pre-existing stresses with the stresses due to the incremental
+ // preexisting stresses with the stresses due to the incremental
// displacement computed during the present time step. Ideally, the
// resulting sum of internal stresses would exactly counter all external
// forces. Indeed, a simple experiment can make sure that this is so: if we
@@ -1969,7 +1969,7 @@ namespace Step18
// three matrices should be symmetric, it is not due to floating
// point round off: we get an asymmetry on the order of 1e-16 of
// the off-diagonal elements of the result. When assigning the
- // result to a SymmetricTensor, the constuctor of
+ // result to a SymmetricTensor, the constructor of
// that class checks the symmetry and realizes that it isn't
// exactly symmetric; it will then raise an exception. To avoid
// that, we explicitly symmetrize the result to make it exactly
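The explicit symmetrization discussed in the step-18 comments above can be
written as a minimal sketch (the helper name is illustrative; symmetrize()
averages a tensor with its transpose):
@code
#include <deal.II/base/symmetric_tensor.h>
#include <deal.II/base/tensor.h>

template <int dim>
dealii::SymmetricTensor<2,dim>
make_exactly_symmetric (const dealii::Tensor<2,dim> &t)
{
  // Average t with its transpose to remove the O(1e-16) round-off
  // asymmetry before the result is stored as a SymmetricTensor.
  return dealii::symmetrize (t);
}
@endcode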
diff --git a/deal.II/examples/step-19/doc/results.dox b/deal.II/examples/step-19/doc/results.dox
index 233fbff3b1..39fe3634b4 100644
--- a/deal.II/examples/step-19/doc/results.dox
+++ b/deal.II/examples/step-19/doc/results.dox
@@ -33,7 +33,7 @@ set Output format = gnuplot
subsection DX output parameters
- # A boolean field indicating whether neighborship information between cells
+ # A Boolean field indicating whether neighborship information between cells
# is to be written to the OpenDX output file
set Write neighbors = true
end
@@ -93,7 +93,7 @@ subsection Eps output parameters
end
subsection Povray output parameters
- # Whether camera and lightling information should be put into an external
+ # Whether camera and lighting information should be put into an external
# file "data.inc" or into the POVRAY input file
set Include external file = true
diff --git a/deal.II/examples/step-19/step-19.cc b/deal.II/examples/step-19/step-19.cc
index 17bec2c4ab..3a61b279ae 100644
--- a/deal.II/examples/step-19/step-19.cc
+++ b/deal.II/examples/step-19/step-19.cc
@@ -142,7 +142,7 @@ namespace Step19
// this program, I realized that there aren't all that many parameters this
// program can usefully ask for, or better, it turned out: declaring and
// querying these parameters was already done centralized in one place of
- // the libray, namely the DataOutInterface class that handles
+ // the library, namely the DataOutInterface class that handles
// exactly this -- managing parameters for input and output.
//
// So the second function call in this function is to let the
@@ -196,7 +196,7 @@ namespace Step19
// with the subsection name, I like to use curly braces to force my editor
// to indent everything that goes into this sub-section by one level of
// indentation. In this sub-section, we shall have two entries, one that
- // takes a boolean parameter and one that takes a selection list of
+ // takes a Boolean parameter and one that takes a selection list of
// values, separated by the '|' character:
prm.enter_subsection ("Dummy subsection");
{
@@ -288,7 +288,7 @@ namespace Step19
// Finally, let us note that if we were interested in the values
// of the parameters declared above in the dummy subsection, we
// would write something like this to extract the value of the
- // boolean flag (the prm.get function returns the
+ // Boolean flag (the prm.get function returns the
// value of a parameter as a string, whereas the
// prm.get_X functions return a value already
// converted to a different type):
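A minimal sketch of the difference described above (the subsection and
parameter names here are illustrative, not the ones declared in the elided
code):
@code
#include <deal.II/base/parameter_handler.h>

#include <string>

void read_flag (dealii::ParameterHandler &prm)
{
  prm.enter_subsection ("Dummy subsection");
  // get() returns the raw string, get_bool() the value converted to bool.
  const std::string as_string = prm.get ("Write output");
  const bool        as_bool   = prm.get_bool ("Write output");
  prm.leave_subsection ();
}
@endcode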
diff --git a/deal.II/examples/step-2/doc/intro.dox b/deal.II/examples/step-2/doc/intro.dox
index bef310a19a..3c2412609a 100644
--- a/deal.II/examples/step-2/doc/intro.dox
+++ b/deal.II/examples/step-2/doc/intro.dox
@@ -13,7 +13,7 @@ The term "degree of freedom" is commonly used in the finite element community
to indicate two slightly different, but related things. The first is that we'd
like to represent the finite element solution as a linear combination of shape
function, in the form $u_h(\mathbf x) = \sum_{j=0}^{N-1} U_j \varphi_j(\mathbf
-x)$. Here, $U_j$ is a vector of expension coefficients. Because we don't know
+x)$. Here, $U_j$ is a vector of expansion coefficients. Because we don't know
their values yet (we will compute them as the solution of a linear or
nonlinear system), they are called "unknowns" or "degrees of freedom". The
second meaning of the term con be explained as follows: A mathematical
@@ -62,7 +62,7 @@ approximation of the Laplace equation leads to a sparse matrix since the
number of nonzero entries per row is five, and therefore independent of the
total size of the matrix.) Sparsity is one of the distinguishing feature of
the finite element method compared to, say, approximating the solution of a
-partial differential equation using a Taylor expension and matching
+partial differential equation using a Taylor expansion and matching
coefficients, or using a Fourier basis.
In practical terms, it is the sparsity of matrices that enables us to solve
diff --git a/deal.II/examples/step-2/doc/results.dox b/deal.II/examples/step-2/doc/results.dox
index af9891d631..d07d5bc643 100644
--- a/deal.II/examples/step-2/doc/results.dox
+++ b/deal.II/examples/step-2/doc/results.dox
@@ -4,7 +4,7 @@ The program has, after having been run, produced two sparsity
patterns. We can visualize them using GNUPLOT (one of the simpler visualization
programs; maybe not the easiest to use since it is command line driven, but
-also universally available on all linux and other unix-like systems):
+also universally available on all Linux and other Unix-like systems):
@code
examples/\step-2> gnuplot
diff --git a/deal.II/examples/step-2/step-2.cc b/deal.II/examples/step-2/step-2.cc
index 173841e1f7..fed915ca51 100644
--- a/deal.II/examples/step-2/step-2.cc
+++ b/deal.II/examples/step-2/step-2.cc
@@ -151,7 +151,7 @@ void make_grid (Triangulation<2> &triangulation)
// least as long as that of the DoFHandler; one way to make sure
// this is so is to make it static as well, in order to prevent its preemptive
// destruction. (However, the library would warn us if we forgot about this
-// and abort the program if that occured. You can check this, if you want, by
+// and abort the program if that occurred. You can check this, if you want, by
// removing the 'static' declaration.)
void distribute_dofs (DoFHandler<2> &dof_handler)
{
diff --git a/deal.II/examples/step-20/doc/results.dox b/deal.II/examples/step-20/doc/results.dox
index 29842cdfee..20ece4a3e2 100644
--- a/deal.II/examples/step-20/doc/results.dox
+++ b/deal.II/examples/step-20/doc/results.dox
@@ -40,7 +40,7 @@ flow first channels towards the center and then outward again. Consequently,
the x-velocity has to increase to get the flow through the narrow part,
something that can easily be seen in the left image. The middle image
represents inward flow in y-direction at the left end of the domain, and
-outward flow in y-directino at the right end of the domain.
+outward flow in y-direction at the right end of the domain.
@@ -180,11 +180,11 @@ The result concerning the convergence order is the same here.
Realistic flow computations for ground water or oil reservoir simulations will
not use a constant permeability. Here's a first, rather simple way to change
this situation: we use a permeability that decays very rapidly away from a
-central flowline until it hits a background value of 0.001. This is to mimick
+central flowline until it hits a background value of 0.001. This is to mimic
the behavior of fluids in sandstone: in most of the domain, the sandstone is
-homogenous and, while permeably to fluids, not overly so; on the other stone,
+homogeneous and, while permeable to fluids, not overly so; on the other stone,
the stone has cracked, or faulted, along one line, and the fluids flow much
-easier along this large crask. Here is how we could implement something like
+easier along this large crack. Here is how we could implement something like
this:
@code
template
diff --git a/deal.II/examples/step-20/step-20.cc b/deal.II/examples/step-20/step-20.cc
index a0e6c251d8..dc5e35de97 100644
--- a/deal.II/examples/step-20/step-20.cc
+++ b/deal.II/examples/step-20/step-20.cc
@@ -280,10 +280,10 @@ namespace Step20
// binding simpler elements together into one larger element. In the present
// case, we want to couple a single RT(degree) element with a single
// DQ(degree) element. The constructor to FESystem that does
- // this requires us to specity first the first base element (the
+ // this requires us to specify first the first base element (the
// FE_RaviartThomas object of given degree) and then the number
// of copies for this base element, and then similarly the kind and number
- // of FE_DGQ elements. Note that the Raviart Thomas element
+ // of FE_DGQ elements. Note that the Raviart-Thomas element
// already has dim vector components, so that the coupled
// element will have dim+1 vector components, the first
// dim of which correspond to the velocity variable whereas the
@@ -806,7 +806,7 @@ namespace Step20
// used here, the Gauss points happen to be superconvergence points in
// which the pointwise error is much smaller (and converges with higher
// order) than anywhere else. These are therefore not particularly good
- // points for ingration. To avoid this problem, we simply use a
+ // points for integration. To avoid this problem, we simply use a
// trapezoidal rule and iterate it degree+2 times in each
// coordinate direction (again as explained in step-7):
QTrapez<1> q_trapez;
@@ -913,7 +913,7 @@ namespace Step20
// The main function we stole from step-6 instead of step-4. It is almost
// equal to the one in step-6 (apart from the changed class names, of course),
// the only exception is that we pass the degree of the finite element space
-// to the constructor of the mixed laplace problem (here, we use zero-th order
+// to the constructor of the mixed Laplace problem (here, we use zero-th order
// elements).
int main ()
{
diff --git a/deal.II/examples/step-21/doc/intro.dox b/deal.II/examples/step-21/doc/intro.dox
index 85ce8f313a..2ee80de1b5 100644
--- a/deal.II/examples/step-21/doc/intro.dox
+++ b/deal.II/examples/step-21/doc/intro.dox
@@ -114,7 +114,7 @@ In summary, what we get are the following two equations:
Here, $p=p(\mathbf x, t), S=S(\mathbf x, t)$ are now time dependent
functions: while at every time instant the flow field is in
equilibrium with the pressure (i.e. we neglect dynamic
-accellerations), the saturation is transported along with the flow and
+accelerations), the saturation is transported along with the flow and
therefore changes over time, in turn affected the flow field again
through the dependence of the first equation on $S$.
@@ -411,8 +411,8 @@ one is harder to justify: on a microscopic level, most rocks are isotropic,
because they consist of a network of interconnected pores. However, this
microscopic scale is out of the range of today's computer simulations, and we
have to be content with simulating things on the scale of meters. On that
-scale, however, fluid transport typically happens through a network of crasks
-in the rock, rather than throuh pores. However, cracks often result from
+scale, however, fluid transport typically happens through a network of cracks
+in the rock, rather than through pores. However, cracks often result from
external stress fields in the rock layer (for example from tectonic faulting)
and the cracks are therefore roughly aligned. This leads to a situation where
the permeability is often orders of magnitude larger in the direction parallel
@@ -499,7 +499,7 @@ functions introduced at the end of the results section of @ref step_20
"step-20":
- A function that models a single, winding crack that snakes through the
- domain. In analgy to step-20, but taking care of the slightly
+ domain. In analogy to step-20, but taking care of the slightly
different geometry we have here, we describe this by the following function:
@f[
k(\mathbf x)
diff --git a/deal.II/examples/step-22/doc/intro.dox b/deal.II/examples/step-22/doc/intro.dox
index b57dbed1ba..ee94320e65 100644
--- a/deal.II/examples/step-22/doc/intro.dox
+++ b/deal.II/examples/step-22/doc/intro.dox
@@ -302,7 +302,7 @@ possibilities for imposing boundary conditions:
Despite this wealth of possibilities, we will only use Dirichlet and
-(homogenous) Neumann boundary conditions in this tutorial program.
+(homogeneous) Neumann boundary conditions in this tutorial program.
Discretization
@@ -539,7 +539,7 @@ matrix. It is therefore an operation that can efficiently also be computed in
represent an exact inverse of the matrix $A$. Consequently, preconditioning
with the ILU will still require more than one iteration, unlike
preconditioning with the sparse direct solver. The inner solver will therefore
-take more time when multiplying with the Schur complement, a tradeoff
+take more time when multiplying with the Schur complement, a trade-off
unavoidable.
@@ -678,7 +678,7 @@ conditions.
Frequently, a sparse matrix contains a substantial amount of elements that
-acutally are zero when we are about to start a linear solve. Such elements are
+actually are zero when we are about to start a linear solve. Such elements are
introduced when we eliminate constraints or implement Dirichlet conditions,
where we usually delete all entries in constrained rows and columns, i.e., we
set them to zero. The fraction of elements that are present in the sparsity
@@ -751,7 +751,7 @@ removed this bottleneck at the price of a slightly higher memory
consumption. Likewise, the implementation of the decomposition step in the
SparseILU class was very inefficient and has been replaced by one that is
about 10 times faster. Even the vmult function of the SparseILU has been
-improved to save about twenty precent of time. Small improvements were
+improved to save about twenty percent of time. Small improvements were
applied here and there. Moreover, the ConstraintMatrix object has been used
to eliminate a lot of entries in the sparse matrix that are eventually going
to be zero, see the section on using advanced
diff --git a/deal.II/examples/step-22/doc/results.dox b/deal.II/examples/step-22/doc/results.dox
index 5263299127..d544b2f21e 100644
--- a/deal.II/examples/step-22/doc/results.dox
+++ b/deal.II/examples/step-22/doc/results.dox
@@ -73,7 +73,7 @@ In the images below, we show the grids for the first six refinement
steps in the program. Observe how the grid is refined in regions
where the solution rapidly changes: On the upper boundary, we have
Dirichlet boundary conditions that are -1 in the left half of the line
-and 1 in the right one, so there is an aprupt change at $x=0$. Likewise,
+and 1 in the right one, so there is an abrupt change at $x=0$. Likewise,
there are changes from Dirichlet to Neumann data in the two upper
corners, so there is need for refinement there as well:
@@ -226,7 +226,7 @@ interesting graph:
-The isocountours shown here as well are those of the pressure
+The isocontours shown here as well are those of the pressure
variable, showing the singularity at the point of discontinuous
velocity boundary conditions.
@@ -315,8 +315,8 @@ the number of iterations for applying the inverse pressure mass matrix is
always around five, both in two and three dimensions.) To summarize, most work
is spent on solving linear systems with the same matrix $A$ over and over again.
What makes this look even worse is the fact that we
-actually invert a matrix that is about 95 precent the size of the total system
-matrix and stands for 85 precent of the non-zero entries in the sparsity
+actually invert a matrix that is about 95 percent the size of the total system
+matrix and stands for 85 percent of the non-zero entries in the sparsity
pattern. Hence, the natural question is whether it is reasonable to solve a
linear system with matrix $A$ for about 15 times when calculating the solution
to the block system.
@@ -357,7 +357,7 @@ unknowns.
Another idea to improve the situation even more would be to choose a
preconditioner that makes CG for the (0,0) matrix $A$ converge in a
mesh-independent number of iterations, say 10 to 30. We have seen such a
-canditate in step-16: multigrid.
+candidate in step-16: multigrid.
Block Schur complement preconditioner
Even with a good preconditioner for $A$, we still
@@ -492,7 +492,7 @@ are problems where one of the two candidates clearly outperforms the other, and
vice versa.
Wikipedia's
article on the GMRES method gives a comparative presentation.
-A more comprehensive and well-founded comparsion can be read e.g. in the book by
+A more comprehensive and well-founded comparison can be read e.g. in the book by
J.W. Demmel (Applied Numerical Linear Algebra, SIAM, 1997, section 6.6.6).
For our specific problem with the ILU preconditioner for $A$, we certainly need
@@ -756,7 +756,7 @@ example mid-ocean ridges). Of course, in such places, the geometry is more
complicated than the examples shown above, but it is not hard to accommodate
for that.
-For example, by using the folllowing modification of the boundary values
+For example, by using the following modification of the boundary values
function
@code
template
diff --git a/deal.II/examples/step-22/step-22.cc b/deal.II/examples/step-22/step-22.cc
index 7703cc0b28..ee118bbb5c 100644
--- a/deal.II/examples/step-22/step-22.cc
+++ b/deal.II/examples/step-22/step-22.cc
@@ -156,7 +156,7 @@ namespace Step22
// As in step-20 and most other example programs, the next task is to define
// the data for the PDE: For the Stokes problem, we are going to use natural
- // boundary values on parts of the boundary (i.e. homogenous Neumann-type)
+ // boundary values on parts of the boundary (i.e. homogeneous Neumann-type)
// for which we won't have to do anything special (the homogeneity implies
// that the corresponding terms in the weak form are simply zero), and
// boundary conditions on the velocity (Dirichlet-type) on the rest of the
@@ -379,7 +379,7 @@ namespace Step22
// Note that we initialize the triangulation with a MeshSmoothing argument,
// which ensures that the refinement of cells is done in a way that the
// approximation of the PDE solution remains well-behaved (problems arise if
- // grids are too unstructered), see the documentation of
+ // grids are too unstructured), see the documentation of
// Triangulation::MeshSmoothing for details.
template
StokesProblem::StokesProblem (const unsigned int degree)
@@ -518,7 +518,7 @@ namespace Step22
// require allocating more memory than necessary but isn't suitable for
// use as a basis of SparseMatrix or BlockSparseMatrix objects; in a
// second step we then copy this object into an object of
- // BlockSparsityPattern. This is entirely analgous to what we already did
+ // BlockSparsityPattern. This is entirely analogous to what we already did
// in step-11 and step-18.
//
// There is one snag again here, though: it turns out that using the
@@ -735,7 +735,7 @@ namespace Step22
// After the discussion in the introduction and the definition of the
// respective classes above, the implementation of the solve
- // function is rather straigt-forward and done in a similar way as in
+ // function is rather straight-forward and done in a similar way as in
// step-20. To start with, we need an object of the
// InverseMatrix class that represents the inverse of the
// matrix A. As described in the introduction, the inverse is generated with
@@ -790,7 +790,7 @@ namespace Step22
// An alternative that is cheaper to build, but needs more iterations
// afterwards, would be to choose a SSOR preconditioner with factor
// 1.2. It needs about twice the number of iterations, but the costs for
- // its generation are almost neglible.
+ // its generation are almost negligible.
SparseILU preconditioner;
preconditioner.initialize (system_matrix.block(1,1),
SparseILU::AdditionalData());
@@ -820,7 +820,7 @@ namespace Step22
// objects we already know - so we simply multiply $p$ by $B^T$, subtract
// the right hand side and multiply by the inverse of $A$. At the end, we
// need to distribute the constraints from hanging nodes in order to
- // obtain a constistent flow field:
+ // obtain a consistent flow field:
{
system_matrix.block(0,1).vmult (tmp, solution.block(1));
tmp *= -1;
diff --git a/deal.II/examples/step-23/doc/intro.dox b/deal.II/examples/step-23/doc/intro.dox
index 1139efd2bd..90becf9b9b 100644
--- a/deal.II/examples/step-23/doc/intro.dox
+++ b/deal.II/examples/step-23/doc/intro.dox
@@ -63,7 +63,7 @@ one of the usual ODE solvers (this is called the method of lines).
Both of these methods have advantages and disadvantages.
Traditionally, people have preferred the method of lines, since it
allows to use the very well developed machinery of high-order ODE
-solvers avaiable for the rather stiff ODEs resulting from this
+solvers available for the rather stiff ODEs resulting from this
approach, including step length control and estimation of the temporal
error.
@@ -388,7 +388,7 @@ $[-1,1]^2$ seven times uniformly, giving a mesh size of $h=\frac 1{64}$, which
is what we set the time step to. The fact that we set the time step and mesh
size individually in two different places is error prone: it is too easy to
refine the mesh once more but forget to also adjust the time step. @ref
-step_24 "step-24" shows a better way how to keep these things in synch.
+step_24 "step-24" shows a better way how to keep these things in sync.
The test case
diff --git a/deal.II/examples/step-23/step-23.cc b/deal.II/examples/step-23/step-23.cc
index 3fb30e3476..b0a50390d3 100644
--- a/deal.II/examples/step-23/step-23.cc
+++ b/deal.II/examples/step-23/step-23.cc
@@ -340,7 +340,7 @@ namespace Step23
sparsity_pattern.compress();
// Then comes a block where we have to initialize the 3 matrices we need
- // in the course of the program: the mass matrix, the laplace matrix, and
+ // in the course of the program: the mass matrix, the Laplace matrix, and
// the matrix $M+k^2\theta^2A$ used when solving for $U^n$ in each time
// step.
//
diff --git a/deal.II/examples/step-24/doc/intro.dox b/deal.II/examples/step-24/doc/intro.dox
index c3a23332cc..cbaf189e27 100644
--- a/deal.II/examples/step-24/doc/intro.dox
+++ b/deal.II/examples/step-24/doc/intro.dox
@@ -41,7 +41,7 @@ Let us assume that tissues have heterogeneous dielectric properties but
homogeneous acoustic properties. The basic acoustic generation equation in an
acoustically homogeneous medium can be described as follows: if $u$ is the
vector-valued displacement, then tissue certainly reacts to changes in
-pressure by accelleration:
+pressure by acceleration:
@f[
\rho \frac{\partial^2}{\partial t^2}u(t,\mathbf r) =
-\nabla p(t,\mathbf r).
@@ -168,7 +168,7 @@ boundary mass matrix
results from the use of absorbing boundary conditions.
Above two equations can be rewritten in a matrix form with the pressure and its derivative as
-an unknown vecotor:
+an unknown vector:
@f[
\left(\begin{array}{cc}
M & -k\theta M \\
diff --git a/deal.II/examples/step-24/doc/results.dox b/deal.II/examples/step-24/doc/results.dox
index d3139965bc..67101997f1 100644
--- a/deal.II/examples/step-24/doc/results.dox
+++ b/deal.II/examples/step-24/doc/results.dox
@@ -4,7 +4,7 @@ The program writes both graphical data for each time step as well as the
values evaluated at each detector location to disk. We then
draw them in plots. Experimental data were also collected for comparison.
Currently our experiments have only been done in two dimensions by
-circually scanning a single detector. The tissue sample here is a thin slice
+circularly scanning a single detector. The tissue sample here is a thin slice
in the X-Y plane (Z=0), and we assume that signals from other Z directions
won't contribute to the data. Consequently, we only have to compare
our experimental data with two dimensional simulated data.
@@ -136,7 +136,7 @@ plots), but are bulged out in certain directions. To make things worse, the
circular mesh we use (see for example step-6 for a view of the
coarse mesh) is not isotropic either. The net result is that the signal fronts
are not sinusoidal unless the mesh is sufficiently fine. The right image is a
-lot better in this respect, though artificts in the form of trailing spurious
+lot better in this respect, though artifacts in the form of trailing spurious
waves can still be seen.
diff --git a/deal.II/examples/step-25/doc/intro.dox b/deal.II/examples/step-25/doc/intro.dox
index 96f269cc3a..41a133cc8c 100644
--- a/deal.II/examples/step-25/doc/intro.dox
+++ b/deal.II/examples/step-25/doc/intro.dox
@@ -103,7 +103,7 @@ To this end, we can use Newton's method. Given the nonlinear equation $F(u^n) =
\f}
The iteration can be initialized with the old time step, i.e. $u^n_0 = u^{n-1}$,
and eventually it will produce a solution to the first equation of
-the split formulation (see above). For the time discretizaion of the
+the split formulation (see above). For the time discretization of the
sine-Gordon equation under consideration here, we have that
\f{eqnarray*}
F(u^n_l) &=& \left[ 1-k^2\theta^2\Delta \right] u^n_l -
@@ -147,7 +147,7 @@ terms of the nodal basis. Henceforth, we shall denote by a capital
letter the vector of coefficients (in the nodal basis) of a function
denoted by the same letter in lower case; e.g., $u^n = \sum_{i=1}^N
U^n_i \varphi_i$ where $U^n \in {R}^N$ and $u^n \in
-H^1(\Omega)$. Thus, the finite-dimensional version of the variational fomulation requires that we solve the following matrix equations at each time step:
+H^1(\Omega)$. Thus, the finite-dimensional version of the variational formulation requires that we solve the following matrix equations at each time step:
@f{eqnarray*}
F_h'(U^{n,l})\delta U^{n,l} &=& -F_h(U^{n,l}), \qquad
U^{n,l+1} = U^{n,l} + \delta U^{n,l}, \qquad U^{n,0} = U^{n-1}; \\
@@ -162,7 +162,7 @@ Above, the matrix $F_h'(\cdot)$ and the vector $F_h(\cdot)$ denote the discrete
- k^2\theta^2N(u^n_l,u^{n-1})
\f}
Again, note that the first matrix equation above is, in fact, the
-defition of an iterative procedure, so it is solved multiple times
+definition of an iterative procedure, so it is solved multiple times
until a stopping criterion is met. Moreover, $M$ is the mass matrix,
i.e. $M_{ij} = \left( \varphi_i,\varphi_j \right)_{\Omega}$, $A$ is
the Laplace matrix, i.e. $A_{ij} = \left( \nabla \varphi_i, \nabla
diff --git a/deal.II/examples/step-25/step-25.cc b/deal.II/examples/step-25/step-25.cc
index 794e09f870..c3ad5131f1 100644
--- a/deal.II/examples/step-25/step-25.cc
+++ b/deal.II/examples/step-25/step-25.cc
@@ -416,7 +416,7 @@ namespace Step25
// integrate these terms exactly. It is usually sufficient to just make sure
// that the right hand side is integrated up to the same order of accuracy
// as the discretization scheme is, but it may be possible to improve on the
- // constant in the asympotitic statement of convergence by choosing a more
+ // constant in the asymptotic statement of convergence by choosing a more
// accurate quadrature formula.
template <int dim>
void SineGordonProblem<dim>::compute_nl_term (const Vector<double> &old_data,
@@ -477,7 +477,7 @@ namespace Step25
// @sect4{SineGordonProblem::compute_nl_matrix}
// This is the second function dealing with the nonlinear scheme. It
- // computes the matrix $N(\cdot,\cdot)$, whicih appears in the nonlinear
+ // computes the matrix $N(\cdot,\cdot)$, which appears in the nonlinear
// term in the Jacobian of $F(\cdot)$. Just as compute_nl_term,
// we must allow this function to receive as input an "old" and a "new"
// solution, which we again call $w_{\mathrm{old}}$ and $w_{\mathrm{new}}$
@@ -610,7 +610,7 @@ namespace Step25
{
make_grid_and_dofs ();
- // To aknowledge the initial condition, we must use the function $u_0(x)$
+ // To acknowledge the initial condition, we must use the function $u_0(x)$
// to compute $U^0$. To this end, below we will create an object of type
// InitialValues; note that when we create this object (which
// is derived from the Function class), we set its internal
diff --git a/deal.II/examples/step-26/step-26.cc b/deal.II/examples/step-26/step-26.cc
index 2e16d32ad2..d047fc7b5c 100644
--- a/deal.II/examples/step-26/step-26.cc
+++ b/deal.II/examples/step-26/step-26.cc
@@ -330,7 +330,7 @@ namespace Step26
// SolutionTransfer class and we have to prepare the solution vectors that
// should be transferred to the new grid (we will lose the old grid once
// we have done the refinement so the transfer has to happen concurrently
- // with refinement). What we definetely need are the current and the old
+ // with refinement). What we definitely need are the current and the old
// temperature (BDF-2 time stepping requires two old solutions). Since the
// SolutionTransfer objects only support to transfer one object per dof
// handler, we need to collect the two temperature solutions in one data
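
In code, collecting the two temperature vectors for one SolutionTransfer object looks roughly like this. This is a sketch only, assuming the dof_handler, triangulation, solution and old_solution members of this program:

@code
// Pack the current and the old temperature into one container so that a
// single SolutionTransfer object can carry both across the refinement.
SolutionTransfer<dim> solution_trans (dof_handler);

std::vector<Vector<double> > transfer_in (2);
transfer_in[0] = solution;
transfer_in[1] = old_solution;

triangulation.prepare_coarsening_and_refinement ();
solution_trans.prepare_for_coarsening_and_refinement (transfer_in);
@endcode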
diff --git a/deal.II/examples/step-28/doc/results.dox b/deal.II/examples/step-28/doc/results.dox
index 1a48da48b7..edbacad443 100644
--- a/deal.II/examples/step-28/doc/results.dox
+++ b/deal.II/examples/step-28/doc/results.dox
@@ -61,7 +61,7 @@ in following figure.
-We see that the grid of thermal group is much finner than the one of fast group.
+We see that the grid of the thermal group is much finer than that of the fast group.
The solutions on these grids are, (Note: flux are normalized with total fission
source equal to 1)
@@ -77,4 +77,4 @@ The estimated ``exact'' k-effective = 0.906834721253 which is simply from last
mesh iteration of polynomial order 3 minus 2e-10. We see that h-adaptive calculations
deliver an algebraic convergence. And the higher polynomial order is, the faster mesh
iteration converges. In our problem, we need smaller number of DoFs to achieve same
-accuracy with higher polynoimal order.
+accuracy with higher polynomial order.
diff --git a/deal.II/examples/step-28/step-28.cc b/deal.II/examples/step-28/step-28.cc
index 475af6d47f..d2d542941f 100644
--- a/deal.II/examples/step-28/step-28.cc
+++ b/deal.II/examples/step-28/step-28.cc
@@ -152,7 +152,7 @@ namespace Step28
// arrays. It takes the number of energy groups as an argument (an throws an
// error if that value is not equal to two, since at presently only data for
// two energy groups is implemented; however, using this, the function
- // remains flexible and extendible into the future). In the member
+ // remains flexible and extendable into the future). In the member
// initialization part at the beginning, it also resizes the arrays to their
// correct sizes.
//
@@ -506,7 +506,7 @@ namespace Step28
ConstraintMatrix hanging_node_constraints;
- // @sect5{Private member functionss}
+ // @sect5{Private member functions}
//
// There is one private member function in this class. It recursively
// walks over cells of two meshes to compute the cross-group right hand
@@ -800,7 +800,7 @@ namespace Step28
// groups. First we call get_finest_common_cells to obtain this
// list of pairs of common cells from both meshes. Both cells in a pair may
// not be active but at least one of them is. We then hand each of these
- // cell pairs off to a function tha computes the right hand side terms
+ // cell pairs off to a function that computes the right hand side terms
// recursively.
//
// Note that ingroup coupling is handled already before, so we exit the
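
The loop sketched here might look as follows. GridTools::get_finest_common_cells is the real library function; the name of the recursive helper and the g_prime argument are hypothetical placeholders for the per-cell-pair machinery described above:

@code
// Walk the list of finest common cells of the two meshes and hand each
// pair to a (hypothetical) recursive helper that assembles the
// cross-group right hand side contributions.
typedef
  std::list<std::pair<typename DoFHandler<dim>::cell_iterator,
                      typename DoFHandler<dim>::cell_iterator> >
  CellList;

const CellList cell_list
  = GridTools::get_finest_common_cells (dof_handler,
                                        g_prime.dof_handler);

for (typename CellList::const_iterator cell_pair = cell_list.begin();
     cell_pair != cell_list.end(); ++cell_pair)
  assemble_cross_group_rhs_recursive (g_prime,
                                      cell_pair->first,
                                      cell_pair->second);
@endcode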
diff --git a/deal.II/examples/step-29/doc/intro.dox b/deal.II/examples/step-29/doc/intro.dox
index 03f347e791..32c62b84e3 100644
--- a/deal.II/examples/step-29/doc/intro.dox
+++ b/deal.II/examples/step-29/doc/intro.dox
@@ -96,7 +96,7 @@ boundary condition will yield partial reflections, i.e. only parts of the wave
will pass through the boundary as if it wasn't here whereas the remaining
fraction of the wave will be reflected back into the domain.
-If we are willing to accept this as a suffient approximation to an absorbing boundary we finally arrive at the following problem for $u$:
+If we are willing to accept this as a sufficient approximation to an absorbing boundary we finally arrive at the following problem for $u$:
@f{eqnarray*}
-\omega^2 u - c^2\Delta u &=& 0, \qquad x\in\Omega,\\
c (n\cdot\nabla u) + i\,\omega\,u &=&0, \qquad x\in\Gamma_2,\\
diff --git a/deal.II/examples/step-29/doc/results.dox b/deal.II/examples/step-29/doc/results.dox
index cf9a8790dc..68c64e64bb 100644
--- a/deal.II/examples/step-29/doc/results.dox
+++ b/deal.II/examples/step-29/doc/results.dox
@@ -160,7 +160,7 @@ and the effort to solve a banded linear system using LU decomposition
is ${\cal O}(BN)$. This also explains why the program does run in 3d
as well (after changing the dimension on the
UltrasoundProblem object), but scales very badly and
-takes extraordinate patience before it finishes solving the linear
+takes extraordinary patience before it finishes solving the linear
system on a mesh with appreciable resolution, even though all the
other parts of the program scale very nicely.
diff --git a/deal.II/examples/step-29/step-29.cc b/deal.II/examples/step-29/step-29.cc
index 5278c7a1c2..e581ff9685 100644
--- a/deal.II/examples/step-29/step-29.cc
+++ b/deal.II/examples/step-29/step-29.cc
@@ -330,7 +330,7 @@ namespace Step29
{}
- // The actual prostprocessing happens in the following function. Its inputs
+ // The actual postprocessing happens in the following function. Its inputs
// are a vector representing values of the function (which is here
// vector-valued) representing the data vector given to
// DataOut::add_data_vector, evaluated at all evaluation points where we
@@ -634,7 +634,7 @@ namespace Step29
// At this point, it is important to keep in mind that we are
// dealing with a finite element system with two
// components. Due to the way we constructed this FESystem,
- // namely as the cartesian product of two scalar finite
+ // namely as the Cartesian product of two scalar finite
// element fields, each shape function has only a single
// nonzero component (they are, in deal.II lingo, @ref
// GlossPrimitive "primitive"). Hence, each shape function
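
In practice, the single nonzero component of such a primitive shape function can be queried as follows. This is a sketch, assuming the fe and dofs_per_cell objects of the usual assembly loop:

@code
// For a primitive FESystem, each shape function i has exactly one
// nonzero vector component; system_to_component_index reports which.
for (unsigned int i=0; i<dofs_per_cell; ++i)
  {
    const unsigned int component_i
      = fe.system_to_component_index(i).first;
    // ...assemble only those terms in which component_i participates...
  }
@endcode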
diff --git a/deal.II/examples/step-3/doc/intro.dox b/deal.II/examples/step-3/doc/intro.dox
index 6286abe7bc..741e2e2d85 100644
--- a/deal.II/examples/step-3/doc/intro.dox
+++ b/deal.II/examples/step-3/doc/intro.dox
@@ -264,18 +264,18 @@ Secondly, let's look at the member functions. These, as well, already form the
common structure that almost all following tutorial programs will use:
- make_grid(): This is what one could call a
- pre-processing function. As its name suggests, it sets up the
+ preprocessing function. As its name suggests, it sets up the
object that stores the triangulation. In later examples, it could also
deal with boundary conditions, geometries, etc.
- setup_system(): This then is the function in which all the
other data structures are set up that are needed to solve the
problem. In particular, it will initialize the DoFHandler object and
correctly size the various objects that have to do with the linear
- algebra. This function is often separated from the pre-processing
+ algebra. This function is often separated from the preprocessing
function above because, in a time dependent program, it may be called
at least every few time steps whenever the mesh
is adaptively refined (something we will see how to do in step-6). On
- the other hand, setting up the mesh itself in the pre-processing
+ the other hand, setting up the mesh itself in the preprocessing
function above is done only once at the beginning of the program and
is, therefore, separated into its own function.
- assemble_system(): This, then is where the contents of the
diff --git a/deal.II/examples/step-3/doc/results.dox b/deal.II/examples/step-3/doc/results.dox
index c5d2e96c05..50012fae17 100644
--- a/deal.II/examples/step-3/doc/results.dox
+++ b/deal.II/examples/step-3/doc/results.dox
@@ -120,7 +120,7 @@ suggestions:
values for those faces that have zero boundary indicator, and leave
those faces alone that have a different boundary indicator. What
this then does is to impose Dirichlet boundary conditions on the
- former, and homogenous Neumann conditions on the latter (i.e. zero
+ former, and homogeneous Neumann conditions on the latter (i.e. zero
normal derivative of the solution, unless one adds additional terms
to the right hand side of the variational equality that deal with
potentially non-zero Neumann conditions). You will see this if you
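
As a concrete illustration of this suggestion, imposing Dirichlet values only on faces with boundary indicator zero could look like the following sketch, written in the spirit of step-3's existing boundary value code (the zero function and 2d dimension are assumptions for the example):

@code
// Interpolate boundary values only on faces carrying boundary indicator 0;
// faces with other indicators are left alone and therefore end up with
// homogeneous Neumann conditions through the weak form.
std::map<types::global_dof_index,double> boundary_values;
VectorTools::interpolate_boundary_values (dof_handler,
                                          0,                  // boundary indicator
                                          ZeroFunction<2>(),
                                          boundary_values);
MatrixTools::apply_boundary_values (boundary_values,
                                    system_matrix,
                                    solution,
                                    system_rhs);
@endcode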
diff --git a/deal.II/examples/step-30/doc/intro.dox b/deal.II/examples/step-30/doc/intro.dox
index dc8ea2fa18..3e21d47e34 100644
--- a/deal.II/examples/step-30/doc/intro.dox
+++ b/deal.II/examples/step-30/doc/intro.dox
@@ -192,7 +192,7 @@ affects some of the most fundamental assumptions. Consequently, some of the
usual code found in application programs will need modifications to exploit
the features of meshes which were created using anisotropic
refinement. For those interested in how deal.II evolved, it may be of
-interest that the losening of such invariants required some
+interest that the loosening of such invariants required some
incompatible changes. For example, the library used to have a member
GeometryInfo::children_per_cell that specified how many children
a cell has once it is refined. For isotropic refinement, this number
@@ -202,7 +202,7 @@ does not exist, as is can be either two or four in 2D and two, four or eight in
consequently been removed. It has now been replaced by
GeometryInfo::max_children_per_cell which specifies the
maximum number of children a cell can have. How many children a
-refined cell has was previously available as static informtion, but
+refined cell has was previously available as static information, but
now it depends on the actual refinement state of a cell and can be
retrieved using the function call cell-@>n_children(),
a call that works equally well for both isotropic and anisotropic
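
For example, a loop over the children of a refined cell now reads roughly as follows (a sketch; do_something stands for whatever per-child operation is needed):

@code
// Works for both isotropic and anisotropic refinement: n_children()
// returns however many children this particular cell actually has.
if (cell->has_children())
  for (unsigned int c=0; c<cell->n_children(); ++c)
    do_something (cell->child(c));
@endcode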
diff --git a/deal.II/examples/step-30/doc/results.dox b/deal.II/examples/step-30/doc/results.dox
index 9c928eea18..a98bb187c4 100644
--- a/deal.II/examples/step-30/doc/results.dox
+++ b/deal.II/examples/step-30/doc/results.dox
@@ -146,6 +146,6 @@ It might seem that the necessary alignment of anisotropic features and the
coarse mesh can decrease performance significantly for real world
problems. However, that is not always the case. Considering boundary layers in
compressible viscous flows, for example, the mesh is always aligned with the
-anisotropic features, thus anisotropic refinement will almost alway increase the
+anisotropic features, thus anisotropic refinement will almost always increase the
efficiency of computations on adapted grids for these cases.
diff --git a/deal.II/examples/step-30/step-30.cc b/deal.II/examples/step-30/step-30.cc
index bb1397c71c..0afa9f2543 100644
--- a/deal.II/examples/step-30/step-30.cc
+++ b/deal.II/examples/step-30/step-30.cc
@@ -106,7 +106,7 @@ namespace Step30
// in the left part of the domain at a velocity that matches the one coming
// in from the right. In the circular part the magnitude of the flow
// velocity is proportional to the distance from the origin. This is a
- // difference to step-12, where the magnitude was 1 evereywhere. the new
+ // difference to step-12, where the magnitude was 1 everywhere. The new
// definition leads to a linear variation of $\beta$ along each given face
// of a cell. On the other hand, the solution $u(x,y)$ is exactly the same
// as before.
@@ -385,7 +385,7 @@ namespace Step30
anisotropic(anisotropic),
// As beta is a linear function, we can choose the degree of the
// quadrature for which the resulting integration is correct. Thus, we
- // choose to use degree+1 gauss points, which enables us to
+ // choose to use degree+1 Gauss points, which enables us to
// integrate exactly polynomials of degree 2*degree+1, enough
// for all the integrals we will perform in this program.
quadrature (degree+1),
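
The reasoning behind the degree+1 choice is the usual exactness property of Gauss quadrature; as a reminder, and only as a sketch rather than a quotation from the program:

@code
// An n-point Gauss formula integrates 1d polynomials of degree 2n-1
// exactly, so n = degree+1 points suffice for the degree 2*degree+1
// integrands that appear here.
QGauss<dim>   quadrature      (degree+1);
QGauss<dim-1> face_quadrature (degree+1);
@endcode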
@@ -507,7 +507,7 @@ namespace Step30
// Case b), we decide that there are finer cells as neighbors
// by asking the face, whether it has children. if so, then
// there must also be finer cells which are children or
- // farther offsprings of our neighbor.
+ // farther offspring of our neighbor.
if (face->has_children())
{
// We need to know, which of the neighbors faces points in
@@ -576,7 +576,7 @@ namespace Step30
(neighbor->level() < cell->level() &&
neighbor->index() == cell->index())))
{
- // Here we know, that the neigbor is not coarser so we
+ // Here we know that the neighbor is not coarser, so we
// can use the usual @p neighbor_of_neighbor
// function. However, we could also use the more
// general @p neighbor_face_no function.
@@ -684,9 +684,9 @@ namespace Step30
triangulation.execute_coarsening_and_refinement ();
}
- // Once an error indicator has been evaluated and the cells with largerst
+ // Once an error indicator has been evaluated and the cells with largest
// error are flagged for refinement we want to loop over the flagged cells
- // again to decide whether they need isotropic refinemnt or whether
+ // again to decide whether they need isotropic refinement or whether
// anisotropic refinement is more appropriate. This is the anisotropic jump
// indicator explained in the introduction.
template <int dim>
@@ -706,7 +706,7 @@ namespace Step30
endc=dof_handler.end();
for (; cell!=endc; ++cell)
- // We only need to consider cells which are flaged for refinement.
+ // We only need to consider cells which are flagged for refinement.
if (cell->refine_flag_set())
{
Point jump;
@@ -724,7 +724,7 @@ namespace Step30
std::vector u (fe_v_face.n_quadrature_points);
std::vector u_neighbor (fe_v_face.n_quadrature_points);
- // The four cases of different neighbor relations senn in
+ // The four cases of different neighbor relations seen in
// the assembly routines are repeated much in the same way
// here.
if (face->has_children())
@@ -741,7 +741,7 @@ namespace Step30
// present subface...
typename DoFHandler::cell_iterator neighbor_child = cell->neighbor_child_on_subface(face_no,subface_no);
Assert (!neighbor_child->has_children(), ExcInternalError());
- // ... and reinit the respective FEFaceValues und
+ // ... and reinit the respective FEFaceValues and
// FESubFaceValues objects.
fe_v_subface.reinit (cell, face_no, subface_no);
fe_v_face_neighbor.reinit (neighbor_child, neighbor2);
@@ -749,7 +749,7 @@ namespace Step30
fe_v_subface.get_function_values(solution2, u);
fe_v_face_neighbor.get_function_values(solution2, u_neighbor);
// as well as the quadrature weights, multiplied by
- // the jacobian determinant.
+ // the Jacobian determinant.
const std::vector &JxW = fe_v_subface.get_JxW_values ();
// Now we loop over all quadrature points
for (unsigned int x=0; xdim components.
jump[face_no/2]+=std::fabs(u[x]-u_neighbor[x])*JxW[x];
// We also sum up the scaled weights to obtain
@@ -844,8 +844,8 @@ namespace Step30
// Now we loop over the dim
coordinate directions of
// the unit cell and compare the average jump over the faces
- // orthogional to that direction with the average jumnps over faces
- // orthogonal to the remining direction(s). If the first is larger
+ // orthogonal to that direction with the average jumps over faces
+ // orthogonal to the remaining direction(s). If the first is larger
// than the latter by a given factor, we refine only along hat
// axis. Otherwise we leave the refinement flag unchanged, resulting
// in isotropic refinement.
diff --git a/deal.II/examples/step-31/doc/intro.dox b/deal.II/examples/step-31/doc/intro.dox
index e14e4c87b5..9f11f04b81 100644
--- a/deal.II/examples/step-31/doc/intro.dox
+++ b/deal.II/examples/step-31/doc/intro.dox
@@ -77,7 +77,7 @@ sometimes, as in the case of rock minerals at temperatures close to their
melting point, $\eta$ may change by orders of magnitude over the typical range
of temperatures.
-We note that the Stokes equation above could be non-dimensionalized by
+We note that the Stokes equation above could be nondimensionalized by
introducing the Rayleigh
number $\mathrm{Ra}=\frac{\|g\| \beta \rho}{\eta \kappa} \delta T L^3$ using a
@@ -476,7 +476,7 @@ diffusion into the equations we would like to solve. Note that the
numerical viscosity $\nu(T)$ is temperature-dependent, so the equation
we want to solve is nonlinear in T — not what one desires from a
simple method to stabilize an equation, and even less so if we realize
-that $\nu(T)$ is non-differentiable in T. However, there is no
+that $\nu(T)$ is nondifferentiable in T. However, there is no
reason to despair: we still have to discretize in time and we can
treat the term explicitly.
@@ -551,7 +551,7 @@ artificial diffusion operates on the extrapolated
temperature at the current time in the same way as we have discussed
the advection works in the section on time stepping.
-The form for non-uniform time steps that we will have to use in
+The form for nonuniform time steps that we will have to use in
reality is a bit more complicated (which is why we showed the simpler
form above first) and reads:
@f{eqnarray*}
@@ -993,7 +993,7 @@ boundary, we will require no-normal flux for the velocity
introduction of step-22 and fixes one component of the velocity
while allowing flow to be %parallel to the boundary. There remain
dim-1 components to be fixed, namely the tangential components of
-the normal stress; for these, we choose homogenous conditions which means that
+the normal stress; for these, we choose homogeneous conditions which means that
we do not have to anything special. Initial conditions are only necessary for
the temperature field, and we choose it to be constant zero.
@@ -1005,6 +1005,6 @@ are allowed. We therefore choose three spherical heat sources unequally spaced
close to the bottom of the domain, imitating three candles. The fluid located
at these sources, initially at rest, is then heated up and as the temperature
rises gains buoyancy, rising up; more fluid is dragged up and through the
-sources, leading to three hote plumes that rise up until they are captured by
+sources, leading to three hot plumes that rise up until they are captured by
the recirculation of fluid that sinks down on the outside, replacing the air
that rises due to heating.
diff --git a/deal.II/examples/step-31/doc/results.dox b/deal.II/examples/step-31/doc/results.dox
index 55b0db1520..36fe1bc8c2 100644
--- a/deal.II/examples/step-31/doc/results.dox
+++ b/deal.II/examples/step-31/doc/results.dox
@@ -513,7 +513,7 @@ if we state the CFL condition as the requirement that the time step be
small enough so that the distance transport advects in each time step
is no longer than one grid point away (which for $Q_1$ elements
is $h_K$, but for $Q_2$ elements is $h_K/2$). It turns out that $\beta$
-needs to be sligthly larger for obtaining stable results also late in
+needs to be slightly larger for obtaining stable results also late in
the simulation at times larger than 60, so we actually choose it as
$\beta = 0.034$ in the code.
diff --git a/deal.II/examples/step-31/step-31.cc b/deal.II/examples/step-31/step-31.cc
index 0d33264c19..c40b9bd5a6 100644
--- a/deal.II/examples/step-31/step-31.cc
+++ b/deal.II/examples/step-31/step-31.cc
@@ -88,12 +88,12 @@ namespace Step31
// same as in step-22. Regarding the details, though, there are some
// differences.
- // The first thing is that we don't set any non-homogenous boundary
+ // The first thing is that we don't set any nonhomogeneous boundary
// conditions on the velocity, since as is explained in the introduction we
// will use no-flux conditions $\mathbf{n}\cdot\mathbf{u}=0$. So what is
// left are dim-1 conditions for the tangential part of the
// normal component of the stress tensor, $\textbf{n} \cdot [p \textbf{1} -
- // \eta\varepsilon(\textbf{u})]$; we assume homogenous values for these
+ // \eta\varepsilon(\textbf{u})]$; we assume homogeneous values for these
// components, i.e. a natural boundary condition that requires no specific
// action (it appears as a zero term in the right hand side of the weak
// form).
@@ -246,7 +246,7 @@ namespace Step31
//
// Secondly, we catch any exceptions that the solver may have thrown. The
// reason is as follows: When debugging a program like this one
- // occasionally makes a mistake of passing an indefinite or non-symmetric
+ // occasionally makes a mistake of passing an indefinite or nonsymmetric
// matrix or preconditioner to the current class. The solver will, in that
// case, not converge and throw a run-time exception. If not caught here
// it will propagate up the call stack and may end up in
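
The pattern described here, namely converting a solver failure into an assertion at the point of failure, looks roughly as follows. This is a sketch only; the member names matrix, preconditioner, dst and src are placeholders for whatever the surrounding class provides:

@code
try
  {
    SolverControl solver_control (5000, 1e-8 * src.l2_norm());
    SolverCG<TrilinosWrappers::Vector> cg (solver_control);
    cg.solve (matrix, dst, src, preconditioner);
  }
catch (std::exception &e)
  {
    // If an indefinite or nonsymmetric matrix or preconditioner was
    // passed in, fail loudly right here instead of letting the
    // exception propagate further up the call stack.
    Assert (false, ExcMessage(e.what()));
  }
@endcode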
@@ -764,7 +764,7 @@ namespace Step31
// detail in the results section of this tutorial program. The second is the
// exponent $\alpha$; $\alpha=1$ appears to work fine for the current
// program, even though some additional benefit might be expected from
- // chosing $\alpha = 2$. Finally, there is one thing that requires special
+ // choosing $\alpha = 2$. Finally, there is one thing that requires special
// casing: In the first time step, the velocity equals zero, and the formula
// for $\nu|_K$ is not defined. In that case, we return $\nu|_K=5\cdot 10^3
// \cdot h_K$, a choice admittedly more motivated by heuristics than
@@ -950,7 +950,7 @@ namespace Step31
// dofs. Consequently, there will be no data written at positions of
// constrained degrees of freedom, so we can let the
// DoFTools::make_sparsity_pattern function omit these entries by setting
- // the last boolean flag to false. Once the sparsity pattern
+ // the last Boolean flag to false. Once the sparsity pattern
// is ready, we can use it to initialize the Trilinos matrices. Since the
// Trilinos matrices store the sparsity pattern internally, there is no
// need to keep the sparsity pattern around after the initialization of
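
Schematically, the sparsity pattern setup with constrained entries omitted reads as follows; this is a sketch along the lines of the setup described above, with dof_handler, constraints and matrix standing in for the corresponding members of this program:

@code
// Build the sparsity pattern, telling make_sparsity_pattern to skip
// entries for constrained degrees of freedom (last argument false),
// then hand it to the Trilinos matrix, which copies it internally.
CompressedSimpleSparsityPattern csp (dof_handler.n_dofs(),
                                     dof_handler.n_dofs());
DoFTools::make_sparsity_pattern (dof_handler, csp,
                                 constraints,
                                 /* keep_constrained_dofs = */ false);
matrix.reinit (csp);
@endcode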
@@ -1190,15 +1190,15 @@ namespace Step31
// Next, we set some more options of the AMG preconditioner. In
// particular, we need to tell the AMG setup that we use quadratic basis
// functions for the velocity matrix (this implies more nonzero elements
- // in the matrix, so that a more rubust algorithm needs to be chosen
+ // in the matrix, so that a more robust algorithm needs to be chosen
// internally). Moreover, we want to be able to control how the coarsening
// structure is build up. The way the Trilinos smoothed aggregation AMG
// does this is to look which matrix entries are of similar size as the
// diagonal entry in order to algebraically build a coarse-grid
// structure. By setting the parameter aggregation_threshold
- // to 0.02, we specify that all entries that are more than two precent of
+ // to 0.02, we specify that all entries that are more than two percent of
// size of some diagonal pivots in that row should form one coarse grid
- // point. This parameter is rather ad-hoc, and some fine-tuning of it can
+ // point. This parameter is rather ad hoc, and some fine-tuning of it can
// influence the performance of the preconditioner. As a rule of thumb,
// larger values of aggregation_threshold will decrease the
// number of iterations, but increase the costs per iteration. A look at
@@ -1208,7 +1208,7 @@ namespace Step31
//
// Finally, we also initialize the preconditioner for the inversion of the
// pressure mass matrix. This matrix is symmetric and well-behaved, so we
- // can chose a simple preconditioner. We stick with an incomple Cholesky
+ // can choose a simple preconditioner. We stick with an incomplete Cholesky
// (IC) factorization preconditioner, which is designed for symmetric
// matrices. We could have also chosen an SSOR preconditioner with
// relaxation factor around 1.2, but IC is cheaper for our example. We
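
Put together, the AMG and IC setup sketched in these paragraphs would look something like the following. The variable names and the matrix blocks are assumptions for illustration; the parameter values are the ones quoted above:

@code
// AMG for the velocity block: quadratic elements need a more robust
// internal setup, and aggregation_threshold = 0.02 steers the coarsening.
TrilinosWrappers::PreconditionAMG                 amg_preconditioner;
TrilinosWrappers::PreconditionAMG::AdditionalData amg_data;
amg_data.elliptic              = true;
amg_data.higher_order_elements = true;
amg_data.aggregation_threshold = 0.02;
amg_preconditioner.initialize (stokes_matrix.block(0,0), amg_data);

// Incomplete Cholesky for the symmetric, well-behaved pressure mass matrix.
TrilinosWrappers::PreconditionIC ic_preconditioner;
ic_preconditioner.initialize (pressure_mass_matrix);
@endcode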
@@ -1586,8 +1586,8 @@ namespace Step31
// Next comes the declaration of vectors to hold the old and older
// solution values (as a notation for time levels n-1 and
// n-2, respectively) and gradients at quadrature points of the
- // current cell. We also declarate an object to hold the temperature right
- // hande side values (gamma_values), and we again use
+ // current cell. We also declare an object to hold the temperature right
+ // hand side values (gamma_values), and we again use
// shortcuts for the temperature basis functions. Eventually, we need to
// find the temperature extrema and the diameter of the computational
// domain which will be used for the definition of the stabilization
@@ -1665,7 +1665,7 @@ namespace Step31
// according to the discussion in the introduction using the dedicated
// function. With that at hand, we can get into the loop over
// quadrature points and local rhs vector components. The terms here
- // are quite lenghty, but their definition follows the time-discrete
+ // are quite lengthy, but their definition follows the time-discrete
// system developed in the introduction of this program. The BDF-2
// scheme needs one more term from the old time step (and involves
// more complicated factors) than the backward Euler scheme that is
@@ -1824,7 +1824,7 @@ namespace Step31
// There is a snatch here. The formula contains a division by the maximum
// value of the velocity. However, at the start of the computation, we
// have a constant temperature field (we start with a constant
- // temperature, and it will be non-constant only after the first time step
+ // temperature, and it will be nonconstant only after the first time step
// during which the source acts). Constant temperature means that no
// buoyancy acts, and so the velocity is zero. Dividing by it will not
// likely lead to anything good.
@@ -2024,7 +2024,7 @@ namespace Step31
// SolutionTransfer class and we have to prepare the solution vectors that
// should be transferred to the new grid (we will lose the old grid once
// we have done the refinement so the transfer has to happen concurrently
- // with refinement). What we definetely need are the current and the old
+ // with refinement). What we definitely need are the current and the old
// temperature (BDF-2 time stepping requires two old solutions). Since the
// SolutionTransfer objects only support to transfer one object per dof
// handler, we need to collect the two temperature solutions in one data
diff --git a/deal.II/examples/step-32/doc/intro.dox b/deal.II/examples/step-32/doc/intro.dox
index 1620308066..da3dbb3875 100644
--- a/deal.II/examples/step-32/doc/intro.dox
+++ b/deal.II/examples/step-32/doc/intro.dox
@@ -237,7 +237,7 @@ So why is this an issue here, but not in step-31? The
reason back there is that everything was nicely balanced: velocities
were on the order of one, the pressure likewise, the viscosity was
one, and the domain had a diameter of $\sqrt{2}$. As a result, while
-non-sensical, nothing bad happened. On the other hand, as we will explain
+nonsensical, nothing bad happened. On the other hand, as we will explain
below, things here will not be that simply scaled: $\eta$ will be around
$10^{21}$, velocities on the order of $10^{-8}$, pressure around $10^8$, and
the diameter of the domain is $10^7$. In other words, the order of magnitude
@@ -293,7 +293,7 @@ $\frac{\eta}{L}$, and we will use this factor in the assembly of the system
matrix and preconditioner. Because it is annoying and error prone, we will
recover the unscaled pressure immediately following the solution of the linear
system, i.e., the solution vector's pressure component will immediately be
-un-scaled to retrieve the physical pressure. Since the solver uses the fact that
+unscaled to retrieve the physical pressure. Since the solver uses the fact that
we can use a good initial guess by extrapolating the previous solutions, we
also have to scale the pressure immediately before solving.
@@ -521,8 +521,8 @@ becomes unstable? Looking at the graphical output, we can see that
with the unreasonably coarse mesh chosen for these experiments, around
time $t=10^{15}$ seconds the plumes of hot material that have been
rising towards the cold outer boundary and have then spread sideways
-are starting to get close to each other, squezzing out the cold
-material inbetween. This creates a layer of cells into which fluids
+are starting to get close to each other, squeezing out the cold
+material in between. This creates a layer of cells into which fluid
flows from two opposite sides and flows out toward a third, apparently
a scenario that then produce these instabilities without sufficient
stabilization. Second: In step-31, we used
@@ -845,7 +845,7 @@ convection in the earth mantle: for that, more and more difficult
physics would have to be implemented, and several other aspects are
currently missing from this program as well. We will come back to this
issue in the results section again, but state for now that providing a
-realistic description is a goal of the Apect code in
+realistic description is a goal of the Aspect code in
development at the time of writing this.
As a reminder, let us again state the equations we want to solve are these:
@@ -887,7 +887,7 @@ the following quantities:
entry). In Kelvin, we therefore choose $T_0=(4000+273)\text{K}$,
$T_1=(500+273)\text{K}$ as boundary conditions at the inner and outer edge.
- In addition to this, we also have to specifiy some initial conditions for
+ In addition to this, we also have to specify some initial conditions for
the temperature field. The real temperature field of the earth is quite
complicated as a consequence of the convection that has been going on for
more than four billion years -- in fact, it is the properties of this
@@ -1081,7 +1081,7 @@ the following quantities:
@f]
- There are two problems with this, however: (i) The Earth is not homogenous,
+ There are two problems with this, however: (i) The Earth is not homogeneous,
i.e. the density $\rho$ depends on $\mathbf x$; in fact it is not even a
function that only depends on the radius $r=\|\mathbf x\|$. In reality, gravity therefore
does not always decrease as we get deeper: because the earth core is so much
@@ -1152,7 +1152,7 @@ the following quantities:
changes as temperature and pressure change: depending on temperature and
pressure, different crystal configurations are thermodynamically favored
over others, even if the chemical composition of the mantle were
- homogenous. For example, the common mantle material MgSiO3 exists
+ homogeneous. For example, the common mantle material MgSiO3 exists
in its perovskite
structure throughout most of the mantle, but in the lower mantle the
@@ -1213,7 +1213,7 @@ Compared to step-31, this program has a number of noteworthy differences:
program just because one wants to play with a single parameter (think, for
example, of parameter studies determining the best values of the
stabilization constants discussed above), in particular given that it takes
- a non-trivial amount of time to re-compile programs of the current size. To
+ a nontrivial amount of time to re-compile programs of the current size. To
just give an overview of the kinds of parameters we have moved from fixed
values into the input file, here is a listing of a typical
\step-32.prm file:
diff --git a/deal.II/examples/step-32/doc/results.dox b/deal.II/examples/step-32/doc/results.dox
index 000f778dcc..1bc42080b4 100644
--- a/deal.II/examples/step-32/doc/results.dox
+++ b/deal.II/examples/step-32/doc/results.dox
@@ -449,7 +449,7 @@ address to make the program more useful:
trivial. An additional complexity is introduced by the fact that one may
want to change the number of processors between runs, for example because
one may wish to continue computing on a mesh that is finer than the one used
- to pre-compute a starting temperature field at an intermediate time.
+ to precompute a starting temperature field at an intermediate time.
- Predictive postprocessing: The point of computations like this is
not simply to solve the equations. Rather, it is typically the exploration
diff --git a/deal.II/examples/step-32/step-32.cc b/deal.II/examples/step-32/step-32.cc
index e991399a3a..2e9dda22f7 100644
--- a/deal.II/examples/step-32/step-32.cc
+++ b/deal.II/examples/step-32/step-32.cc
@@ -105,7 +105,7 @@ namespace Step32
// In the following namespace, we define the various pieces of equation data
// that describe the problem. This corresponds to the various aspects of
- // making the problem at least slightly realistc and that were exhaustively
+ // making the problem at least slightly realistic and that were exhaustively
// discussed in the description of the testcase in the introduction.
//
// We start with a few coefficients that have constant values (the comment
@@ -229,7 +229,7 @@ namespace Step32
// block. The three code blocks of the vmult function implement
// the multiplications with the three blocks of this preconditioner matrix
// and should be self explanatory if you have read through step-31 or the
- // discussion of compositing solvers in step-20.
+ // discussion of composing solvers in step-20.
namespace LinearSolvers
{
template
@@ -2005,7 +2005,7 @@ namespace Step32
}
// Following this, we can compute constraints for the solution vectors,
- // including hanging node constraints and homogenous and inhomogenous
+ // including hanging node constraints and homogeneous and inhomogeneous
// boundary values for the Stokes and temperature fields. Note that as for
// everything else, the constraint objects can not hold all
// constraints on every processor. Rather, each processor needs to store
@@ -3242,7 +3242,7 @@ namespace Step32
// dof on a cell, the joint finite element knows to which equation component
// (velocity component, pressure, or temperature) it belongs – that's the
// information we need! So we step through all cells (with iterators into
- // all three DoFHandlers moving in synch), and for each joint cell dof, we
+ // all three DoFHandlers moving in sync), and for each joint cell dof, we
// read out that component using the FiniteElement::system_to_base_index
// function (see there for a description of what the various parts of its
// return value contain). We also need to keep track whether we're on a
diff --git a/deal.II/examples/step-33/doc/intro.dox b/deal.II/examples/step-33/doc/intro.dox
index acafeee5dc..c35476df7b 100644
--- a/deal.II/examples/step-33/doc/intro.dox
+++ b/deal.II/examples/step-33/doc/intro.dox
@@ -270,7 +270,7 @@ main() {
The output are the derivatives $\frac{\partial c(a,b)}{\partial a},
\frac{\partial c(a,b)}{\partial b}$ of $c(a,b)=2a+\cos(ab)$ at $a=1,b=2$.
-It should be noted that Sacado provides more auto-differentation capabilities than the small subset
+It should be noted that Sacado provides more auto-differentiation capabilities than the small subset
used in this program. However, understanding the example above is
enough to understand the use of Sacado in this Euler flow program.
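
For readers who want a self-contained reminder of the usage referred to here, the basic pattern mirrors the small example whose output is discussed above (a sketch, not a verbatim quotation of that example):

@code
#include <Sacado.hpp>
#include <iostream>

int main ()
{
  typedef Sacado::Fad::DFad<double> fad_double;

  fad_double a = 1;
  fad_double b = 2;
  a.diff (0, 2);                       // a is independent variable 0 of 2
  b.diff (1, 2);                       // b is independent variable 1 of 2

  const fad_double c = 2*a + cos(a*b); // c(a,b) = 2a + cos(ab)

  // value of c and its two partial derivatives at (a,b) = (1,2)
  std::cout << c.val() << ' '
            << c.dx(0) << ' '
            << c.dx(1) << std::endl;
}
@endcode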
@@ -285,7 +285,7 @@ on these vectors. Please view the commented code for more details on how these
within the example.
Adaptivity
-The example uses an ad-hoc refinement indicator that shows some usefulness in shock-type problems, and
+The example uses an ad hoc refinement indicator that shows some usefulness in shock-type problems, and
in the downhill flow example included. We refine according to the squared gradient of the density.
Hanging nodes are handled by computing the numerical flux across cells that are of differing
refinement levels, rather than using the ConstraintMatrix class as in
diff --git a/deal.II/examples/step-33/step-33.cc b/deal.II/examples/step-33/step-33.cc
index de46484541..0f7d82b82d 100644
--- a/deal.II/examples/step-33/step-33.cc
+++ b/deal.II/examples/step-33/step-33.cc
@@ -435,7 +435,7 @@ namespace Step33
{
// We prescribe the velocity (we are dealing with a particular
// component here so that the average of the velocities is
- // orthogonal to the surface normal. This creates sensitivies of
+ // orthogonal to the surface normal. This creates sensitivities of
// across the velocity components.
Sacado::Fad::DFad vdotn = 0;
for (unsigned int d = 0; d < dim; d++)
@@ -585,12 +585,12 @@ namespace Step33
// This is the only function worth commenting on. When generating graphical
// output, the DataOut and related classes will call this function on each
- // cell, with values, gradients, hessians, and normal vectors (in case we're
+ // cell, with values, gradients, Hessians, and normal vectors (in case we're
// working on faces) at each quadrature point. Note that the data at each
// quadrature point is itself vector-valued, namely the conserved
// variables. What we're going to do here is to compute the quantities we're
// interested in at each quadrature point. Note that for this we can ignore
- // the hessians ("dduh") and normal vectors; to avoid compiler warnings
+ // the Hessians ("dduh") and normal vectors; to avoid compiler warnings
// about unused variables, we comment out their names.
template
void
@@ -1039,7 +1039,7 @@ namespace Step33
// we allow up to max_n_boundaries boundary indicators to be
// used in the input file, and each of these boundary indicators can be
// associated with an inflow, outflow, or pressure boundary condition,
- // with inhomogenous boundary conditions being specified for each
+ // with inhomogeneous boundary conditions being specified for each
// component and each boundary indicator separately.
//
// The data structure used to store the boundary indicators is a bit
@@ -1355,7 +1355,7 @@ namespace Step33
// This final set of member variables (except for the object holding all
// run-time parameters at the very bottom and a screen output stream that
// only prints something if verbose output has been requested) deals with
- // the inteface we have in this program to the Trilinos library that
+ // the interface we have in this program to the Trilinos library that
// provides us with linear solvers. Similarly to including PETSc matrices
// in step-17, step-18, and step-19, all we need to do is to create a
// Trilinos sparse matrix instead of the standard deal.II class. The
@@ -1702,7 +1702,7 @@ namespace Step33
// whereas all the other ones remain dependent functions. These are
// precisely the local degrees of freedom just extracted. All calculations
// that reference them (either directly or indirectly) will accumulate
- // sensitivies with respect to these variables.
+ // sensitivities with respect to these variables.
//
// In order to mark the variables as independent, the following does the
// trick, marking independent_local_dof_values[i] as the
@@ -1801,7 +1801,7 @@ namespace Step33
// (\mathbf{z}_i)_{\text{component\_i}})_K$, where integrals are
// understood to be evaluated through summation over quadrature points.
//
- // We initialy sum all contributions of the residual in the positive
+ // We initially sum all contributions of the residual in the positive
// sense, so that we don't need to negative the Jacobian entries. Then,
// when we sum into the right_hand_side vector, we negate
// this residual.
@@ -2014,7 +2014,7 @@ namespace Step33
// Now assemble the face term in exactly the same way as for the cell
// contributions in the previous function. The only difference is that if
// this is an internal face, we also have to take into account the
- // sensitivies of the residual contributions to the degrees of freedom on
+ // sensitivities of the residual contributions to the degrees of freedom on
// the neighboring cell:
std::vector residual_derivatives (dofs_per_cell);
for (unsigned int i=0; i
void ConservationLaw::output_results () const
{
diff --git a/deal.II/examples/step-34/doc/intro.dox b/deal.II/examples/step-34/doc/intro.dox
index c7dd04c28b..8aeaebc2c9 100644
--- a/deal.II/examples/step-34/doc/intro.dox
+++ b/deal.II/examples/step-34/doc/intro.dox
@@ -99,7 +99,7 @@ gradient of a scalar function:
\mathbf{\tilde{v}}=\nabla\phi,
\f]
and so the second part of Euler equations above can be rewritten
-as the homogenous Laplace equation for the unknown $\phi$:
+as the homogeneous Laplace equation for the unknown $\phi$:
\f{align*}
\label{laplace}
\Delta\phi &= 0 \qquad &&\text{in}\ \mathbb{R}^n\backslash\Omega,
@@ -612,7 +612,7 @@ iterative solver, without providing any preconditioner.
If this were a production code rather than a demonstration of principles,
there are techniques that are available to not store full matrices but instead
store only those entries that are large and/or relevant. In the literature on
-boundary element methods, a plethory of methods is available that allows to
+boundary element methods, a plethora of methods is available that allows to
determine which elements are important and which are not, leading to a
significantly sparser representation of these matrices that also facilitates
rapid evaluations of the scalar product between vectors and matrices. This not
diff --git a/deal.II/examples/step-34/doc/results.dox b/deal.II/examples/step-34/doc/results.dox
index 47e65a30d2..dec4c54823 100644
--- a/deal.II/examples/step-34/doc/results.dox
+++ b/deal.II/examples/step-34/doc/results.dox
@@ -200,7 +200,7 @@ like
while in three dimensions we show first the potential on the surface,
-together with a contur plot,
+together with a contour plot,
diff --git a/deal.II/examples/step-34/step-34.cc b/deal.II/examples/step-34/step-34.cc
index b3c7d1c3cf..af97f95038 100644
--- a/deal.II/examples/step-34/step-34.cc
+++ b/deal.II/examples/step-34/step-34.cc
@@ -302,7 +302,7 @@ namespace Step34
// @sect4{BEMProblem::BEMProblem and BEMProblem::read_parameters}
- // The constructor initializes the variuous object in much the same way as
+ // The constructor initializes the various objects in much the same way as
// done in the finite element programs such as step-4 or step-6. The only
// new ingredient here is the ParsedFunction object, which needs, at
// construction time, the specification of the number of components.
@@ -609,7 +609,7 @@ namespace Step34
// the current cell, a deviation from the usual finite element
// integrals). The integral that we need to perform is singular if one
// of the local degrees of freedom is the same as the support point
- // $i$. A the beginning of the loop we therefore check wether this is
+ // $i$. At the beginning of the loop we therefore check whether this is
// the case, and we store which one is the singular index:
for (unsigned int i=0; idim solves are all taken care of and are
// scheduled to available processors: if your machine has more than one
diff --git a/deal.II/examples/step-36/doc/intro.dox b/deal.II/examples/step-36/doc/intro.dox
index 48d23ae0fd..c5e4aecf00 100644
--- a/deal.II/examples/step-36/doc/intro.dox
+++ b/deal.II/examples/step-36/doc/intro.dox
@@ -56,7 +56,7 @@ The basic equation of stationary quantum mechanics is the
Schrödinger equation which models the motion of particles in an
external potential $V(\mathbf x)$. The particle is described by a wave
function $\Psi(\mathbf x)$ that satisfies a relation of the
-(non-dimensionalized) form
+(nondimensionalized) form
@f{align*} [-\Delta + V(\mathbf x)]
\Psi(\mathbf x) &= \varepsilon \Psi(\mathbf x) \qquad &&\text{in}\
\Omega\quad, \\ \Psi &= 0 &&\text{on}\ \partial\Omega\quad.
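
Discretizing this with finite elements leads, in the usual way, to a generalized matrix eigenvalue problem; as a reminder of the structure involved (a sketch of the standard result, not a quotation from this program):

@f{align*}
  A \tilde\Psi &= \varepsilon M \tilde\Psi, &
  A_{ij} &= (\nabla\varphi_i,\nabla\varphi_j)_\Omega
            + (V\varphi_i,\varphi_j)_\Omega, &
  M_{ij} &= (\varphi_i,\varphi_j)_\Omega.
@f}

It is this generalized eigenvalue problem that the SLEPc interfaces discussed next operate on.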
@@ -105,7 +105,7 @@ longer has a single solution vector, but a whole set of vectors for
the various eigenfunctions we want to compute. Moreover, the
main function, which has the top-level control over
everything here, initializes and finalizes the interface to SLEPc and
-PETSc simultaneouly via SlepcInitialize and
+PETSc simultaneously via SlepcInitialize and
SlepFinalize.
- We use PETSc matrices and vectors as in step-17 and
diff --git a/deal.II/examples/step-36/doc/results.dox b/deal.II/examples/step-36/doc/results.dox
index 9684b72a4e..93eb53b18b 100644
--- a/deal.II/examples/step-36/doc/results.dox
+++ b/deal.II/examples/step-36/doc/results.dox
@@ -190,7 +190,7 @@ PETScWrappers and SLEPcWrappers and is suitable for running on serial
machine architecture. However, for larger grids and with a larger
number of degrees-of-freedom, we may want to run our application on
parallel architectures. A parallel implementation of the above code
-can be particularily useful here since the generalized eigenspectrum
+can be particularly useful here since the generalized eigenspectrum
problem is somewhat more expensive to solve than the standard problems
considered in most of the earlier tutorials. Fortunately, modifying the above
program to be MPI compliant is a relatively straightforward
diff --git a/deal.II/examples/step-37/doc/intro.dox b/deal.II/examples/step-37/doc/intro.dox
index 08add8cb9c..1cd4efa584 100644
--- a/deal.II/examples/step-37/doc/intro.dox
+++ b/deal.II/examples/step-37/doc/intro.dox
@@ -20,7 +20,7 @@ International Conference on e-Science, 2011.
This example shows how to implement a matrix-free method, that is, a
method that does not explicitly store the matrix elements, for a
second-order Poisson equation with variable coefficients on a
-hypercube. The eliptic equation will be solved with a multigrid
+hypercube. The elliptic equation will be solved with a multigrid
method.
The major motivation for matrix-free methods is the fact that today
@@ -280,7 +280,7 @@ v_\mathrm{cell}.
for (unsigned int q=0; q temp;
for (unsigned int d=0; d MatrixFree instance for the problem. Also, the
// coefficient is evaluated. For this, we need to activate the update flag
diff --git a/deal.II/examples/step-38/doc/intro.dox b/deal.II/examples/step-38/doc/intro.dox
index 0985bc59b7..8ff7796b1e 100644
--- a/deal.II/examples/step-38/doc/intro.dox
+++ b/deal.II/examples/step-38/doc/intro.dox
@@ -224,7 +224,7 @@ looks. In essence, we will simply declare an appropriate object of type
MappingQ that will automatically obtain the boundary description from the
Triangulation. The mapping object will then be passed to the appropriate
functions, and we will get a boundary description for half circles or half
-spheres that is pre-defined in the library.
+spheres that is predefined in the library.
The rest of the program follows closely step-4 and, as far as computing the
error, step-7. Some aspects of this program, in particular the use of two
diff --git a/deal.II/examples/step-38/doc/results.dox b/deal.II/examples/step-38/doc/results.dox
index b52ea38440..eb51b19fc8 100644
--- a/deal.II/examples/step-38/doc/results.dox
+++ b/deal.II/examples/step-38/doc/results.dox
@@ -144,11 +144,11 @@ void LaplaceBeltrami::make_grid_and_dofs ()
Note that the only essential addition has been the three lines marked with
asterisks. It is worth pointing out one other thing here, though: because we
-un-attach the manifold description from the surface mesh, whenever we use a
+detach the manifold description from the surface mesh, whenever we use a
mapping object in the rest of the program, it has no curves boundary
description to go on any more. Rather, it will have to use the implicit,
StraightBoundary class that is used on all parts of the boundary not
-explicitly assigned a different mannifold object. Consequently, whether we use
+explicitly assigned a different manifold object. Consequently, whether we use
MappingQ(2), MappingQ(15) or MappingQ1, each cell of our mesh will be mapped
using a bilinear approximation.
diff --git a/deal.II/examples/step-38/step-38.cc b/deal.II/examples/step-38/step-38.cc
index ac5c8a2c3d..00ca34117a 100644
--- a/deal.II/examples/step-38/step-38.cc
+++ b/deal.II/examples/step-38/step-38.cc
@@ -88,7 +88,7 @@ namespace Step38
// boundary of the domain -- this is what we did in step-11, for
// example. However, here we have a curved domain, not just a curved
// boundary, and while we can approximate it with bilinearly mapped cells,
- // it is really only prodent to use a higher order mapping for all
+ // it is really only prudent to use a higher order mapping for all
// cells. Consequently, this class has a member variable of type MappingQ;
// we will choose the polynomial degree of the mapping equal to the
// polynomial degree of the finite element used in the computations to
diff --git a/deal.II/examples/step-39/doc/results.dox b/deal.II/examples/step-39/doc/results.dox
index f0aa08444e..1e7de4afd1 100644
--- a/deal.II/examples/step-39/doc/results.dox
+++ b/deal.II/examples/step-39/doc/results.dox
@@ -1,7 +1,7 @@
Results
Logfile output
-First, the program produces the usual logfile here stored in deallog. It reads (with ommission of intermediate steps)
+First, the program produces the usual logfile here stored in deallog. It reads (with omission of intermediate steps)
@code
DEAL::Element: FE_DGQ<2>(3)
diff --git a/deal.II/examples/step-39/step-39.cc b/deal.II/examples/step-39/step-39.cc
index 61f2eba90e..18bb795f8c 100644
--- a/deal.II/examples/step-39/step-39.cc
+++ b/deal.II/examples/step-39/step-39.cc
@@ -113,7 +113,7 @@ namespace Step39
// ourselves. Similarly, we implement Nitsche boundary conditions and the
// interior penalty fluxes between cells.
//
- // The boundary und flux terms need a penalty parameter, which should be
+ // The boundary and flux terms need a penalty parameter, which should be
// adjusted to the cell size and the polynomial degree. A safe choice of
// this parameter for constant coefficients can be found in
// LocalIntegrators::Laplace::compute_penalty() and we use this below.
@@ -314,7 +314,7 @@ namespace Step39
};
// Here we have the integration on cells. There is currently no good
- // interfce in MeshWorker that would allow us to access values of regular
+ // interface in MeshWorker that would allow us to access values of regular
// functions in the quadrature points. Thus, we have to create the vectors
// for the exact function's values and gradients inside the cell
// integrator. After that, everything is as before and we just add up the
@@ -521,7 +521,7 @@ namespace Step39
mg_matrix_dg_down.clear();
// It is important to update the sparsity patterns after clear()
// was called for the level matrices, since the matrices lock the sparsity
- // pattern through the Smartpointer ans Subscriptor mechanism.
+ // pattern through the SmartPointer and Subscriptor mechanism.
mg_sparsity.resize(0, n_levels-1);
mg_sparsity_dg_interface.resize(0, n_levels-1);
@@ -650,7 +650,7 @@ namespace Step39
MeshWorker::DoFInfo dof_info(dof_handler);
- // Since this assembler alows us to fill several vectors, the interface is
+ // Since this assembler allows us to fill several vectors, the interface is
// a little more complicated as above. The pointers to the vectors have to
// be stored in a NamedData object. While this seems to cause two extra
// lines of code here, it actually comes handy in more complex
@@ -765,7 +765,7 @@ namespace Step39
const unsigned int n_gauss_points = dof_handler.get_fe().tensor_degree()+1;
info_box.initialize_gauss_quadrature(n_gauss_points, n_gauss_points+1, n_gauss_points);
- // but now we need to notify the info box of the finite element functio we
+ // but now we need to notify the info box of the finite element function we
// want to evaluate in the quadrature points. First, we create a NamedData
// object with this vector, which is the solution we just computed.
NamedData<Vector<double>* > solution_data;
@@ -773,7 +773,7 @@ namespace Step39
// Then, we tell the MeshWorker::VectorSelector for cells that we need
// the second derivatives of this solution (to compute the
- // Laplacian). Therefore, the boolean arguments selecting function values
+ // Laplacian). Therefore, the Boolean arguments selecting function values
// and first derivatives are false, only the last one selecting second
// derivatives is true.
info_box.cell_selector.add("solution", false, false, true);
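// For reference, the three Boolean flags in the call above select, in this
// order, function values, gradients, and second derivatives; spelled out with
// comments (an illustration of the same call, not a change to the patch):
@code
info_box.cell_selector.add ("solution",
                            /* values    = */ false,
                            /* gradients = */ false,
                            /* hessians  = */ true);
@endcode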
diff --git a/deal.II/examples/step-4/doc/intro.dox b/deal.II/examples/step-4/doc/intro.dox
index 83df40d5c2..2b2467a383 100644
--- a/deal.II/examples/step-4/doc/intro.dox
+++ b/deal.II/examples/step-4/doc/intro.dox
@@ -20,7 +20,7 @@ a look at the code below - there are almost no distinctions between 2d
and 3d!). We would have to write the same function twice, once
working on Triangulation2d
and once working with a
Triangulation3d
. This is an unnecessary obstacle in
-programming and leads to a nuisance to keep the two function in synch
+programming and leads to a nuisance to keep the two functions in sync
(at best) or difficult to find errors if the two versions get out of
sync (at worst; this would probably be the more common case).
diff --git a/deal.II/examples/step-4/doc/results.dox b/deal.II/examples/step-4/doc/results.dox
index 4c7bf53bce..2a4402097f 100644
--- a/deal.II/examples/step-4/doc/results.dox
+++ b/deal.II/examples/step-4/doc/results.dox
@@ -64,7 +64,7 @@ the domain. This is nice, but it has the drawback that it completely hides
what is happening on the inside. The picture on the right is an attempt at
visualizing the interior as well, by showing surfaces where the solution has
constant values (as indicated by the legend at the top left). Isosurface
-pictures look best if one makes the individual surfaces slightly transparaent
+pictures look best if one makes the individual surfaces slightly transparent
so that it is possible to see through them and see what's behind.
diff --git a/deal.II/examples/step-4/step-4.cc b/deal.II/examples/step-4/step-4.cc
index d8e92fa132..d6ed24c0e9 100644
--- a/deal.II/examples/step-4/step-4.cc
+++ b/deal.II/examples/step-4/step-4.cc
@@ -164,7 +164,7 @@ public:
// formulas above right away.
//
// The last thing to note is that a Point<dim> denotes a point
-// in dim-dimensionsal space, and its individual components (i.e. $x$, $y$,
+// in dim-dimensional space, and its individual components (i.e. $x$, $y$,
// ... coordinates) can be accessed using the () operator (in fact, the []
// operator will work just as well) with indices starting at zero as usual in
// C and C++.
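// A minimal, self-contained illustration of the component access described
// above (a sketch assuming only the Point class from deal.II):
@code
#include <deal.II/base/point.h>

double sum_of_coordinates ()
{
  const dealii::Point<2> p (1.0, 2.0);
  // operator() and operator[] both return the requested coordinate;
  // indices start at zero as usual in C and C++.
  return p(0) + p[1];
}
@endcode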
@@ -456,7 +456,7 @@ void Step4::solve ()
// data_out.write_gnuplot
call by
// data_out.write_vtk
.
//
-// Since the program will run both 2d and 3d versions of the laplace solver,
+// Since the program will run both 2d and 3d versions of the Laplace solver,
// we use the dimension in the filename to generate distinct filenames for
// each run (in a better program, one would check whether dim
can
// have other values than 2 or 3, but we neglect this here for the sake of
diff --git a/deal.II/examples/step-40/doc/results.dox b/deal.II/examples/step-40/doc/results.dox
index f43355fce5..8b62cec1e7 100644
--- a/deal.II/examples/step-40/doc/results.dox
+++ b/deal.II/examples/step-40/doc/results.dox
@@ -126,7 +126,7 @@ So how large are the largest problems one can solve? At the time of writing
this problem, the
limiting factor is that the program uses the BoomerAMG algebraic
multigrid method from the hypre package as
+href="http://acts.nersc.gov/hypre/" target="_top">Hypre package as
a preconditioner, which unfortunately uses signed 32-bit integers to
index the elements of a %distributed matrix. This limits the size of
problems to $2^{31}-1=2,147,483,647$ degrees of freedom. From the graphs
diff --git a/deal.II/examples/step-40/step-40.cc b/deal.II/examples/step-40/step-40.cc
index 979f6b060b..d02184477e 100644
--- a/deal.II/examples/step-40/step-40.cc
+++ b/deal.II/examples/step-40/step-40.cc
@@ -238,7 +238,7 @@ namespace Step40
dof_handler.distribute_dofs (fe);
- // The next two lines extract some informatino we will need later on,
+ // The next two lines extract some information we will need later on,
// namely two index sets that provide information about which degrees of
// freedom are owned by the current processor (this information will be
// used to initialize solution and right hand side vectors, and the system
@@ -544,7 +544,7 @@ namespace Step40
// that stores, for each cell, the subdomain the cell belongs to. This is
// slightly tricky, because of course not every processor knows about every
// cell. The vector we attach therefore has an entry for every cell that the
- // current processor has in its mesh (locally owned onces, ghost cells, and
+ // current processor has in its mesh (locally owned ones, ghost cells, and
// artificial cells), but the DataOut class will ignore all entries that
// correspond to cells that are not owned by the current processor. As a
// consequence, it doesn't actually matter what values we write into these
@@ -672,12 +672,12 @@ namespace Step40
// The final function, main()
, again has the same structure as in
// all other programs, in particular step-6. Like in the other programs that
-// use PETSc, we have to inialize and finalize PETSc, which is done using the
+// use PETSc, we have to initialize and finalize PETSc, which is done using the
// helper object MPI_InitFinalize.
//
// Note how we enclose the use of the LaplaceProblem class in a pair
// of braces. This makes sure that all member variables of the object are
-// destroyed by the time we destroy the mpi_intialization object. Not doing
+// destroyed by the time we destroy the mpi_initialization object. Not doing
// this will lead to strange and hard to debug errors when
// PetscFinalize
first deletes all PETSc vectors that are still
// around, and the destructor of the LaplaceProblem class then tries to delete
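// The scoping pattern described in this hunk can be sketched as follows (a
// simplified illustration, not the tutorial's literal main(); the class and
// namespace names are taken from the surrounding text):
@code
int main (int argc, char *argv[])
{
  dealii::Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv);

  {
    Step40::LaplaceProblem<2> laplace_problem_2d;
    laplace_problem_2d.run ();
  } // all PETSc objects owned by LaplaceProblem are destroyed here ...

  return 0;
} // ... before mpi_initialization's destructor finalizes PETSc
@endcode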
diff --git a/deal.II/examples/step-41/doc/intro.dox b/deal.II/examples/step-41/doc/intro.dox
index f98bbc5674..84afbba1f6 100644
--- a/deal.II/examples/step-41/doc/intro.dox
+++ b/deal.II/examples/step-41/doc/intro.dox
@@ -21,7 +21,7 @@ the current example program, we will consider that under the membrane is a
stair step obstacle against which gravity pushes the membrane.
This problem is typically called the "obstacle problem" (see also this wikipedia article), and it results in a
+href="http://en.wikipedia.org/wiki/Obstacle_problem">this Wikipedia article), and it results in a
variational inequality, rather than a variational equation when put into the
weak form. We will below derive it from the classical formulation, but before we
go on to discuss the mathematics let us show how the solution of the problem we
@@ -98,7 +98,7 @@ We have to find a solution $u\in G$ of the following minimization problem:
@f{equation*}
E(u)\leq E(v)\quad \forall v\in G,
@f}
-with the convex set of admissble displacements:
+with the convex set of admissible displacements:
@f{equation*}
G:=\lbrace v\in V: v\geq g \text{ a.e. in } \Omega\rbrace,\quad V:=H^1_0(\Omega).
@f}
@@ -132,7 +132,7 @@ sides are in fact equal, i.e., we obtain a variational equality.
On the other hand, if $u=g$ then $G$ only allows test functions $v$ so that in fact
$v-u\ge 0$. This means that we can't test the equation with both $v-u$ and
$-(v-u)$ as above, and so we can no longer conclude that the two sides are in
-fact equal. Thus, this mimicks the way we have discussed the complementarity
+fact equal. Thus, this mimics the way we have discussed the complementarity
condition above.
@@ -222,7 +222,7 @@ penalty parameter which depends on the problem itself and needs to be chosen
large enough; for example there is no convergence for $c = 1$ using the
current program if we use 7 global refinements.)
-After some headscratching one can then convince oneself that the inequalities
+After some head-scratching one can then convince oneself that the inequalities
above can equivalently be rewritten as
@f{equation*}
C([BU]_i,\Lambda_i) = 0, \qquad \forall i\in{\cal S}.
diff --git a/deal.II/examples/step-41/step-41.cc b/deal.II/examples/step-41/step-41.cc
index b177c92a20..4df6507cb8 100644
--- a/deal.II/examples/step-41/step-41.cc
+++ b/deal.II/examples/step-41/step-41.cc
@@ -494,7 +494,7 @@ namespace Step41
// function given above and in the introduction.
//
// If we decide that the DoF should be part of the active set, we
- // add its index to the active set, introduce a nonhomogeneous
+ // add its index to the active set, introduce an inhomogeneous
// equality constraint in the ConstraintMatrix object, and reset the
// solution value to the height of the obstacle. Finally, the
// residual of the non-contact part of the system serves as an
@@ -576,7 +576,7 @@ namespace Step41
// @sect4{ObstacleProblem::output_results}
// We use the vtk-format for the output. The file contains the displacement
- // and a numerical represenation of the active set. The function looks
+ // and a numerical representation of the active set. The function looks
// standard but note that we can add an IndexSet object to the DataOut
// object in exactly the same way as a regular solution vector: it is simply
// interpreted as a function that is either zero (when a degree of freedom
diff --git a/deal.II/examples/step-42/doc/intro-step-42.tex b/deal.II/examples/step-42/doc/intro-step-42.tex
index 7f2e577d33..06b2820743 100644
--- a/deal.II/examples/step-42/doc/intro-step-42.tex
+++ b/deal.II/examples/step-42/doc/intro-step-42.tex
@@ -58,15 +58,15 @@ Furthermore we have to distinguish two cases.\\
The continuous and convex function $\mathcal{F}$ denotes the von Mises flow function
$$\mathcal{F}(\tau) = \vert\tau^D\vert - \sigma_0,\quad\text{with}\quad \tau^D
= \tau - \dfrac{1}{3}tr(\tau)I,$$
-$\sigma_0$ as yield stress and $\vert .\vert$ as the frobenius norm. If there
+$\sigma_0$ as yield stress and $\vert .\vert$ as the Frobenius norm. If there
are no plastic deformations in a particular point - that is $\lambda=0$ - this yields $\vert\sigma^D\vert <
\sigma_0$ and otherwise if $\lambda > 0$ it follows that $\vert\sigma^D\vert = \sigma_0$.
That means if the stress is smaller than the yield stress there are only elastic
deformations in that point.\\
To consider it the other way around, if the deviator stress $\sigma^D$ has a
norm bigger than the yield stress then $\sigma^D$ has to be projected back to the yield surface and there are plastic deformations which means $\lambda$
-would be positiv for that particular point. We refer that the stresses are
-computed by Hooke's law for isotorpic materials. You can find the description at the end of section 3. Else if the norm of the deviator stress tensor is smaller or equal the yield stress then $\lambda$ is zero and there are no plastic deformations in
+would be positive for that particular point. We note that the stresses are
+computed by Hooke's law for isotropic materials. You can find the description at the end of section 3. Else if the norm of the deviator stress tensor is smaller than or equal to the yield stress then $\lambda$ is zero and there are no plastic deformations in
that point.\\
There the index $D$ denotes the deviator part of for example the stress where
$tr(.)$ is the trace of a tensor. The definition shows an additive decomposition
@@ -77,7 +77,7 @@ The third equation is called equilibrium condition with a force of volume
density $f$ which we will neglect in our example.
The boundary of $\Omega$ separates as follows $\Gamma=\Gamma_D\bigcup\Gamma_C$ and $\Gamma_D\bigcap\Gamma_C=\emptyset$.
At the boundary $\Gamma_D$ we have zero Dirichlet conditions. $\Gamma_C$ denotes the potential contact boundary.\\
-The last two lines decribe the so-called Signorini contact conditions. If there is no contact the normal stress
+The last two lines describe the so-called Signorini contact conditions. If there is no contact the normal stress
$$ \sigma_n = \sigma n\cdot n$$
is zero with the outward normal $n$. If there is contact ($u_n = g$) the tangential stress $\sigma_t = \sigma\cdot n - \sigma_n n$
vanishes, because we consider a frictionless situation and the normal stress is
@@ -95,10 +95,10 @@ $$W^{\textrm{div}}:=\lbrace \tau\in
L^2(\Omega,\mathbb{R}^{\textrm{dim}\times\textrm{dim}}_{\textrm{sym}}):\textrm{div}(\tau)\in L^2(\Omega,\mathbb{R}^{\textrm{dim}})\rbrace$$ and
$$\Pi \Sigma:=\lbrace \tau\in \Sigma, \mathcal{F}(\tau)\leq 0\rbrace$$
as the set of admissible stresses which is defined
-by a continious, convex flow function $\mathcal{F}$.
+by a continuous, convex flow function $\mathcal{F}$.
With the goal of deriving the dual formulation of the minimisation
-problem, we define a lagrange function:
+problem, we define a Lagrange function:
$$L(\tau,\varphi) := E(\tau) + (\varphi, \textrm{div}(\tau)),\quad \lbrace\tau,\varphi\rbrace\in\Pi W^{\textrm{div}}\times V^+$$
with
$$V^+ := \lbrace u\in V: u_n\leq g \text{ on } \Gamma_C \rbrace$$
@@ -220,7 +220,7 @@ tensor)},$$
$$\tau^D := C\varepsilon^D(u^i).$$
Remark that $a(.;.)$ is not differentiable in the common sense but it is
slantly differentiable like the function for the contact problem and again we refer to
-Hintermueller, Ito, Kunisch: The primal-dual active set strategy as a semismooth newton method, SIAM J. OPTIM., 2003, Vol. 13, No. 3, pp. 865-888.
+Hintermueller, Ito, Kunisch: The primal-dual active set strategy as a semismooth Newton method, SIAM J. OPTIM., 2003, Vol. 13, No. 3, pp. 865-888.
Again the first case is for elastic and the second for plastic deformation.
\section{Formulation as a saddle point problem}
@@ -271,7 +271,7 @@ The linearized problem is essentially like a pure elastic problem with contact l
in step-41. The only difference consists in the fact that the contact area
is at the boundary instead of in the domain. But this has no further consequence
so that we refer to the documentation of step-41 with the only hint that
-$\mathcal{S}$ containts all the vertices at the contact boundary $\Gamma_C$ this
+$\mathcal{S}$ contains all the vertices at the contact boundary $\Gamma_C$ this
time.
\section{The primal-dual active set algorithm combined with the inexact semi smooth
@@ -284,7 +284,7 @@ method for the contact. It sums up the results of the sections before and works
\item[(0)] Initialize $\mathcal{A}_k$ and $\mathcal{F}_k$, such that
$\mathcal{S} = \mathcal{A}_k \cup \mathcal{F}_k$ and $\mathcal{A}_k \cap
\mathcal{F}_k = \emptyset$ and set $k = 1$. The start value $\hat U^0 :=
- P_{\mathcal{A}_k}(0)$ fullfills our obstacle condition.
+ P_{\mathcal{A}_k}(0)$ fulfills our obstacle condition.
\item[(1)] Assemble the Newton matrix $A := a'(\hat
U^{k-1};\varphi_p,\varphi_q)$ and the right-hand-side $F(\hat U^{k-1})$.
\item[(2)] Find the primal-dual pair $(\bar U^k,\Lambda^k)$ that satisfies
@@ -321,10 +321,10 @@ g_{h,p}, & \textrm{if}\quad
p\in\mathcal{A}.
\end{cases}$$\\
The matrix $B\in\mathbb{R}^{n\times m}$, $n>m$ describes the coupling of the
-bases for the displacements and lagrange multiplier (contact forces)
+bases for the displacements and Lagrange multiplier (contact forces)
and it is not quadratic in our situation since $\Lambda^k$ is only defined on
$\Gamma_C$. Because the ansatz functions $\psi_i$ (scalar valued) of the
-lagrange multiplier are fullfilling the following biorthogonal condition (see Hüeber, Wohlmuth: A primal–dual active
+Lagrange multiplier fulfill the following biorthogonal condition (see Hüeber, Wohlmuth: A primal–dual active
set strategy for non-linear multibody contact problems, Comput. Methods Appl. Mech. Engrg.
194, 2005, pp. 3147-3166)
$$ \int\limits_{\Gamma_C}\psi_i(x)\varphi_j(x)dx =
@@ -335,8 +335,8 @@ $$B_{pq} = \begin{cases}
p,q\in\mathcal{S}\\
0I_3, & \text{if}\quad p\neq q,\quad p\textrm{ or }q\notin\mathcal{S}.
\end{cases}$$
-Here $I_3$ denotes the threedimensional identity matrix.
-In our programm we use the structure of a quadratic sparse for
+Here $I_3$ denotes the three-dimensional identity matrix.
+In our program we use a square sparse matrix structure for
$B\in\mathbb{R}^{n\times n}$ and for $\Lambda^k$ a vector with length $n$ where
$\Lambda^k_p = 0$ for $p\notin \mathcal{S}$.
The vector $G$ is defined by a suitable approximation $g_h$ of the gap $g$
@@ -344,8 +344,8 @@ $$G_p = \begin{cases}
g_{h,p}\int\limits_{\Gamma_C}\varphi_p(x)dx, & \text{if}\quad p\in\mathcal{S}\\
0, & \text{if}\quad p\notin\mathcal{S}.
\end{cases}$$\\
-Note that $G_p$ is a threedimensional vector and that again we applied the
-biorthogonal property of the lagrange multiplier ansatz functions to the
+Note that $G_p$ is a three-dimensional vector and that again we applied the
+biorthogonal property of the Lagrange multiplier ansatz functions to the
integral $\int\limits_{\Gamma_C}g_h(x)\varphi_p(x)dx$ with $g_h(x)=\sum\limits_i
g_{h,p}\varphi_p(x)$ (see the reference mentioned above).\\
Compared to step-41, step (1) is added but it should be clear from the sections
@@ -377,19 +377,19 @@ results it yields a quite reasonable adaptive mesh for the contact zone.
\section{Implementation}
-This tutorial is essentailly a mixture of step-40 and step-41 but instead of
+This tutorial is essentially a mixture of step-40 and step-41 but instead of
PETSc we let the Trilinos library deal with parallelizing the linear algebra
(like in step-32). Since we are trying to solve a problem similar to the one in
step-41 we will use the same methods but now in parallel.
Another difficulty is the handling of the different constraints from
-(the dirichlet conditons), the hanging nodes and the inequality condition that
+(the Dirichlet conditions), the hanging nodes and the inequality condition that
arises from the contact. For this purpose we create three objects of type
ConstraintMatrix.
Beside the ConstitutiveLaw class there is another new class called Input. This
class allows us to read in an obstacle from a file. In our example the file
-'obstacle\_file.dat' containts data which describe an Chinese, Japanese or
+'obstacle\_file.dat' contains data which describe a Chinese, Japanese or
Korean symbol for force or power. (See www.orientaloutpost.com/: ``This word can be used for motivation - it
can also mean power / motion / propulsion / force. It can be anything
internal or external that keeps you going. This is the safest way to express
diff --git a/deal.II/examples/step-42/doc/intro.dox b/deal.II/examples/step-42/doc/intro.dox
index e968148ad2..e1bd59cf9e 100644
--- a/deal.II/examples/step-42/doc/intro.dox
+++ b/deal.II/examples/step-42/doc/intro.dox
@@ -65,8 +65,8 @@ deformations in that point.
To consider it the other way around, if the deviator stress $\sigma^D$ has a
norm bigger than the yield stress then $\sigma^D$ has to be projected back to the yield surface and there are plastic deformations which means $\lambda$
-would be positiv for that particular point. We refer that the stresses are
-computed by Hooke's law for isotorpic materials. You can find the description at the end of section 3. Else if the norm of the deviator stress tensor is smaller or equal the yield stress then $\lambda$ is zero and there are no plastic deformations in
+would be positive for that particular point. We note that the stresses are
+computed by Hooke's law for isotropic materials. You can find the description at the end of section 3. Else if the norm of the deviator stress tensor is smaller than or equal to the yield stress then $\lambda$ is zero and there are no plastic deformations in
that point.
There the index $D$ denotes the deviator part of for example the stress where
@@ -80,7 +80,7 @@ density $f$ which we will neglect in our example.
The boundary of $\Omega$ separates as follows $\Gamma=\Gamma_D\bigcup\Gamma_C$ and $\Gamma_D\bigcap\Gamma_C=\emptyset$.
At the boundary $\Gamma_D$ we have zero Dirichlet conditions. $\Gamma_C$ denotes the potential contact boundary.
-The last two lines decribe the so-called Signorini contact conditions. If there is no contact the normal stress
+The last two lines describe the so-called Signorini contact conditions. If there is no contact the normal stress
@f{gather*} \sigma_n = \sigma n\cdot n@f}
is zero with the outward normal $n$. If there is contact ($u_n = g$) the tangential stress $\sigma_t = \sigma\cdot n - \sigma_n n$
vanishes, because we consider a frictionless situation and the normal stress is
@@ -98,10 +98,10 @@ with
L^2(\Omega,\mathbb{R}^{\textrm{dim}\times\textrm{dim}}_{\textrm{sym}}):\textrm{div}(\tau)\in L^2(\Omega,\mathbb{R}^{\textrm{dim}})\rbrace@f} and
@f{gather*}\Pi \Sigma:=\lbrace \tau\in \Sigma, \mathcal{F}(\tau)\leq 0\rbrace@f}
as the set of admissible stresses which is defined
-by a continious, convex flow function $\mathcal{F}$.
+by a continuous, convex flow function $\mathcal{F}$.
With the goal of deriving the dual formulation of the minimisation
-problem, we define a lagrange function:
+problem, we define a Lagrange function:
@f{gather*}L(\tau,\varphi) := E(\tau) + (\varphi, \textrm{div}(\tau)),\quad \lbrace\tau,\varphi\rbrace\in\Pi W^{\textrm{div}}\times V^+@f}
with
@f{gather*}V^+ := \lbrace u\in V: u_n\leq g \text{ on } \Gamma_C \rbrace@f}
@@ -233,7 +233,7 @@ tensor)},@f}
@f{gather*}\tau^D := C\varepsilon^D(u^i).@f}
Remark that $a(.;.)$ is not differentiable in the common sense but it is
slantly differentiable like the function for the contact problem and again we refer to
-Hintermueller, Ito, Kunisch: The primal-dual active set strategy as a semismooth newton method, SIAM J. OPTIM., 2003, Vol. 13, No. 3, pp. 865-888.
+Hintermueller, Ito, Kunisch: The primal-dual active set strategy as a semismooth Newton method, SIAM J. OPTIM., 2003, Vol. 13, No. 3, pp. 865-888.
Again the first case is for elastic and the second for plastic deformation.
@@ -285,7 +285,7 @@ The linearized problem is essentially like a pure elastic problem with contact l
in step-41. The only difference consists in the fact that the contact area
is at the boundary instead of in the domain. But this has no further consequence
so that we refer to the documentation of step-41 with the only hint that
-$\mathcal{S}$ containts all the vertices at the contact boundary $\Gamma_C$ this
+$\mathcal{S}$ contains all the vertices at the contact boundary $\Gamma_C$ this
time.
@@ -299,7 +299,7 @@ method for the contact. It sums up the results of the sections before and works
- Initialize $\mathcal{A}_k$ and $\mathcal{F}_k$, such that
$\mathcal{S} = \mathcal{A}_k \cup \mathcal{F}_k$ and $\mathcal{A}_k \cap
\mathcal{F}_k = \emptyset$ and set $k = 1$. The start value $\hat U^0 :=
- P_{\mathcal{A}_k}(0)$ fullfills our obstacle condition.
+ P_{\mathcal{A}_k}(0)$ fulfills our obstacle condition.
- Assemble the Newton matrix $A := a'(\hat
U^{k-1};\varphi_p,\varphi_q)$ and the right-hand-side $F(\hat U^{k-1})$.
- Find the primal-dual pair $(\bar U^k,\Lambda^k)$ that satisfies
@@ -339,10 +339,10 @@ g_{h,p}, & \textrm{if}\quad
p\in\mathcal{A}.
\end{cases}@f}\\
The matrix $B\in\mathbb{R}^{n\times m}$, $n>m$ describes the coupling of the
-bases for the displacements and lagrange multiplier (contact forces)
+bases for the displacements and Lagrange multiplier (contact forces)
and it is not quadratic in our situation since $\Lambda^k$ is only defined on
$\Gamma_C$. Because the ansatz functions $\psi_i$ (scalar valued) of the
-lagrange multiplier are fullfilling the following biorthogonal condition (see Hüeber, Wohlmuth: A primal–dual active
+Lagrange multiplier fulfill the following biorthogonal condition (see Hüeber, Wohlmuth: A primal–dual active
set strategy for non-linear multibody contact problems, Comput. Methods Appl. Mech. Engrg.
194, 2005, pp. 3147-3166)
@f{gather} \int\limits_{\Gamma_C}\psi_i(x)\varphi_j(x)dx =
@@ -353,8 +353,8 @@ this yields
p,q\in\mathcal{S}\\
0I_3, & \text{if}\quad p\neq q,\quad p\textrm{ or }q\notin\mathcal{S}.
\end{cases}@f}
-Here $I_3$ denotes the threedimensional identity matrix.
-In our programm we use the structure of a quadratic sparse for
+Here $I_3$ denotes the three-dimensional identity matrix.
+In our program we use a square sparse matrix structure for
$B\in\mathbb{R}^{n\times n}$ and for $\Lambda^k$ a vector with length $n$ where
$\Lambda^k_p = 0$ for $p\notin \mathcal{S}$.
The vector $G$ is defined by a suitable approximation $g_h$ of the gap $g$
@@ -363,8 +363,8 @@ g_{h,p}\int\limits_{\Gamma_C}\varphi_p(x)dx, & \text{if}\quad p\in\mathcal{S}\\
0, & \text{if}\quad p\notin\mathcal{S}.
\end{cases}@f}
-Note that $G_p$ is a threedimensional vector and that again we applied the
-biorthogonal property of the lagrange multiplier ansatz functions to the
+Note that $G_p$ is a three-dimensional vector and that again we applied the
+biorthogonal property of the Lagrange multiplier ansatz functions to the
integral $\int\limits_{\Gamma_C}g_h(x)\varphi_p(x)dx$ with $g_h(x)=\sum\limits_i
g_{h,p}\varphi_p(x)$ (see the reference mentioned above).
@@ -410,13 +410,13 @@ PETSc we let the Trilinos library deal with parallelizing the linear algebra
step-41 we will use the same methods but now in parallel.
Another difficulty is the handling of the different constraints from
-(the dirichlet conditons), the hanging nodes and the inequality condition that
+(the Dirichlet conditions), the hanging nodes and the inequality condition that
arises from the contact. For this purpose we create three objects of type
ConstraintMatrix.
Beside the ConstitutiveLaw class there is another new class called Input. This
class allows us to read in an obstacle from a file. In our example the file
-'obstacle_file.dat' containts data which describe an Chinese, Japanese or
+'obstacle_file.dat' contains data which describe a Chinese, Japanese or
Korean symbol for force or power. (See http://www.orientaloutpost.com/ :
"This word can be used for motivation - it
can also mean power/motion/propulsion/force. It can be anything
diff --git a/deal.II/examples/step-42/doc/results.dox b/deal.II/examples/step-42/doc/results.dox
index 11104ccc74..b2ad56df80 100644
--- a/deal.II/examples/step-42/doc/results.dox
+++ b/deal.II/examples/step-42/doc/results.dox
@@ -347,7 +347,7 @@ Cycle 5:
For each adaptive refinement cycle the iterations end if the active set doesn't
change anymore and if the residual is accurate enough. In the tables
-at the end of each cycle you find informations about computing time and the
+at the end of each cycle you find information about computing time and the
number of calls of different parts of the program like Assembling or calculating
the residual. In the second cycle there are 12 calls for the residual function
and only 10 calls for Solving or Assembling, which means that two damping steps were
@@ -359,7 +359,7 @@ we used a pure elastic solution). That worked out well for the ball but for the
Chinese symbol as obstacle it turns out that an elastic start solution for each
cycle results in fewer Newton iterations.
-In every refinement step you can observe that the acitve set - the contact points -
+In every refinement step you can observe that the active set - the contact points -
are iterated out at first. After that the Newton method has only to struggle with the
plasticity. For the finer meshes quadratic convergence does not set in until the
last 4 or 5 Newton iterations.
@@ -382,7 +382,7 @@ last 4 or 5 Newton iterations.
The picture shows the adaptive refinement and as well how much a cell is
-plastified druing the contact with the ball. Remember that we consider the
+plastified during the contact with the ball. Remember that we consider the
norm of the deviator part of the stress in each quadrature point to
see if there is elastic or plastic behavior. In the middle of the top -
where the mesh is finest - you can see the hollow caused by the ball. The blue
@@ -399,7 +399,7 @@ Problems, preprint.
Possibilities for extensions
Extend the program from a static to a quasi-static problem, perhaps by choosing a
-backward-euler-scheme for the time discretization (for theoretical results see Frohne: FEM-Simulation
+backward-Euler-scheme for the time discretization (for theoretical results see Frohne: FEM-Simulation
der Umformtechnik metallischer Oberflächen im Mikrokosmos, Ph.D. thesis,
University of Siegen, Germany, 2011).
diff --git a/deal.II/examples/step-42/step-42.cc b/deal.II/examples/step-42/step-42.cc
index 30a8f36706..f9445dd9de 100644
--- a/deal.II/examples/step-42/step-42.cc
+++ b/deal.II/examples/step-42/step-42.cc
@@ -941,7 +941,7 @@ namespace Step42
locally_relevant_dofs);
}
- // setup hanging nodes and dirichlet constraints
+ // set up hanging nodes and Dirichlet constraints
{
TimerOutput::Scope t(computing_timer, "Setup: constraints");
constraints_hanging_nodes.reinit(locally_relevant_dofs);
@@ -1460,7 +1460,7 @@ namespace Step42
// This function defines the new ConstraintMatrix
// constraints_dirichlet_hanging_nodes. It contains
-// the dirichlet boundary values as well as the
+// the Dirichlet boundary values as well as the
// hanging nodes constraints.
template <int dim>
void
diff --git a/deal.II/examples/step-43/doc/intro.dox b/deal.II/examples/step-43/doc/intro.dox
index 446dd335c0..658f1971f4 100644
--- a/deal.II/examples/step-43/doc/intro.dox
+++ b/deal.II/examples/step-43/doc/intro.dox
@@ -568,7 +568,7 @@ Z Chen.
JL Guermond and R Pasquetti.
-Entropy-based nonlinear viscosity for fourier approximations of
+Entropy-based nonlinear viscosity for Fourier approximations of
conservation laws.
Comptes Rendus Mathematique, 346(13-14):801-806, 2008.
diff --git a/deal.II/examples/step-43/step-43.cc b/deal.II/examples/step-43/step-43.cc
index 2c3477bc36..5647ece9ec 100644
--- a/deal.II/examples/step-43/step-43.cc
+++ b/deal.II/examples/step-43/step-43.cc
@@ -483,7 +483,7 @@ namespace Step43
// Unlike step-31, this step uses one more ConstraintMatrix object called
// darcy_preconditioner_constraints. This constraint object is used only for
// assembling the matrix for the Darcy preconditioner and includes hanging
- // node constrants as well as Dirichlet boundary value constraints for the
+ // node constraints as well as Dirichlet boundary value constraints for the
// pressure variable. We need this because we are building a Laplace matrix
// for the pressure (as an approximation of the Schur complement) which is
// only positive definite if boundary conditions are applied.
@@ -1510,7 +1510,7 @@ namespace Step43
// the run() function, the central one in this program.
//
// At the beginning of the function, we ask whether to solve the
- // pressure-velocity part by evaluating the posteriori criterion (see the
+ // pressure-velocity part by evaluating the a posteriori criterion (see the
// following function). If necessary, we will solve the pressure-velocity
// part using the GMRES solver with the Schur complement block
// preconditioner as is described in the introduction.
@@ -2172,7 +2172,7 @@ namespace Step43
//
// With the exception of the startup code that loops back to the beginning
// of the function through the goto start_time_iteration
label,
- // everything should be relatively straightforward. In any case, it mimicks
+ // everything should be relatively straightforward. In any case, it mimics
// the corresponding function in step-31.
template <int dim>
void TwoPhaseFlowProblem<dim>::run ()
diff --git a/deal.II/examples/step-44/doc/intro.dox b/deal.II/examples/step-44/doc/intro.dox
index 79b68b3582..d33136b696 100644
--- a/deal.II/examples/step-44/doc/intro.dox
+++ b/deal.II/examples/step-44/doc/intro.dox
@@ -13,7 +13,7 @@ The subject of this tutorial is nonlinear solid mechanics.
Classical single-field approaches (see e.g. step-18) can not correctly describe the response of quasi-incompressible materials.
The response is overly stiff; a phenomenon known as locking.
Locking problems can be circumvented using a variety of alternative strategies.
-One such straegy is the three-field formulation.
+One such strategy is the three-field formulation.
It is used here to model the three-dimensional, fully-nonlinear (geometrical and material) response of an isotropic continuum body.
The material response is approximated as hyperelastic.
Additionally, the three-field formulation employed is valid for quasi-incompressible as well as compressible materials.
@@ -573,7 +573,7 @@ where
@f}
There are no derivatives of the pressure and dilatation (primary) variables present in the formulation.
-Thus the discontinuous finite element interpolation of the pressure ana dilatation yields a block
+Thus the discontinuous finite element interpolation of the pressure and dilatation yields a block
diagonal matrix for
$\mathbf{\mathsf{K}}_{\widetilde{p}\widetilde{J}}$,
$\mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{p}}$ and
diff --git a/deal.II/examples/step-44/doc/results.dox b/deal.II/examples/step-44/doc/results.dox
index 55e5746456..22756ccaa3 100644
--- a/deal.II/examples/step-44/doc/results.dox
+++ b/deal.II/examples/step-44/doc/results.dox
@@ -89,7 +89,7 @@ Much of the code in the tutorial has been developed based on the optimisations d
discussed and demonstrated in Step-18 and others.
With over 93% of the time being spent in the linear solver, it is obvious that it may be necessary
to invest in a better solver for large three-dimensional problems.
-The SSOR preconditioner is not multi-threaded but is effective for this class of solid problems.
+The SSOR preconditioner is not multithreaded but is effective for this class of solid problems.
It may be beneficial to investigate the use of another solver such as those available through the Trilinos library.
diff --git a/deal.II/examples/step-44/step-44.cc b/deal.II/examples/step-44/step-44.cc
index fe51603a0b..5e27647810 100644
--- a/deal.II/examples/step-44/step-44.cc
+++ b/deal.II/examples/step-44/step-44.cc
@@ -620,7 +620,7 @@ namespace Step44
}
protected:
- // Define constitutive model paramaters $\kappa$ (bulk modulus) and the
+ // Define constitutive model parameters $\kappa$ (bulk modulus) and the
// neo-Hookean model parameter $c_1$:
const double kappa;
const double c_1;
@@ -742,11 +742,11 @@ namespace Step44
// \textrm{Grad}\ \mathbf{u}$: Since $I$ has data type SymmetricTensor,
// just writing I + Grad_u_n
would convert the second
// argument to a symmetric tensor, perform the sum, and then cast the
- // result to a Tensor (i.e., the type of a possibly non-symmetric
+ // result to a Tensor (i.e., the type of a possibly nonsymmetric
// tensor). However, since Grad_u_n
is nonsymmetric in
// general, the conversion to SymmetricTensor will fail. We can avoid this
// back and forth by converting $I$ to Tensor first, and then performing
- // the addition as between non-symmetric tensors:
+ // the addition as between nonsymmetric tensors:
void update_values (const Tensor<2, dim> &Grad_u_n,
const double p_tilde,
const double J_tilde)
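// The conversion described in the comments above can be illustrated in
// isolation like this (a sketch using the library's unit_symmetric_tensor(),
// not the tutorial's own identity-tensor constant):
@code
#include <deal.II/base/symmetric_tensor.h>
#include <deal.II/base/tensor.h>

template <int dim>
dealii::Tensor<2, dim>
deformation_gradient (const dealii::Tensor<2, dim> &Grad_u_n)
{
  // Convert the (symmetric) identity tensor to a general Tensor first, so
  // that the sum with the generally nonsymmetric Grad_u_n is carried out
  // between two nonsymmetric tensors.
  return dealii::Tensor<2, dim>(dealii::unit_symmetric_tensor<dim>()) + Grad_u_n;
}
@endcode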
@@ -885,7 +885,7 @@ namespace Step44
determine_component_extractors();
// Several functions to assemble the system and right hand side matrices
- // using multi-threading. Each of them comes as a wrapper function, one
+ // using multithreading. Each of them comes as a wrapper function, one
// that is executed to do the work in the WorkStream model on one cell,
// and one that copies the work done on this one cell into the global
// object that represents it:
@@ -981,7 +981,7 @@ namespace Step44
std::vector > quadrature_point_history;
// A description of the finite-element system including the displacement
- // polynomial degree, the degree-of-freedom handler, number of dof's per
+ // polynomial degree, the degree-of-freedom handler, number of DoFs per
// cell and the extractor objects used to retrieve information from the
// solution vectors:
const unsigned int degree;
@@ -1564,7 +1564,7 @@ namespace Step44
block_component[J_component] = J_dof; // Dilatation
// The DOF handler is then initialised and we renumber the grid in an
- // efficient manner. We also record the number of DOF's per block.
+ // efficient manner. We also record the number of DOFs per block.
dof_handler_ref.distribute_dofs(fe);
DoFRenumbering::Cuthill_McKee(dof_handler_ref);
DoFRenumbering::component_wise(dof_handler_ref, block_component);
@@ -1739,7 +1739,7 @@ namespace Step44
// @sect4{Solid::update_qph_incremental}
// As the update of QP information occurs frequently and involves a number of
-// expensive operations, we define a multi-threaded approach to distributing
+// expensive operations, we define a multithreaded approach to distributing
// the task across a number of CPU cores.
//
// To start this, we first need to obtain the total solution as it stands
@@ -2562,7 +2562,7 @@ namespace Step44
// @sect4{Solid::solve_linear_system}
// Solving the entire block system is a bit problematic as there are no
// contributions to the $\mathsf{\mathbf{k}}_{ \widetilde{J} \widetilde{J}}$
-// block, rendering it non-invertible.
+// block, rendering it noninvertible.
// Since the pressure and dilatation variables DOFs are discontinuous, we can
// condense them out to form a smaller displacement-only system which
// we will then solve and subsequently post-process to retrieve the
diff --git a/deal.II/examples/step-46/doc/intro.dox b/deal.II/examples/step-46/doc/intro.dox
index dd7a984fba..45d36183a7 100644
--- a/deal.II/examples/step-46/doc/intro.dox
+++ b/deal.II/examples/step-46/doc/intro.dox
@@ -290,7 +290,7 @@ points:
- Implementing the bilinear form, and in particular dealing with the
interface term, both in the matrix and the sparsity pattern.
- Implementing Dirichlet boundary conditions on the external and
- internal parts of the boundaryies
+ internal parts of the boundaries
$\partial\Omega_f,\partial\Omega_s$.
diff --git a/deal.II/examples/step-46/step-46.cc b/deal.II/examples/step-46/step-46.cc
index 8ccfd0e64d..22a2696cde 100644
--- a/deal.II/examples/step-46/step-46.cc
+++ b/deal.II/examples/step-46/step-46.cc
@@ -153,7 +153,7 @@ namespace Step46
// The following classes do as their names suggest. The boundary values for
// the velocity are $\mathbf u=(0, \sin(\pi x))^T$ in 2d and $\mathbf u=(0,
// 0, \sin(\pi x)\sin(\pi y))^T$ in 3d, respectively. The remaining boundary
- // conditions for this problem are all homogenous and have been discussed in
+ // conditions for this problem are all homogeneous and have been discussed in
// the introduction. The right hand side forcing term is zero for both the
// fluid and the solid.
template <int dim>
@@ -374,7 +374,7 @@ namespace Step46
// this end, we first have to set the active FE indices with the function
// immediately above, then distribute degrees of freedom, and then determine
// constraints on the linear system. The latter includes hanging node
- // constraints as usual, but also the inhomogenous boundary values at the
+ // constraints as usual, but also the inhomogeneous boundary values at the
// top fluid boundary, and zero boundary values along the perimeter of the
// solid subdomain.
template <int dim>
diff --git a/deal.II/examples/step-47/step-47.cc b/deal.II/examples/step-47/step-47.cc
index 89f687a95a..4dfcf9e10d 100644
--- a/deal.II/examples/step-47/step-47.cc
+++ b/deal.II/examples/step-47/step-47.cc
@@ -448,7 +448,7 @@ namespace Step47
// To integrate the enriched elements we have to find the geometrical
// decomposition of the original element in subelements. The subelements are
// used to integrate the elements on both sides of the discontinuity. The
-// disontinuity line is approximated by a piece-wise linear interpolation
+// discontinuity line is approximated by a piece-wise linear interpolation
// between the intersection of the discontinuity with the edges of the
// elements. The vector level_set_values has the values of the level set
// function at the vertices of the elements. From these values can be found by
@@ -476,7 +476,7 @@ namespace Step47
// the sign of the level set function at the 4 nodes of the elements can
// be positive + or negative - depending on the sign of the level set
- // function we have the folloing three classes of decomposition type 1:
+ // function we have the following three classes of decomposition type 1:
// ++++, ---- type 2: -+++, +-++, ++-+, +++-, +---, -+--, --+-, ---+ type
// 3: +--+, ++--, +-+-, -++-, --++, -+-+
diff --git a/deal.II/examples/step-48/doc/intro.dox b/deal.II/examples/step-48/doc/intro.dox
index 59b9fab473..6a89c11ca8 100644
--- a/deal.II/examples/step-48/doc/intro.dox
+++ b/deal.II/examples/step-48/doc/intro.dox
@@ -125,7 +125,7 @@ the indices_local_to_global
variable contains the global
indices of the DoFs that it is constrained to. Then, we have another
variable constraint_indicator
at hand that holds, for
each cell, the local indices of DoFs that are constrained as well as
-the identifer of the type of constraint. Actually, you will not see
+the identifier of the type of constraint. Actually, you will not see
these data structures in the example program since the class
FEEvaluationGL
takes care of the constraints without user
interaction.
diff --git a/deal.II/examples/step-48/doc/results.dox b/deal.II/examples/step-48/doc/results.dox
index 52308a1754..213aced063 100644
--- a/deal.II/examples/step-48/doc/results.dox
+++ b/deal.II/examples/step-48/doc/results.dox
@@ -66,7 +66,7 @@ product.
Parallel run in 3D
-To demonstrate how the example scales for a parallel run and to demsonstrate
+To demonstrate how the example scales for a parallel run and to demonstrate
that hanging node constraints can be handled in an efficient way, we run the
example in 3D with $\mathcal{Q}_4$ elements. First, we run it on a notebook
with 2 cores (Sandy Bridge CPU) at 2.7 GHz.
@@ -106,7 +106,7 @@ cluster with 2 nodes and each node runs 8 threads, we get the following times:
@endcode
We observe a considerable speedup over the notebook (16 cores versus 2 cores;
-nonethess, one notebook core is considerably faster than one core of the
+nonetheless, one notebook core is considerably faster than one core of the
cluster because of a newer processor architecture). If we run the same program
on 4 nodes with 8 threads on each node, we get:
@code
diff --git a/deal.II/examples/step-48/step-48.cc b/deal.II/examples/step-48/step-48.cc
index 953827c0e0..95d9d67d9d 100644
--- a/deal.II/examples/step-48/step-48.cc
+++ b/deal.II/examples/step-48/step-48.cc
@@ -345,7 +345,7 @@ namespace Step48
// dimensions of extent $[-15,15]$. We refine the mesh more in the center of
// the domain since the solution is concentrated there. We first refine all
// cells whose center is within a radius of 11, and then refine once more
- // for a radius 6. This is simple ad-hoc refinement could be done better by
+ // for a radius 6. This simple ad hoc refinement could be done better by
// adapting the mesh to the solution using error estimators during the time
// stepping as done in other example programs, and using
// parallel::distributed::SolutionTransfer to transfer the solution to the
diff --git a/deal.II/examples/step-49/doc/intro.dox b/deal.II/examples/step-49/doc/intro.dox
index 8bac7c1f8a..992764b57b 100644
--- a/deal.II/examples/step-49/doc/intro.dox
+++ b/deal.II/examples/step-49/doc/intro.dox
@@ -130,7 +130,7 @@ of the current program):
Modifying a Mesh
-After aquiring one (or several) meshes in the ways described above, there are
+After acquiring one (or several) meshes in the ways described above, there are
many ways to manipulate them before using them in a finite element
computation.
diff --git a/deal.II/examples/step-49/doc/results.dox b/deal.II/examples/step-49/doc/results.dox
index 30e01d4f99..f418c219a4 100644
--- a/deal.II/examples/step-49/doc/results.dox
+++ b/deal.II/examples/step-49/doc/results.dox
@@ -1,7 +1,7 @@
Results
The program produces a series of .eps
files of the
-Triangulations. The methods are discussed above.
+triangulations. The methods are discussed above.
Next steps: Curved boundaries
diff --git a/deal.II/examples/step-50/step-50.cc b/deal.II/examples/step-50/step-50.cc
index 72198b1a4d..8d986f5be3 100644
--- a/deal.II/examples/step-50/step-50.cc
+++ b/deal.II/examples/step-50/step-50.cc
@@ -21,7 +21,7 @@
*/
-// parallel geometric multi-grid. work in progress!
+// parallel geometric multigrid. work in progress!
// As discussed in the introduction, most of
// this program is copied almost verbatim
@@ -82,7 +82,7 @@
#include
// These, now, are the include necessary for
-// the multi-level methods. The first two
+// the multilevel methods. The first two
// declare classes that allow us to enumerate
// degrees of freedom not only on the finest
// mesh level, but also on intermediate
@@ -387,7 +387,7 @@ namespace Step50
// Now for the things that concern the
// multigrid data structures. First, we
- // resize the multi-level objects to hold
+ // resize the multilevel objects to hold
// matrices and sparsity patterns for every
// level. The coarse level is zero (this is
// mandatory right now but may change in a
@@ -456,7 +456,7 @@ namespace Step50
// @sect4{LaplaceProblem::assemble_system}
// The following function assembles the
- // linear system on the finesh level of the
+ // linear system on the finest level of the
// mesh. It is almost exactly the same as in
// step-6, with the exception that we don't
// eliminate hanging nodes and boundary
@@ -540,7 +540,7 @@ namespace Step50
// over all existing cells instead of just
// the active ones, and the results must be
// entered into the correct matrix. Note also
- // that since we only do multi-level
+ // that since we only do multilevel
// preconditioning, no right-hand side needs
// to be assembled here.
//
@@ -579,8 +579,8 @@ namespace Step50
// level that are located on interfaces between adaptively refined
// levels, and those that lie on the interface but also on the
// exterior boundary of the domain. As in many other parts of the
- // library, we do this by using boolean masks, i.e. vectors of
- // booleans each element of which indicates whether the
+ // library, we do this by using Boolean masks, i.e. vectors of
+ // Booleans each element of which indicates whether the
// corresponding degree of freedom index is an interface DoF or
// not. The MGConstraints
already computed the
// information for us when we called initialize in
@@ -755,7 +755,7 @@ namespace Step50
MGTransferPrebuilt mg_transfer(hanging_node_constraints, mg_constrained_dofs);
// Now the prolongation matrix has to be built. This matrix needs
// to take the boundary values on each level into account and
- // needs to know about the indices at the refinement egdes. The
+ // needs to know about the indices at the refinement edges. The
// MGConstraints
knows about that so pass it as an
// argument.
mg_transfer.build_matrices(mg_dof_handler);
diff --git a/deal.II/examples/step-51/step-51.cc b/deal.II/examples/step-51/step-51.cc
index 717356f1c7..93e756143e 100644
--- a/deal.II/examples/step-51/step-51.cc
+++ b/deal.II/examples/step-51/step-51.cc
@@ -301,7 +301,7 @@ double RightHandSide::value (const Point &p,
// difference is the use of 3 different sets of DoFHandler
and FE
// objects, along with the ChunkSparseMatrix
and the
// corresponding solutions vectors. We also use WorkStream to enable a
-// multi-threaded local solution process which exploits the embarrassingly
+// multithreaded local solution process which exploits the embarrassingly
// parallel nature of the local solver. For WorkStream, we define the local
// operations on a cell and a copy function into the global matrix and
// vector. We do this once for the assembly (which is run twice, once when we
@@ -360,7 +360,7 @@ private:
Vector system_rhs;
// As stated in the introduction, HDG solutions can be post-processed to
- // attain superconvegence rates of $\mathcal{O}(h^{p+2})$. The
+ // attain superconvergence rates of $\mathcal{O}(h^{p+2})$. The
// post-processed solution is a discontinuous finite element solution
// representing the primal variable on the interior of each cell. We define
// a FE type of degree $p+1$ to represent this post-processed solution,
diff --git a/deal.II/examples/step-6/step-6.cc b/deal.II/examples/step-6/step-6.cc
index 0f9495a5ea..a8434e0ee6 100644
--- a/deal.II/examples/step-6/step-6.cc
+++ b/deal.II/examples/step-6/step-6.cc
@@ -77,7 +77,7 @@
#include
// Finally, we need a simple way to actually compute the refinement indicators
-// based on some error estimat. While in general, adaptivity is very
+// based on some error estimate. While in general, adaptivity is very
// problem-specific, the error indicator in the following file often yields
// quite nicely adapted grids for a wide class of problems.
#include
@@ -299,7 +299,7 @@ void Step6::setup_system ()
// After setting up all the degrees of freedom, here are now the
// differences compared to step-5, all of which are related to constraints
- // associated with the hanging nodes. In the class desclaration, we have
+ // associated with the hanging nodes. In the class declaration, we have
// already allocated space for an object constraints
that will
// hold a list of these constraints (they form a matrix, which is reflected
// in the name of the class, but that is immaterial for the moment). Now we
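// The constraint setup that these comments lead up to usually takes the
// following shape (a generic sketch under the deal.II API of that time, not
// the exact lines of the patched file):
@code
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/lac/constraint_matrix.h>

template <int dim>
void setup_constraints (const dealii::DoFHandler<dim> &dof_handler,
                        dealii::ConstraintMatrix      &constraints)
{
  constraints.clear ();
  // Fill the object with one algebraic constraint per hanging node ...
  dealii::DoFTools::make_hanging_node_constraints (dof_handler, constraints);
  // ... and close it so it can be used while assembling the linear system.
  constraints.close ();
}
@endcode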
@@ -764,7 +764,7 @@ int main ()
// approximately the same string as would be generated if the exception was
// thrown using the Assert
macro. You have seen the output of
// such an exception in the previous example, and you then know that it
- // contains the file and line number of where the exception occured, and
+ // contains the file and line number of where the exception occurred, and
// some other information. This is also what the following statements would
// print.
//
diff --git a/deal.II/examples/step-8/doc/intro.dox b/deal.II/examples/step-8/doc/intro.dox
index 9c04cb3435..5dd3e7bff4 100644
--- a/deal.II/examples/step-8/doc/intro.dox
+++ b/deal.II/examples/step-8/doc/intro.dox
@@ -96,7 +96,7 @@ the $z$ direction; this is in contrast to many other two-dimensional equations
that can be obtained by assuming that the body has infinite extent in
$z$-direction and that the solution function does not depend on the $z$
coordinate. On the other hand, there are equations for two-dimensional models
-of elasticity; see for example the wikipedia article on plane
strain, antiplane shear and DoFHandler class and all other
// classes used here are fully aware that the finite element we want to use
// is vector-valued, and take care of the vector-valuedness of the finite
diff --git a/deal.II/examples/step-9/step-9.cc b/deal.II/examples/step-9/step-9.cc
index 1feb9d9c0a..c82d9bab0d 100644
--- a/deal.II/examples/step-9/step-9.cc
+++ b/deal.II/examples/step-9/step-9.cc
@@ -46,7 +46,7 @@
#include
#include
-// The following two files provide classes and information for multi-threaded
+// The following two files provide classes and information for multithreaded
// programs. In the first one, the classes and functions are declared which we
// need to start new threads and to wait for threads to return (i.e. the
// Thread
class and the new_thread
functions). The
@@ -517,7 +517,7 @@ namespace Step9
// information, we could use the value of the global variable
// multithread_info.n_cpus
, which is determined at start-up
// time of your program automatically. (Note that if the library was not
- // configured for multi-threading, then the number of CPUs is set to one.)
+ // configured for multithreading, then the number of CPUs is set to one.)
// However, sometimes there might be reasons to use another value. For
// example, you might want to use fewer processors than there are in your
// system in order not to use too many computational resources. On the
@@ -555,7 +555,7 @@ namespace Step9
// can be omitted. (However, you still need to write the angle brackets,
// even if they are empty.)
//
- // If you did not configure for multi-threading, then the
+ // If you did not configure for multithreading, then the
// new_thread
function that is supposed to start a new thread
// in parallel only executes the function which should be run in parallel,
// waits for it to return (i.e. the function is executed sequentially),
@@ -569,7 +569,7 @@ namespace Step9
// the same size. Each thread will then assemble the local contributions
// of the cells within its chunk and transfer these contributions to the
// global matrix. As splitting a range of cells is a rather common task
- // when using multi-threading, there is a function in the
+ // when using multithreading, there is a function in the
// Threads
namespace that does exactly this. In fact, it does
// this not only for a range of cell iterators, but for iterators in
// general, so you could use it for std::vector::iterator
or
@@ -596,7 +596,7 @@ namespace Step9
n_threads);
// Finally, for each of the chunks of iterators we have computed, start
- // one thread (or if not in multi-thread mode: execute assembly on these
+ // one thread (or if not in multithread mode: execute assembly on these
// chunks sequentially). This is done using the following sequence of
// function calls:
for (unsigned int thread=0; threadassemble_system_interval function on the present object
// (the this
pointer), with the arguments following in the
@@ -626,7 +626,7 @@ namespace Step9
// container, which just calls join
on each of the thread
// objects it stores.
//
- // Again, if the library was not configured to use multi-threading, then
+ // Again, if the library was not configured to use multithreading, then
// no threads can run in parallel and the function returns immediately.
threads.join_all ();
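// The pattern these comments describe, in a self-contained form (a sketch
// assuming the Threads interface of deal.II at the time; names such as
// do_work and run_in_parallel are placeholders):
@code
#include <deal.II/base/thread_management.h>

void do_work (const unsigned int begin, const unsigned int end)
{
  // ... assemble the cells in the half-open range [begin, end) ...
}

void run_in_parallel (const unsigned int n_intervals)
{
  dealii::Threads::ThreadGroup<> threads;
  for (unsigned int i = 0; i < n_intervals; ++i)
    threads += dealii::Threads::new_thread (&do_work, i, i + 1);

  // Wait for all threads to finish. If the library was not configured for
  // multithreading, each new_thread call already ran its function
  // sequentially, and join_all returns immediately.
  threads.join_all ();
}
@endcode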
@@ -855,7 +855,7 @@ namespace Step9
// i.e. other threads can now enter into the protected section by
// acquiring the lock. Two final notes are in place here, however:
//
- // 1. If the library was not configured for multi-threading, then
+ // 1. If the library was not configured for multithreading, then
// there can't be parallel threads and there is no need to
// synchronize. Thus, the lock
and release
// functions are no-ops, i.e. they return without doing anything.
@@ -1283,7 +1283,7 @@ namespace Step9
// can determine an approximation of the gradient for the present
// cell, then we need to have passed over vectors y
which
// span the whole space, otherwise we would not have all components of
- // the gradient. This is indicated by the invertability of the matrix.
+ // the gradient. This is indicated by the invertibility of the matrix.
//
// If the matrix should not be invertible, this means that the present
// cell had an insufficient number of active neighbors. In contrast to
diff --git a/deal.II/include/deal.II/base/config.h.in b/deal.II/include/deal.II/base/config.h.in
index ca90c59df9..ad1681b62e 100644
--- a/deal.II/include/deal.II/base/config.h.in
+++ b/deal.II/include/deal.II/base/config.h.in
@@ -347,6 +347,31 @@
#cmakedefine DEAL_II_WITH_P4EST
#ifdef DEAL_II_WITH_P4EST
# define DEAL_II_USE_P4EST
+
+# define DEAL_II_P4EST_VERSION_MAJOR @P4EST_VERSION_MAJOR@
+# define DEAL_II_P4EST_VERSION_MINOR @P4EST_VERSION_MINOR@
+# define DEAL_II_P4EST_VERSION_SUBMINOR @P4EST_VERSION_SUBMINOR@
+# define DEAL_II_P4EST_VERSION_PATCH @P4EST_VERSION_PATCH@
+
+# define DEAL_II_P4EST_VERSION_GTE(major,minor,subminor,patch) \
+ ((DEAL_II_P4EST_VERSION_MAJOR * 1000000 + \
+ DEAL_II_P4EST_VERSION_MINOR * 10000 + \
+ DEAL_II_P4EST_VERSION_SUBMINOR * 100 + \
+ DEAL_II_P4EST_VERSION_PATCH) \
+ >= \
+ (major)*1000000 + (minor)*10000 + (subminor)*100 + (patch))
+#else
+ // p4est up to 0.3.4.1 didn't define P4EST_VERSION_*. Since
+ // we don't support anything before 0.3.4, we assume 0.3.4.
+ // This means that we can't use the new features in 0.3.4.1.
+# define DEAL_II_P4EST_VERSION_GTE(major,minor,subminor,patch) \
+ ((0 * 1000000 + \
+ 3 * 10000 + \
+ 4 * 100 + \
+ 0) \
+ >= \
+ (major)*1000000 + (minor)*10000 + (subminor)*100 + (patch))
+
#endif
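// A sketch of how the new version guard could be used in library code
// (illustration only; the comments inside the guard are placeholders, not
// actual p4est function names):
@code
#if DEAL_II_P4EST_VERSION_GTE(0,3,4,1)
  // code path that relies on functionality first available in p4est 0.3.4.1
#else
  // fallback that works with the oldest supported p4est release, 0.3.4
#endif
@endcode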
diff --git a/deal.II/include/deal.II/distributed/tria.h b/deal.II/include/deal.II/distributed/tria.h
index d17c8c082d..6275739a61 100644
--- a/deal.II/include/deal.II/distributed/tria.h
+++ b/deal.II/include/deal.II/distributed/tria.h
@@ -970,8 +970,8 @@ namespace parallel
* these variables at a
* couple places anyway.
*/
- std::vector coarse_cell_to_p4est_tree_permutation;
- std::vector p4est_tree_to_coarse_cell_permutation;
+ std::vector coarse_cell_to_p4est_tree_permutation;
+ std::vector p4est_tree_to_coarse_cell_permutation;
/**
* dummy settings
diff --git a/deal.II/include/deal.II/dofs/dof_accessor.templates.h b/deal.II/include/deal.II/dofs/dof_accessor.templates.h
index 989591f843..0a6ce9538c 100644
--- a/deal.II/include/deal.II/dofs/dof_accessor.templates.h
+++ b/deal.II/include/deal.II/dofs/dof_accessor.templates.h
@@ -476,12 +476,10 @@ namespace internal
const unsigned int local_index,
const dealii::internal::int2type<1> &)
{
- return dof_handler.levels[obj_level]->dof_object.
- get_dof_index (dof_handler,
- obj_index,
+ return dof_handler.levels[obj_level]->
+ get_dof_index (obj_index,
fe_index,
- local_index,
- obj_level);
+ local_index);
}
@@ -496,13 +494,11 @@ namespace internal
const dealii::internal::int2type<1> &,
const types::global_dof_index global_index)
{
- dof_handler.levels[obj_level]->dof_object.
- set_dof_index (dof_handler,
- obj_index,
+ dof_handler.levels[obj_level]->
+ set_dof_index (obj_index,
fe_index,
local_index,
- global_index,
- obj_level);
+ global_index);
}
@@ -518,7 +514,7 @@ namespace internal
{
return dof_handler.faces->lines.
get_dof_index (dof_handler,
- obj_index,
+ obj_index,
fe_index,
local_index,
obj_level);
@@ -556,12 +552,10 @@ namespace internal
const unsigned int local_index,
const dealii::internal::int2type<2> &)
{
- return dof_handler.levels[obj_level]->dof_object.
- get_dof_index (dof_handler,
- obj_index,
+ return dof_handler.levels[obj_level]->
+ get_dof_index (obj_index,
fe_index,
- local_index,
- obj_level);
+ local_index);
}
@@ -576,13 +570,11 @@ namespace internal
const dealii::internal::int2type<2> &,
const types::global_dof_index global_index)
{
- dof_handler.levels[obj_level]->dof_object.
- set_dof_index (dof_handler,
- obj_index,
+ dof_handler.levels[obj_level]->
+ set_dof_index (obj_index,
fe_index,
local_index,
- global_index,
- obj_level);
+ global_index);
}
@@ -676,12 +668,10 @@ namespace internal
const unsigned int local_index,
const dealii::internal::int2type<3> &)
{
- return dof_handler.levels[obj_level]->dof_object.
- get_dof_index (dof_handler,
- obj_index,
+ return dof_handler.levels[obj_level]->
+ get_dof_index (obj_index,
fe_index,
- local_index,
- obj_level);
+ local_index);
}
@@ -696,13 +686,11 @@ namespace internal
const dealii::internal::int2type<3> &,
const types::global_dof_index global_index)
{
- dof_handler.levels[obj_level]->dof_object.
- set_dof_index (dof_handler,
- obj_index,
+ dof_handler.levels[obj_level]->
+ set_dof_index (obj_index,
fe_index,
local_index,
- global_index,
- obj_level);
+ global_index);
}
@@ -825,23 +813,21 @@ namespace internal
const unsigned int fe_index,
const dealii::internal::int2type<1> &)
{
- return dof_handler.levels[obj_level]->dof_object.fe_index_is_active(dof_handler,
- obj_index,
- fe_index,
- obj_level);
+ return dof_handler.levels[obj_level]->fe_index_is_active(obj_index,
+ fe_index);
}
template
static
unsigned int
- n_active_fe_indices (const dealii::hp::DoFHandler<1,spacedim> &dof_handler,
+ n_active_fe_indices (const dealii::hp::DoFHandler<1,spacedim> &,
const unsigned int obj_level,
const unsigned int obj_index,
const dealii::internal::int2type<1> &)
{
- return dof_handler.levels[obj_level]->dof_object.n_active_fe_indices (dof_handler,
- obj_index);
+ // on a cell, the number of active elements is one
+ return 1;
}
@@ -855,10 +841,8 @@ namespace internal
const unsigned int n,
const dealii::internal::int2type<1> &)
{
- return dof_handler.levels[obj_level]->dof_object.nth_active_fe_index (dof_handler,
- obj_level,
- obj_index,
- n);
+ Assert (n==0, ExcMessage("On cells, there can only be one active FE index"));
+ return dof_handler.levels[obj_level]->active_fe_index (obj_index);
}
@@ -917,23 +901,21 @@ namespace internal
const unsigned int fe_index,
const dealii::internal::int2type<2> &)
{
- return dof_handler.levels[obj_level]->dof_object.fe_index_is_active(dof_handler,
- obj_index,
- fe_index,
- obj_level);
+ return dof_handler.levels[obj_level]->fe_index_is_active(obj_index,
+ fe_index);
}
template
static
unsigned int
- n_active_fe_indices (const dealii::hp::DoFHandler<2,spacedim> &dof_handler,
+ n_active_fe_indices (const dealii::hp::DoFHandler<2,spacedim> &,
const unsigned int obj_level,
const unsigned int obj_index,
const dealii::internal::int2type<2> &)
{
- return dof_handler.levels[obj_level]->dof_object.n_active_fe_indices (dof_handler,
- obj_index);
+ // on a cell, the number of active elements is one
+ return 1;
}
@@ -947,10 +929,8 @@ namespace internal
const unsigned int n,
const dealii::internal::int2type<2> &)
{
- return dof_handler.levels[obj_level]->dof_object.nth_active_fe_index (dof_handler,
- obj_level,
- obj_index,
- n);
+ Assert (n==0, ExcMessage("On cells, there can only be one active FE index"));
+ return dof_handler.levels[obj_level]->active_fe_index (obj_index);
}
@@ -1026,10 +1006,8 @@ namespace internal
const unsigned int fe_index,
const dealii::internal::int2type<3> &)
{
- return dof_handler.levels[obj_level]->dof_object.fe_index_is_active(dof_handler,
- obj_index,
- fe_index,
- obj_level);
+ return dof_handler.levels[obj_level]->fe_index_is_active(obj_index,
+ fe_index);
}
@@ -1067,13 +1045,13 @@ namespace internal
template
static
unsigned int
- n_active_fe_indices (const dealii::hp::DoFHandler<3,spacedim> &dof_handler,
+ n_active_fe_indices (const dealii::hp::DoFHandler<3,spacedim> &,
const unsigned int obj_level,
const unsigned int obj_index,
const dealii::internal::int2type<3> &)
{
- return dof_handler.levels[obj_level]->dof_object.n_active_fe_indices (dof_handler,
- obj_index);
+ // on a cell, the number of active elements is one
+ return 1;
}
@@ -1087,10 +1065,8 @@ namespace internal
const unsigned int n,
const dealii::internal::int2type<3> &)
{
- return dof_handler.levels[obj_level]->dof_object.nth_active_fe_index (dof_handler,
- obj_level,
- obj_index,
- n);
+ Assert (n==0, ExcMessage("On cells, there can only be one active FE index"));
+ return dof_handler.levels[obj_level]->active_fe_index (obj_index);
}
/**
@@ -2827,8 +2803,10 @@ namespace internal
types::global_dof_index *cache = &accessor.dof_handler->levels[accessor.level()]
->cell_dof_indices_cache[accessor.present_index *
accessor.get_fe().dofs_per_cell];
- for ( ; local_values_begin != local_values_end; ++local_values_begin, ++cache)
- *local_values_begin = values(*cache);
+
+ values.extract_subvector_to (cache,
+ cache + accessor.get_fe().dofs_per_cell,
+ local_values_begin);
}
/**
@@ -2858,8 +2836,9 @@ namespace internal
std::vector local_dof_indices (dofs_per_cell);
get_dof_indices (accessor, local_dof_indices);
- for (unsigned int i=0; i(accessor.level()) < accessor.dof_handler->levels.size(),
ExcMessage ("DoFHandler not initialized"));
- Assert (static_cast::size_type>(accessor.present_index) <
- accessor.dof_handler->levels[accessor.level()]->active_fe_indices.size (),
- ExcIndexRange (accessor.present_index, 0,
- accessor.dof_handler->levels[accessor.level()]->active_fe_indices.size ()));
+
return accessor.dof_handler->levels[accessor.level()]
- ->active_fe_indices[accessor.present_index];
+ ->active_fe_index(accessor.present_index);
}
@@ -3046,12 +3022,9 @@ namespace internal
Assert (static_cast(accessor.level()) <
accessor.dof_handler->levels.size(),
ExcMessage ("DoFHandler not initialized"));
- Assert (static_cast::size_type>(accessor.present_index) <
- accessor.dof_handler->levels[accessor.level()]->active_fe_indices.size (),
- ExcIndexRange (accessor.present_index, 0,
- accessor.dof_handler->levels[accessor.level()]->active_fe_indices.size ()));
+
accessor.dof_handler->levels[accessor.level()]
- ->active_fe_indices[accessor.present_index] = i;
+ ->set_active_fe_index (accessor.present_index, i);
}
diff --git a/deal.II/include/deal.II/fe/fe_values.h b/deal.II/include/deal.II/fe/fe_values.h
index 1b0164b656..4963d54c30 100644
--- a/deal.II/include/deal.II/fe/fe_values.h
+++ b/deal.II/include/deal.II/fe/fe_values.h
@@ -440,6 +440,17 @@ namespace FEValuesViews
* this class should not be used for this
* context.
*
+   * This class allows one to query the
+ * value, gradient and divergence of
+ * (components of) shape functions
+ * and solutions representing
+ * vectors. The
+ * gradient of a vector
+ * $d_{k}, 0\le k<\text{dim}$ is
+ * defined as
+ * $S_{ij} = \frac{\partial d_{i}}{\partial x_j},
+ * 0\le i,j<\text{dim}$.
+ *
* You get an object of this type if you
* apply a FEValuesExtractors::Vector to an
* FEValues, FEFaceValues or
@@ -467,6 +478,9 @@ namespace FEValuesViews
* dim
components of the
* finite element, the gradient is a
* Tensor@<2,spacedim@>
.
+ *
+ * See the general documentation of this class for how exactly
+ * the gradient of a vector is defined.
*/
typedef dealii::Tensor<2,spacedim> gradient_type;
@@ -647,6 +661,9 @@ namespace FEValuesViews
* function and quadrature point
* selected by the arguments.
*
+ * See the general documentation of this class for how exactly
+ * the gradient of a vector is defined.
+ *
* @note The meaning of the arguments
* is as documented for the value()
* function.
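
Because the convention $S_{ij} = \partial d_i/\partial x_j$ is easy to get backwards, a short, hedged usage sketch may help; it assumes a standard vector-valued FE_Q element and elides error handling. The extractor-based access shown is the documented way to obtain this gradient.

    #include <deal.II/base/quadrature_lib.h>
    #include <deal.II/dofs/dof_handler.h>
    #include <deal.II/fe/fe_q.h>
    #include <deal.II/fe/fe_system.h>
    #include <deal.II/fe/fe_values.h>
    #include <deal.II/grid/grid_generator.h>
    #include <deal.II/grid/tria.h>

    using namespace dealii;

    void print_first_shape_gradient ()
    {
      const int dim = 2;

      Triangulation<dim> triangulation;
      GridGenerator::hyper_cube (triangulation);

      FESystem<dim>   fe (FE_Q<dim>(1), dim);      // a vector-valued element
      DoFHandler<dim> dof_handler (triangulation);
      dof_handler.distribute_dofs (fe);

      QGauss<dim>   quadrature (2);
      FEValues<dim> fe_values (fe, quadrature, update_gradients);
      fe_values.reinit (dof_handler.begin_active());

      const FEValuesExtractors::Vector displacements (0);

      // S[i][j] = d d_i / d x_j for shape function 0 at quadrature point 0,
      // following the convention documented above
      const Tensor<2,dim> S = fe_values[displacements].gradient (0, 0);
      (void)S;
    }
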
diff --git a/deal.II/include/deal.II/grid/grid_tools.h b/deal.II/include/deal.II/grid/grid_tools.h
index bdbc8ab792..eabd288760 100644
--- a/deal.II/include/deal.II/grid/grid_tools.h
+++ b/deal.II/include/deal.II/grid/grid_tools.h
@@ -1084,7 +1084,11 @@ namespace GridTools
*
* This function tries to match all faces belonging to the first
* boundary with faces belonging to the second boundary with the help
- * of orthogonal_equality.
+ * of orthogonal_equality().
+ *
+ * The bitset that is returned together with the second face encodes the
+ * _relative_ orientation of the first face with respect to the second
+   * face; see the documentation of orthogonal_equality() for further details.
*
* The @p offset is a vector tangential to the faces that is added to the
* location of vertices of the 'first' boundary when attempting to match
@@ -1099,13 +1103,14 @@ namespace GridTools
const typename identity::type &end,
const types::boundary_id b_id1,
const types::boundary_id b_id2,
- int direction,
+ const int direction,
const dealii::Tensor<1,FaceIterator::AccessorType::space_dimension> &offset);
/**
* Same function as above, but accepts a Triangulation or DoFHandler
- * object @p dof_handler instead of an explicit face iterator range.
+ * object @p container (a container is a collection of objects, here a
+ * collection of cells) instead of an explicit face iterator range.
*
* This function will collect periodic face pairs on the highest (i.e.
* coarsest) mesh level.
@@ -1114,10 +1119,10 @@ namespace GridTools
*/
template
std::map > >
- collect_periodic_face_pairs (const DH &dof_handler, /*TODO: Name*/
+ collect_periodic_face_pairs (const DH &container,
const types::boundary_id b_id1,
const types::boundary_id b_id2,
- int direction,
+ const int direction,
const dealii::Tensor<1,DH::space_dimension> &offset);
@@ -1145,7 +1150,7 @@ namespace GridTools
std::map
collect_periodic_face_pairs (const DH &dof_handler, /*TODO: Name*/
const types::boundary_id b_id,
- int direction,
+ const int direction,
const dealii::Tensor<1,DH::space_dimension> &offset);
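
A hedged usage sketch of the container-based overload follows. It assumes the map-of-face-pairs return type with a std::bitset<3> orientation flag that the declarations above suggest; the boundary ids and the zero offset are illustrative only.

    #include <deal.II/base/tensor.h>
    #include <deal.II/grid/grid_generator.h>
    #include <deal.II/grid/grid_tools.h>
    #include <deal.II/grid/tria.h>

    #include <bitset>
    #include <map>
    #include <utility>

    using namespace dealii;

    void match_periodic_faces ()
    {
      Triangulation<2> triangulation;
      GridGenerator::hyper_cube (triangulation);

      // faces 0 and 1 of the single cell are the two faces orthogonal to the
      // x-axis; mark them as the boundaries to be matched
      triangulation.begin_active()->face(0)->set_boundary_indicator (1);
      triangulation.begin_active()->face(1)->set_boundary_indicator (2);

      // match boundary id 1 against boundary id 2 in the x-direction
      // (direction == 0); no tangential offset is needed here
      typedef Triangulation<2>::face_iterator FaceIterator;
      const std::map<FaceIterator, std::pair<FaceIterator, std::bitset<3> > >
        matched_pairs
          = GridTools::collect_periodic_face_pairs (triangulation,
                                                    1, 2, 0,
                                                    Tensor<1,2>());
      (void)matched_pairs;
    }
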
diff --git a/deal.II/include/deal.II/hp/dof_levels.h b/deal.II/include/deal.II/hp/dof_levels.h
index 19d9f7e33d..761af81512 100644
--- a/deal.II/include/deal.II/hp/dof_levels.h
+++ b/deal.II/include/deal.II/hp/dof_levels.h
@@ -19,116 +19,168 @@
#include
-#include
+#include
#include
+
DEAL_II_NAMESPACE_OPEN
+namespace hp
+{
+ template class DoFHandler;
+}
+
+
namespace internal
{
namespace hp
{
- template
- class DoFLevel;
+ namespace DoFHandler
+ {
+ struct Implementation;
+ }
+ }
+}
+
+namespace internal
+{
+ namespace hp
+ {
/**
- * Store the indices of the degrees of freedom that are located on
- * objects of dimension @p structdim.
- *
- * The things we store here are very similar to what is stored in the
- * internal::DoFHandler::DoFLevel class hierarchy (see there for more
- * information, in particular on the layout of the class hierarchy,
- * and the use of file names). There are two main
- * differences, discussed in the following subsections. In addition to
- * the data already stored by the internal::DoFHandler::DoFLevel
- * classes, we also have to store which finite element each cell
- * uses.
- *
- *
- * Offset computations
- *
- * For hp methods, not all cells may use the same finite element, and
- * it is consequently more complicated to determine where the DoF
- * indices for a given line, quad, or hex are stored. As described in
- * the documentation of the internal::DoFHandler::DoFLevel class, we
- * can compute the location of the first line DoF, for example, by
- * calculating the offset as line_index *
- * dof_handler.get_fe().dofs_per_line
. This of course doesn't
- * work any more if different lines may have different numbers of
- * degrees of freedom associated with them. Consequently, rather than
- * using this simple multiplication, each of the lines.dofs, quads.dofs,
- * and hexes.dofs arrays has an associated array lines.dof_offsets,
- * quads.dof_offsets, and hexes.dof_offsets. The data corresponding to a
- * line then starts at index line_dof_offsets[line_index]
- * within the line_dofs
array.
- *
- *
- * Multiple data sets per object
- *
- * If an object corresponds to a cell, the global dof indices of this
- * cell are stored at the location indicated above in sequential
- * order.
- *
- * However, if two adjacent cells use different finite elements, then
- * the face that they share needs to store DoF indices for both
- * involved finite elements. While faces therefore have to have at
- * most two sets of DoF indices, it is easy to see that vertices for
- * example can have as many sets of DoF indices associated with them
- * as there are adjacent cells, and the same holds for lines in 3d.
- *
- * Consequently, for objects that have a lower dimensionality than
- * cells, we have to store a map from the finite element index to the
- * set of DoF indices associated. Since real sets are typically very
- * inefficient to store, and since most of the time we expect the
- * number of individual keys to be small (frequently, adjacent cells
- * will have the same finite element, and only a single entry will
- * exist in the map), what we do is instead to store a linked list. In
- * this format, the first entry starting at position
- * lines.dofs[lines.dof_offsets[line_index]]
will denote
- * the finite element index of the set of DoF indices following; after
- * this set, we will store the finite element index of the second set
- * followed by the corresponding DoF indices; and so on. Finally, when
- * all finite element indices adjacent to this object have been
- * covered, we write a -1 to indicate the end of the list.
- *
- * Access to this kind of data, as well as the distinction between
- * cells and objects of lower dimensionality are encoded in the
- * accessor functions, DoFObjects::set_dof_index() and
- * DoFLevel::get_dof_index() They are able to traverse this
- * list and pick out or set a DoF index given the finite element index
- * and its location within the set of DoFs corresponding to this
- * finite element.
- *
- *
- * @ingroup hp
- * @author Wolfgang Bangerth, 1998, 2006, Oliver Kayser-Herold 2003.
+     * This is the class that stores the degrees of freedom on cells in an hp
+ * hierarchy. Compared to faces and edges, the task here is simple since
+ * each cell can only have a single active finite element index. Consequently,
+ * all we need is one long array with DoF indices and one array of offsets
+ * where each cell's indices start within the array of indices. This is in
+ * contrast to the DoFObjects class where each face or edge may have more than
+ * one associated finite element with corresponding degrees of freedom.
*/
template
class DoFLevel
{
- public:
+ private:
/**
- * Indices specifying the finite
- * element of hp::FECollection to use
- * for the different cells on the current level. The
- * meaning what a cell is, is
- * dimension specific, therefore also
- * the length of this vector depends
- * on the dimension: in one dimension,
- * the length of this vector equals
- * the length of the @p lines vector,
- * in two dimensions that of the @p
- * quads vector, etc. The vector stores one element per cell
- * since the actiev_fe_index is unique for cells.
+ * Indices specifying the finite element of hp::FECollection to
+ * use for the different cells on the current level. The vector
+ * stores one element per cell since the active_fe_index is
+ * unique for cells.
+ *
+ * If a cell is not active on the level corresponding to the
+ * current object (i.e., it has children on higher levels) then
+ * it does not have an associated fe index and we store
+ * an invalid fe index marker instead.
*/
std::vector active_fe_indices;
/**
- * Store the dof-indices and related data of
- * the cells on the current level corresponding to this object.
+ * Store the start index for the degrees of freedom of each
+ * object in the @p dofs array. If the cell corresponding to
+ * a particular index in this array is not active on this level,
+ * then we do not store any DoFs for it. In that case, the offset
+ * we store here must be an invalid number and indeed we store
+ * (std::vector::size_type)(-1)
+ * for it.
+ *
+ * The type we store is then obviously the type the @p dofs array
+ * uses for indexing.
+ */
+ std::vector::size_type> dof_offsets;
+
+ /**
+       * Store the global indices of the degrees of freedom.
+       * The dof_offsets field determines where each
+ * (active) cell's data is stored.
+ */
+ std::vector dofs;
+
+ public:
+
+ /**
+       * Set the global index of
+       * the @p local_index-th
+       * degree of freedom located
+       * on the object with number @p
+       * obj_index to the value
+       * given by @p global_index.
+       *
+       * The second argument, @p
+       * fe_index, denotes which of
+       * the finite elements
+       * associated with this
+       * object we shall
+       * access; since this class
+       * stores data for cells only,
+       * it must equal the active
+       * FE index of the cell.
+       * Refer to the
+       * general documentation of
+       * the internal::hp::DoFLevel
+       * class template for more
+       * information.
+ */
+ void
+ set_dof_index (const unsigned int obj_index,
+ const unsigned int fe_index,
+ const unsigned int local_index,
+ const types::global_dof_index global_index);
+
+ /**
+       * Return the global index of
+       * the @p local_index-th
+       * degree of freedom located
+       * on the object with number @p
+       * obj_index.
+       *
+       * The second argument, @p
+       * fe_index, denotes which of
+       * the finite elements
+       * associated with this
+       * object we shall
+       * access; since this class
+       * stores data for cells only,
+       * it must equal the active
+       * FE index of the cell.
+       * Refer to the
+       * general documentation of
+       * the internal::hp::DoFLevel
+       * class template for more
+       * information.
+ */
+ types::global_dof_index
+ get_dof_index (const unsigned int obj_index,
+ const unsigned int fe_index,
+ const unsigned int local_index) const;
+
+ /**
+ * Return the fe_index of the
+ * active finite element
+ * on this object.
*/
- internal::hp::DoFObjects dof_object;
+ unsigned int
+ active_fe_index (const unsigned int obj_index) const;
+
+ /**
+ * Check whether a given
+ * finite element index is
+ * used on the present
+ * object or not.
+ */
+ bool
+ fe_index_is_active (const unsigned int obj_index,
+ const unsigned int fe_index) const;
+
+ /**
+ * Set the fe_index of the
+ * active finite element
+ * on this object.
+ */
+ void
+ set_active_fe_index (const unsigned int obj_index,
+ const unsigned int fe_index);
/**
* Determine an estimate for the
@@ -136,7 +188,124 @@ namespace internal
* of this object.
*/
std::size_t memory_consumption () const;
+
+ /**
+ * Make hp::DoFHandler and its auxiliary class a friend since it
+ * is the class that needs to create these data structures.
+ */
+ template friend class dealii::hp::DoFHandler;
+ friend struct dealii::internal::hp::DoFHandler::Implementation;
};
+
+
+ // -------------------- template functions --------------------------------
+
+ template
+ inline
+ types::global_dof_index
+ DoFLevel::
+ get_dof_index (const unsigned int obj_index,
+ const unsigned int fe_index,
+ const unsigned int local_index) const
+ {
+ Assert (obj_index < dof_offsets.size(),
+ ExcIndexRange (obj_index, 0, dof_offsets.size()));
+
+ // make sure we are on an
+ // object for which DoFs have
+ // been allocated at all
+ Assert (dof_offsets[obj_index] != numbers::invalid_dof_index,
+ ExcMessage ("You are trying to access degree of freedom "
+ "information for an object on which no such "
+ "information is available"));
+
+ Assert (fe_index == active_fe_indices[obj_index],
+ ExcMessage ("FE index does not match that of the present cell"));
+ return dofs[dof_offsets[obj_index]+local_index];
+ }
+
+
+
+ template
+ inline
+ void
+ DoFLevel::
+ set_dof_index (const unsigned int obj_index,
+ const unsigned int fe_index,
+ const unsigned int local_index,
+ const types::global_dof_index global_index)
+ {
+ Assert (obj_index < dof_offsets.size(),
+ ExcIndexRange (obj_index, 0, dof_offsets.size()));
+
+ // make sure we are on an
+ // object for which DoFs have
+ // been allocated at all
+ Assert (dof_offsets[obj_index] != numbers::invalid_dof_index,
+ ExcMessage ("You are trying to access degree of freedom "
+ "information for an object on which no such "
+ "information is available"));
+
+ Assert (fe_index == active_fe_indices[obj_index],
+ ExcMessage ("FE index does not match that of the present cell"));
+ dofs[dof_offsets[obj_index]+local_index] = global_index;
+ }
+
+
+
+ template
+ inline
+ unsigned int
+ DoFLevel::
+ active_fe_index (const unsigned int obj_index) const
+ {
+ Assert (obj_index < active_fe_indices.size(),
+ ExcIndexRange (obj_index, 0, active_fe_indices.size()));
+
+ return active_fe_indices[obj_index];
+ }
+
+
+
+ template
+ inline
+ bool
+ DoFLevel::
+ fe_index_is_active (const unsigned int obj_index,
+ const unsigned int fe_index) const
+ {
+ const unsigned int invalid_fe_index = numbers::invalid_unsigned_int;
+ Assert ((fe_index != invalid_fe_index),
+ ExcMessage ("You need to specify a FE index when working "
+ "with hp DoFHandlers"));
+
+ // make sure we are on an
+ // object for which DoFs have
+ // been allocated at all
+ Assert (dof_offsets[obj_index] != numbers::invalid_dof_index,
+ ExcMessage ("You are trying to access degree of freedom "
+ "information for an object on which no such "
+ "information is available"));
+
+ Assert (obj_index < active_fe_indices.size(),
+ ExcInternalError());
+ return (fe_index == active_fe_indices[obj_index]);
+ }
+
+
+ template
+ inline
+ void
+ DoFLevel::
+ set_active_fe_index (const unsigned int obj_index,
+ const unsigned int fe_index)
+ {
+ Assert (obj_index < active_fe_indices.size(),
+ ExcIndexRange (obj_index, 0, active_fe_indices.size()));
+
+ active_fe_indices[obj_index] = fe_index;
+ }
+
} // namespace hp
} // namespace internal
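
To make the storage scheme described above concrete, here is a small, self-contained sketch (plain C++, independent of the library) of the offsets-into-a-flat-array idea: one long array of DoF indices plus one offset per cell, with an invalid marker for cells that are not active on the level. All numbers are made up.

    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main ()
    {
      const std::size_t invalid_offset = static_cast<std::size_t>(-1);

      // three cells with 4 DoFs each, but cell 1 is not active on this level
      std::vector<std::size_t>  dof_offsets (3, invalid_offset);
      std::vector<unsigned int> dofs;

      unsigned int next_free_dof = 0;
      for (unsigned int cell = 0; cell < 3; ++cell)
        if (cell != 1)                        // only active cells get storage
          {
            dof_offsets[cell] = dofs.size();
            for (unsigned int i = 0; i < 4; ++i)
              dofs.push_back (next_free_dof++);
          }

      // reading the DoF indices of cell 2 then only needs the offset
      for (unsigned int i = 0; i < 4; ++i)
        std::cout << dofs[dof_offsets[2] + i] << ' ';
      std::cout << std::endl;                 // prints: 4 5 6 7
    }
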
diff --git a/deal.II/include/deal.II/hp/dof_objects.h b/deal.II/include/deal.II/hp/dof_objects.h
index 15bae95dad..59ec451d53 100644
--- a/deal.II/include/deal.II/hp/dof_objects.h
+++ b/deal.II/include/deal.II/hp/dof_objects.h
@@ -32,7 +32,9 @@ namespace internal
/**
* Store the indices of the degrees of freedom which are located on
- * objects of dimension @p dim.
+ * objects of dimension @p structdim < dim, i.e., for faces or edges
+     * of cells. This is in contrast to the internal::hp::DoFLevel class
+ * that stores the DoF indices on cells.
*
* The things we store here are very similar to what is stored in the
* internal::DoFHandler::DoFObjects classes (see there for more
@@ -58,16 +60,12 @@ namespace internal
*
* Multiple data sets per object
*
- * If an object corresponds to a cell, the global dof indices of this
- * cell are stored at the location indicated above in sequential
- * order.
- *
- * However, if two adjacent cells use different finite elements, then
+ * If two adjacent cells use different finite elements, then
* the face that they share needs to store DoF indices for both
* involved finite elements. While faces therefore have to have at
- * most two sets of DoF indices, it is easy to see that vertices for
- * example can have as many sets of DoF indices associated with them
- * as there are adjacent cells, and the same holds for lines in 3d.
+ * most two sets of DoF indices, it is easy to see that edges and
+ * vertices can have as many sets of DoF indices associated with them
+ * as there are adjacent cells.
*
* Consequently, for objects that have a lower dimensionality than
* cells, we have to store a map from the finite element index to the
@@ -87,7 +85,7 @@ namespace internal
* Access to this kind of data, as well as the distinction between
* cells and objects of lower dimensionality are encoded in the
* accessor functions, DoFObjects::set_dof_index() and
- * DoFLevel::get_dof_index() They are able to traverse this
+ * DoFLevel::get_dof_index(). They are able to traverse this
* list and pick out or set a DoF index given the finite element index
* and its location within the set of DoFs corresponding to this
* finite element.
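
The list format described above (an fe_index, then that element's DoF indices, then the next fe_index, and so on, terminated by an invalid marker) can be illustrated with a small, self-contained sketch that mimics the hopping done by the accessor functions; the numbers are made up.

    #include <iostream>
    #include <vector>

    int main ()
    {
      const unsigned int invalid = static_cast<unsigned int>(-1);

      // fe_index 0 has 2 DoFs per object, fe_index 1 has 3
      const unsigned int dofs_per_object[2] = {2, 3};

      // [fe_index=0, 10, 11, fe_index=1, 20, 21, 22, end-of-list]
      const unsigned int list[] = {0, 10, 11, 1, 20, 21, 22, invalid};

      // look up the DoF with local_index==1 of fe_index 1 by hopping along
      // the list, in the same spirit as DoFObjects::get_dof_index()
      const unsigned int fe_index = 1, local_index = 1;
      const unsigned int *pointer = &list[0];
      while (*pointer != invalid)
        {
          if (*pointer == fe_index)
            {
              std::cout << *(pointer + 1 + local_index) << std::endl; // prints 21
              return 0;
            }
          pointer += dofs_per_object[*pointer] + 1;
        }
    }
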
@@ -96,7 +94,7 @@ namespace internal
* @ingroup hp
* @author Tobias Leicht, 2006
*/
- template
+ template
class DoFObjects
{
public:
@@ -142,9 +140,9 @@ namespace internal
* class template for more
* information.
*/
- template
+ template
void
- set_dof_index (const dealii::hp::DoFHandler &dof_handler,
+ set_dof_index (const dealii::hp::DoFHandler &dof_handler,
const unsigned int obj_index,
const unsigned int fe_index,
const unsigned int local_index,
@@ -174,9 +172,9 @@ namespace internal
* class template for more
* information.
*/
- template
+ template
types::global_dof_index
- get_dof_index (const dealii::hp::DoFHandler &dof_handler,
+ get_dof_index (const dealii::hp::DoFHandler &dof_handler,
const unsigned int obj_index,
const unsigned int fe_index,
const unsigned int local_index,
@@ -205,9 +203,9 @@ namespace internal
* been distributed and zero
* is returned.
*/
- template
+ template
unsigned int
- n_active_fe_indices (const dealii::hp::DoFHandler &dof_handler,
+ n_active_fe_indices (const dealii::hp::DoFHandler &dof_handler,
const unsigned int obj_index) const;
/**
@@ -215,9 +213,9 @@ namespace internal
* n-th active finite element
* on this object.
*/
- template
+ template
types::global_dof_index
- nth_active_fe_index (const dealii::hp::DoFHandler &dof_handler,
+ nth_active_fe_index (const dealii::hp::DoFHandler &dof_handler,
const unsigned int obj_level,
const unsigned int obj_index,
const unsigned int n) const;
@@ -228,9 +226,9 @@ namespace internal
* used on the present
* object or not.
*/
- template
+ template
bool
- fe_index_is_active (const dealii::hp::DoFHandler &dof_handler,
+ fe_index_is_active (const dealii::hp::DoFHandler &dof_handler,
const unsigned int obj_index,
const unsigned int fe_index,
const unsigned int obj_level) const;
@@ -244,20 +242,20 @@ namespace internal
};
-// --------------------- inline and template functions ------------------
+ // --------------------- inline and template functions ------------------
- template
- template
+ template
+ template
inline
types::global_dof_index
- DoFObjects::
- get_dof_index (const dealii::hp::DoFHandler &dof_handler,
+ DoFObjects::
+ get_dof_index (const dealii::hp::DoFHandler &dof_handler,
const unsigned int obj_index,
const unsigned int fe_index,
const unsigned int local_index,
const unsigned int obj_level) const
{
- Assert ((fe_index != dealii::hp::DoFHandler::default_fe_index),
+ Assert ((fe_index != dealii::hp::DoFHandler::default_fe_index),
ExcMessage ("You need to specify a FE index when working "
"with hp DoFHandlers"));
Assert (&dof_handler != 0,
@@ -268,10 +266,10 @@ namespace internal
Assert (fe_index < dof_handler.get_fe().size(),
ExcIndexRange (fe_index, 0, dof_handler.get_fe().size()));
Assert (local_index <
- dof_handler.get_fe()[fe_index].template n_dofs_per_object(),
+ dof_handler.get_fe()[fe_index].template n_dofs_per_object(),
ExcIndexRange(local_index, 0,
dof_handler.get_fe()[fe_index]
- .template n_dofs_per_object()));
+ .template n_dofs_per_object()));
Assert (obj_index < dof_offsets.size(),
ExcIndexRange (obj_index, 0, dof_offsets.size()));
@@ -283,64 +281,43 @@ namespace internal
"information for an object on which no such "
"information is available"));
- if (dim == dimm)
- {
- // if we are on a cell, then
- // the only set of indices we
- // store is the one for the
- // cell, which is unique. then
- // fe_index must be
- // active_fe_index
- Assert (fe_index == dof_handler.levels[obj_level]->active_fe_indices[obj_index],
- ExcMessage ("FE index does not match that of the present cell"));
- return dofs[dof_offsets[obj_index]+local_index];
- }
- else
- {
- // we are in higher space
- // dimensions, so there may
- // be multiple finite
- // elements associated with
- // this object. hop along
- // the list of index sets
- // until we find the one
- // with the correct
- // fe_index, and then poke
- // into that part. trigger
- // an exception if we can't
- // find a set for this
- // particular fe_index
- const types::global_dof_index starting_offset = dof_offsets[obj_index];
- const types::global_dof_index *pointer = &dofs[starting_offset];
- while (true)
- {
- Assert (*pointer != numbers::invalid_dof_index,
- ExcInternalError());
- if (*pointer == fe_index)
- return *(pointer + 1 + local_index);
- else
- pointer += static_cast(
- dof_handler.get_fe()[*pointer]
- .template n_dofs_per_object() + 1);
- }
- }
+ Assert (structdim(
+ dof_handler.get_fe()[*pointer]
+ .template n_dofs_per_object() + 1);
+ }
}
- template
- template
+ template
+ template
inline
void
- DoFObjects::
- set_dof_index (const dealii::hp::DoFHandler &dof_handler,
+ DoFObjects::
+ set_dof_index (const dealii::hp::DoFHandler &dof_handler,
const unsigned int obj_index,
const unsigned int fe_index,
const unsigned int local_index,
const types::global_dof_index global_index,
const unsigned int obj_level)
{
- Assert ((fe_index != dealii::hp::DoFHandler::default_fe_index),
+ Assert ((fe_index != dealii::hp::DoFHandler::default_fe_index),
ExcMessage ("You need to specify a FE index when working "
"with hp DoFHandlers"));
Assert (&dof_handler != 0,
@@ -351,10 +328,10 @@ namespace internal
Assert (fe_index < dof_handler.get_fe().size(),
ExcIndexRange (fe_index, 0, dof_handler.get_fe().size()));
Assert (local_index <
- dof_handler.get_fe()[fe_index].template n_dofs_per_object(),
+ dof_handler.get_fe()[fe_index].template n_dofs_per_object(),
ExcIndexRange(local_index, 0,
dof_handler.get_fe()[fe_index]
- .template n_dofs_per_object()));
+ .template n_dofs_per_object()));
Assert (obj_index < dof_offsets.size(),
ExcIndexRange (obj_index, 0, dof_offsets.size()));
@@ -366,62 +343,40 @@ namespace internal
"information for an object on which no such "
"information is available"));
- if (dim == dimm)
- {
- // if we are on a cell, then
- // the only set of indices we
- // store is the one for the
- // cell, which is unique. then
- // fe_index must be
- // active_fe_index
- Assert (fe_index == dof_handler.levels[obj_level]->active_fe_indices[obj_index],
- ExcMessage ("FE index does not match that of the present cell"));
- dofs[dof_offsets[obj_index]+local_index] = global_index;
- }
- else
- {
- // we are in higher space
- // dimensions, so there may
- // be multiple finite
- // elements associated with
- // this object. hop along
- // the list of index sets
- // until we find the one
- // with the correct
- // fe_index, and then poke
- // into that part. trigger
- // an exception if we can't
- // find a set for this
- // particular fe_index
- const types::global_dof_index starting_offset = dof_offsets[obj_index];
- types::global_dof_index *pointer = &dofs[starting_offset];
- while (true)
- {
- Assert (*pointer != numbers::invalid_dof_index,
- ExcInternalError());
- if (*pointer == fe_index)
- {
- *(pointer + 1 + local_index) = global_index;
- return;
- }
- else
- pointer += dof_handler.get_fe()[*pointer]
- .template n_dofs_per_object() + 1;
- }
- }
+ Assert (structdim() + 1;
+ }
}
- template
- template
+ template
+ template
inline
unsigned int
- DoFObjects::
- n_active_fe_indices (const dealii::hp::DoFHandler &dof_handler,
+ DoFObjects::
+ n_active_fe_indices (const dealii::hp::DoFHandler &dof_handler,
const unsigned int obj_index) const
{
- Assert (dim <= dimm, ExcInternalError());
Assert (&dof_handler != 0,
ExcMessage ("No DoFHandler is specified for this iterator"));
Assert (&dof_handler.get_fe() != 0,
@@ -436,57 +391,42 @@ namespace internal
if (dof_offsets[obj_index] == numbers::invalid_dof_index)
return 0;
- // if we are on a cell, then the
- // only set of indices we store
- // is the one for the cell,
- // which is unique
- if (dim == dimm)
- return 1;
- else
- {
- // otherwise, there may be
- // multiple finite elements
- // associated with this
- // object. hop along the
- // list of index sets until
- // we find the one with the
- // correct fe_index, and
- // then poke into that
- // part. trigger an
- // exception if we can't
- // find a set for this
- // particular fe_index
- const unsigned int starting_offset = dof_offsets[obj_index];
- const types::global_dof_index *pointer = &dofs[starting_offset];
- unsigned int counter = 0;
- while (true)
- {
- if (*pointer == numbers::invalid_dof_index)
- // end of list reached
- return counter;
- else
- {
- ++counter;
- pointer += dof_handler.get_fe()[*pointer]
- .template n_dofs_per_object() + 1;
- }
- }
- }
+ Assert (structdim() + 1;
+ }
+ }
}
- template
- template
+ template
+ template
inline
types::global_dof_index
- DoFObjects::
- nth_active_fe_index (const dealii::hp::DoFHandler &dof_handler,
+ DoFObjects::
+ nth_active_fe_index (const dealii::hp::DoFHandler &dof_handler,
const unsigned int obj_level,
const unsigned int obj_index,
const unsigned int n) const
{
- Assert (dim <= dimm, ExcInternalError());
Assert (&dof_handler != 0,
ExcMessage ("No DoFHandler is specified for this iterator"));
Assert (&dof_handler.get_fe() != 0,
@@ -503,65 +443,47 @@ namespace internal
"information for an object on which no such "
"information is available"));
- if (dim == dimm)
- {
- // this is a cell, so there
- // is only a single
- // fe_index
- Assert (n == 0, ExcIndexRange (n, 0, 1));
-
- return dof_handler.levels[obj_level]->active_fe_indices[obj_index];
- }
- else
- {
- Assert (n < n_active_fe_indices(dof_handler, obj_index),
- ExcIndexRange (n, 0,
- n_active_fe_indices(dof_handler, obj_index)));
-
- // we are in higher space
- // dimensions, so there may
- // be multiple finite
- // elements associated with
- // this object. hop along
- // the list of index sets
- // until we find the one
- // with the correct
- // fe_index, and then poke
- // into that part. trigger
- // an exception if we can't
- // find a set for this
- // particular fe_index
- const unsigned int starting_offset = dof_offsets[obj_index];
- const types::global_dof_index *pointer = &dofs[starting_offset];
- unsigned int counter = 0;
- while (true)
- {
- Assert (*pointer != numbers::invalid_dof_index,
- ExcInternalError());
-
- const unsigned int fe_index = *pointer;
-
- Assert (fe_index < dof_handler.get_fe().size(),
- ExcInternalError());
-
- if (counter == n)
- return fe_index;
-
- ++counter;
- pointer += dof_handler.get_fe()[fe_index]
- .template n_dofs_per_object() + 1;
- }
- }
+ Assert (structdim() + 1;
+ }
}
- template
- template
+ template
+ template
inline
bool
- DoFObjects::
- fe_index_is_active (const dealii::hp::DoFHandler &dof_handler,
+ DoFObjects::
+ fe_index_is_active (const dealii::hp::DoFHandler &dof_handler,
const unsigned int obj_index,
const unsigned int fe_index,
const unsigned int obj_level) const
@@ -573,7 +495,7 @@ namespace internal
"this DoFHandler"));
Assert (obj_index < dof_offsets.size(),
ExcIndexRange (obj_index, 0, static_cast(dof_offsets.size())));
- Assert ((fe_index != dealii::hp::DoFHandler::default_fe_index),
+ Assert ((fe_index != dealii::hp::DoFHandler::default_fe_index),
ExcMessage ("You need to specify a FE index when working "
"with hp DoFHandlers"));
Assert (fe_index < dof_handler.get_fe().size(),
@@ -587,47 +509,27 @@ namespace internal
"information for an object on which no such "
"information is available"));
- if (dim == dimm)
- {
- // if we are on a cell,
- // then the only set of
- // indices we store is the
- // one for the cell, which
- // is unique
- Assert (obj_index < dof_handler.levels[obj_level]->active_fe_indices.size(),
- ExcInternalError());
- return (fe_index == dof_handler.levels[obj_level]->active_fe_indices[obj_index]);
- }
- else
- {
- // we are in higher space
- // dimensions, so there may
- // be multiple finite
- // elements associated with
- // this object. hop along
- // the list of index sets
- // until we find the one
- // with the correct
- // fe_index, and then poke
- // into that part. trigger
- // an exception if we can't
- // find a set for this
- // particular fe_index
- const types::global_dof_index starting_offset = dof_offsets[obj_index];
- const types::global_dof_index *pointer = &dofs[starting_offset];
- while (true)
- {
- if (*pointer == numbers::invalid_dof_index)
- // end of list reached
- return false;
- else if (*pointer == fe_index)
- return true;
- else
- pointer += static_cast(
- dof_handler.get_fe()[*pointer]
- .template n_dofs_per_object()+1);
- }
- }
+ Assert (structdim(
+ dof_handler.get_fe()[*pointer]
+ .template n_dofs_per_object()+1);
+ }
}
}
diff --git a/deal.II/include/deal.II/lac/block_vector_base.h b/deal.II/include/deal.II/lac/block_vector_base.h
index c66dfd4730..8991123408 100644
--- a/deal.II/include/deal.II/lac/block_vector_base.h
+++ b/deal.II/include/deal.II/lac/block_vector_base.h
@@ -927,6 +927,29 @@ public:
*/
reference operator[] (const size_type i);
+ /**
+ * A collective get operation: instead
+ * of getting individual elements of a
+   * vector, this function allows one to get
+   * a whole set of elements at once. The
+   * indices of the elements to be read
+   * are stated in the first argument;
+   * the corresponding values are returned in the
+   * second.
+ */
+ template
+ void extract_subvector_to (const std::vector &indices,
+ std::vector &values) const;
+
+ /**
+ * Just as the above, but with pointers.
+   * Useful for minimizing the copying of data.
+ */
+ template
+ void extract_subvector_to (ForwardIterator indices_begin,
+ const ForwardIterator indices_end,
+ OutputIterator values_begin) const;
+
/**
* Copy operator: fill all components of
* the vector with the given scalar
@@ -2524,6 +2547,33 @@ BlockVectorBase::operator[] (const size_type i)
return operator()(i);
}
+
+
+template
+template
+inline
+void BlockVectorBase::extract_subvector_to (const std::vector &indices,
+ std::vector &values) const
+{
+ for (size_type i = 0; i < indices.size(); ++i)
+ values[i] = operator()(indices[i]);
+}
+
+
+
+template
+template
+inline
+void BlockVectorBase::extract_subvector_to (ForwardIterator indices_begin,
+ const ForwardIterator indices_end,
+ OutputIterator values_begin) const
+{
+ while (indices_begin != indices_end) {
+ *values_begin = operator()(*indices_begin);
+ indices_begin++; values_begin++;
+ }
+}
+
#endif // DOXYGEN
DEAL_II_NAMESPACE_CLOSE
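
A brief usage sketch of the new collective get operation, assuming a BlockVector and that the indices are given as the vector's size_type; note that the output vector has to be sized by the caller.

    #include <deal.II/lac/block_vector.h>

    #include <iostream>
    #include <vector>

    using namespace dealii;

    int main ()
    {
      BlockVector<double> v (2, 3);             // two blocks of three entries
      for (unsigned int i = 0; i < v.size(); ++i)
        v(i) = 10.0 * i;

      // read entries 1, 3 and 4 in one call instead of one at a time
      std::vector<BlockVector<double>::size_type> indices;
      indices.push_back (1);
      indices.push_back (3);
      indices.push_back (4);

      std::vector<double> values (indices.size());
      v.extract_subvector_to (indices, values);

      for (unsigned int i = 0; i < values.size(); ++i)
        std::cout << values[i] << ' ';          // prints: 10 30 40
      std::cout << std::endl;
    }
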
diff --git a/deal.II/include/deal.II/lac/parallel_vector.h b/deal.II/include/deal.II/lac/parallel_vector.h
index 613fb61ab4..6d966154a0 100644
--- a/deal.II/include/deal.II/lac/parallel_vector.h
+++ b/deal.II/include/deal.II/lac/parallel_vector.h
@@ -571,6 +571,29 @@ namespace parallel
*/
Number &operator [] (const size_type global_index);
+ /**
+ * A collective get operation: instead
+ * of getting individual elements of a
+       * vector, this function allows one to get
+       * a whole set of elements at once. The
+       * indices of the elements to be read
+       * are stated in the first argument;
+       * the corresponding values are returned in the
+       * second.
+ */
+ template
+ void extract_subvector_to (const std::vector &indices,
+ std::vector &values) const;
+
+ /**
+ * Just as the above, but with pointers.
+       * Useful for minimizing the copying of data.
+ */
+ template
+ void extract_subvector_to (ForwardIterator indices_begin,
+ const ForwardIterator indices_end,
+ OutputIterator values_begin) const;
+
/**
* Read access to the data field specified by @p local_index. Locally
* owned indices can be accessed with indices
@@ -1585,6 +1608,33 @@ namespace parallel
+ template
+ template
+ inline
+ void Vector::extract_subvector_to (const std::vector &indices,
+ std::vector &values) const
+ {
+ for (size_type i = 0; i < indices.size(); ++i)
+ values[i] = operator()(indices[i]);
+ }
+
+
+
+ template
+ template
+ inline
+ void Vector::extract_subvector_to (ForwardIterator indices_begin,
+ const ForwardIterator indices_end,
+ OutputIterator values_begin) const
+ {
+ while (indices_begin != indices_end) {
+ *values_begin = operator()(*indices_begin);
+ indices_begin++; values_begin++;
+ }
+ }
+
+
+
template
inline
Number
diff --git a/deal.II/include/deal.II/lac/petsc_vector_base.h b/deal.II/include/deal.II/lac/petsc_vector_base.h
index 8d0f7e056f..709e1e6185 100644
--- a/deal.II/include/deal.II/lac/petsc_vector_base.h
+++ b/deal.II/include/deal.II/lac/petsc_vector_base.h
@@ -473,6 +473,28 @@ namespace PETScWrappers
void set (const std::vector &indices,
const std::vector &values);
+ /**
+ * A collective get operation: instead
+ * of getting individual elements of a
+       * vector, this function allows one to get
+       * a whole set of elements at once. The
+       * indices of the elements to be read
+       * are stated in the first argument;
+       * the corresponding values are returned in the
+       * second.
+ */
+ void extract_subvector_to (const std::vector &indices,
+ std::vector &values) const;
+
+ /**
+ * Just as the above, but with pointers.
+       * Useful for minimizing the copying of data.
+ */
+ template
+ void extract_subvector_to (const ForwardIterator indices_begin,
+ const ForwardIterator indices_end,
+ OutputIterator values_begin) const;
+
/**
* A collective add operation: This
* function adds a whole set of values
@@ -1222,6 +1244,119 @@ namespace PETScWrappers
return comm;
}
+ inline
+ void VectorBase::extract_subvector_to (const std::vector &indices,
+ std::vector &values) const
+ {
+ extract_subvector_to(&(indices[0]), &(indices[0]) + indices.size(), &(values[0]));
+ }
+
+ template
+ inline
+ void VectorBase::extract_subvector_to (const ForwardIterator indices_begin,
+ const ForwardIterator indices_end,
+ OutputIterator values_begin) const
+ {
+      const PetscInt n_idx = static_cast<PetscInt>(indices_end - indices_begin);
+ if (n_idx == 0)
+ return;
+
+ // if we are dealing
+ // with a parallel vector
+ if (ghosted )
+ {
+
+ int ierr;
+
+ // there is the possibility
+ // that the vector has
+ // ghost elements. in that
+ // case, we first need to
+ // figure out which
+ // elements we own locally,
+ // then get a pointer to
+ // the elements that are
+ // stored here (both the
+ // ones we own as well as
+ // the ghost elements). in
+ // this array, the locally
+ // owned elements come
+ // first followed by the
+ // ghost elements whose
+ // position we can get from
+ // an index set
+ PetscInt begin, end, i;
+ ierr = VecGetOwnershipRange (vector, &begin, &end);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ Vec locally_stored_elements = PETSC_NULL;
+ ierr = VecGhostGetLocalForm(vector, &locally_stored_elements);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ PetscInt lsize;
+ ierr = VecGetSize(locally_stored_elements, &lsize);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ PetscScalar *ptr;
+ ierr = VecGetArray(locally_stored_elements, &ptr);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ for (i = 0; i < n_idx; i++) {
+ const unsigned int index = *(indices_begin+i);
+        if ( index>=static_cast<unsigned int>(begin)
+             && index<static_cast<unsigned int>(end) )
+ {
+ //local entry
+ *(values_begin+i) = *(ptr+index-begin);
+ }
+ else
+ {
+ //ghost entry
+ const unsigned int ghostidx
+ = ghost_indices.index_within_set(index);
+
+ Assert(ghostidx+end-begin<(unsigned int)lsize, ExcInternalError());
+ *(values_begin+i) = *(ptr+ghostidx+end-begin);
+ }
+ }
+
+ ierr = VecRestoreArray(locally_stored_elements, &ptr);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ ierr = VecGhostRestoreLocalForm(vector, &locally_stored_elements);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ }
+ // if the vector is local or the
+ // caller, then simply access the
+ // element we are interested in
+ else
+ {
+ int ierr;
+
+ PetscInt begin, end;
+ ierr = VecGetOwnershipRange (vector, &begin, &end);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ PetscScalar *ptr;
+ ierr = VecGetArray(vector, &ptr);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ for (PetscInt i = 0; i < n_idx; i++) {
+ const unsigned int index = *(indices_begin+i);
+
+        Assert(index>=static_cast<unsigned int>(begin)
+               && index<static_cast<unsigned int>(end), ExcInternalError());
+
+ *(values_begin+i) = *(ptr+index-begin);
+ }
+
+ ierr = VecRestoreArray(vector, &ptr);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ }
+ }
+
#endif // DOXYGEN
}
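
The ghosted branch above relies on the layout of the local form: locally owned entries come first, followed by the ghost entries in the order given by the ghost IndexSet, which is why index_within_set() plus the local range size yields the position of a ghost value. A tiny sketch of that index arithmetic (with made-up numbers) follows.

    #include <deal.II/base/index_set.h>
    #include <deal.II/base/types.h>

    #include <iostream>

    using namespace dealii;

    int main ()
    {
      // pretend this process owns global indices [10,20) and additionally
      // stores the ghost entries 7 and 42
      const unsigned int begin = 10, end = 20;

      IndexSet ghost_indices (100);
      ghost_indices.add_index (7);
      ghost_indices.add_index (42);

      // position of global index 42 inside the ghost region ...
      const types::global_dof_index ghostidx
        = ghost_indices.index_within_set (42);              // == 1

      // ... and its position in the local array: after the owned entries
      std::cout << ghostidx + (end - begin) << std::endl;   // prints 11
    }
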
diff --git a/deal.II/include/deal.II/lac/trilinos_sparse_matrix.h b/deal.II/include/deal.II/lac/trilinos_sparse_matrix.h
index 8349654f99..6f3327f85b 100644
--- a/deal.II/include/deal.II/lac/trilinos_sparse_matrix.h
+++ b/deal.II/include/deal.II/lac/trilinos_sparse_matrix.h
@@ -3448,7 +3448,7 @@ namespace TrilinosWrappers
indices);
for (TrilinosWrappers::types::int_type i=0; i &indices,
+ std::vector &values) const;
+
+ /**
+ * Just as the above, but with pointers.
+     * Useful for minimizing the copying of data.
+ */
+ template
+ void extract_subvector_to (ForwardIterator indices_begin,
+ const ForwardIterator indices_end,
+ OutputIterator values_begin) const;
+
/**
* Return the value of the vector
* entry i. Note that this
@@ -1316,6 +1338,30 @@ namespace TrilinosWrappers
+ inline
+ void VectorBase::extract_subvector_to (const std::vector &indices,
+ std::vector &values) const
+ {
+ for (size_type i = 0; i < indices.size(); ++i)
+ values[i] = operator()(indices[i]);
+ }
+
+
+
+ template
+ inline
+ void VectorBase::extract_subvector_to (ForwardIterator indices_begin,
+ const ForwardIterator indices_end,
+ OutputIterator values_begin) const
+ {
+ while (indices_begin != indices_end) {
+ *values_begin = operator()(*indices_begin);
+ indices_begin++; values_begin++;
+ }
+ }
+
+
+
inline
VectorBase::iterator
VectorBase::begin()
diff --git a/deal.II/include/deal.II/lac/vector.h b/deal.II/include/deal.II/lac/vector.h
index 6866873d44..fb67e97c1c 100644
--- a/deal.II/include/deal.II/lac/vector.h
+++ b/deal.II/include/deal.II/lac/vector.h
@@ -717,6 +717,29 @@ public:
* Exactly the same as operator().
*/
Number &operator[] (const size_type i);
+
+ /**
+ * A collective get operation: instead
+ * of getting individual elements of a
+ * vector, this function allows to get
+ * a whole set of elements at once. The
+ * indices of the elements to be read
+ * are stated in the first argument,
+ * the corresponding values are returned in the
+ * second.
+ */
+ template
+ void extract_subvector_to (const std::vector &indices,
+ std::vector &values) const;
+
+ /**
+ * Just as the above, but with pointers.
+   * Useful for minimizing the copying of data.
+ */
+ template
+ void extract_subvector_to (ForwardIterator indices_begin,
+ const ForwardIterator indices_end,
+ OutputIterator values_begin) const;
//@}
@@ -1324,6 +1347,33 @@ Number &Vector::operator[] (const size_type i)
+template
+template
+inline
+void Vector::extract_subvector_to (const std::vector &indices,
+ std::vector &values) const
+{
+ for (size_type i = 0; i < indices.size(); ++i)
+ values[i] = operator()(indices[i]);
+}
+
+
+
+template
+template
+inline
+void Vector::extract_subvector_to (ForwardIterator indices_begin,
+ const ForwardIterator indices_end,
+ OutputIterator values_begin) const
+{
+ while (indices_begin != indices_end) {
+ *values_begin = operator()(*indices_begin);
+ indices_begin++; values_begin++;
+ }
+}
+
+
+
template
inline
Vector &
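
The iterator overload added above is what the DoF accessor now uses with its cell index cache; a small, hedged sketch of the same pattern with a plain Vector and a raw index array follows.

    #include <deal.II/lac/vector.h>

    #include <iostream>

    using namespace dealii;

    int main ()
    {
      Vector<double> v (5);
      for (unsigned int i = 0; i < v.size(); ++i)
        v(i) = 10.0 * i;

      // read the entries listed in 'cache' into 'local_values' in one call,
      // in the same way the cell DoF cache is read in dof_accessor.templates.h
      const types::global_dof_index cache[3] = {4, 0, 2};
      double local_values[3];
      v.extract_subvector_to (cache, cache + 3, local_values);

      std::cout << local_values[0] << ' '
                << local_values[1] << ' '
                << local_values[2] << std::endl;   // prints: 40 0 20
    }
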
diff --git a/deal.II/source/distributed/tria.cc b/deal.II/source/distributed/tria.cc
index 3613f5b7ca..d4b8767c96 100644
--- a/deal.II/source/distributed/tria.cc
+++ b/deal.II/source/distributed/tria.cc
@@ -3185,7 +3185,7 @@ namespace parallel
template
- const std::vector &
+ const std::vector &
Triangulation::get_p4est_tree_to_coarse_cell_permutation() const
{
return p4est_tree_to_coarse_cell_permutation;
diff --git a/deal.II/source/dofs/dof_renumbering.cc b/deal.II/source/dofs/dof_renumbering.cc
index 52646d1be3..2bc1ed384c 100644
--- a/deal.II/source/dofs/dof_renumbering.cc
+++ b/deal.II/source/dofs/dof_renumbering.cc
@@ -1200,11 +1200,11 @@ namespace DoFRenumbering
if (tria)
{
#ifdef DEAL_II_WITH_P4EST
- //this is a distributed Triangulation. We need to traverse the coarse
- //cells in the order p4est does
+ // this is a distributed Triangulation. We need to traverse the coarse
+ // cells in the order p4est does
for (unsigned int c = 0; c < tria->n_cells (0); ++c)
{
- unsigned int coarse_cell_index =
+ const unsigned int coarse_cell_index =
tria->get_p4est_tree_to_coarse_cell_permutation() [c];
const typename DoFHandler::level_cell_iterator
diff --git a/deal.II/source/hp/dof_handler.cc b/deal.II/source/hp/dof_handler.cc
index 205071413e..8a32c2022a 100644
--- a/deal.II/source/hp/dof_handler.cc
+++ b/deal.II/source/hp/dof_handler.cc
@@ -542,7 +542,8 @@ namespace internal
std::vector >
active_fe_backup(dof_handler.levels.size ());
for (unsigned int level = 0; level<dof_handler.levels.size (); ++level)
-        std::swap (dof_handler.levels[level]->active_fe_indices, active_fe_backup[level]);
+ std::swap (dof_handler.levels[level]->active_fe_indices,
+ active_fe_backup[level]);
// delete all levels and set them up
// newly, since vectors are
@@ -576,10 +577,10 @@ namespace internal
// finite element is used for it
for (unsigned int level=0; leveln_levels(); ++level)
{
- dof_handler.levels[level]->dof_object.dof_offsets
+ dof_handler.levels[level]->dof_offsets
= std::vector::size_type> (
dof_handler.tria->n_raw_lines(level),
- DoFHandler::invalid_dof_index);
+ (std::vector::size_type)(-1));
types::global_dof_index next_free_dof = 0;
for (typename DoFHandler::active_cell_iterator
@@ -587,11 +588,11 @@ namespace internal
cell!=dof_handler.end_active(level); ++cell)
if (!cell->has_children())
{
- dof_handler.levels[level]->dof_object.dof_offsets[cell->index()] = next_free_dof;
+ dof_handler.levels[level]->dof_offsets[cell->index()] = next_free_dof;
next_free_dof += cell->get_fe().dofs_per_line;
}
- dof_handler.levels[level]->dof_object.dofs
+ dof_handler.levels[level]->dofs
= std::vector (next_free_dof,
DoFHandler::invalid_dof_index);
}
@@ -613,12 +614,12 @@ namespace internal
if (!cell->has_children())
counter += cell->get_fe().dofs_per_line;
- Assert (dof_handler.levels[level]->dof_object.dofs.size() == counter,
+ Assert (dof_handler.levels[level]->dofs.size() == counter,
ExcInternalError());
Assert (static_cast
- (std::count (dof_handler.levels[level]->dof_object.dof_offsets.begin(),
- dof_handler.levels[level]->dof_object.dof_offsets.end(),
- DoFHandler::invalid_dof_index))
+ (std::count (dof_handler.levels[level]->dof_offsets.begin(),
+ dof_handler.levels[level]->dof_offsets.end(),
+ (std::vector::size_type)(-1)))
==
dof_handler.tria->n_raw_lines(level) - dof_handler.tria->n_active_lines(level),
ExcInternalError());
@@ -693,10 +694,10 @@ namespace internal
// finite element is used for it
for (unsigned int level=0; leveln_levels(); ++level)
{
- dof_handler.levels[level]->dof_object.dof_offsets
+ dof_handler.levels[level]->dof_offsets
= std::vector::size_type> (
dof_handler.tria->n_raw_quads(level),
- DoFHandler::invalid_dof_index);
+ (std::vector