# A macro for the inst.in file expansion
#
# Usage:
-# EXPAND_INSTANTATIONS(target inst_in_files)
+# expand_instantiations(target inst_in_files)
#
# Options:
#
endif()
#
- # Clang-14.0.5 complaines loudly about not being able to vectorize some
+ # Clang-14.0.5 complains loudly about not being able to vectorize some
# of our loops that we have annotated with DEAL_II_OPENMP_SIMD:
#
# warning: loop not vectorized: the optimizer was unable to perform
plt.show()
@endcode
-A python package which mimicks the `R` ggplot2 (which is based on specifying the grammar of the graphics) is
+A Python package that mimics the `R` package ggplot2 (which is based on specifying the grammar of graphics) is
<a href="https://plotnine.org/">plotnine</a>.
@code{.py}
We need to import the following from the <code>plotnine</code> package
// columns in the local matrix and putting the entry 1 in the <i>i</i>th
// slot and a zero entry in all other slots, i.e., we apply the cell-wise
// differential operator on one unit vector at a time. The inner part
- // invoking FEEvaluation::evaluate, the loop over quadrature points, and
- // FEEvalution::integrate, is exactly the same as in the local_apply
+ // invoking FEEvaluation::evaluate(), the loop over quadrature points, and
+ // FEEvaluation::integrate(), is exactly the same as in the local_apply
// function. Afterwards, we pick out the <i>i</i>th entry of the local
// result and put it to a temporary storage (as we overwrite all entries in
// the array behind FEEvaluation::get_dof_value() with the next loop
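For orientation, here is a minimal sketch of this unit-vector trick, assuming an FEEvaluation object `phi` that has already been reinit()ed on the current cell batch and a plain Laplace-like cell operator; the function name and the concrete operator are illustrative, not code from the program:
@code
#include <deal.II/base/aligned_vector.h>
#include <deal.II/matrix_free/fe_evaluation.h>

using namespace dealii;

template <int dim, int fe_degree>
void local_diagonal_sketch(FEEvaluation<dim, fe_degree> &phi)
{
  const unsigned int dofs_per_cell = phi.dofs_per_cell;
  AlignedVector<VectorizedArray<double>> diagonal(dofs_per_cell);

  for (unsigned int i = 0; i < dofs_per_cell; ++i)
    {
      // Load the i-th unit vector into the cell-local dof array ...
      for (unsigned int j = 0; j < dofs_per_cell; ++j)
        phi.submit_dof_value(VectorizedArray<double>(0.), j);
      phi.submit_dof_value(VectorizedArray<double>(1.), i);

      // ... apply the cell-wise differential operator to it ...
      phi.evaluate(EvaluationFlags::gradients);
      for (unsigned int q = 0; q < phi.n_q_points; ++q)
        phi.submit_gradient(phi.get_gradient(q), q);
      phi.integrate(EvaluationFlags::gradients);

      // ... and keep only the i-th entry of the result: the diagonal entry.
      diagonal[i] = phi.get_dof_value(i);
    }

  // Put the collected diagonal back into the dof array so it can later be
  // distributed to the global vector.
  for (unsigned int i = 0; i < dofs_per_cell; ++i)
    phi.submit_dof_value(diagonal[i], i);
}
@endcode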
in functionality to what PETSc provides, but it does so in a very different
way (namely, as a bunch of independent and loosely coupled sub-projects,
- rather than as a single library). This nothwithstanding, the classes
- deal.II provides in namepace TrilinosWrappers are very similar to the
+ rather than as a single library). This notwithstanding, the classes
+ deal.II provides in namespace TrilinosWrappers are very similar to the
ones in namespace PETScWrappers. Trilinos, like PETSc, is run on some of
the biggest machines in the world.
- The classes in namespace TrilinosWrappers are generally written for the
function concept, p-multigrid, and traditional h-multigrid. The main
ingredient is to define an appropriate MGTwoLevelTransfer object and call
MGTwoLevelTransfer::reinit_geometric_transfer() or
-MGTwoLevelTranfer::reinit_polynomial_transfer(), respectively.
+MGTwoLevelTransfer::reinit_polynomial_transfer(), respectively.
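A minimal sketch of setting up such a two-level transfer, assuming two DoFHandler objects (one on a coarser mesh, or with a lower polynomial degree, than the other) and matching AffineConstraints objects; all names below are placeholders rather than code from the library:
@code
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/multigrid/mg_transfer_global_coarsening.h>

using namespace dealii;

template <int dim>
void setup_two_level_transfer(const DoFHandler<dim>           &dof_handler_fine,
                              const DoFHandler<dim>           &dof_handler_coarse,
                              const AffineConstraints<double> &constraints_fine,
                              const AffineConstraints<double> &constraints_coarse)
{
  using VectorType = LinearAlgebra::distributed::Vector<double>;

  MGTwoLevelTransfer<dim, VectorType> transfer;

  // Transfer between two meshes (h-coarsening) ...
  transfer.reinit_geometric_transfer(dof_handler_fine,
                                     dof_handler_coarse,
                                     constraints_fine,
                                     constraints_coarse);

  // ... or, alternatively, between two polynomial degrees on the same mesh
  // (p-coarsening):
  // transfer.reinit_polynomial_transfer(dof_handler_fine,
  //                                     dof_handler_coarse,
  //                                     constraints_fine,
  //                                     constraints_coarse);
}
@endcode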
}
- // @sect3{<code>AdvectionProlem</code> class}
+ // @sect3{<code>AdvectionProblem</code> class}
// This is the main class of the program, and should look very similar to
// step-16. The major difference is that, since we are defining our multigrid
// differentiable number type is hard-coded here, but with some clever
// templating it is possible to select which framework to use at run time
// (e.g., as selected through the parameter file). We'll simultaneously
- // perform the experiments with the counterpary material law that was
+ // perform the experiments with the counterpart material law that was
// fully implemented by hand, and check what it computes against our
// assisted implementation.
{
dim, DataComponentInterpretation::component_is_part_of_vector);
pcout << " - write data (background mesh)" << std::endl;
DataOut<dim> data_out_background;
- DataOutBase::VtkFlags flags_backround;
- flags_backround.write_higher_order_cells = true;
- data_out_background.set_flags(flags_backround);
+ DataOutBase::VtkFlags flags_background;
+ flags_background.write_higher_order_cells = true;
+ data_out_background.set_flags(flags_background);
data_out_background.add_data_vector(
dof_handler_background,
force_vector,
/**
* Return an ArborX::nearest(ArborX::Sphere,
- * SphereNearestPredicate::get_n_nearest_neightbors) object constructed from
+ * SphereNearestPredicate::get_n_nearest_neighbors) object constructed from
* the `i`th sphere stored in @p sph_nearest.
*/
static auto
eigen_system = eigenvectors(original_tensor);
std::pair<SymmetricTensor<2, dim, Number>, SymmetricTensor<2, dim, Number>>
- postive_negative_tensors;
+ positive_negative_tensors;
- auto &[positive_part_tensor, negative_part_tensor] = postive_negative_tensors;
+ auto &[positive_part_tensor, negative_part_tensor] =
+ positive_negative_tensors;
positive_part_tensor = 0;
for (unsigned int i = 0; i < dim; ++i)
symmetrize(outer_product(eigen_system[i].second,
eigen_system[i].second));
- return postive_negative_tensors;
+ return positive_negative_tensors;
}
/**
~InternalData() override;
/**
- * Give write-access to the pointer to a @p InternalData of the @p
- * base_noth base element.
+ * Give write-access to the pointer to a @p InternalData of the
+ * `base_no`th base element.
*/
void
set_fe_data(
std::unique_ptr<typename FiniteElement<dim, spacedim>::InternalDataBase>);
/**
- * Give read-access to the pointer to a @p InternalData of the @p
- * base_noth base element.
+ * Give read-access to the pointer to a @p InternalData of the
+ * `base_no`th base element.
*/
typename FiniteElement<dim, spacedim>::InternalDataBase &
get_fe_data(const unsigned int base_no) const;
* @p grid_generator_function_arguments.
*
* The string that supplies the arguments is passed to the function
- * Patterns::Tools::Convert<TupleTyple>::to_value(), where `TupleType` here is
+ * Patterns::Tools::Convert<TupleType>::to_value(), where `TupleType` here is
* a tuple containing **all** the arguments of the GridGenerator function,
* including all optional arguments.
*
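As a concrete illustration, a call could look like the sketch below; the ':' separator between tuple entries is an assumption based on the default conventions of Patterns::Tools and should be checked against the Patterns documentation:
@code
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria.h>

using namespace dealii;

void make_unit_square()
{
  Triangulation<2> tria;

  // Intended to be equivalent to GridGenerator::hyper_cube(tria, 0., 1., false):
  // the argument string encodes the tuple (left, right, colorize). The ':'
  // separator is assumed to be the default tuple separator of Patterns::Tools.
  GridGenerator::generate_from_name_and_arguments(tria,
                                                  "hyper_cube",
                                                  "0 : 1 : false");
}
@endcode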
restore();
/**
- * Differential restore. Performs the @p step_noth local refinement and
+ * Differential restore. Performs the `step_no`th local refinement and
* coarsening step. Step 0 stands for the copying of the coarse grid.
*
* This function will only succeed if the triangulation is in just the state
*
* @ingroup Exceptions
*/
- DeclException1(ExcLineInexistant,
+ DeclException1(ExcLineInexistent,
size_type,
<< "The specified line " << arg1 << " does not exist.");
/**
{
Assert(sorted == false, ExcMatrixIsClosed());
Assert(is_constrained(constrained_dof_index),
- ExcLineInexistant(constrained_dof_index));
+ ExcLineInexistent(constrained_dof_index));
ConstraintLine &line =
lines[lines_cache[calculate_line_index(constrained_dof_index)]];
* Internally, the minimum and maximum eigenvalues of the preconditioned
* system are estimated by an eigenvalue algorithm, and the resulting estimate
- * is multiplied by the 1.2 for safety reasons. For more details on the
- * unterlying algorithms, see PreconditionChebyshev.
+ * is multiplied by 1.2 for safety reasons. For more details on the
+ * underlying algorithms, see PreconditionChebyshev.
*/
template <typename MatrixType = SparseMatrix<double>,
typename PreconditionerType = IdentityMatrix>
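For reference, the eigenvalue estimation mentioned above can be steered through PreconditionChebyshev's AdditionalData fields; a minimal sketch, with placeholder values and names, might look like this:
@code
#include <deal.II/lac/precondition.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

void setup_chebyshev(const SparseMatrix<double> &system_matrix)
{
  using ChebyshevType =
    PreconditionChebyshev<SparseMatrix<double>, Vector<double>>;

  ChebyshevType::AdditionalData data;
  data.degree              = 3;   // degree of the Chebyshev polynomial
  data.smoothing_range     = 20.; // eigenvalue range the smoother acts on
  data.eig_cg_n_iterations = 10;  // CG iterations for the eigenvalue estimate

  ChebyshevType preconditioner;
  preconditioner.initialize(system_matrix, data);
}
@endcode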
// Communicate the vector to the correct map.
// Remark: We use here doImport on an Export object since we have to use
- // the communication plan stored in the tpetra_comm_patern backward.
+ // the communication plan stored in the tpetra_comm_pattern
+ // backward.
target_vector.doImport(vector, tpetra_export, Tpetra::INSERT);
# if DEAL_II_TRILINOS_VERSION_GTE(13, 2, 0)
/*
* Exception
*/
- DeclExceptionMsg(ExcColMapMissmatch,
+ DeclExceptionMsg(ExcColMapMismatch,
"The column partitioning of a matrix does not match "
"the partitioning of a vector you are trying to "
"multiply it with. Are you multiplying the "
/*
* Exception
*/
- DeclExceptionMsg(ExcDomainMapMissmatch,
+ DeclExceptionMsg(ExcDomainMapMismatch,
"The row partitioning of a matrix does not match "
"the partitioning of a vector you are trying to "
"put the result of a matrix-vector product in. "
{
Assert(src.trilinos_vector().getMap()->isSameAs(
*M.trilinos_matrix().getDomainMap()),
- SparseMatrix<double>::ExcColMapMissmatch());
+ SparseMatrix<double>::ExcColMapMismatch());
Assert(dst.trilinos_vector().getMap()->isSameAs(
*M.trilinos_matrix().getRangeMap()),
- SparseMatrix<double>::ExcDomainMapMissmatch());
+ SparseMatrix<double>::ExcDomainMapMismatch());
}
else
{
Assert(dst.trilinos_vector().getMap()->isSameAs(
*M.trilinos_matrix().getDomainMap()),
- SparseMatrix<double>::ExcColMapMissmatch());
+ SparseMatrix<double>::ExcColMapMismatch());
Assert(src.trilinos_vector().getMap()->isSameAs(
*M.trilinos_matrix().getRangeMap()),
- SparseMatrix<double>::ExcDomainMapMissmatch());
+ SparseMatrix<double>::ExcDomainMapMismatch());
}
M.trilinos_matrix().apply(
const StridedArrayView<const ScalarNumber, stride_view> &solution_values,
const EvaluationFlags::EvaluationFlags &evaluation_flags)
{
- Assert(this->is_reinitialized, ExcMessage("Is not reinitalized!"));
+ Assert(this->is_reinitialized, ExcMessage("Is not reinitialized!"));
if (this->n_q_points == 0)
return;
const EvaluationFlags::EvaluationFlags &integration_flags,
const bool sum_into_values)
{
- Assert(this->is_reinitialized, ExcMessage("Is not reinitalized!"));
+ Assert(this->is_reinitialized, ExcMessage("Is not reinitialized!"));
Assert(!(integration_flags & EvaluationFlags::hessians), ExcNotImplemented());
const EvaluationFlags::EvaluationFlags &integration_flags,
const bool sum_into_values)
{
- Assert(this->is_reinitialized, ExcMessage("Is not reinitalized!"));
+ Assert(this->is_reinitialized, ExcMessage("Is not reinitialized!"));
Assert(!(integration_flags & EvaluationFlags::hessians), ExcNotImplemented());
{
// Support points have a hierarchic numbering, L2 DoFs have
// lexicographic numbering. Therefore, we need to convert the DoF
- // indices if DoFHander is L2 conforming and has degree > 0.
+ // indices if DoFHandler is L2 conforming and has degree > 0.
const bool needs_conversion =
dof_handler.get_fe().conforming_space ==
FiniteElementData<dim>::Conformity::L2 &&
!additional_data.enforce_all_points_found || rpe.all_points_found(),
ExcMessage(
"You requested that all points should be found, but this didn'thappen."
- " You can change this option through the AdditionaData struct in the constructor."));
+ " You can change this option through the AdditionalData struct in the constructor."));
// set up MappingInfo for easier data access
mapping_info = internal::fill_mapping_info<dim, Number>(rpe);
* flag). In practice, this is rarely the case because two triangulations,
* partitioned in their own ways, will not typically have corresponding
* cells owned by the same process, and implementing the interpolation
- * procedure would require transfering data between processes in ways
+ * procedure would require transferring data between processes in ways
* that are difficult to implement efficiently. However, some special
* cases can more easily be implemented, namely the case where one
* of the meshes is strictly coarser or finer than the other. For these
// an exception and therefore call_and_possibly_capture_exception
// returns code different from 0, then NOX does not interrupt the
// solution process but rather performs a recovery step. To ensure this
- // feature is available to the user, we need to supress the exception in
- // this case, since it is exactly that, what NOX expects from our
+ // feature is available to the user, we need to suppress the exception
+      // in this case, since this is exactly what NOX expects from our
// callbacks.
const bool do_rescue =
parameters->sublist("Newton").get("Rescue Bad Newton Solve", true);
const typename dealii::Triangulation<dim,
spacedim>::cell_iterator
neighbor_cell_at_face = cell->neighbor(f);
- const CellId neigbor_cell_id = neighbor_cell_at_face->id();
+ const CellId neighbor_cell_id = neighbor_cell_at_face->id();
// Only fix sign if the orientation is opposite and only do so
// on the face dofs on the cell with smaller cell_id.
- if (((nn + f) % 2 == 0) && this_cell_id < neigbor_cell_id)
+ if (((nn + f) % 2 == 0) && this_cell_id < neighbor_cell_id)
for (unsigned int j = 0; j < fe.n_dofs_per_face(f); ++j)
{
const unsigned int cell_j = fe.face_to_cell_index(j, f);
// For the tetrahedron the parent consists of the vertices
// 0,1,2,3, the new vertices 4-9 are defined as the
- // midpoints fo the edges: 4 -> (0,1), 5 -> (1,2), 6 ->
+ // midpoints of the edges: 4 -> (0,1), 5 -> (1,2), 6 ->
// (2,0), 7 -> (0,3), 8 -> (1,3), 9 -> (2,3).
// Order is defined by the reference cell, see
// https://dealii.org/developer/doxygen/deal.II/group__simplex.html#simplex_reference_cells.
// The order of the lines is defined by the ordering
// of the faces of the reference cell and the ordering
// of the lines within a face.
- // Each face is split into 4 child triangels, the
+ // Each face is split into 4 child triangles, the
// relevant lines are defined by the vertices of the
// center triangles: 0 -> (4,5), 1 -> (5,6), 2 -> (4,6),
// 3 -> (4,7), 4 -> (7,8), 5 -> (4,8), 6 -> (6,9), 7 ->
// 8 child tets. To build the child tets, 8 new faces are
- // needed. The the vertices, which define the lines of these
+ // needed. The vertices which define the lines of these
// new faces are listed in table_tet. Now only the
- // coresponding index of the lines and quads have to be
+ // corresponding index of the lines and quads have to be
// listed in new_quad_lines_tet and cell_quads_tet.
// The first 4 define the faces which cut off the
{
// list of the indices of the surfaces which define the
// 8 new tets. the indices 0-7 are the new quads defined
- // above (so 0-3 cut off the corners and 4-7 sperate the
- // remaining octahedral), the indices between 8-11 are
- // the children of the first face, from 12-15 of the
+ // above (so 0-3 cut off the corners and 4-7 separate
+ // the remaining octahedron), the indices between 8-11
+ // are the children of the first face, from 12-15 of the
// second, etc.
for (unsigned int i = 0; i < n_new_quads; ++i)
quad_indices[i] = new_quads[i]->index();
// cell has been refined, all of its children have neighbors
// in all directions in which the parent cell has neighbors as
// well. The children's neighbors are either the parent
- // neighbor or the parent neigbor's children, or simply one of
+ // neighbor or the parent neighbor's children, or simply one of
// the other children of the current cell. This check is
// useful because if one creates a triangulation with an
// inconsistently ordered set of cells (e.g., because one has
<< hanging_nodes_only.n_constraints() << std::endl
<< " Total number of constraints: "
<< test_all_constraints.n_constraints() << std::endl
- << " Number of inhomogenous constraints: "
+ << " Number of inhomogeneous constraints: "
<< test_all_constraints.n_inhomogeneities() << std::endl
<< " Number of identity constraints: "
<< test_all_constraints.n_identities() << std::endl;
DEAL:2d:: Number of degrees of freedom: 864
DEAL:2d:: Number of hanging node constraints: 0
DEAL:2d:: Total number of constraints: 192
-DEAL:2d:: Number of inhomogenous constraints: 188
+DEAL:2d:: Number of inhomogeneous constraints: 188
DEAL:2d:: Number of identity constraints: 0
DEAL:2d:: Reference matrix nonzeros: 12672, actually: 8480
DEAL:2d:: Test matrix 1 nonzeros: 12672, actually: 8480
DEAL:2d:: Number of degrees of freedom: 1588
DEAL:2d:: Number of hanging node constraints: 36
DEAL:2d:: Total number of constraints: 284
-DEAL:2d:: Number of inhomogenous constraints: 254
+DEAL:2d:: Number of inhomogeneous constraints: 254
DEAL:2d:: Number of identity constraints: 12
DEAL:2d:: Reference matrix nonzeros: 24092, actually: 17520
DEAL:2d:: Test matrix 1 nonzeros: 24092, actually: 17520
DEAL:2d:: Number of degrees of freedom: 2982
DEAL:2d:: Number of hanging node constraints: 108
DEAL:2d:: Total number of constraints: 408
-DEAL:2d:: Number of inhomogenous constraints: 314
+DEAL:2d:: Number of inhomogeneous constraints: 314
DEAL:2d:: Number of identity constraints: 36
DEAL:2d:: Reference matrix nonzeros: 46420, actually: 36248
DEAL:2d:: Test matrix 1 nonzeros: 46420, actually: 36248
DEAL:3d:: Number of degrees of freedom: 27
DEAL:3d:: Number of hanging node constraints: 0
DEAL:3d:: Total number of constraints: 26
-DEAL:3d:: Number of inhomogenous constraints: 25
+DEAL:3d:: Number of inhomogeneous constraints: 25
DEAL:3d:: Number of identity constraints: 0
DEAL:3d:: Reference matrix nonzeros: 343, actually: 27
DEAL:3d:: Test matrix 1 nonzeros: 343, actually: 27
DEAL:3d:: Number of degrees of freedom: 60
DEAL:3d:: Number of hanging node constraints: 16
DEAL:3d:: Total number of constraints: 56
-DEAL:3d:: Number of inhomogenous constraints: 55
+DEAL:3d:: Number of inhomogeneous constraints: 55
DEAL:3d:: Number of identity constraints: 0
DEAL:3d:: Reference matrix nonzeros: 1006, actually: 66
DEAL:3d:: Test matrix 1 nonzeros: 1006, actually: 66
DEAL:3d:: Number of degrees of freedom: 189
DEAL:3d:: Number of hanging node constraints: 55
DEAL:3d:: Total number of constraints: 158
-DEAL:3d:: Number of inhomogenous constraints: 148
+DEAL:3d:: Number of inhomogeneous constraints: 148
DEAL:3d:: Number of identity constraints: 0
DEAL:3d:: Reference matrix nonzeros: 3767, actually: 415
DEAL:3d:: Test matrix 1 nonzeros: 3767, actually: 415
// check ghost handling on parallel block vectors for large
-// number of blocks with split compres_start()/update_ghosts_start().
+// number of blocks with split compress_start()/update_ghosts_start().
// almost copy-paste of parallel_block_vector_02.cc
#include <deal.II/base/index_set.h>