/**
* Compare two ArrayView objects of the same type. Two objects are considered
* equal if they have the same size and the same starting pointer.
- * This version always comapres with the non-const value_type.
+ * This version always compares with the non-const value_type.
*/
bool operator !=
(const ArrayView<typename std::remove_cv<value_type>::type> &other_view) const;
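Comparing against the view over the non-const value_type is what lets a view of `const` elements be compared with a view of non-const elements over the same memory. The following standalone sketch (a minimal `MiniView` stand-in, not deal.II's ArrayView) illustrates the idea:
@code
#include <cstddef>
#include <iostream>
#include <type_traits>

// Minimal stand-in for a view class, only to illustrate the std::remove_cv
// trick above; the real ArrayView has a much richer interface.
template <typename T>
struct MiniView
{
  T          *ptr;
  std::size_t n;

  // Equal means: same starting pointer and same size.
  bool operator!=(const MiniView<typename std::remove_cv<T>::type> &other) const
  {
    return (ptr != other.ptr) || (n != other.n);
  }
};

int main()
{
  double data[4] = {1, 2, 3, 4};

  MiniView<const double> view_to_const{data, 4};
  MiniView<double>       view{data, 4};

  // Same pointer, same size: the two views compare equal.
  std::cout << std::boolalpha << (view_to_const != view) << '\n'; // prints "false"
}
@endcode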
* selected using the extensions of the files themselves. This can be either
* `prm` or `xml` for input, and `prm`, `xml`, or `tex/latex` for output. If
* the output format is `prm`, then `output_style_for_prm_format` is used to
- * decide wether we write the full documentation as well, or only the
+ * decide whether we write the full documentation as well, or only the
* parameters.
*
* If the input file does not exist, a default one with the same name is created
DEAL_II_NAMESPACE_CLOSE
#endif
-
* have to be entered in the form <code>key: value</code>. In other words, a
* map is described in the form <code>key1: value1, key2: value2, key3:
- * value3, ...</code>. Two constructor arguments allow to choose a delimiter
+ * value3, ...</code>. Two constructor arguments allow one to choose a delimiter
- * between pairs other than the comma, and a delimeter between key and value
+ * between pairs other than the comma, and a delimiter between key and value
* other than colon.
*
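As a plain-C++ illustration of the `key: value` format described above (only a sketch with a hypothetical `parse_map` helper, not the deal.II Patterns::Map implementation; the default delimiters mirror the comma and colon mentioned in the text):
@code
#include <iostream>
#include <map>
#include <sstream>
#include <string>

// Hypothetical helper: split "key1: value1, key2: value2" into a std::map,
// with configurable delimiters between pairs and between key and value.
std::map<std::string, std::string>
parse_map(const std::string &text,
          const char         pair_delimiter      = ',',
          const char         key_value_delimiter = ':')
{
  const auto trim = [](std::string s) {
    const auto b = s.find_first_not_of(' ');
    const auto e = s.find_last_not_of(' ');
    return (b == std::string::npos) ? std::string() : s.substr(b, e - b + 1);
  };

  std::map<std::string, std::string> result;
  std::istringstream                 stream(text);
  std::string                        pair;
  while (std::getline(stream, pair, pair_delimiter))
    {
      const auto pos = pair.find(key_value_delimiter);
      if (pos == std::string::npos)
        continue; // malformed entry, skip it
      result[trim(pair.substr(0, pos))] = trim(pair.substr(pos + 1));
    }
  return result;
}

int main()
{
  for (const auto &kv : parse_map("a: 1, b: 2, c: 3"))
    std::cout << kv.first << " -> " << kv.second << '\n';
}
@endcode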
* With two additional parameters, the number of elements this list has to
/**
* Converter class. This class is used to generate strings and Patterns
* associated to the given type, and to convert from a string to the given
- * type and viceversa.
+ * type and vice versa.
*
* The second template parameter is used internally to allow for advanced
* SFINAE (substitution failure is not an error) tricks used to specialise
* A class has Rank equal to the number of different separators
* that are required to uniquely identify its element(s) in a string.
*
- * This class is used to detect wether the class T is compatible
+ * This class is used to detect whether the class T is compatible
* with a Patterns::List pattern or with a Patterns::Map pattern.
*
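The "number of separators" idea can be sketched with a small compile-time counter. This is not deal.II's actual RankInfo machinery, just an illustration restricted to nested lists: a scalar needs no separator, and each std::vector level needs one more separator than its element type (writing "1, 2, 3; 4, 5, 6" for a vector of vectors already takes two distinct separators).
@code
#include <vector>

// Sketch only: count how many nested separator levels a type needs.
template <typename T>
struct Rank
{
  static constexpr int value = 0; // scalars: no separator needed
};

template <typename T>
struct Rank<std::vector<T>>
{
  static constexpr int value = 1 + Rank<T>::value; // one extra list separator
};

static_assert(Rank<double>::value == 0, "");
static_assert(Rank<std::vector<double>>::value == 1, "");
static_assert(Rank<std::vector<std::vector<double>>>::value == 2, "");

int main() {}
@endcode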
* Objects like Point() or std::complex<double> are vector-likes, and
else
is >> value;
- // If someone passes "123 abc" to the function, the method yelds an
+ // If someone passes "123 abc" to the function, the method yields an
// integer 123 alright, but the space terminates the read from the string
// although there is more to come. This case, however, is checked for in
// the call p->match(s) at the beginning of this function, and would
* thesis of Sabine Zaglmayr.
*
* This class was written based upon the existing deal.II Legendre class as a
- * base, but with the coefficents adjusted so that the recursive formula is for
+ * base, but with the coefficients adjusted so that the recursive formula is for
* the integrated Legendre polynomials described in the PhD thesis of Sabine
* Zaglmayr. The polynomials can be generated recursively from:
*
unsigned int offset;
/**
- * A pointer to the CellDataStorage class whose data will be transfered.
+ * A pointer to the CellDataStorage class whose data will be transferred.
*/
CellDataStorage<CellIteratorType,DataType> *data_storage;
Assert(it != map.end(), ExcMessage("Could not find QP data for the cell"));
// Cast base class to the desired class. This has to be done irrespectively of
- // T==DataType as we need to return shapred_ptr<const T> to make sure the user
+ // T==DataType as we need to return shared_ptr<const T> to make sure the user
// does not modify the content of QP objects
std::vector<std::shared_ptr<const T>> res (it->second.size());
for (unsigned int q = 0; q < res.size(); q++)
/**
- * Specializatin of split_string_list() for the case where the delimiter
+ * Specialization of split_string_list() for the case where the delimiter
* is a single char.
*/
std::vector<std::string>
* the embedding space of the second ChartManifold. If the first
* ChartManifold is periodic, so is the resulting ChartManifold, with
* the same periodicity. Periodicity on the second ChartManifold is not
- * allowed, and the constructor will throw an axception if the second
+ * allowed, and the constructor will throw an exception if the second
* Manifold is periodic.
*
* This class only works for dim <= chartdim <= intermediate_spacedim
/**
* Push forward the chartdim dimensional point to a spacedim
* Euclidean point. The function calls first the push_forward() of
- * F, and then the push_foward() of G.
+ * F, and then the push_forward() of G.
*/
virtual
Point<spacedim>
* Since one often only has some iterator and wants to set a filtered iterator
* to the first one that satisfies a predicate (for example, the first one for
* which the user flag is set, or the first one with a given subdomain id),
- * there are assignement functions #set_to_next_positive and
+ * there are assignment functions #set_to_next_positive and
* #set_to_previous_positive that assign the next or last previous iterator
* that satisfies the predicate, i.e. they follow the list of iterators in
* either direction until they find a matching one (or the past-the-end
/**
* Constructor. Set the iterator to the default state and use the given
- * predicate for filtering subsequent assignement and iteration.
+ * predicate for filtering subsequent assignment and iteration.
*/
template <typename Predicate>
FilteredIterator (Predicate p);
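The behavior of #set_to_next_positive can be illustrated with a small generic free function (a sketch, not the deal.II member function): starting from a given iterator, advance until the predicate is satisfied or the end of the range is reached.
@code
#include <iostream>
#include <vector>

// Advance until the predicate holds, or return end if no element matches.
template <typename Iterator, typename Predicate>
Iterator set_to_next_positive(Iterator it, const Iterator end, Predicate p)
{
  while (it != end && !p(*it))
    ++it;
  return it;
}

int main()
{
  const std::vector<int> v = {1, 4, 7, 10, 13};
  const auto is_even = [](const int i) { return i % 2 == 0; };

  // First element satisfying the predicate, starting from v.begin().
  const auto it = set_to_next_positive(v.begin(), v.end(), is_even);
  if (it != v.end())
    std::cout << "first even entry: " << *it << '\n'; // prints 4
}
@endcode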
{
namespace FilteredIterator
{
- // The following classes create a nested sequencee of
+ // The following classes create a nested sequence of
// FilteredIterator<FilteredIterator<...<BaseIterator>...>> with as many
// levels of FilteredIterator classes as there are elements in the TypeList
// if the latter is given as a std::tuple<Args...>.
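A stripped-down version of this kind of type-list recursion, with a placeholder `Filtered<>` template instead of the real FilteredIterator, looks as follows; each element of the std::tuple adds one nesting level:
@code
#include <tuple>
#include <type_traits>

// Stand-in for FilteredIterator<T>, used only to show the nesting.
template <typename T>
struct Filtered {};

template <typename BaseIterator, typename TypeList>
struct NestFiltered;

// Empty type list: just the base iterator.
template <typename BaseIterator>
struct NestFiltered<BaseIterator, std::tuple<>>
{
  using type = BaseIterator;
};

// Peel off one tuple element and add one Filtered<> layer.
template <typename BaseIterator, typename First, typename... Rest>
struct NestFiltered<BaseIterator, std::tuple<First, Rest...>>
{
  using type =
    Filtered<typename NestFiltered<BaseIterator, std::tuple<Rest...>>::type>;
};

static_assert(
  std::is_same<NestFiltered<int *, std::tuple<char, char>>::type,
               Filtered<Filtered<int *>>>::value,
  "two tuple elements give two nested Filtered<> layers");

int main() {}
@endcode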
* duplicated vertices will be removed if their distance is lower
* than @p tol.
*
- * Only the elements compatible with the given dimension and spacedimension
+ * Only the elements compatible with the given dimension and space dimension
* will be extracted from the mesh, and only those elements that are
* compatible with deal.II are supported. If you set
* `ignore_unsupported_element_types`, all the other element types are simply
* extracted. The resulting mesh (as represented in the Triangulation object)
* may not make any sense if you are mixing compatible and incompatible
* element types. If `ignore_unsupported_element_types` is set to `false`,
- * then an exception is thrown when an unsupporte type is encountered.
+ * then an exception is thrown when an unsupported type is encountered.
*
* @param filename The file to read from
* @param mesh_index Index of the mesh within the file
* If @p points[a] and @p points[b] are the only two points that fall in @p cells[c],
* then @p qpoints[c][0] and @p qpoints[c][1] are the reference positions of
* @p points[a] and @p points[b] in @p cells[c], and @p indices[c][0] = a,
- * @p indices[c][1] = b. The function Mapping::tansform_unit_to_real(qpoints[c][0])
+ * @p indices[c][1] = b. The function call Mapping::transform_unit_to_real_cell(cells[c], qpoints[c][0])
* returns @p points[a].
*
* The algorithm assumes it's easier to look for a point in the cell that was used previously.
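A generic sketch of this bookkeeping (Cell and Point are placeholders; the three arrays are assumed to have exactly the structure described above): for every original point a it recovers the pair (c, q) with indices[c][q] == a.
@code
#include <cstddef>
#include <utility>
#include <vector>

template <typename Cell, typename Point>
std::vector<std::pair<std::size_t, std::size_t>>
point_to_cell_and_quad(const std::vector<Cell>                     &cells,
                       const std::vector<std::vector<Point>>       &qpoints,
                       const std::vector<std::vector<std::size_t>> &indices,
                       const std::size_t                            n_points)
{
  // For each original point a, store (c, q) such that qpoints[c][q] is its
  // reference position in cells[c], i.e. indices[c][q] == a.
  std::vector<std::pair<std::size_t, std::size_t>> owner(n_points);
  for (std::size_t c = 0; c < cells.size(); ++c)
    for (std::size_t q = 0; q < qpoints[c].size(); ++q)
      owner[indices[c][q]] = {c, q};
  return owner;
}
@endcode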
* GridTools::find_cells_adjacent_to_vertex(). Lastly, for each of these cells, the
* function tests whether the point is inside. This check is performed using
* the given @p mapping argument to determine whether cells have straight
- * or curved boundarys, and if the latter then how exactly they are curved.
+ * or curved boundaries, and if the latter then how exactly they are curved.
*
* If a point lies on the boundary of two or more cells, then the algorithm
* tries to identify the cell that is of highest refinement level.
- * If the minimal distance between the enclosing sphere of the an
+ * If the minimal distance between the enclosing sphere of an
* active cell and the enclosing sphere of any of the cells for which
* the @p predicate returns @p true is less than @p layer_thickness,
- * then the active cell is an \a active_cell_wthin_distance.
+ * then the active cell is an \a active_cell_within_distance.
* @return A list of active cells within a given geometric distance
* @p layer_thickness from the set of active cells for which the @p predicate
* returns @p true.
* valued DoFs of the first face should be modified prior to constraining
* to the DoFs of the second face.
*
- * The rotation matrix is used in DoFTools::make_periodicity_constriants()
+ * The rotation matrix is used in DoFTools::make_periodicity_constraints()
* by applying the rotation to all vector valued blocks listed in the
* parameter @p first_vector_components of the finite element space. For
* more details see DoFTools::make_periodicity_constraints() and the
* see the documentation of orthogonal_equality() for further details.
*
* The @p direction refers to the space direction in which periodicity is
- * enforced. When maching periodic faces this vector component is ignored.
+ * enforced. When matching periodic faces this vector component is ignored.
*
* The @p offset is a vector tangential to the faces that is added to the
* location of vertices of the 'first' boundary when attempting to match
* DoFAccessor::set_active_fe_index() on ghost cells. Rather, the
* @p unpack function directly accesses internal data structures. But
* you get the idea -- the code could, just as well, have exchanged
- * material ids, user indices, boundary indictors, or any kind of other
+ * material ids, user indices, boundary indicators, or any kind of other
* data with similar calls as the ones above.)
*/
template <typename DataType, typename MeshType>
* intermediate points. Internally the
* Manifold::get_intermediate_point() calls the
* Manifold::project_to_manifold() function after computing the convex
- * conbination of the given points. This allows derived classes to
+ * combination of the given points. This allows derived classes to
* only overload Manifold::project_to_manifold() for simple
* situations. This is often useful when describing manifolds that are
* embedded in higher dimensional space, e.g., the surface of a
* and $\mathbf x_2$, with weights $w_1$ and $w_2=1-w_1$, simply
* requires computing the point $\mathbf s(w_1)$. Computing a new
* point as a weighted average of more than two points can be done
- * by considering pairwise geodetics, finding suitable points on
+ * by considering pairwise geodesics, finding suitable points on
- * the geodetic between the first two points, then on the geodetic
+ * the geodesic between the first two points, then on the geodesic
* between this new point and the third given point, etc.
*
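A sketch of this pairwise reduction, with the geodesic average supplied as a callable (a hypothetical helper, not a deal.II function). With straight Euclidean "geodesics" the result reduces to the ordinary weighted mean, which makes the logic easy to check:
@code
#include <cstdio>
#include <vector>

template <typename Point, typename GeodesicAverage>
Point pairwise_weighted_average(const std::vector<Point>  &points,
                                const std::vector<double> &weights,
                                const GeodesicAverage     &geodesic_average)
{
  Point  p = points[0];
  double w = weights[0];
  for (std::size_t i = 1; i < points.size(); ++i)
    {
      const double w_total = w + weights[i];
      // Walk along the geodesic from the current intermediate point towards
      // points[i] by the relative weight of points[i].
      p = geodesic_average(p, points[i], weights[i] / w_total);
      w = w_total;
    }
  return p;
}

int main()
{
  // Straight-line "geodesic": the point at parameter t between a and b.
  const auto euclidean = [](const double a, const double b, const double t) {
    return a + t * (b - a);
  };
  const std::vector<double> points  = {0.0, 3.0, 6.0};
  const std::vector<double> weights = {1.0, 1.0, 1.0};
  // Prints 3, the arithmetic mean of 0, 3, and 6.
  std::printf("%g\n", pairwise_weighted_average(points, weights, euclidean));
}
@endcode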
* $\mathbf s'(0) \approx \frac{\mathbf s(\epsilon)-\mathbf x_1}{\epsilon}$
* for a small value of $\epsilon$, and the evaluation of $\mathbf s(\epsilon)$
* is done by calling get_new_point(). If possible, derived classes should
- * override this function by an implemention of the exact derivative.
+ * override this function by an implementation of the exact derivative.
*
* @param x1 The first point that describes the geodesic, and the one
* at which the "direction" is to be evaluated.
* with a straight line in polar coordinates would take the long road
* around the globe, without passing through the north pole.
*
- * These two points would be connented (using a PolarManifold) by the curve
+ * These two points would be connected (using a PolarManifold) by the curve
* @f{align*}{
* s: [0,1] & \rightarrow & \mathbb S^3 \\
* t & \mapsto & (1,\pi/3,0) + (0,0,t\pi)
* load_refine_flags functions. Normally, the code will look like this:
* @code
* // open output file
- * ofstream history("mesh.history");
+ * std::ofstream history("mesh.history");
* // do 10 refinement steps
* for (unsigned int step=0; step<10; ++step)
* {
* If you want to re-create the grid from the stored information, you write:
* @code
* // open input file
- * ifstream history("mesh.history");
+ * std::ifstream history("mesh.history");
* // do 10 refinement steps
* for (unsigned int step=0; step<10; ++step)
* {
static const unsigned int space_dimension = spacedim;
/**
- * Dimensionality of the object that the thing represented by this accessopr
+ * Dimensionality of the object that the thing represented by this accessor
* is part of. For example, if this accessor represents a line that is part
* of a hexahedron, then this value will be three.
*/
quad (const unsigned int i);
/**
- * Quad index of the @p ith quad bounding this object. Throws an excption.
+ * Quad index of the @p ith quad bounding this object. Throws an exception.
*/
static unsigned int quad_index (const unsigned int i);
* <tt>boundary_id()</tt> function.
*
* @warning You should never set the boundary indicator of an interior face
- * (a face not at the boundary of the domain), or set set the boundary
+ * (a face not at the boundary of the domain), or set the boundary
* indicator of an exterior face to numbers::internal_face_boundary_id (this
* value is reserved for another purpose). Algorithms may not work or
* produce very confusing results if boundary cells have a boundary
FECollection (FECollection<dim,spacedim> &&fe_collection) = default;
/**
- * Move assignement operator.
+ * Move assignment operator.
*/
FECollection<dim, spacedim> &
operator= (FECollection<dim,spacedim> &&fe_collection) = default;
virtual void equ(const Number a, const VectorSpaceVector<Number> &V) override;
/**
- * Return wether the vector contains only elements with value zero.
+ * Return whether the vector contains only elements with value zero.
*/
virtual bool all_zero() const override;
/**
* Return an index set that describes which elements of this vector are
* owned by the current processor. As a consequence, the index sets
- * returned on different procesors if this is a distributed vector will
+ * returned on different processors if this is a distributed vector will
* form disjoint sets that add up to the complete index set. Obviously, if
* a vector is created on only one processor, then the result would
* satisfy
/**
* Return an index set that describes which elements of this vector are
* owned by the current processor. As a consequence, the index sets
- * returned on different procesors if this is a distributed vector will
+ * returned on different processors if this is a distributed vector will
* form disjoint sets that add up to the complete index set. Obviously, if
* a vector is created on only one processor, then the result would
* satisfy
virtual void equ(const Number a, const VectorSpaceVector<Number> &V) override;
/**
- * Return wether the vector contains only elements with value zero.
+ * Return whether the vector contains only elements with value zero.
*/
virtual bool all_zero() const override;
/**
* Return an index set that describes which elements of this vector are
* owned by the current processor. As a consequence, the index sets
- * returned on different procesors if this is a distributed vector will
+ * returned on different processors if this is a distributed vector will
* form disjoint sets that add up to the complete index set. Obviously, if
* a vector is created on only one processor, then the result would
* satisfy
/**
* Scale each element of this vector by the corresponding element in the
* argument. This function is mostly meant to simulate multiplication
- * (and immediate re-assignement) by a diagonal scaling matrix. The
+ * (and immediate re-assignment) by a diagonal scaling matrix. The
* vectors need to have the same layout.
*/
virtual void scale(const VectorSpaceVector<double> &scaling_factors) override;
/**
- * Assignement <tt>*this = a*V</tt>.
+ * Assignment <tt>*this = a*V</tt>.
*/
virtual void equ(const double a, const VectorSpaceVector<double> &V) override;
/**
- * Return wether the vector contains only elements with value zero.
+ * Return whether the vector contains only elements with value zero.
*/
virtual bool all_zero() const override;
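For reference, the semantics of scale(), equ(), and all_zero() documented above, written out with a plain std::vector (not the deal.II vector classes):
@code
#include <cassert>
#include <cstddef>
#include <vector>

int main()
{
  std::vector<double>       v       = {1.0, 2.0, 3.0};
  const std::vector<double> scaling = {2.0, 0.5, 1.0};
  const std::vector<double> w       = {4.0, 5.0, 6.0};

  // scale(): element-wise multiplication, like applying a diagonal matrix.
  for (std::size_t i = 0; i < v.size(); ++i)
    v[i] *= scaling[i]; // v = {2.0, 1.0, 3.0}

  // equ(a, V): overwrite the vector with a*V.
  const double a = 0.5;
  for (std::size_t i = 0; i < v.size(); ++i)
    v[i] = a * w[i]; // v = {2.0, 2.5, 3.0}

  // all_zero(): true only if every entry is exactly zero.
  bool all_zero = true;
  for (const double x : v)
    all_zero = all_zero && (x == 0.0);
  assert(!all_zero);
}
@endcode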
/**
* Return an index set that describes which elements of this vector are
* owned by the current processor. As a consequence, the index sets returned
- * on different procesors if this is a distributed vector will form disjoint
+ * on different processors if this is a distributed vector will form disjoint
* sets that add up to the complete index set. Obviously, if a vector is
* created on only one processor, then the result would satisfy
* @code
/**
* Return an index set that describes which elements of this vector are
* owned by the current processor. As a consequence, the index sets
- * returned on different procesors if this is a distributed vector will
+ * returned on different processors if this is a distributed vector will
* form disjoint sets that add up to the complete index set. Obviously, if
* a vector is created on only one processor, then the result would
* satisfy
* @ref GlossLocallyRelevantDof "locally relevant DoFs".
* The selection of DoFs is such that one can read all degrees of freedom on all
* locally relevant elements (locally active) plus the degrees of freedom
- * that contraints expand into from the locally owned cells. However, not
+ * that constraints expand into from the locally owned cells. However, not
* all locally relevant DoFs are stored because most of them would never be
* accessed in matrix-vector products and result in too much data sent
* around which impacts the performance.
* @ref GlossLocallyRelevantDof "locally relevant DoFs".
* The selection of DoFs is such that one can read all degrees of freedom on all
* locally relevant elements (locally active) plus the degrees of freedom
- * that contraints expand into from the locally owned cells. However, not
+ * that constraints expand into from the locally owned cells. However, not
* all locally relevant DoFs are stored because most of them would never be
* accessed in matrix-vector products and result in too much data sent
* around which impacts the performance.
* 3D this is done in two stages, edges first and then faces.
*
* For each cell, each edge, $e$, is projected by solving the linear system
- * $Ax=b$ where $x$ is the vector of contraints on degrees of freedom on the
+ * $Ax=b$ where $x$ is the vector of constraints on degrees of freedom on the
* edge and
*
* $A_{ij} = \int_{e} (\vec{s}_{i}\cdot\vec{t})(\vec{s}_{j}\cdot\vec{t}) dS$
}
// Create the system matrix by multiplying the assembling matrix
- // with its transposed and the right hand side vector by mutliplying
+ * with its transpose and the right hand side vector by multiplying
// the assembling matrix with the assembling vector. Invert the
// system matrix.
assembling_matrix.mTmult (cell_matrix, assembling_matrix);
// Project the boundary function onto the shape functions for this face
// and set up a linear system of equations to get the values for the DoFs
// associated with this face. We also must include the residuals from the
- // shape funcations associated with edges.
+ // shape functions associated with edges.
Tensor<1, dim> tmp;
Tensor<1, dim> cross_product_i;
Tensor<1, dim> cross_product_j;
const std::vector<DerivativeForm<1,2,2> > &jacobians,
ConstraintMatrix &constraints)
{
- // Compute the intergral over the product of the normal components of
+ // Compute the integral over the product of the normal components of
// the boundary function times the normal components of the shape
// functions supported on the boundary.
const FEValuesExtractors::Vector vec (first_vector_component);
double mean_to_double(const std::complex<number> &mean_value)
{
// we need to return double as a norm, but mean value is a complex
- // number. Panick and return real-part while warning the user that
+ // number. Panic and return real-part while warning the user that
// he shall never do that.
Assert ( false, ExcMessage("Mean value norm is not implemented for complex-valued vectors") );
return mean_value.real();
* whenever the content of `prm` is updated.
*
* Make sure that this class lives longer than `prm`. Undefined behaviour
- * will occurr if you destroy this class, and then parse a parameter file
+ * will occur if you destroy this class, and then parse a parameter file
* using `prm`.
*/
void add_parameters(ParameterHandler &prm)
* is assumed to also perform the setup internally.
*
* The setup_jacobian() function may call a user-supplied function to
- * compute needed data related to the Jacobian matrix. Alterntively, it may
+ * compute needed data related to the Jacobian matrix. Alternatively, it may
* choose to retrieve and use stored values of this data. In either case,
* setup_jacobian() may also preprocess that data as needed for
* solve_jacobian_system(), which may involve calling a generic function
- * amount of times. If convergence can be achieved without updating the
+ * number of times. If convergence can be achieved without updating the
* Jacobian, then ARKode does not call setup_jacobian() again. If, on the
* contrary, internal ARKode convergence tests fail, then ARKode calls
- * again setup_jacobian() with updated vectors and coefficents so that
+ * again setup_jacobian() with updated vectors and coefficients so that
* successive calls to solve_jacobian_systems() lead to better convergence
* in the Newton process.
*
* evaluated at `t`, `ycur`. `fcur` is $f_I(t,ycur)$.
*
* A call to this function should store in `dst` the result of $J^{-1}$
- * applied to `src`, i.e., `J*dst = src`. It is the users responsability to
+ * applied to `src`, i.e., `J*dst = src`. It is the user's responsibility to
* set up proper solvers and preconditioners inside this function.
*
*
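To make the contract concrete: "store in `dst` the result of $J^{-1}$ applied to `src`" simply means solving `J*dst = src`. A deliberately trivial sketch for a diagonal Jacobian (a real implementation would set up a proper solver and preconditioner, as the text says):
@code
#include <cstddef>
#include <vector>

// Solve J*dst = src for a diagonal Jacobian given by its diagonal entries.
void solve_diagonal_jacobian(const std::vector<double> &jacobian_diagonal,
                             const std::vector<double> &src,
                             std::vector<double>       &dst)
{
  dst.resize(src.size());
  for (std::size_t i = 0; i < src.size(); ++i)
    dst[i] = src[i] / jacobian_diagonal[i];
}

int main()
{
  const std::vector<double> diag = {2.0, 4.0};
  const std::vector<double> src  = {2.0, 8.0};
  std::vector<double>       dst;
  solve_diagonal_jacobian(diag, src, dst); // dst = {1.0, 2.0}
}
@endcode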
- * amount of times.
+ * number of times.
*
* A call to this function should store in `dst` the result of $M^{-1}$
- * applied to `src`, i.e., `M*dst = src`. It is the users responsability to
+ * applied to `src`, i.e., `M*dst = src`. It is the user's responsibility to
* set up proper solvers and preconditioners inside this function.
*
* This function should return:
/**
* A function object that users may supply and that is intended to evaluate
- * wether the solver should be restarted (for example because the number of
+ * whether the solver should be restarted (for example because the number of
* degrees of freedom has changed).
*
* This function is supposed to perform all operations that are necessary
* \alpha \dfrac{\partial F}{\partial \dot y}\, ,
* \f]
*
- * and $\alpha = \alpha_{n,0}/h_n$. It is worth metioning that the
+ * and $\alpha = \alpha_{n,0}/h_n$. It is worth mentioning that the
* scalar $\alpha$ changes whenever the step size or method order
* changes.
*
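For the linear DAE $F(t,y,\dot y) = M\dot y + Ky - f(t)$ the two derivatives above are $\partial F/\partial y = K$ and $\partial F/\partial \dot y = M$, so $J_F = K + \alpha M$. A minimal dense sketch (real codes would of course assemble sparse matrices):
@code
#include <array>
#include <cstddef>
#include <cstdio>

template <std::size_t n>
using Matrix = std::array<std::array<double, n>, n>;

// J = K + alpha * M, entry by entry.
template <std::size_t n>
Matrix<n> ida_system_jacobian(const Matrix<n> &K,
                              const Matrix<n> &M,
                              const double     alpha)
{
  Matrix<n> J{};
  for (std::size_t i = 0; i < n; ++i)
    for (std::size_t j = 0; j < n; ++j)
      J[i][j] = K[i][j] + alpha * M[i][j];
  return J;
}

int main()
{
  Matrix<2> K{}, M{};
  K[0][0] = 2.0;  K[0][1] = -1.0;
  K[1][0] = -1.0; K[1][1] = 2.0;
  M[0][0] = 1.0;  M[1][1] = 1.0; // identity mass matrix

  const Matrix<2> J = ida_system_jacobian(K, M, 0.5);
  std::printf("%g %g\n%g %g\n", J[0][0], J[0][1], J[1][0], J[1][1]); // 2.5 -1 / -1 2.5
}
@endcode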
* whenever the content of `prm` is updated.
*
* Make sure that this class lives longer than `prm`. Undefined behaviour
- * will occurr if you destroy this class, and then parse a parameter file
+ * will occur if you destroy this class, and then parse a parameter file
* using `prm`.
*/
void add_parameters(ParameterHandler &prm)
* construction time.
*
- * Notice that you could in principle use this capabilities to solve for
+ * Notice that you could in principle use these capabilities to solve for
- * stady state problems by setting y_dot to zero, and asking to compute
+ * steady state problems by setting y_dot to zero, and asking to compute
* $y(0)$ that satisfies $F(0, y(0), 0) = 0$, however the nonlinear solver
* used inside IDA may not be robust enough for complex problems with
- * several millions unknowns.
+ * several million unknowns.
* \f]
*
- * If the user uses a matrix based computation of the Jacobian, than this
+ * If the user uses a matrix-based computation of the Jacobian, then this
- * is the right place where an assembly routine shoulde be called to
+ * is the right place where an assembly routine should be called to
* assemble both a matrix and a preconditioner for the Jacobian system.
* Subsequent calls (possibly more than one) to solve_jacobian_system() can
* assume that this function has been called at least once.
- * amount of times. If convergence can be achieved without updating the
+ * number of times. If convergence can be achieved without updating the
* Jacobian, then IDA does not call setup_jacobian() again. If, on the
* contrary, internal IDA convergence tests fail, then IDA calls again
- * setup_jacobian() with updated vectors and coefficents so that successive
+ * setup_jacobian() with updated vectors and coefficients so that successive
* calls to solve_jacobian_systems() lead to better convergence in the
* Newton process.
*
* \f]
*
* A call to this function should store in `dst` the result of $J^{-1}$
- * applied to `src`, i.e., `J*dst = src`. It is the users responsability to
- * set up proper solvers and preconditioners inside this function.
+ * applied to `src`, i.e., `J*dst = src`. It is the user's responsibility
+ * to set up proper solvers and preconditioners inside this function.
*
* This function should return:
* - 0: Success
const unsigned int step_number)> output_step;
/**
- * Evaluate wether the solver should be restarted (for example because the
+ * Evaluate whether the solver should be restarted (for example because the
* number of degrees of freedom has changed).
*
* This function is supposed to perform all operations that are necessary in
* Return an index set containing the differential components.
* Implementation of this function is optional. The default is to return a
* complete index set. If your equation is also algebraic (i.e., it
- * contains algebraic constraints, or lagrange multipliers), you should
+ * contains algebraic constraints, or Lagrange multipliers), you should
* overwrite this function in order to return only the differential
* components of your system.
*/
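A sketch of what such a function might return, assuming (purely for illustration) that the first n_differential entries of the solution vector are the differential ones and the rest are algebraic constraints or Lagrange multipliers; the actual splitting is problem dependent:
@code
#include <deal.II/base/index_set.h>

dealii::IndexSet
example_differential_components(const unsigned int n_dofs,
                                const unsigned int n_differential)
{
  dealii::IndexSet differential(n_dofs);
  differential.add_range(0, n_differential); // half-open range [0, n_differential)
  return differential;
}
@endcode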
*
* Specifying residual() allows the user to use Newton strategies (i.e.,
* $F(u)=0$ will be solved), while specifying iteration_function(), fixed
- * point iteration or Pircard iteration will be used (i.e., $G(u)=u$ will be
+ * point iteration or Picard iteration will be used (i.e., $G(u)=u$ will be
* solved).
*
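The two strategies can be illustrated independently of KINSOL with the scalar problem u = cos(u), i.e. F(u) = u - cos(u) = 0 and G(u) = cos(u):
@code
#include <cmath>
#include <cstdio>

int main()
{
  // Fixed-point (Picard) iteration: u_{k+1} = G(u_k).
  double u = 1.0;
  for (int k = 0; k < 50; ++k)
    u = std::cos(u);
  std::printf("fixed point: %.12f\n", u);

  // Newton iteration: u_{k+1} = u_k - F(u_k)/F'(u_k), with F'(u) = 1 + sin(u).
  double v = 1.0;
  for (int k = 0; k < 10; ++k)
    v -= (v - std::cos(v)) / (1.0 + std::sin(v));
  std::printf("newton:      %.12f\n", v);
}
@endcode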
* If the use of a Newton method is desired, then the user should also supply
- * amount of times. If convergence can be achieved without updating the
+ * number of times. If convergence can be achieved without updating the
* Jacobian, then KINSOL does not call setup_jacobian() again. If, on the
* contrary, internal KINSOL convergence tests fail, then KINSOL calls
- * again setup_jacobian() with updated vectors and coefficents so that
+ * again setup_jacobian() with updated vectors and coefficients so that
* successive calls to solve_jacobian_systems() lead to better convergence
* in the Newton process.
*
* converge, or may converge very slowly.
*
* A call to this function should store in `dst` the result of $J^{-1}$
- * applied to `src`, i.e., `J*dst = src`. It is the users responsability to
- * set up proper solvers and preconditioners inside this function.
+ * applied to `src`, i.e., `J*dst = src`. It is the user's responsibility
+ * to set up proper solvers and preconditioners inside this function.
*
*
* Arguments to the function are
"active cells on a lower level. Coarsening the mesh is " +
"currently not supported"));
- // This computes the distance of the surrouding points transformed to the unit
+ // This computes the distance of the surrounding points transformed to the unit
// cell from the unit cell.
typename Triangulation<dim,spacedim>::cell_iterator
cell = triangulation->begin(level_coarse),
bool use_structdim_2_guesses = false;
bool use_structdim_3_guesses = false;
// note that in the structdim 2 case: 0 - 6 and 2 - 7 should be roughly
- // parallel, while in the structdim 3 case, 0 - 6 and 2 - 7 shoud be roughly
+ // parallel, while in the structdim 3 case, 0 - 6 and 2 - 7 should be roughly
// orthogonal. Use the angle between these two vectors to figure out if we
// should turn on either structdim optimization.
if (surrounding_points.size() == 8)
// enable the structdim 2 optimization
use_structdim_2_guesses = true;
else if (spacedim == 3)
- // otherwise these vectors are roughly orthogonal are roughly
- // orthogonal: enable the structdim 3 optimization if we are in 3D
+ // otherwise these vectors are roughly orthogonal: enable the
+ // structdim 3 optimization if we are in 3D
use_structdim_3_guesses = true;
}
// we should enable at most one of the optimizations