From: Wolfgang Bangerth
Date: Tue, 22 Aug 2017 16:29:42 +0000 (-0600)
Subject: Use American English spelling.
X-Git-Tag: v9.0.0-rc1~1185^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=refs%2Fpull%2F4933%2Fhead;p=dealii.git

Use American English spelling.
---

diff --git a/examples/step-44/doc/intro.dox b/examples/step-44/doc/intro.dox
index 26316bb0e5..2940deedb9 100644
--- a/examples/step-44/doc/intro.dox
+++ b/examples/step-44/doc/intro.dox
@@ -34,7 +34,7 @@ The setup of the example problem is then presented.
 @note This tutorial has been developed (and is described in the introduction)
 for the problem of elasticity in three dimensions. While the space dimension
 could be changed in the main() routine, care needs to be taken.
-Two-dimensional elasticity problems, in general, exist only as idealisations of three-dimensional ones.
+Two-dimensional elasticity problems, in general, exist only as idealizations of three-dimensional ones.
 That is, they are either plane strain or plane stress.
 The assumptions that follow either of these choices needs to be consistently imposed.
 For more information see the note in step-8.
@@ -531,7 +531,7 @@ Note that the following terms are termed the geometrical stress and the materia
 @f}
 
-<h3>Discretisation of governing equations</h3>
+<h3>Discretization of governing equations</h3>
 
 The three-field formulation used here is effective for quasi-incompressible
 materials, that is where $\nu \rightarrow 0.5$ (where $\nu$ is

diff --git a/examples/step-44/step-44.cc b/examples/step-44/step-44.cc
--- a/examples/step-44/step-44.cc
+++ b/examples/step-44/step-44.cc
     BlockVector<double>              solution_n;
 
     // Then define a number of variables to store norms and update norms and
-    // normalisation factors.
+    // normalization factors.
     struct Errors
     {
       Errors()
@@ -1027,7 +1027,7 @@ namespace Step44
         p = 1.0;
         J = 1.0;
       }
-      void normalise(const Errors &rhs)
+      void normalize(const Errors &rhs)
       {
         if (rhs.norm != 0.0)
           norm /= rhs.norm;
@@ -1073,7 +1073,7 @@ namespace Step44
 
 // @sect4{Public interface}
 
-// We initialise the Solid class using data extracted from the parameter file.
+// We initialize the Solid class using data extracted from the parameter file.
   template <int dim>
   Solid<dim>::Solid(const std::string &input_file)
     :
@@ -1561,7 +1561,7 @@ namespace Step44
     block_component[p_component] = p_dof; // Pressure
     block_component[J_component] = J_dof; // Dilatation
 
-    // The DOF handler is then initialised and we renumber the grid in an
+    // The DOF handler is then initialized and we renumber the grid in an
     // efficient manner. We also record the number of DOFs per block.
     dof_handler_ref.distribute_dofs(fe);
    DoFRenumbering::Cuthill_McKee(dof_handler_ref);
@@ -1615,7 +1615,7 @@ namespace Step44
    // \\ \mathsf{\mathbf{F}}_{\widetilde{J}}(\widetilde{J}_{\textrm{i}})
    //\end{bmatrix}}_{ \mathsf{\mathbf{F}}(\mathbf{\Xi}_{\textrm{i}}) } \, .
    // @f}
-   // We optimise the sparsity pattern to reflect this structure
+   // We optimize the sparsity pattern to reflect this structure
    // and prevent unnecessary data creation for the right-diagonal
    // block components.
    Table<2, DoFTools::Coupling> coupling(n_components, n_components);
@@ -1838,10 +1838,10 @@ namespace Step44
        if (newton_iteration == 0)
          error_residual_0 = error_residual;
 
-       // We can now determine the normalised residual error and check for
+       // We can now determine the normalized residual error and check for
        // solution convergence:
        error_residual_norm = error_residual;
-       error_residual_norm.normalise(error_residual_0);
+       error_residual_norm.normalize(error_residual_0);
 
        if (newton_iteration > 0 && error_update_norm.u <= parameters.tol_u
            && error_residual_norm.u <= parameters.tol_f)
@@ -1866,12 +1866,12 @@ namespace Step44
        if (newton_iteration == 0)
          error_update_0 = error_update;
 
-       // We can now determine the normalised Newton update error, and
+       // We can now determine the normalized Newton update error, and
        // perform the actual update of the solution increment for the current
        // time step, update all quadrature point information pertaining to
        // this new displacement and stress state and continue iterating:
        error_update_norm = error_update;
-       error_update_norm.normalise(error_update_0);
+       error_update_norm.normalize(error_update_0);
 
        solution_delta += newton_update;
        update_qph_incremental(solution_delta);
@@ -2140,7 +2140,7 @@ namespace Step44
   }
 
 // Of course, we still have to define how we assemble the tangent matrix
-// contribution for a single cell. We first need to reset and initialise some
+// contribution for a single cell. We first need to reset and initialize some
 // of the scratch data structures and retrieve some basic information
 // regarding the DOF numbering on this cell. We can precalculate the cell
 // shape function values and gradients. Note that the shape function gradients
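Aside on the pattern behind the step-44 hunks above: the renamed Errors::normalize() divides each residual or update norm by its value at the first Newton iteration, so convergence is tested on dimensionless quantities. A minimal self-contained sketch of that logic (illustrative only; the real class additionally tracks separate norms for the displacement, pressure and dilatation blocks, and the shrinking residual here is faked):

    #include <cmath>
    #include <iostream>

    // Reduced version of step-44's Errors bookkeeping: keep a norm and
    // divide it by the corresponding first-iteration norm, so the Newton
    // loop tests a relative (normalized) error.
    struct Errors
    {
      double norm = 1.0;

      void normalize(const Errors &rhs)
      {
        if (rhs.norm != 0.0) // guard against a vanishing initial residual
          norm /= rhs.norm;
      }
    };

    int main()
    {
      const double tol_f = 1e-9; // relative residual tolerance
      Errors error_residual, error_residual_0, error_residual_norm;

      for (unsigned int newton_iteration = 0; newton_iteration < 10;
           ++newton_iteration)
        {
          // Stand-in for assembling the residual: pretend it shrinks
          // quadratically, as a Newton method would near the solution.
          error_residual.norm = std::pow(1e-2, 1 << newton_iteration);

          if (newton_iteration == 0)
            error_residual_0 = error_residual; // remember the reference value

          error_residual_norm = error_residual;
          error_residual_norm.normalize(error_residual_0);

          std::cout << "iteration " << newton_iteration
                    << ": relative residual " << error_residual_norm.norm
                    << '\n';

          if (newton_iteration > 0 && error_residual_norm.norm <= tol_f)
            break; // converged relative to the first iteration
        }
    }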
diff --git a/include/deal.II/base/quadrature_point_data.h b/include/deal.II/base/quadrature_point_data.h
index 2169b0b6db..3ba484cec2 100644
--- a/include/deal.II/base/quadrature_point_data.h
+++ b/include/deal.II/base/quadrature_point_data.h
@@ -506,7 +506,7 @@ void CellDataStorage::initialize(const CellIteratorTy
   if (map.find(cell) == map.end())
     {
       map[cell] = std::vector<std::shared_ptr<DataType> >(n_q_points);
-      // we need to initialise one-by-one as the std::vector<>(q, T())
+      // we need to initialize one-by-one as the std::vector<>(q, T())
       // will end with a single same T object stored in each element of the vector:
       auto it = map.find(cell);
       for (unsigned int q=0; q < n_q_points; q++)
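Aside on the comment in the hunk above: it guards against a genuine C++ pitfall. The std::vector fill constructor copies its argument, so a vector of shared_ptr built that way ends up with every element pointing at one shared object. A self-contained demonstration (plain C++, not deal.II code):

    #include <iostream>
    #include <memory>
    #include <vector>

    struct Data { double value = 0.0; };

    int main()
    {
      // Fill constructor: ONE Data object, aliased by every element.
      std::vector<std::shared_ptr<Data>> shared(3, std::make_shared<Data>());
      shared[0]->value = 42.0;
      std::cout << shared[2]->value << '\n'; // prints 42: all slots alias

      // Element-by-element initialization, as CellDataStorage does:
      std::vector<std::shared_ptr<Data>> distinct(3);
      for (auto &p : distinct)
        p = std::make_shared<Data>();
      distinct[0]->value = 42.0;
      std::cout << distinct[2]->value << '\n'; // prints 0: independent objects
    }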
diff --git a/include/deal.II/base/symmetric_tensor.h b/include/deal.II/base/symmetric_tensor.h
index eb38cb32b1..a0e5a6eb95 100644
--- a/include/deal.II/base/symmetric_tensor.h
+++ b/include/deal.II/base/symmetric_tensor.h
@@ -3018,7 +3018,7 @@ enum struct SymmetricTensorEigenvectorMethod
   hybrid,
   /**
    * The iterative QL algorithm with implicit shifts applied after
-   * tridiagonalisation of the tensor using the householder method.
+   * tridiagonalization of the tensor using the householder method.
    *
    * This method offers a compromise between speed of computation and its
    * robustness. This method is particularly useful when the elements
diff --git a/include/deal.II/base/thread_management.h b/include/deal.II/base/thread_management.h
index d22862debc..7c5310c459 100644
--- a/include/deal.II/base/thread_management.h
+++ b/include/deal.II/base/thread_management.h
@@ -174,7 +174,7 @@ namespace Threads
   * class names in multithreading and non-MT mode and thus may be compiled
   * with or without thread-support without the need to use conditional
   * compilation. Since a barrier class only makes sense in non-multithread
-  * mode if only one thread is to be synchronised (otherwise, the barrier
+  * mode if only one thread is to be synchronized (otherwise, the barrier
   * could not be left, since the one thread is waiting for some other part of
   * the program to reach a certain point of execution), the constructor of
   * this class throws an exception if the count argument
@@ -188,7 +188,7 @@ public:
    /**
     * Constructor. Since barriers are only useful in single-threaded mode if
-    * the number of threads to be synchronised is one, this constructor
+    * the number of threads to be synchronized is one, this constructor
     * raises an exception if the count argument is one.
     */
    DummyBarrier (const unsigned int count,
diff --git a/include/deal.II/base/work_stream.h b/include/deal.II/base/work_stream.h
index 3482e5cb08..7b84630764 100644
--- a/include/deal.II/base/work_stream.h
+++ b/include/deal.II/base/work_stream.h
@@ -55,11 +55,11 @@ DEAL_II_NAMESPACE_OPEN
 * many such examples, part of the work can be done entirely independently and
 * in parallel, possibly using several processor cores on a machine with
 * shared memory. However, some other part of this work may need to be
- * synchronised and be done in order. In the example of assembling a matrix,
+ * synchronized and be done in order. In the example of assembling a matrix,
 * the computation of local contributions can be done entirely in parallel,
 * but copying the local contributions into the global matrix requires some
 * care: First, several threads can't write at the same time, but need to
- * synchronise writing using a mutex; secondly, we want the order in which
+ * synchronize writing using a mutex; secondly, we want the order in which
 * local contributions are added to the global matrix to be always the same
 * because floating point addition is not commutative and adding local
 * contributions to the global matrix in different orders leads to subtly
@@ -74,7 +74,7 @@ DEAL_II_NAMESPACE_OPEN
 * parallel on all of these objects and then passes each object to a
 * postprocessor function that runs sequentially and gets objects in exactly
 * the order in which they appear in the input iterator range. None of the
- * synchronisation work is exposed to the user of this class.
+ * synchronization work is exposed to the user of this class.
 *
 * Internally, the range given to the run() function of this class is split
 * into a sequence of "items", which are then distributed according to some
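Aside: the work_stream.h paragraphs above describe the scheme precisely — run the per-item work in parallel, then let a single consumer write results into the global object in input order, so no mutex is needed and floating-point sums stay deterministic. A plain-C++ imitation of that idea (this is not the WorkStream API itself, just the concept it implements):

    #include <future>
    #include <iostream>
    #include <numeric>
    #include <vector>

    int main()
    {
      std::vector<double> items(16);
      std::iota(items.begin(), items.end(), 1.0);

      // "Worker" stage: each item's local result is computed
      // independently, so the computations may run concurrently.
      std::vector<std::future<double>> local_results;
      for (const double x : items)
        local_results.push_back(std::async(std::launch::async,
                                           [x] { return x * x; }));

      // "Copier" stage: executed by one thread, in input order. No mutex
      // is needed and the accumulation order is always the same, so the
      // floating-point result is reproducible run to run.
      double global_sum = 0.0;
      for (auto &r : local_results)
        global_sum += r.get();

      std::cout << "sum of squares = " << global_sum << '\n';
    }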

diff --git a/include/deal.II/dofs/dof_levels.h b/include/deal.II/dofs/dof_levels.h
index 269a770e55..e2106895e5 100644
--- a/include/deal.II/dofs/dof_levels.h
+++ b/include/deal.II/dofs/dof_levels.h
@@ -51,7 +51,7 @@ namespace internal
     * The indices of degrees of freedom located on lower dimensional objects,
     * i.e. on lines for 2D and on quads and lines for 3D are treated
     * similarly than that on cells. However, these geometrical objects, which
-    * are called faces as a generalisation, are not organised in a
+    * are called faces as a generalization, are not organised in a
     * hierarchical structure of levels. Therefore, the degrees of freedom
     * located on these objects are stored in separate classes, namely the
     * DoFFaces classes.
diff --git a/include/deal.II/lac/block_linear_operator.h b/include/deal.II/lac/block_linear_operator.h
index c70fa70444..60fa0775dc 100644
--- a/include/deal.II/lac/block_linear_operator.h
+++ b/include/deal.II/lac/block_linear_operator.h
@@ -503,7 +503,7 @@ block_operator(const std::array
   typedef typename BlockLinearOperator::BlockType BlockType;
 
-  BlockLinearOperator return_op ((BlockPayload())); // TODO: Create block payload so that this can be initialised correctly
+  BlockLinearOperator return_op ((BlockPayload())); // TODO: Create block payload so that this can be initialized correctly
 
   return_op.n_block_rows = []() -> unsigned int
   {
diff --git a/include/deal.II/lac/filtered_matrix.h b/include/deal.II/lac/filtered_matrix.h
index 01e68e82d1..2280233363 100644
--- a/include/deal.II/lac/filtered_matrix.h
+++ b/include/deal.II/lac/filtered_matrix.h
@@ -183,7 +183,7 @@ template class FilteredMatrixBlock;
 *
 * <h3>Thread-safety</h3>
 *
 * The functions that operate as a matrix and do not change the internal state
- * of this object are synchronised and thus threadsafe. Consequently, you do
+ * of this object are synchronized and thus threadsafe. Consequently, you do
 * not need to serialize calls to @p vmult or @p residual .
 *
 * @author Wolfgang Bangerth 2001, Luca Heltai 2006, Guido Kanschat 2007, 2008
diff --git a/include/deal.II/lac/linear_operator.h b/include/deal.II/lac/linear_operator.h
index b9b64a8c21..c0fd4d54da 100644
--- a/include/deal.II/lac/linear_operator.h
+++ b/include/deal.II/lac/linear_operator.h
@@ -1263,7 +1263,7 @@ template
 linear_operator(const OperatorExemplar &operator_exemplar, const Matrix &matrix)
 {
-  // Initialise the payload based on the input exemplar matrix
+  // Initialize the payload based on the input exemplar matrix
   LinearOperator return_op (Payload(operator_exemplar,matrix));
 
   // Always store a reference to matrix and operator_exemplar in the lambda
@@ -1317,7 +1317,7 @@ LinearOperator
 linear_operator(const LinearOperator &operator_exemplar, const Matrix &matrix)
 {
-  // Initialise the payload based on the LinearOperator exemplar
+  // Initialize the payload based on the LinearOperator exemplar
   auto return_op = operator_exemplar;
 
   typename std::conditional<
diff --git a/include/deal.II/lac/petsc_parallel_sparse_matrix.h b/include/deal.II/lac/petsc_parallel_sparse_matrix.h
index 9d5348cbb7..3bfcd7f0d4 100644
--- a/include/deal.II/lac/petsc_parallel_sparse_matrix.h
+++ b/include/deal.II/lac/petsc_parallel_sparse_matrix.h
@@ -136,7 +136,7 @@ namespace PETScWrappers
      /**
       * It is not safe to elide additions of zeros to individual elements
       * of this matrix. The reason is that additions to the matrix may
-      * trigger collective operations synchronising buffers on multiple
+      * trigger collective operations synchronizing buffers on multiple
       * processes. If an addition is elided on one process, this may lead
       * to other processes hanging in an infinite waiting loop.
       */
diff --git a/include/deal.II/lac/petsc_parallel_vector.h b/include/deal.II/lac/petsc_parallel_vector.h
index be4267c188..8e46fd8e73 100644
--- a/include/deal.II/lac/petsc_parallel_vector.h
+++ b/include/deal.II/lac/petsc_parallel_vector.h
@@ -48,7 +48,7 @@ namespace PETScWrappers
     /**
      * Implementation of a parallel vector class based on PETSC and using MPI
-     * communication to synchronise distributed operations. All the
+     * communication to synchronize distributed operations. All the
      * functionality is actually in the base class, except for the calls to
      * generate a parallel vector. This is possible since PETSc only works on
      * an abstract vector type and internally distributes to functions that do
@@ -105,7 +105,7 @@
      * time, all these additions are executed. However, if one process adds to
      * an element, and another overwrites to it, the order of execution would
      * yield non-deterministic behavior if we don't make sure that a
-     * synchronisation with compress() happens in between.
+     * synchronization with compress() happens in between.
      *
      * In order to make sure these calls to compress() happen at the
      * appropriate time, the deal.II wrappers keep a state variable that store
diff --git a/include/deal.II/lac/petsc_precondition.h b/include/deal.II/lac/petsc_precondition.h
index b29abc3e96..e050bbac6f 100644
--- a/include/deal.II/lac/petsc_precondition.h
+++ b/include/deal.II/lac/petsc_precondition.h
@@ -603,7 +603,7 @@ namespace PETScWrappers
      /**
       * This quantity is added to the diagonal of the matrix during
-      * factorisation.
+      * factorization.
       */
      double damping;
    };
diff --git a/include/deal.II/lac/sparse_direct.h b/include/deal.II/lac/sparse_direct.h
index f5dbf9c18e..ed42e196b6 100644
--- a/include/deal.II/lac/sparse_direct.h
+++ b/include/deal.II/lac/sparse_direct.h
@@ -128,8 +128,8 @@ public:
   * spent in the factorization, so this functionality may not always be of
   * large benefit.
   *
-  * In contrast to the other direct solver classes, the initialisation method
-  * does nothing. Therefore initialise is not automatically called by this
+  * In contrast to the other direct solver classes, the initialization method
+  * does nothing. Therefore initialize is not automatically called by this
   * method, when the initialization step has not been performed yet.
   *
   * This function copies the contents of the matrix into its own storage; the
diff --git a/include/deal.II/lac/swappable_vector.templates.h b/include/deal.II/lac/swappable_vector.templates.h
index 9e19b5442c..08d64e7e8f 100644
--- a/include/deal.II/lac/swappable_vector.templates.h
+++ b/include/deal.II/lac/swappable_vector.templates.h
@@ -117,7 +117,7 @@ void SwappableVector::swap_out (const std::string &name)
 template
 void SwappableVector::reload ()
 {
-  // if in MT mode: synchronise with
+  // if in MT mode: synchronize with
   // possibly existing @p alert
   // calls. if not in MT mode, this
   // is a no-op
@@ -155,7 +155,7 @@ void SwappableVector::alert ()
   return;
 #else
-  // synchronise with possible other
+  // synchronize with possible other
   // invocations of this function and
   // other functions in this class
   lock.acquire ();
diff --git a/include/deal.II/lac/trilinos_sparse_matrix.h b/include/deal.II/lac/trilinos_sparse_matrix.h
index 59f0527ff3..03a14892b1 100644
--- a/include/deal.II/lac/trilinos_sparse_matrix.h
+++ b/include/deal.II/lac/trilinos_sparse_matrix.h
@@ -2011,7 +2011,7 @@ namespace TrilinosWrappers
     /**
      * Trilinos doesn't allow to mix additions to matrix entries and
-     * overwriting them (to make synchronisation of %parallel computations
+     * overwriting them (to make synchronization of %parallel computations
      * simpler). The way we do it is to, for each access operation, store
      * whether it is an insertion or an addition. If the previous one was of
      * different type, then we first have to flush the Trilinos buffers;
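Aside: the PETSc and Trilinos comments above all circle one rule — buffered "add" and "insert" accesses must not be mixed without an intervening compress(), and compress() is collective, so every MPI rank has to reach it. A toy, single-process model of that bookkeeping (illustrative only; ToyDistributedVector is invented here and merely stands in for the real wrapper classes):

    #include <iostream>
    #include <stdexcept>
    #include <unordered_map>

    // Toy model of the add/insert state machine described above. The real
    // PETSc/Trilinos objects buffer off-process entries; mixing "add" and
    // "insert" without flushing would make the result order-dependent, so
    // the wrappers remember the last operation and demand a compress()
    // (in reality, a collective MPI call) before switching.
    class ToyDistributedVector
    {
    public:
      enum class Op { none, add, insert };

      void add(int i, double v) { check(Op::add);    buffer[i] += v; }
      void set(int i, double v) { check(Op::insert); buffer[i]  = v; }

      void compress() // flush the buffer, then allow a mode switch
      {
        for (const auto &e : buffer)
          {
            if (last_op == Op::add)
              values[e.first] += e.second;
            else
              values[e.first] = e.second;
          }
        buffer.clear();
        last_op = Op::none;
      }

      double operator()(int i) const { return values.at(i); }

    private:
      void check(Op op)
      {
        if (last_op != Op::none && last_op != op)
          throw std::logic_error("call compress() before switching add/insert");
        last_op = op;
      }

      Op last_op = Op::none;
      std::unordered_map<int, double> buffer, values;
    };

    int main()
    {
      ToyDistributedVector v;
      v.set(0, 1.0);
      v.compress(); // flush insertions before starting to accumulate
      v.add(0, 0.5);
      v.compress();
      std::cout << v(0) << '\n'; // prints 1.5
    }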
diff --git a/include/deal.II/lac/trilinos_vector.h b/include/deal.II/lac/trilinos_vector.h
index 058068a32d..718a8e0a16 100644
--- a/include/deal.II/lac/trilinos_vector.h
+++ b/include/deal.II/lac/trilinos_vector.h
@@ -1216,7 +1216,7 @@ namespace TrilinosWrappers
    private:
      /**
       * Trilinos doesn't allow to mix additions to matrix entries and
-      * overwriting them (to make synchronisation of parallel computations
+      * overwriting them (to make synchronization of parallel computations
       * simpler). The way we do it is to, for each access operation, store
       * whether it is an insertion or an addition. If the previous one was of
       * different type, then we first have to flush the Trilinos buffers;
diff --git a/include/deal.II/lac/vector_memory.h b/include/deal.II/lac/vector_memory.h
index 405aeb686e..99fe0435fd 100644
--- a/include/deal.II/lac/vector_memory.h
+++ b/include/deal.II/lac/vector_memory.h
@@ -326,7 +326,7 @@ private:
  bool log_statistics;
 
  /**
-  * Mutex to synchronise access to internal data of this object from multiple
+  * Mutex to synchronize access to internal data of this object from multiple
   * threads.
   */
  static Threads::Mutex mutex;
diff --git a/include/deal.II/numerics/error_estimator.templates.h b/include/deal.II/numerics/error_estimator.templates.h
index b64df7e7e3..97f746e614 100644
--- a/include/deal.II/numerics/error_estimator.templates.h
+++ b/include/deal.II/numerics/error_estimator.templates.h
@@ -69,14 +69,14 @@ namespace internal
      * which we found can take a significant amount of time if it happens
      * often even in the single threaded case (10-20 per cent in our
      * measurements); however, most importantly, memory allocation requires
-     * synchronisation in multithreaded mode. While that is done by the C++
+     * synchronization in multithreaded mode. While that is done by the C++
      * library and has not to be handcoded, it nevertheless seriously damages
      * the ability to efficiently run the functions of this class in parallel,
-     * since they are quite often blocked by these synchronisation points,
+     * since they are quite often blocked by these synchronization points,
      * slowing everything down by a factor of two or three.
      *
      * Thus, every thread gets an instance of this class to work with and
-     * needs not allocate memory itself, or synchronise with other threads.
+     * needs not allocate memory itself, or synchronize with other threads.
      *
      * The sizes of the arrays are initialized with the maximal number of
      * entries necessary for the hp case. Within the loop over individual
@@ -113,7 +113,7 @@
      * points for each of the solution vectors (i.e. a temporary value).
      * This vector is not allocated inside the functions that use it, but
      * rather globally, since memory allocation is slow, in particular in
-     * presence of multiple threads where synchronisation makes things even
+     * presence of multiple threads where synchronization makes things even
      * slower.
      */
     std::vector > > phi;
diff --git a/include/deal.II/numerics/time_dependent.h b/include/deal.II/numerics/time_dependent.h
index 4d330ec031..eeea7e1441 100644
--- a/include/deal.II/numerics/time_dependent.h
+++ b/include/deal.II/numerics/time_dependent.h
@@ -1251,7 +1251,7 @@ namespace TimeStepBase_Tria_Flags
 
 /**
- * Specialisation of TimeStepBase which addresses some aspects of grid
+ * Specialization of TimeStepBase which addresses some aspects of grid
  * handling. In particular, this class is thought to make handling of grids
  * available that are adaptively refined on each time step separately or with
  * a loose coupling between time steps. It also takes care of deleting and
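Aside: the error_estimator.templates.h comments above motivate the scratch-data pattern — allocate each thread's temporaries once, up front, so the hot loop neither allocates nor synchronizes. A self-contained sketch (plain C++ threads; the ScratchData name mirrors deal.II usage but the code is purely illustrative):

    #include <iostream>
    #include <thread>
    #include <vector>

    // Per-thread scratch storage, sized once for the worst case. Reusing
    // it for every "cell" keeps heap allocation (and the allocator's
    // internal locking) out of the parallel loop entirely.
    struct ScratchData
    {
      std::vector<double> values;
      explicit ScratchData(std::size_t max_size) : values(max_size) {}
    };

    void process_range(std::size_t begin, std::size_t end, double &result)
    {
      ScratchData scratch(64); // one allocation per thread, up front
      double local = 0.0;
      for (std::size_t cell = begin; cell < end; ++cell)
        {
          // Reuse scratch.values instead of creating a vector per cell.
          for (std::size_t q = 0; q < scratch.values.size(); ++q)
            scratch.values[q] = static_cast<double>(cell + q);
          for (const double v : scratch.values)
            local += v;
        }
      result = local; // single write at the end, no shared state in the loop
    }

    int main()
    {
      double r0 = 0, r1 = 0;
      std::thread t0(process_range, 0, 50, std::ref(r0));
      std::thread t1(process_range, 50, 100, std::ref(r1));
      t0.join();
      t1.join();
      std::cout << r0 + r1 << '\n';
    }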
diff --git a/source/fe/fe_abf.cc b/source/fe/fe_abf.cc
index 6bd42a1b97..50f305295f 100644
--- a/source/fe/fe_abf.cc
+++ b/source/fe/fe_abf.cc
@@ -160,7 +160,7 @@ FE_ABF<dim>::initialize_support_points (const unsigned int deg)
 
   // These might be required when the faces contribution is computed
-  // Therefore they will be initialised at this point.
+  // Therefore they will be initialized at this point.
   std::vector<AnisotropicPolynomials<dim>* > polynomials_abf(dim);
 
   // Generate x_1^{i} x_2^{r+1} ...
@@ -212,7 +212,7 @@ FE_ABF<dim>::initialize_support_points (const unsigned int deg)
   }
 
-  // Now initialise edge interior weights for the ABF elements.
+  // Now initialize edge interior weights for the ABF elements.
   // These are completely independent from the usual edge moments. They
   // stem from applying the Gauss theorem to the nodal values, which
   // was necessary to cast the ABF elements into the deal.II framework
diff --git a/source/grid/grid_generator.cc b/source/grid/grid_generator.cc
index 40356f743f..f839683775 100644
--- a/source/grid/grid_generator.cc
+++ b/source/grid/grid_generator.cc
@@ -834,7 +834,7 @@ namespace GridGenerator
                              const Point<dim> (&corners) [dim],
                              const bool colorize)
   {
-    // Equalise number of subdivisions in each dim-direction, their
+    // Equalize number of subdivisions in each dim-direction, their
     // validity will be checked later
     unsigned int n_subdivisions_ [dim];
     for (unsigned int i=0; i<dim; ++i)
diff --git a/source/grid/tria.cc b/source/grid/tria.cc
--- a/source/grid/tria.cc
+++ b/source/grid/tria.cc
@@ bool Triangulation::prepare_coarsening_and_refinement ()
   // now for what is done in each loop: we have to fulfill several
   // tasks at the same time, namely several mesh smoothing algorithms
-  // and mesh regularisation, by which we mean that the next mesh
+  // and mesh regularization, by which we mean that the next mesh
   // fulfills several requirements such as no double refinement at
   // each face or line, etc.
   //
@@ -12590,7 +12590,7 @@ bool Triangulation::prepare_coarsening_and_refinement ()
   // these were done at once, so the code was rather impossible to
   // join into this, only, function), we do them one after each
   // other. the order in which we do them is such that the important
-  // tasks, namely regularisation, are done last and the least
+  // tasks, namely regularization, are done last and the least
   // important things are done the first. the following order is
   // chosen:
   //
diff --git a/source/lac/trilinos_sparse_matrix.cc b/source/lac/trilinos_sparse_matrix.cc
index 79cdf046dd..1808b8b8f9 100644
--- a/source/lac/trilinos_sparse_matrix.cc
+++ b/source/lac/trilinos_sparse_matrix.cc
@@ -2975,7 +2975,7 @@ namespace TrilinosWrappers
       static GrowingVectorMemory vector_memory;
       GVMVectorType *i = vector_memory.alloc();
 
-      // Initialise intermediate vector
+      // Initialize intermediate vector
       const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
       i->reinit(IndexSet(first_op_init_map),
                 first_op.get_mpi_communicator(),
@@ -3016,7 +3016,7 @@ namespace TrilinosWrappers
       const_cast(first_op).transpose();
       const_cast(second_op).transpose();
 
-      // Initialise intermediate vector
+      // Initialize intermediate vector
       const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
       i->reinit(IndexSet(first_op_init_map),
                 first_op.get_mpi_communicator(),
@@ -3054,7 +3054,7 @@ namespace TrilinosWrappers
       static GrowingVectorMemory vector_memory;
       GVMVectorType *i = vector_memory.alloc();
 
-      // Initialise intermediate vector
+      // Initialize intermediate vector
       const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
       i->reinit(IndexSet(first_op_init_map),
                 first_op.get_mpi_communicator(),
@@ -3095,7 +3095,7 @@ namespace TrilinosWrappers
       const_cast(first_op).transpose();
       const_cast(second_op).transpose();
 
-      // Initialise intermediate vector
+      // Initialize intermediate vector
       const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
       i->reinit(IndexSet(first_op_init_map),
                 first_op.get_mpi_communicator(),
@@ -3152,7 +3152,7 @@ namespace TrilinosWrappers
       static GrowingVectorMemory vector_memory;
       GVMVectorType *i = vector_memory.alloc();
 
-      // Initialise intermediate vector
+      // Initialize intermediate vector
       const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
       i->reinit(IndexSet(first_op_init_map),
                 first_op.get_mpi_communicator(),
@@ -3191,7 +3191,7 @@ namespace TrilinosWrappers
       const_cast(first_op).transpose();
       const_cast(second_op).transpose();
 
-      // Initialise intermediate vector
+      // Initialize intermediate vector
       const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
       i->reinit(IndexSet(first_op_init_map),
                 first_op.get_mpi_communicator(),
@@ -3226,7 +3226,7 @@ namespace TrilinosWrappers
       static GrowingVectorMemory vector_memory;
       GVMVectorType *i = vector_memory.alloc();
 
-      // Initialise intermediate vector
+      // Initialize intermediate vector
       const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
       i->reinit(IndexSet(first_op_init_map),
                 first_op.get_mpi_communicator(),
@@ -3265,7 +3265,7 @@ namespace TrilinosWrappers
       const_cast(first_op).transpose();
       const_cast(second_op).transpose();
 
-      // Initialise intermediate vector
+      // Initialize intermediate vector
       const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
       i->reinit(IndexSet(first_op_init_map),
                 first_op.get_mpi_communicator(),
diff --git a/tests/base/task_09.cc b/tests/base/task_09.cc
index dadf5a46a7..9dfd811e0f 100644
--- a/tests/base/task_09.cc
+++ b/tests/base/task_09.cc
@@ -14,7 +14,7 @@
 // ---------------------------------------------------------------------
 
 
-// we used to synchronise child tasks with the one that spawned it by
+// we used to synchronize child tasks with the one that spawned it by
 // using a mutex, but this could lead to deadlocks if there are more
 // tasks than processors available, see the emails on the mailing
 // lists in late nov 2010.
diff --git a/tests/lapack/full_matrix_00.cc b/tests/lapack/full_matrix_00.cc
index ca0a486db9..14ca5fc8c4 100644
--- a/tests/lapack/full_matrix_00.cc
+++ b/tests/lapack/full_matrix_00.cc
@@ -14,7 +14,7 @@
 // ---------------------------------------------------------------------
 
 
-// Tests reinitialisation of square and rectangle LAPACKFullMatrix
+// Tests reinitialization of square and rectangle LAPACKFullMatrix
 
 #include "../tests.h"
 #include
@@ -94,12 +94,12 @@ int main()
   logfile.precision(3);
   deallog.attach(logfile);
 
-  // Test square matrix initialisation
+  // Test square matrix initialization
   test (4, true);
   test (5, true);
   test (6, true);
 
-  // Test rectangle matrix initialisation
+  // Test rectangle matrix initialization
   test (4, false);
   test (5, false);
   test (6, false);
diff --git a/tests/petsc_complex/00.cc b/tests/petsc_complex/00.cc
index 9ac23af4b1..94c4b4cd6f 100644
--- a/tests/petsc_complex/00.cc
+++ b/tests/petsc_complex/00.cc
@@ -121,7 +121,7 @@ void make_petsc_complex ()
 // initialised to 0+0i.
 void init_petsc_complex ()
 {
-  deallog << "Check PetscScalar initialisation" << std::endl;
+  deallog << "Check PetscScalar initialization" << std::endl;
 
   // Initialise (no argument) to zero.
   const PetscScalar alpha;
@@ -151,11 +151,11 @@ int main (int argc, char **argv)
 {
   Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);
   {
-    // initialisation (zero and nonzero)
+    // initialization (zero and nonzero)
     init_petsc_complex ();
     make_petsc_complex ();
 
-    // initialisation from std::complex (and vice versa)
+    // initialization from std::complex (and vice versa)
     make_petsc_complex_from_std_complex ();
     make_std_complex_from_petsc_complex ();
 
diff --git a/tests/petsc_complex/00.output b/tests/petsc_complex/00.output
index a7cb78f5fb..0c14b51c92 100644
--- a/tests/petsc_complex/00.output
+++ b/tests/petsc_complex/00.output
@@ -1,5 +1,5 @@
-DEAL::Check PetscScalar initialisation
+DEAL::Check PetscScalar initialization
 DEAL::OK
 DEAL::Check a nonzero PetscScalar
 DEAL::OK
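Aside: the 00.cc test above can declare "const PetscScalar alpha;" and expect 0+0i because, in a complex-valued PETSc build, PetscScalar is std::complex<double>, whose default constructor value-initializes both parts (a plain "double alpha;" would be uninitialized instead). A stand-alone check of that C++ guarantee (plain std::complex, no PETSc involved):

    #include <complex>
    #include <iostream>

    // Stands in for PetscScalar in a complex PETSc build.
    using Scalar = std::complex<double>;

    int main()
    {
      // std::complex's default constructor initializes both the real and
      // the imaginary part to 0.0, so const default-initialization is fine.
      const Scalar alpha;
      std::cout << alpha.real() << "+" << alpha.imag() << "i\n"; // 0+0i
    }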