From: Marc Fehling
Date: Wed, 10 Jul 2024 09:38:07 +0000 (+0200)
Subject: Run codespell on repository.
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=refs%2Fpull%2F187%2Fhead;p=code-gallery.git

Run codespell on repository.
---

diff --git a/CeresFE/CMakeLists.txt b/CeresFE/CMakeLists.txt
index bd40feb..e358977 100644
--- a/CeresFE/CMakeLists.txt
+++ b/CeresFE/CMakeLists.txt
@@ -48,7 +48,7 @@ FIND_LIBRARY(ARMADILLO_LIBRARY
   NAMES armadillo
   )

 #
-# Are all dependencies fullfilled?
+# Are all dependencies fulfilled?
 #
 IF(NOT DEAL_II_WITH_UMFPACK)
diff --git a/CeresFE/Readme.md b/CeresFE/Readme.md
index c10580a..750ae30 100644
--- a/CeresFE/Readme.md
+++ b/CeresFE/Readme.md
@@ -4,7 +4,7 @@ Readme file for CeresFE
 Motivation for project
 ----------------------

-This code was made to simulate the evolution of global-scale topography on planetary bodies. Specifically, it is designed to compute the rates of topography relaxation on the dwarf planet Ceres. The NASA Dawn mission, in orbit around Ceres since March, 2015, has produced a high resolution shape model of its surface. As on other planets including the Earth, topography on Ceres is subject to decay over time due to processes such as viscous flow and brittle failure. Because the efficiency of these processes is dependent on the material properties of the body at depth, simulating the decay of topography and comparing it to the observed shape model permits insights into Ceres' internal stucture.
+This code was made to simulate the evolution of global-scale topography on planetary bodies. Specifically, it is designed to compute the rates of topography relaxation on the dwarf planet Ceres. The NASA Dawn mission, in orbit around Ceres since March, 2015, has produced a high resolution shape model of its surface. As on other planets including the Earth, topography on Ceres is subject to decay over time due to processes such as viscous flow and brittle failure. Because the efficiency of these processes is dependent on the material properties of the body at depth, simulating the decay of topography and comparing it to the observed shape model permits insights into Ceres' internal structure.

 Some previous applications of this basic idea- using topography to constrain internal structure- may be found in the following references:

@@ -49,7 +49,7 @@ Description of files in repo
 ----------------------------

 * src/ceres.cc Main code
-* support_code/config_in.h Reads config file and intializes system parameters
+* support_code/config_in.h Reads config file and initializes system parameters
 * support_code/ellipsoid_fit.h Finds best-fit ellipse for surface and internal density boundaries. Also uses deal.II
 * support_code/ellipsoid_grav.h Analytically computes self gravity of layered ellipsoids structure
 * support_code/local_math.h Defines some constants for convenience
diff --git a/Distributed_LDG_Method/CMakeLists.txt b/Distributed_LDG_Method/CMakeLists.txt
index 4448450..4fe7c2c 100644
--- a/Distributed_LDG_Method/CMakeLists.txt
+++ b/Distributed_LDG_Method/CMakeLists.txt
@@ -30,7 +30,7 @@ IF(NOT ${deal.II_FOUND})
 ENDIF()

 #
-# Are all dependencies fullfilled?
+# Are all dependencies fulfilled?
 #
 IF( NOT DEAL_II_WITH_MPI OR
     NOT DEAL_II_WITH_P4EST OR
diff --git a/Distributed_LDG_Method/Functions.cc b/Distributed_LDG_Method/Functions.cc
index 4468204..6626741 100644
--- a/Distributed_LDG_Method/Functions.cc
+++ b/Distributed_LDG_Method/Functions.cc
@@ -50,7 +50,7 @@ public:
   {}

   virtual void vector_value(const Point &p,
-                            Vector &valuess) const override;
+                            Vector &values) const override;
 };

 template
diff --git a/Distributed_LDG_Method/LDGPoisson.cc b/Distributed_LDG_Method/LDGPoisson.cc
index 7f5cd39..310717a 100644
--- a/Distributed_LDG_Method/LDGPoisson.cc
+++ b/Distributed_LDG_Method/LDGPoisson.cc
@@ -64,7 +64,7 @@
 #include

-// The functions class contains all the defintions of the functions we
+// The functions class contains all the definitions of the functions we
 // will use, i.e. the right hand side function, the boundary conditions
 // and the test functions.
 #include "Functions.cc"

@@ -75,7 +75,7 @@ using namespace dealii;
 // Here is the main class for the Local Discontinuous Galerkin method
 // applied to Poisson's equation, we won't explain much of the
 // the class and method declarations, but dive deeper into describing the
-// functions when they are defined. The only thing I will menion
+// functions when they are defined. The only thing I will mention
 // about the class declaration is that this is where I labeled
 // the different types of boundaries using enums.
 template
@@ -170,7 +170,7 @@ private:
 // @sect4{Class constructor and destructor}
 // The constructor and destructor for this class is very much like the
 // like those for step-40. The difference being that we'll be passing
-// in an integer, degree, which tells us the maxiumum order
+// in an integer, degree, which tells us the maximum order
 // of the polynomial to use as well as n_refine which is the
 // global number of times we refine our mesh. The other main differences
 // are that we use a FESystem object for our choice of basis
@@ -183,7 +183,7 @@ private:
 // FE_DGQ(degree), 1)
 //
 //
-// which tells us that the basis functions contain discontinous polynomials
+// which tells us that the basis functions contain discontinuous polynomials
 // of order degree in each of the dim dimensions
 // for the vector field. For the scalar unknown we
 // use a discontinuous polynomial of the order degree.
@@ -191,7 +191,7 @@ private:
 // as well as its gradient, just like the mixed finite element method.
 // However, unlike the mixed method, the LDG method uses discontinuous
 // polynomials to approximate both variables.
-// The other difference bewteen our constructor and that of step-40 is that
+// The other difference between our constructor and that of step-40 is that
 // we all instantiate our linear solver in the constructor definition.
 template
 LDGPoissonProblem::
@@ -253,7 +253,7 @@ make_grid()
   // the domain. This was just to show that
   // the LDG method is working with local
   // refinement and discussions on building
-  // more realistic refinement stategies are
+  // more realistic refinement strategies are
   // discussed elsewhere in the deal.ii
   // documentation.
   for (; cell != endc; ++cell)
@@ -274,7 +274,7 @@ make_grid()
   // type, i.e. Dirichlet or Neumann,
   // we loop over all the cells in the mesh and then over
   // all the faces of each cell. We then have to figure out
-  // which faces are on the bounadry and set all faces
+  // which faces are on the boundary and set all faces
   // on the boundary to have
   // boundary_id to be Dirichlet.
   // We remark that one could easily set more complicated
@@ -320,7 +320,7 @@ make_dofs()
   // with a distributed triangulation!
   dof_handler.distribute_dofs(fe);

-  // We now renumber the dofs so that the vector of unkonwn dofs
+  // We now renumber the dofs so that the vector of unknown dofs
   // that we are solving for, locally_relevant_solution,
   // corresponds to a vector of the form,
   //
@@ -332,7 +332,7 @@ make_dofs()
   // matrix and vectors that we will write to.
   const IndexSet &locally_owned_dofs = dof_handler.locally_owned_dofs();

-  // In additon to the locally owned dofs, we also need the the locally
+  // In addition to the locally owned dofs, we also need the locally
   // relevant dofs. These are the dofs that have read access to and we
   // need in order to do computations on our processor, but, that
   // we do not have the ability to write to.
@@ -356,7 +356,7 @@ make_dofs()
   // Just like step-40 we create a dynamic sparsity pattern
   // and distribute it to the processors. Notice how we do not have to
-  // explictly mention that we are using a FESystem for system of
+  // explicitly mention that we are using a FESystem for system of
   // variables instead of a FE_DGQ for a scalar variable
   // or that we are using a discributed DoFHandler. All these specifics
   // are taken care of under the hood by the deal.ii library.
@@ -459,7 +459,7 @@ assemble_system()
   // for evaluating the basis functions
   // on one side of an element face as well as another FEFaceValues object,
   // fe_neighbor_face_values, for evaluating the basis functions
-  // on the opposite side of the face, i.e. on the neighoring element's face.
+  // on the opposite side of the face, i.e. on the neighboring element's face.
   // In addition, we also introduce a FESubfaceValues object,
   // fe_subface_values, that
   // will be used for dealing with faces that have multiple refinement
@@ -495,7 +495,7 @@ assemble_system()
   FullMatrix ve_ue_matrix(dofs_per_cell, dofs_per_cell);
   // As explained in the section on the LDG method we take our test
   // function to be v and multiply it on the left side of our differential
-  // equation that is on u and peform integration by parts as explained in the
+  // equation that is on u and perform integration by parts as explained in the
   // introduction. Using this notation for test and solution function,
   // the matrices below will then stand for:
   //
@@ -727,7 +727,7 @@ assemble_system()
   // At this point we know that this cell and the neighbor
   // of this cell are on the same refinement level and
   // the work to assemble the interior flux matrices
-  // is very much the same as before. Infact it is
+  // is very much the same as before. In fact it is
   // much simpler since we do not have to loop through the
   // subfaces. However, we have to check that we do
   // not compute the same contribution twice. This would
@@ -800,7 +800,7 @@ assemble_system()
   // Now that have looped over all the faces for this
-  // cell and computed as well as disributed the local
+  // cell and computed as well as distributed the local
   // flux matrices to the system_matrix, we
   // can finally distribute the cell's local_matrix
   // and local_vector contribution to the
@@ -812,7 +812,7 @@ assemble_system()
   // the faces on the boundary of the domain contribute
   // to the local_matrix
   // and system_rhs. We could distribute
-  // the local contributions for each component seperately,
+  // the local contributions for each component separately,
   // but writing to the distributed sparse matrix and vector
   // is expensive and want to to minimize the number of times
   // we do so.
@@ -909,7 +909,7 @@ assemble_cell_terms(
 // Here we have the function that builds the local_matrix
 // contribution
 // and local right hand side vector, local_vector
-// for the Dirichlet boundary condtions.
+// for the Dirichlet boundary conditions.
 template
 void
 LDGPoissonProblem::
@@ -981,7 +981,7 @@ assemble_Dirichlet_boundary_terms(
 // @sect4{assemble_Neumann_boundary_terms}
 // Here we have the function that builds the local_matrix
-// and local_vector for the Neumann boundary condtions.
+// and local_vector for the Neumann boundary conditions.
 template
 void
 LDGPoissonProblem::
@@ -1027,7 +1027,7 @@ assemble_Neumann_boundary_terms(
   }

   // We also compute the contribution for the flux for
-  // $\widehat{q}$ on the Neumann bounary which is the
+  // $\widehat{q}$ on the Neumann boundary which is the
   // Neumann boundary condition and enters the right
   // hand side vector as
   //
@@ -1328,17 +1328,17 @@ distribute_local_flux_to_global(
 // As mentioned earlier I used a direct solver to solve
 // the linear system of equations resulting from the LDG
 // method applied to the Poisson equation. One could also
-// use a iterative sovler, however, we then need to use
-// a preconditoner and that was something I did not wanted
+// use an iterative solver, however, we then need to use
+// a preconditioner and that was something I did not want
 // to get into. For information on preconditioners
 // for the LDG Method see this
 //
-// paper. The uses of a direct sovler here is
+// paper. The use of a direct solver here is
 // somewhat of a limitation. The built-in distributed
 // direct solver in Trilinos reduces everything to one
 // processor, solves the system and then distributes
 // everything back out to the other processors. However,
-// by linking to more advanced direct sovlers through
+// by linking to more advanced direct solvers through
 // Trilinos one can accomplish fully distributed computations
 // and not much about the following function calls will
 // change.
@@ -1359,7 +1359,7 @@ solve()
   TrilinosWrappers::MPI::Vector completely_distributed_solution(system_rhs);

-  // Now we can preform the solve on the completeley distributed
+  // Now we can perform the solve on the completely distributed
   // right hand side vector, system matrix and the completely
   // distributed solution.
   solver.solve(system_matrix,
@@ -1382,11 +1382,11 @@ solve()
 }

 // @sect4{output_results}
-// This function deals with the writing of the reuslts in parallel
+// This function deals with the writing of the results in parallel
 // to disk. It is almost exactly the same as
-// in step-40 and we wont go into it. It is noteworthy
+// in step-40 and we won't go into it. It is noteworthy
 // that in step-40 the output is only the scalar solution,
-// while in our situation, we are outputing both the scalar
+// while in our situation, we are outputting both the scalar
 // solution as well as the vector field solution. The only
 // difference between this function and the one in step-40
 // is in the solution_names vector where we have to add
diff --git a/Distributed_LDG_Method/README.md b/Distributed_LDG_Method/README.md
index b56c6d8..fe737f8 100644
--- a/Distributed_LDG_Method/README.md
+++ b/Distributed_LDG_Method/README.md
@@ -21,7 +21,7 @@ I could not use this framework for solving my research problem and I
 needed to write the LDG method from scratch. I thought it would be
 helpful for others to have access to this example that goes through
 writing a discontinuous Galerkin method from
-scatch and also shows how to do it in a distributed setting using the
+scratch and also shows how to do it in a distributed setting using the
 Trilinos library. This example may also be
 of interest to users that wish to use the LDG method, as the method is
 distinctly different from the
@@ -137,7 +137,7 @@ $\textbf{q}$:
 f(\textbf{x}) && \text{in} \ \Omega, \label{eq:Primary} \\
 \textbf{q} \; &= \;
-  -\nabla u && \text{in} \ \Omega, \label{eq:Auxillary} \\
+  -\nabla u && \text{in} \ \Omega, \label{eq:Auxiliary} \\
 \textbf{q} \cdot \textbf{n} \; &= \;
 g_{N}(\textbf{x}) && \text{on} \ \partial \Omega_{N},\\
 u &= g_{D}(\textbf{x}) && \mbox{on}\ \partial \Omega_{D}.
@@ -275,7 +275,7 @@ parameter that is defined as,
 with $\tilde{\sigma}$ being a positive constant. There are other choices of
-penalty values $\sigma$, but the one above produces in appoximations to solutions
+penalty values $\sigma$, but the one above produces approximations to solutions
 that are the most accurate, see this reference for more info.

diff --git a/ElastoplasticTorsion/ElastoplasticTorsion.cc b/ElastoplasticTorsion/ElastoplasticTorsion.cc
index fd8a1f7..9879af5 100644
--- a/ElastoplasticTorsion/ElastoplasticTorsion.cc
+++ b/ElastoplasticTorsion/ElastoplasticTorsion.cc
@@ -500,7 +500,7 @@ namespace nsp

 /*************************************************************/
-// formating
+// formatting
 template
 void ElastoplasticTorsion::format_convergence_tables()
@@ -624,7 +624,7 @@ namespace nsp
   }

   /***************************************************************************************/
-  /* the coeffcients W, W' and G defining the problem.
+  /* the coefficients W, W' and G defining the problem.

      Min_u \int W(|Du|^2) dx

@@ -1054,7 +1054,7 @@ namespace nsp
       }
     if (!done)
       {
-        std::cerr << ", max. no. of iterations reached wiht steplength= "<< alpha
+        std::cerr << ", max. no. of iterations reached with steplength= "<< alpha
                   << ", fcn value= "<< phi_alpha<
 struct NodeAssemblyCopyData
@@ -145,7 +145,7 @@ namespace MFMFE
 // Similarly, two ScratchData classes are defined.
 // One for the assembly part, where we need
 // FEValues, FEFaceValues, Quadrature and storage
-// for the basis fuctions...
+// for the basis functions...
 template
 struct NodeAssemblyScratchData
 {
@@ -367,7 +367,7 @@ namespace MFMFE

 // First, the function that copies local cell contributions to the corresponding nodal
 // matrices and vectors is defined. It places the values obtained from local cell integration
-// into the correct place in a matrix/vector corresponging to a specific node.
+// into the correct place in a matrix/vector corresponding to a specific node.
 template
 void MultipointMixedDarcyProblem::copy_cell_to_node(const DataStructures::NodeAssemblyCopyData &copy_data)
 {
diff --git a/NavierStokes_TRBDF2_DG/navier_stokes_TRBDF2_DG.cc b/NavierStokes_TRBDF2_DG/navier_stokes_TRBDF2_DG.cc
index 2abc653..5f02899 100644
--- a/NavierStokes_TRBDF2_DG/navier_stokes_TRBDF2_DG.cc
+++ b/NavierStokes_TRBDF2_DG/navier_stokes_TRBDF2_DG.cc
@@ -133,7 +133,7 @@ namespace NS_TRBDF2 {
                 std::vector>& computed_quantities) const {
     const unsigned int n_quadrature_points = inputs.solution_values.size();

-    /*--- Check the correctness of all data structres ---*/
+    /*--- Check the correctness of all data structures ---*/
     Assert(inputs.solution_gradients.size() == n_quadrature_points, ExcInternalError());
     Assert(computed_quantities.size() == n_quadrature_points, ExcInternalError());

@@ -232,7 +232,7 @@ namespace NS_TRBDF2 {
   // @sect{ NavierStokesProjectionOperator::NavierStokesProjectionOperator }

-  // The following class sets effecively the weak formulation of the problems for the different stages
+  // The following class sets effectively the weak formulation of the problems for the different stages
   // and for both velocity and pressure.
   // The template parameters are the dimnesion of the problem, the polynomial degree for the pressure,
   // the polynomial degree for the velocity, the number of quadrature points for integrals for the pressure step,
@@ -1356,7 +1356,7 @@ namespace NS_TRBDF2 {
   }

-  // Put together all the previous steps for porjection of pressure gradient. Here we loop only over cells
+  // Put together all the previous steps for projection of pressure gradient. Here we loop only over cells
   //
   template
   void NavierStokesProjectionOperator::
@@ -1390,7 +1390,7 @@ namespace NS_TRBDF2 {
   }

-  // Put together all previous steps. This is the overriden function that effectively performs the
+  // Put together all previous steps. This is the overridden function that effectively performs the
   // matrix-vector multiplication.
   //
   template
@@ -1539,7 +1539,7 @@ namespace NS_TRBDF2 {
       Tensor<1, dim, VectorizedArray> tmp;
       for(unsigned int d = 0; d < dim; ++d)
-        tmp[d] = make_vectorized_array(1.0); /*--- We build the usal vector of ones that we will use as dof value ---*/
+        tmp[d] = make_vectorized_array(1.0); /*--- We build the usual vector of ones that we will use as dof value ---*/

       /*--- Now we loop over faces ---*/
       for(unsigned int face = face_range.first; face < face_range.second; ++face) {
@@ -2424,7 +2424,7 @@ namespace NS_TRBDF2 {
   void NavierStokesProjection::diffusion_step() {
     TimerOutput::Scope t(time_table, "Diffusion step");

-    /*--- We first speicify that we want to deal with velocity dof_handler (index 0, since it is the first one
+    /*--- We first specify that we want to deal with velocity dof_handler (index 0, since it is the first one
           in the 'dof_handlers' vector) ---*/
     const std::vector tmp = {0};
     navier_stokes_matrix.initialize(matrix_free_storage, tmp, tmp);
@@ -2446,7 +2446,7 @@ namespace NS_TRBDF2 {
       u_star = u_extr;
     }

-    /*--- Build the linear solver; in this case we specifiy the maximum number of iterations and residual ---*/
+    /*--- Build the linear solver; in this case we specify the maximum number of iterations and residual ---*/
     SolverControl solver_control(max_its, eps*rhs_u.l2_norm());
     SolverGMRES> gmres(solver_control);

@@ -2587,7 +2587,7 @@ namespace NS_TRBDF2 {

   // The following function is used in determining the maximal nodal difference
-  // between old and current velocity value in order to see if we have reched steady-state.
+  // between old and current velocity value in order to see if we have reached steady-state.
   //
   template
   double NavierStokesProjection::get_maximal_difference_velocity() {
@@ -2662,7 +2662,7 @@ namespace NS_TRBDF2 {
     double local_lift = 0.0;

     /*--- We need to perform a unique loop because the whole stress tensor takes into account contributions of
-          velocity and pressure obviously. However, the two dof_handlers are different, so we neede to create an ad-hoc
+          velocity and pressure obviously. However, the two dof_handlers are different, so we need to create an ad-hoc
           iterator for the pressure that we update manually. It is guaranteed that the cells are visited in the same order
           (see the documentation) ---*/
     auto tmp_cell = dof_handler_pressure.begin_active();
@@ -2779,7 +2779,7 @@ namespace NS_TRBDF2 {
     }
     triangulation.prepare_coarsening_and_refinement();

-    /*--- Now we prepare the object for transfering, basically saving the old quantities using SolutionTransfer.
+    /*--- Now we prepare the object for transferring, basically saving the old quantities using SolutionTransfer.
           Since the 'prepare_for_coarsening_and_refinement' method can be called only once, but we have two vectors
           for dof_handler_velocity, we need to put them in an auxiliary vector. ---*/
     std::vector*> velocities;
diff --git a/Nonlinear_PoroViscoelasticity/CMakeLists.txt b/Nonlinear_PoroViscoelasticity/CMakeLists.txt
index f9ac70b..55ef057 100644
--- a/Nonlinear_PoroViscoelasticity/CMakeLists.txt
+++ b/Nonlinear_PoroViscoelasticity/CMakeLists.txt
@@ -25,7 +25,7 @@ IF(NOT ${deal.II_FOUND})
 ENDIF()

-# Are all dependencies fullfilled?
+# Are all dependencies fulfilled?
 IF(NOT DEAL_II_WITH_MPI OR
    NOT DEAL_II_WITH_TRILINOS OR
    NOT DEAL_II_TRILINOS_WITH_SACADO)
diff --git a/Nonlinear_PoroViscoelasticity/nonlinear-poro-viscoelasticity.cc b/Nonlinear_PoroViscoelasticity/nonlinear-poro-viscoelasticity.cc
index 37a3800..b81cf8a 100644
--- a/Nonlinear_PoroViscoelasticity/nonlinear-poro-viscoelasticity.cc
+++ b/Nonlinear_PoroViscoelasticity/nonlinear-poro-viscoelasticity.cc
@@ -2906,7 +2906,7 @@ namespace NonLinearPoroViscoElasticity
       Vector sum_solid_vol_fraction_vertex(vertex_handler_ref.n_dofs());

       // We need to create a new FE space with a dim dof per node to
-      // be able to ouput data on nodes in vector form
+      // be able to output data on nodes in vector form
       FESystem fe_vertex_vec(FE_Q(1),dim);
       DoFHandler vertex_vec_handler_ref(triangulation);
       vertex_vec_handler_ref.distribute_dofs(fe_vertex_vec);
@@ -3417,7 +3417,7 @@ namespace NonLinearPoroViscoElasticity
       double total_vol_reference = 0.0;
       std::vector> solution_vertices(tracked_vertices_IN.size());

-      //Auxiliar variables needed for mpi processing
+      //Auxiliary variables needed for mpi processing
       Tensor<1,dim> sum_reaction_mpi;
       Tensor<1,dim> sum_reaction_pressure_mpi;
       Tensor<1,dim> sum_reaction_extra_mpi;
diff --git a/Quasi_static_Finite_strain_Compressible_Elasticity/cook_membrane.cc b/Quasi_static_Finite_strain_Compressible_Elasticity/cook_membrane.cc
index a07905d..f289829 100644
--- a/Quasi_static_Finite_strain_Compressible_Elasticity/cook_membrane.cc
+++ b/Quasi_static_Finite_strain_Compressible_Elasticity/cook_membrane.cc
@@ -2274,7 +2274,7 @@ int main (int argc, char *argv[])
     std::cout << "Assembly method: Residual and linearisation computed using AD." << std::endl;

     // Sacado Rad-Fad is not thread-safe, so disable threading.
-    // Parallisation using MPI would be possible though.
+    // Parallelisation using MPI would be possible though.
     Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
diff --git a/Quasi_static_Finite_strain_Quasi_incompressible_ViscoElasticity/CMakeLists.txt b/Quasi_static_Finite_strain_Quasi_incompressible_ViscoElasticity/CMakeLists.txt
index ede7579..3217d8f 100644
--- a/Quasi_static_Finite_strain_Quasi_incompressible_ViscoElasticity/CMakeLists.txt
+++ b/Quasi_static_Finite_strain_Quasi_incompressible_ViscoElasticity/CMakeLists.txt
@@ -5,7 +5,7 @@
 # Set the name of the project and target:
 SET(TARGET "viscoelastic_strip_with_hole")

-# Declare all source files the targest consists of:
+# Declare all source files the target consists of:
 SET(TARGET_SRC
   ${TARGET}.cc
   )
@@ -31,7 +31,7 @@ IF(NOT ${deal.II_FOUND})
 ENDIF()

 #
-# Are all dependencies fullfilled?
+# Are all dependencies fulfilled?
 #
 IF(NOT DEAL_II_WITH_CXX11 OR
    NOT DEAL_II_WITH_MPI OR
diff --git a/Quasi_static_Finite_strain_Quasi_incompressible_ViscoElasticity/viscoelastic_strip_with_hole.cc b/Quasi_static_Finite_strain_Quasi_incompressible_ViscoElasticity/viscoelastic_strip_with_hole.cc
index 58c3009..71f8c09 100644
--- a/Quasi_static_Finite_strain_Quasi_incompressible_ViscoElasticity/viscoelastic_strip_with_hole.cc
+++ b/Quasi_static_Finite_strain_Quasi_incompressible_ViscoElasticity/viscoelastic_strip_with_hole.cc
@@ -1291,7 +1291,7 @@ namespace ViscoElasStripHole
                 tria_2d_not_flat);

     // Attach a manifold to the curved boundary and refine
-    // Note: We can only guarentee that the vertices sit on
+    // Note: We can only guarantee that the vertices sit on
     // the curve, so we must test with their position instead
     // of the cell centre.
     const Point<2> centre_2d (0,0);
@@ -1583,7 +1583,7 @@ namespace ViscoElasStripHole
         }
     }
     Assert(vol_current > 0.0, ExcInternalError());
-    // Sum across all porcessors
+    // Sum across all processors
     dil_L2_error = Utilities::MPI::sum(dil_L2_error,mpi_communicator);
     vol_reference = Utilities::MPI::sum(vol_reference,mpi_communicator);
     vol_current = Utilities::MPI::sum(vol_current,mpi_communicator);
diff --git a/README.md b/README.md
index 0a3d8ea..7d2e57f 100644
--- a/README.md
+++ b/README.md
@@ -17,7 +17,7 @@ as
 It will pick up the code gallery and create
 joint documentation for the tutorial and the code gallery.

-### Maintainance of contributed codes
+### Maintenance of contributed codes

 The examples in the code-gallery are periodically adjusted so that they
 maintain compatibility with recent versions of deal.II. This means
diff --git a/Swift-Hohenberg-Solver/Generalized-Swift-Hohenberg-Solver.cc b/Swift-Hohenberg-Solver/Generalized-Swift-Hohenberg-Solver.cc
index 1fada3d..b7b5b61 100644
--- a/Swift-Hohenberg-Solver/Generalized-Swift-Hohenberg-Solver.cc
+++ b/Swift-Hohenberg-Solver/Generalized-Swift-Hohenberg-Solver.cc
@@ -73,12 +73,12 @@ namespace SwiftHohenbergSolver

-  /** @brief This function warps points on a cyclindrical mesh by cosine wave along the central axis.
+  /** @brief This function warps points on a cylindrical mesh by cosine wave along the central axis.
    * We use this function to generate the "sinusoid" mesh, which is the surface of revolution
    * bounded by the cosine wave.
    * @tparam spacedim This is the dimension of the embedding space, which is where the input point lives
    * @param p This is the input point to be translated.
-   * @return The return as a tranlated point in the same dimensional space. This is the new point on the mesh.
+   * @return The return as a translated point in the same dimensional space. This is the new point on the mesh.
    */
   template
   Point transform_function(const Point&p)
   {
@@ -87,7 +87,7 @@ namespace SwiftHohenbergSolver
     // because we are explicitly referencing the x, y, and z coordinates
     Assert(spacedim == 3, ExcNotImplemented());

-    // Retruns a point where the x-coordinate is unchanged but the y and z coordinates are adjusted
+    // Returns a point where the x-coordinate is unchanged but the y and z coordinates are adjusted
     // by a cos wave of period 20, amplitude .5, and vertical shift 1
     return Point(p(0), p(1)*(1 + .5*std::cos((3.14159/10)*p(0))), p(2)*(1 + .5*std::cos((3.14159/10)*p(0))));
   }
@@ -185,7 +185,7 @@ namespace SwiftHohenbergSolver
   double time;
   /// @brief The amount time is increased each iteration/ the denominator of the discretized time derivative
   double time_step;
-  /// @brief Counts the number of iterations that have ellapsed
+  /// @brief Counts the number of iterations that have elapsed
   unsigned int timestep_number;
   /// @brief Used to compute the time_step: time_step = 1/timestep_denominator
   unsigned int timestep_denominator;
@@ -702,7 +702,7 @@ namespace SwiftHohenbergSolver
   , end_time(end_time)
   {}

-  /** @brief Distrubutes the finite element vectors to each DoF, creates the system matrix, solution, old_solution, and system_rhs vectors,
+  /** @brief Distributes the finite element vectors to each DoF, creates the system matrix, solution, old_solution, and system_rhs vectors,
    * and outputs the number of DoF's to the console.
    * @tparam dim The dimension of the manifold
    * @tparam spacedim The dimension of the ambient space
@@ -905,7 +905,7 @@ namespace SwiftHohenbergSolver
     setup_system();

-    // Counts total time ellapsed
+    // Counts total time elapsed
     time = 0.0;
     // Counts number of iterations
     timestep_number = 0;
@@ -943,7 +943,7 @@ namespace SwiftHohenbergSolver
     const FEValuesExtractors::Scalar u(0);
     const FEValuesExtractors::Scalar v(1);

-    // Loops over the cells to create the system matrix. We do this only once becase the timestep is constant
+    // Loops over the cells to create the system matrix. We do this only once because the timestep is constant
     for(const auto &cell : dof_handler.active_cell_iterators()){
       cell_matrix = 0;
       cell_rhs = 0;
@@ -1131,9 +1131,9 @@ int main()
 {
   using namespace SwiftHohenbergSolver;

-  // An array of mesh types. We itterate over this to allow for longer runs without having to stop the code
+  // An array of mesh types. We iterate over this to allow for longer runs without having to stop the code
   MeshType mesh_types[5] = {HYPERCUBE, CYLINDER, SPHERE, TORUS, SINUSOID};
-  // An array of initial condition types. We itterate this as well, for the same reason
+  // An array of initial condition types. We iterate this as well, for the same reason
   InitialConditionType ic_types[3] = {HOTSPOT, PSUEDORANDOM, RANDOM};

   // Controls how long the code runs
@@ -1157,7 +1157,7 @@ int main()
   try{
     // Switch statement that determines what template parameters are used by the solver object. Template parameters must be known at compile time, so we cannot
-    // pass this as a varible unfortunately. In each case, we create a filename string (named appropriately for the particular case), output to the console what
+    // pass this as a variable unfortunately. In each case, we create a filename string (named appropriately for the particular case), output to the console what
     // we are running, create the solver object, and call run(). Note that for the cylinder, sphere, and sinusoid we decrease the refinement number by 1. This keeps
     // the number of dofs used in these cases comparable to the number of dofs on the 2D hypercube (otherwise the number of dofs is much larger). For the torus, we
     // decrease the refinement number by 2.
@@ -1364,7 +1364,7 @@ int main()
   }
   catch (std::exception &exc)
   {
-    std::cout << "An error occured" << std::endl;
+    std::cout << "An error occurred" << std::endl;
     std::cerr << std::endl << std::endl
               << "----------------------------------------------------"
@@ -1379,7 +1379,7 @@ int main()
   }
   catch (...)
   {
-    std::cout << "Error occured, made it past first catch" << std::endl;
+    std::cout << "Error occurred, made it past first catch" << std::endl;
     std::cerr << std::endl << std::endl
               << "----------------------------------------------------"
diff --git a/Swift-Hohenberg-Solver/README.md b/Swift-Hohenberg-Solver/README.md
index c618855..9d4a77e 100755
--- a/Swift-Hohenberg-Solver/README.md
+++ b/Swift-Hohenberg-Solver/README.md
@@ -15,7 +15,7 @@ wavelength of $2\pi$. We choose $r = 0.3$ because solutions are
 reasonably well behaved for small values of $r$ and $g_1$, but there
 are interesting behaviors that occur when $g_1$ is smaller or larger
 than $r$ in magnitude, so this allows us room to vary $g_1$ and
-explore these behavior. Additionally, we choose $r = 0.3$ because this matches the parameters used by Gurevich in [1]. We chose our parameters to match so that we could compare the output of our program to the results presented in [1], which was useful for validating that our code was functioning properly during the developement process. To summarize, this code solves:
+explore these behaviors. Additionally, we choose $r = 0.3$ because this matches the parameters used by Gurevich in [1]. We chose our parameters to match so that we could compare the output of our program to the results presented in [1], which was useful for validating that our code was functioning properly during the development process. To summarize, this code solves:

 @f{align*}{
 \frac{\partial u}{\partial t} = 0.3u - (1 + \Delta)^2 u + g_1 u^2 - u^3
diff --git a/coupled_laplace_problem/coupled_laplace_problem.cc b/coupled_laplace_problem/coupled_laplace_problem.cc
index 31e1a88..0ec96c4 100644
--- a/coupled_laplace_problem/coupled_laplace_problem.cc
+++ b/coupled_laplace_problem/coupled_laplace_problem.cc
@@ -340,7 +340,7 @@ private:
   const types::boundary_id interface_boundary_id;
   Adapter adapter;

-  // The time-step size delta_t is the acutual time-step size used for all
+  // The time-step size delta_t is the actual time-step size used for all
   // computations. The preCICE time-step size is obtained by preCICE in order to
   // ensure a synchronization at all coupling time steps. The solver time
   // step-size is the desired time-step size of our individual solver. In more
diff --git a/goal_oriented_elastoplasticity/CMakeLists.txt b/goal_oriented_elastoplasticity/CMakeLists.txt
index c3d7bcc..af4e5c1 100644
--- a/goal_oriented_elastoplasticity/CMakeLists.txt
+++ b/goal_oriented_elastoplasticity/CMakeLists.txt
@@ -30,7 +30,7 @@ IF(NOT ${deal.II_FOUND})
 ENDIF()

 #
-# Are all dependencies fullfilled?
+# Are all dependencies fulfilled?
 #
 IF( NOT DEAL_II_WITH_MPI OR
     NOT DEAL_II_WITH_P4EST OR
diff --git a/goal_oriented_elastoplasticity/elastoplastic.cc b/goal_oriented_elastoplasticity/elastoplastic.cc
index 2f7e78f..a8dda19 100644
--- a/goal_oriented_elastoplasticity/elastoplastic.cc
+++ b/goal_oriented_elastoplasticity/elastoplastic.cc
@@ -2214,9 +2214,9 @@ namespace ElastoPlastic
                   Vector &rhs) const
   {
     // Assemble right hand side of the dual problem when the quantity of interest is
-    // a nonlinear functinoal. In this case, the QoI should be linearized which depends
+    // a nonlinear functional. In this case, the QoI should be linearized which depends
     // on the solution of the primal problem.
-    // The extracter of the linearized QoI functional is the gradient of the the original
+    // The extractor of the linearized QoI functional is the gradient of the original
     // QoI functional with the primal solution values.

     AssertThrow (dim >= 2, ExcNotImplemented());
@@ -5447,7 +5447,7 @@ namespace ElastoPlastic
   ElastoPlasticProblem::refine_grid ()
   {
     // ---------------------------------------------------------------
-    // Make a field variable for history varibales to be able to
+    // Make a field variable for history variables to be able to
     // transfer the data to the quadrature points of the new mesh
     FE_DGQ history_fe (1);
     DoFHandler history_dof_handler (triangulation);
diff --git a/goal_oriented_elastoplasticity/readme.md b/goal_oriented_elastoplasticity/readme.md
index 1e28c83..ca7cc4c 100644
--- a/goal_oriented_elastoplasticity/readme.md
+++ b/goal_oriented_elastoplasticity/readme.md
@@ -70,7 +70,7 @@ set error estimation strategy [can be set kelly_error or
                                 information)]
 set maximum relative error    [set a criterion value for
-                               perfoming the mesh adaptivity]
+                               performing the mesh adaptivity]
 set output directory          [determine a directory to save the
                                 output results]
diff --git a/nonlinear-heat_transfer_with_AD_NOX/README.md b/nonlinear-heat_transfer_with_AD_NOX/README.md
index 9a8a8fe..819b9bf 100644
--- a/nonlinear-heat_transfer_with_AD_NOX/README.md
+++ b/nonlinear-heat_transfer_with_AD_NOX/README.md
@@ -13,7 +13,7 @@ The source codes are there in the `source/` folder. Note that the geometry and i

 ## Documentation

-In this example, we solve a simple transient nonlinear heat transfer equation. The nonlinearity is due to the temperature dependence of the thermal conductivity. Two main aspects covered by this example are (a) it develops the residual and the jacobian using automatic differentiation and (b) solves the nonlinear equations using TRILINOS NOX. The actual code contains the comments which will explain how these aspects are executed. Here, we give the full derivation and set up the equations. We also provide explanations to some of the functions important for this applicaiton.
+In this example, we solve a simple transient nonlinear heat transfer equation. The nonlinearity is due to the temperature dependence of the thermal conductivity. Two main aspects covered by this example are (a) it develops the residual and the jacobian using automatic differentiation and (b) solves the nonlinear equations using TRILINOS NOX. The actual code contains the comments which will explain how these aspects are executed. Here, we give the full derivation and set up the equations. We also provide explanations to some of the functions important for this application.
 ### Strong form
@@ -685,7 +685,7 @@ The variable `present_solution` is assigned the value of `converged_solution` fr
 ### Results

-The results are essentially the time evolution of the temperature throughout the domain. The first of the pictures below shows the temperature distribution at the final step, i.e. at time $t=5$. This should be very similar to the figure at the bottom on the page [here](https://www.mathworks.com/help/pde/ug/heat-transfer-problem-with-temperature-dependent-properties.html). We also plot the time evolution of the temperature at a point close to the right edge of the domain indicated by the small magenta dot (close to $(0.49, 0.12)$) in the second of the pictures below. This is also simlar to the second figure at the [bottom of this page](https://www.mathworks.com/help/pde/ug/heat-transfer-problem-with-temperature-dependent-properties.html). There could be minor differences due to the choice of the point. Further, note that, we have plotted in the second of the pictures below the temperature as a function of time steps instead of time. Since the $\Delta t$ chosen is 0.1, 50 steps maps to $t=5$ as in the link.
+The results are essentially the time evolution of the temperature throughout the domain. The first of the pictures below shows the temperature distribution at the final step, i.e. at time $t=5$. This should be very similar to the figure at the bottom on the page [here](https://www.mathworks.com/help/pde/ug/heat-transfer-problem-with-temperature-dependent-properties.html). We also plot the time evolution of the temperature at a point close to the right edge of the domain indicated by the small magenta dot (close to $(0.49, 0.12)$) in the second of the pictures below. This is also similar to the second figure at the [bottom of this page](https://www.mathworks.com/help/pde/ug/heat-transfer-problem-with-temperature-dependent-properties.html). There could be minor differences due to the choice of the point. Further, note that, we have plotted in the second of the pictures below the temperature as a function of time steps instead of time. Since the $\Delta t$ chosen is 0.1, 50 steps maps to $t=5$ as in the link.

 ![image](./doc/Images/contour.png)
diff --git a/nonlinear-heat_transfer_with_AD_NOX/include/nonlinear_heat.h b/nonlinear-heat_transfer_with_AD_NOX/include/nonlinear_heat.h
index 21502a9..05fb169 100755
--- a/nonlinear-heat_transfer_with_AD_NOX/include/nonlinear_heat.h
+++ b/nonlinear-heat_transfer_with_AD_NOX/include/nonlinear_heat.h
@@ -118,7 +118,7 @@ class Initialcondition : public Function<2>
 public:
   Initialcondition(): Function<2>(1) {}

-  // Returns the intitial values.
+  // Returns the initial values.
   virtual double value(const Point<2> &p, const unsigned int component =0) const override;
 };
diff --git a/nonlinear-heat_transfer_with_AD_NOX/source/compute_residual.cc b/nonlinear-heat_transfer_with_AD_NOX/source/compute_residual.cc
index 7e65f85..56bfc64 100644
--- a/nonlinear-heat_transfer_with_AD_NOX/source/compute_residual.cc
+++ b/nonlinear-heat_transfer_with_AD_NOX/source/compute_residual.cc
@@ -97,7 +97,7 @@ void nonlinear_heat::compute_residual(const Vector & evaluation_point, V
   fe_values[t].get_function_values(converged_solution, consol);
   fe_values[t].get_function_gradients(converged_solution, consol_grad);

   /**
-   * residual_ad is defined and initalized in its symbolic form.
+   * residual_ad is defined and initialized in its symbolic form.
    */
   std::vector residual_ad(n_dependent_variables, ADNumberType(0.0));
diff --git a/nonlinear-heat_transfer_with_AD_NOX/source/initial_conditions.cc b/nonlinear-heat_transfer_with_AD_NOX/source/initial_conditions.cc
index 5507942..3191280 100644
--- a/nonlinear-heat_transfer_with_AD_NOX/source/initial_conditions.cc
+++ b/nonlinear-heat_transfer_with_AD_NOX/source/initial_conditions.cc
@@ -8,7 +8,7 @@ double Initialcondition::value(const Point<2> & /*p*/,
                                const unsigned int /*comp*/) const
 {
   /**
-   * In the current case, we asume that the initial conditions are zero everywhere.
+   * In the current case, we assume that the initial conditions are zero everywhere.
    */
   return 0.0;
 }
diff --git a/parallel_in_time/src/BraidFuncs.cc b/parallel_in_time/src/BraidFuncs.cc
index 08ef307..b5aea5f 100644
--- a/parallel_in_time/src/BraidFuncs.cc
+++ b/parallel_in_time/src/BraidFuncs.cc
@@ -187,7 +187,7 @@ my_BufSize(braid_App app,
 // This function packs a linear buffer with data so that the buffer
 // may be sent to another processor via MPI. The buffer is cast to
 // a type we can work with. The first element of the buffer is the
-// size of the buffer. Then we iterate over soltuion vector u and
+// size of the buffer. Then we iterate over solution vector u and
 // fill the buffer with our solution data. Finally we tell XBraid
 // how much data we wrote.
 int
@@ -210,7 +210,7 @@ my_BufPack(braid_App app,
   return 0;
 }

-// This function unpacks a buffer that was recieved from a different
+// This function unpacks a buffer that was received from a different
 // processor via MPI. The size of the buffer is read from the first
 // element, then we iterate over the size of the buffer and fill
 // the values of solution vector u with the data in the buffer.
diff --git a/parallel_in_time/src/BraidFuncs.hh b/parallel_in_time/src/BraidFuncs.hh
index 16fbab0..fd7d9b1 100644
--- a/parallel_in_time/src/BraidFuncs.hh
+++ b/parallel_in_time/src/BraidFuncs.hh
@@ -35,7 +35,7 @@

 // This struct contains all data that changes with time. For now
 // this is just the solution data. When doing AMR this should
-// probably include the triangulization, the sparsity patter,
+// probably include the triangulation, the sparsity pattern,
 // constraints, etc.
 /**
  * \brief Struct that contains the deal.ii vector.
diff --git a/parallel_in_time/src/Utilities.cc b/parallel_in_time/src/Utilities.cc
index 99c8a40..275ae5a 100644
--- a/parallel_in_time/src/Utilities.cc
+++ b/parallel_in_time/src/Utilities.cc
@@ -68,7 +68,7 @@ std::ostream& pout()
       s_pout_basename = "pout" ;
       s_pout_init = true ;
     }
-  // if MPI not initialized, we cant open the file so return cout
+  // if MPI not initialized, we can't open the file so return cout
   if ( ! flag_i || flag_f)
     {
       return std::cout; // MPI hasn't been started yet, or has ended....
diff --git a/time_dependent_navier_stokes/Readme.md b/time_dependent_navier_stokes/Readme.md
index 75d4b10..1eb012c 100644
--- a/time_dependent_navier_stokes/Readme.md
+++ b/time_dependent_navier_stokes/Readme.md
@@ -31,7 +31,7 @@ and $c(u;u, v)$ is the convection term:
 c(u;u, v) = \int_{\Omega} (u \cdot \nabla u) \cdot v d\Omega
 @f}

-Substracting $m(u^n, v) + \Delta{t}a((u^n, p^n), (v, q))$ from both sides of the equation,
+Subtracting $m(u^n, v) + \Delta{t}a((u^n, p^n), (v, q))$ from both sides of the equation,
 we have the incremental form:
 @f{eqnarray*}
 m(\Delta{u}, v) + \Delta{t}\cdot a((\Delta{u}, \Delta{p}), (v, q)) = \Delta{t}(-a(u^n, p^n), (q, v)) - \Delta{t}c(u^n;u^n, v)
@@ -62,7 +62,7 @@ The system we want to solve can be written in matrix form:
 \right)
 @f}

-#### Grad-Div stablization ####
+#### Grad-Div stabilization ####
 Similar to step-57, we add $\gamma B^T M_p^{-1} B$ to the upper left block of the
 system. This is a term that is consistent, i.e., the corresponding operators
 applied to the exact solution would
@@ -95,7 +95,7 @@ With this, the system becomes:
 @f}
 where $\tilde{A} = A + \gamma B^T M_p^{-1} B$.

-A detailed explanation of the Grad-Div stablization can be found in [1].
+A detailed explanation of the Grad-Div stabilization can be found in [1].

 #### Block preconditioner ####
diff --git a/time_dependent_navier_stokes/doc/tooltip b/time_dependent_navier_stokes/doc/tooltip
index ceecfc9..bf7e33e 100644
--- a/time_dependent_navier_stokes/doc/tooltip
+++ b/time_dependent_navier_stokes/doc/tooltip
@@ -1 +1 @@
-Solving time-dependent incompressible Navier-Stokes problem in parallel with Grad-Div stablization using IMEX scheme.
+Solving time-dependent incompressible Navier-Stokes problem in parallel with Grad-Div stabilization using IMEX scheme.
diff --git a/time_dependent_navier_stokes/time_dependent_navier_stokes.cc b/time_dependent_navier_stokes/time_dependent_navier_stokes.cc
index a7f0738..b2e3f13 100644
--- a/time_dependent_navier_stokes/time_dependent_navier_stokes.cc
+++ b/time_dependent_navier_stokes/time_dependent_navier_stokes.cc
@@ -172,7 +172,7 @@ namespace fluid
       // refinement:
       GridGenerator::flatten_triangulation(middle, tmp2);

-      // Left domain is requred in 3d only.
+      // Left domain is required in 3d only.
       if (compute_in_2d)
         {
           GridGenerator::merge_triangulations(tmp2, right, tria);
@@ -554,7 +554,7 @@ namespace fluid
   // The system equation is written in the incremental form, and we treat
   // the convection term explicitly. Therefore the system equation is linear
   // and symmetric, which does not need to be solved with Newton's iteration.
-  // The system is further stablized and preconditioned with Grad-Div method,
+  // The system is further stabilized and preconditioned with Grad-Div method,
   // where GMRES solver is used as the outer solver.
   template
   class InsIMEX
@@ -985,7 +985,7 @@ namespace fluid
     bool apply_nonzero_constraints = (time.get_timestep() == 1);

     // We have to assemble the LHS for the initial two time steps:
     // once using nonzero_constraints, once using zero_constraints,
-    // as well as the steps imediately after mesh refinement.
+    // as well as the steps immediately after mesh refinement.
     bool assemble_system = (time.get_timestep() < 3 || refined);
     refined = false;
     assemble(apply_nonzero_constraints, assemble_system);
diff --git a/two_phase_flow/MultiPhase.cc b/two_phase_flow/MultiPhase.cc
index e8c2a3e..e6530fc 100644
--- a/two_phase_flow/MultiPhase.cc
+++ b/two_phase_flow/MultiPhase.cc
@@ -506,7 +506,7 @@ void MultiPhase::run()
   //////////////////////////////////////
   cK = 1.0;
   cE = 1.0;
-  sharpness_integer=10; //this will be multipled by min_h
+  sharpness_integer=10; //this will be multiplied by min_h
   //TRANSPORT_TIME_INTEGRATION=FORWARD_EULER;
   TRANSPORT_TIME_INTEGRATION=SSP33;
   //ALGORITHM = "MPP_u1";
diff --git a/two_phase_flow/TestLevelSet.cc b/two_phase_flow/TestLevelSet.cc
index 861b750..34a8ed8 100644
--- a/two_phase_flow/TestLevelSet.cc
+++ b/two_phase_flow/TestLevelSet.cc
@@ -499,7 +499,7 @@ void TestLevelSet::run()
   //////////////////////////////////////
   cK = 1.0; // compression constant
   cE = 1.0; // entropy viscosity constant
-  sharpness_integer=1; //this will be multipled by min_h
+  sharpness_integer=1; //this will be multiplied by min_h
   //TRANSPORT_TIME_INTEGRATION=FORWARD_EULER;
   TRANSPORT_TIME_INTEGRATION=SSP33;
   //ALGORITHM = "MPP_u1";
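
For reference, fixes like the ones in this patch come from the codespell command-line tool named in the subject line. The patch does not record the exact invocation the author used, so the sketch below is only a plausible way to reproduce such a sweep; the skip pattern is an assumption, not taken from the commit:

    # Dry run: report suspected misspellings without modifying any files
    codespell .

    # Apply the suggested corrections in place, skipping version-control metadata
    codespell --write-changes --skip=".git" .

A sweep like this is dictionary-based, so misspelled identifiers that compile (e.g. PSUEDORANDOM above, which cannot be renamed without touching every use) and grammar-level slips such as "want to to" survive the run and still need manual review.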