From 56d33bc675c052d84a771700d6c35e2696f4e4bf Mon Sep 17 00:00:00 2001
From: David Wells
Date: Mon, 27 Aug 2018 13:56:17 -0400
Subject: [PATCH] step-37: minor typesetting and typo fixes.

---
 examples/step-37/doc/results.dox | 64 +++++++++++++++++---------------
 examples/step-37/step-37.cc      | 15 ++++----
 2 files changed, 43 insertions(+), 36 deletions(-)

diff --git a/examples/step-37/doc/results.dox b/examples/step-37/doc/results.dox
index e35e05a950..baf5b54ed5 100644
--- a/examples/step-37/doc/results.dox
+++ b/examples/step-37/doc/results.dox
@@ -58,7 +58,7 @@ next.
 The code is also very efficient in terms of storage. Around 2-4 million
 degrees of freedom fit into 1 GB of memory, see also the MPI results below.
 An interesting fact is that solving one linear system is cheaper than the
 setup, despite not building a matrix (approximately half of which is spent in the
-DoFHandler::distribute_dofs() and DoFHandler::distributed_mg_dofs()
+DoFHandler::distribute_dofs() and DoFHandler::distribute_mg_dofs()
 calls). This shows the high efficiency of this approach, but also that the
 deal.II data structures are quite expensive to set up and the setup cost must
 be amortized over several system solves.
@@ -427,11 +427,12 @@ temporary vector and
 LinearAlgebra::distributed::Vector::copy_locally_owned_data as shown
 below.
 @code
-IndexSet locally_relevant_set;
-DoFTools::extract_locally_relevant_dofs (dof_handler,
-                                         locally_relevant_set);
+IndexSet locally_relevant_dofs;
+DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs);
 LinearAlgebra::distributed::Vector<double> copy_vec(solution);
-solution.reinit(locally_owned_dofs, locally_relevant_set, mpi_communicator);
+solution.reinit(dof_handler.locally_owned_dofs(),
+                locally_relevant_dofs,
+                triangulation.get_communicator());
 solution.copy_locally_owned_data_from(copy_vec);
 constraints.distribute(solution);
 solution.update_ghost_values();
@@ -483,7 +484,7 @@ constrained by Dirichlet conditions. In the implementation in deal.II, the
 integrals $(\nabla \varphi_i,\nabla \varphi_j)_\Omega$ on the right hand side
 are already contained in the local matrix contributions we assemble on each
 cell. When using
-AffineConstraints::distributed_local_to_global() as first described in the
+AffineConstraints::distribute_local_to_global() as first described in the
 step-6 and step-7 tutorial programs, we can account for the contribution of
 inhomogeneous constraints j by multiplying the columns j and rows i of the
 local matrix according to the integrals $(\varphi_i,
@@ -522,12 +523,14 @@ where we only set the Dirichlet values:
 @code
   // interpolate boundary values on vector solution
   std::map<types::global_dof_index, double> boundary_values;
-  VectorTools::interpolate_boundary_values(mapping, dof_handler, 0,
-                                           BoundaryValueFunction(), boundary_values);
-  for (typename std::map<types::global_dof_index, double>::iterator it = boundary_values.begin();
-       it != boundary_values.end(); ++it)
-    if (solution.locally_owned_elements().is_element(it->first))
-      solution(it->first) = it->second;
+  VectorTools::interpolate_boundary_values(mapping,
+                                           dof_handler,
+                                           0,
+                                           BoundaryValueFunction(),
+                                           boundary_values);
+  for (const std::pair<const types::global_dof_index, double> &pair : boundary_values)
+    if (solution.locally_owned_elements().is_element(pair.first))
+      solution(pair.first) = pair.second;
 @endcode
 or, equivalently, if we already had filled the inhomogeneous constraints into
 an AffineConstraints object,
@@ -556,22 +559,25 @@ values as follows, assuming the Dirichlet values have been interpolated into
 the object @p constraints:
 @code
 template <int dim>
-void LaplaceProblem<dim>::assemble_rhs ()
+void LaplaceProblem<dim>::assemble_rhs()
 {
   solution = 0;
   constraints.distribute(solution);
+  solution.update_ghost_values();
   system_rhs = 0;
 
-  solution.update_ghost_values();
-  FEEvaluation<dim,degree_finite_element> phi(*system_matrix.get_matrix_free());
-  for (unsigned int cell=0; cell<system_matrix.get_matrix_free()->n_macro_cells(); ++cell)
+  const Table<2, VectorizedArray<double>> &coefficient = system_matrix.get_coefficient();
+  FEEvaluation<dim, degree_finite_element> phi(*system_matrix.get_matrix_free());
+  for (unsigned int cell = 0;
+       cell < system_matrix.get_matrix_free()->n_macro_cells();
+       ++cell)
     {
       phi.reinit(cell);
       phi.read_dof_values_plain(solution);
      phi.evaluate(false, true);
-      for (unsigned int q=0; q<phi.n_q_points; ++q)
+      for (unsigned int q = 0; q < phi.n_q_points; ++q)
        {
          phi.submit_gradient(-coefficient(cell, q) * phi.get_gradient(q), q);
          phi.submit_value(make_vectorized_array<double>(1.0), q);
        }
      phi.integrate(true, true);
@@ -619,10 +625,10 @@ A second alternative to get the right hand side that re-uses the @p
 LaplaceOperator::apply_add() function is to instead add a second constraint
 matrix that skips Dirichlet constraints on the read operation. To do this, we
 initialize a MatrixFree object in a more extended way with two different
-DoFHandler-AffineConstraints combinations. The 0-th component includes
-Dirichlet conditions for solving the linear system, whereas 1-st component
-does read also from Dirichlet-constrained degrees of freedom for the right
-hand side assembly:
+DoFHandler - AffineConstraints combinations. The zeroth component includes
+Dirichlet conditions for solving the linear system, whereas first component
+also reads from Dirichlet-constrained degrees of freedom for the right hand
+side assembly:
 @code
 constraints.clear();
@@ -664,12 +670,12 @@ instance @p laplace_operator that gets used in the linear solver. Alongside,
 we create a second @p LaplaceOperator object that fills the right hand side:
 @code
 template <int dim>
-void LaplaceProblem<dim>::assemble_rhs ()
+void LaplaceProblem<dim>::assemble_rhs()
 {
-  LaplaceOperator<dim,degree_finite_element,double> laplace_operator_inhomogenous;
+  LaplaceOperator<dim, degree_finite_element, double> laplace_operator_inhomogenous;
   // select first block in matrix_free to use constraints_without_dirichlet
-  std::vector<unsigned int> selected_block {1};
+  std::vector<unsigned int> selected_block{1};
   laplace_operator_inhomogeneous.initialize(matrix_free, selected_block);
   solution = 0;
   constraints.distribute(solution);
@@ -680,7 +686,7 @@ void LaplaceProblem<dim>::assemble_rhs ()
 }
 @endcode
 
-Instead of adding a second DoFHandler-AffineConstraints pair to the same
-MatrixFree::reinit() call, one could of course also construct an independent
-MatrixFree object that feeds the second @p LaplaceOperator instance, see also
-the discussion in MatrixFreeOperators::Base.
+Instead of adding a second DoFHandler - AffineConstraints pair to
+the same MatrixFree::reinit() call, one could of course also construct an
+independent MatrixFree object that feeds the second @p LaplaceOperator instance,
+see also the discussion in MatrixFreeOperators::Base.
diff --git a/examples/step-37/step-37.cc b/examples/step-37/step-37.cc
index 6d20904472..facd794e4f 100644
--- a/examples/step-37/step-37.cc
+++ b/examples/step-37/step-37.cc
@@ -427,7 +427,8 @@ namespace Step37
   // @code
   // src.update_ghost_values();
   // local_apply(*this->data, dst, src, std::make_pair(0U,
-  //             data.n_macro_cells())); dst.compress(VectorOperation::add);
+  //             data.n_macro_cells()));
+  // dst.compress(VectorOperation::add);
   // @endcode
   //
   // Here, the two calls update_ghost_values() and compress() perform the data
@@ -1094,9 +1095,9 @@ namespace Step37
 
     constraints.distribute(solution);
 
-    pcout << "Time solve (" << solver_control.last_step()
-          << " iterations) (CPU/wall) " << time.cpu_time() << "s/"
-          << time.wall_time() << "s\n";
+    pcout << "Time solve (" << solver_control.last_step() << " iterations)"
+          << (solver_control.last_step() < 10 ? "  " : " ") << "(CPU/wall) "
+          << time.cpu_time() << "s/" << time.wall_time() << "s\n";
   }
 
 
@@ -1113,9 +1114,9 @@ namespace Step37
   // DataOutBase::VtkFlags::best_speed lowers this to only one fourth the time
   // of the linear solve.
   //
-  // We disable the output when the mesh gets too large. Note that a variant
-  // of program has been run on hundreds of thousands MPI ranks with as many
-  // as 100 billion grid cells, which is not directly accessible to classical
+  // We disable the output when the mesh gets too large. A variant of this
+  // program has been run on hundreds of thousands MPI ranks with as many as
+  // 100 billion grid cells, which is not directly accessible to classical
   // visualization tools.
   template <int dim>
   void LaplaceProblem<dim>::output_results(const unsigned int cycle) const
-- 
2.39.5
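All of the right-hand-side variants touched by the results.dox changes above implement the
same algebraic idea: the inhomogeneous Dirichlet values u_D are lifted into the right hand
side, the homogeneous problem A u_0 = f - A u_D is solved for the unconstrained unknowns,
and the full solution is recovered as u = u_0 + u_D. The snippet below is a self-contained
sketch of only that identity on a tiny 1D finite-difference Laplacian; it is an
illustration, not deal.II code, and every name and number in it is made up for the example.

@code
// Minimal sketch: solve -u'' = 1 on [0,1] with u(0)=1, u(1)=2 by moving the
// Dirichlet data to the right hand side (b = f - A u_D) and solving the
// homogeneous problem for the interior unknowns only.
#include <cstdio>
#include <vector>

int main()
{
  const unsigned int n     = 9;             // grid points on [0,1]
  const double       h     = 1.0 / (n - 1);
  const double       left  = 1.0;           // inhomogeneous Dirichlet values
  const double       right = 2.0;

  // 1D finite-difference Laplacian and constant right hand side f = 1.
  std::vector<std::vector<double>> A(n, std::vector<double>(n, 0.0));
  std::vector<double> f(n, 1.0), u_D(n, 0.0), b(n, 0.0), u(n, 0.0);
  for (unsigned int i = 1; i + 1 < n; ++i)
    {
      A[i][i - 1] = -1.0 / (h * h);
      A[i][i]     = 2.0 / (h * h);
      A[i][i + 1] = -1.0 / (h * h);
    }
  u_D[0]     = left;
  u_D[n - 1] = right;

  // Lift the constraints: b = f - A u_D on the unconstrained rows.
  for (unsigned int i = 1; i + 1 < n; ++i)
    {
      b[i] = f[i];
      for (unsigned int j = 0; j < n; ++j)
        b[i] -= A[i][j] * u_D[j];
    }

  // Gauss-Seidel sweeps on the interior (homogeneous) problem.
  for (unsigned int sweep = 0; sweep < 1000; ++sweep)
    for (unsigned int i = 1; i + 1 < n; ++i)
      {
        double r = b[i];
        for (unsigned int j = 1; j + 1 < n; ++j)
          if (j != i)
            r -= A[i][j] * u[j];
        u[i] = r / A[i][i];
      }

  // Add the Dirichlet values back to obtain the full solution.
  for (unsigned int i = 0; i < n; ++i)
    u[i] += u_D[i];
  for (unsigned int i = 0; i < n; ++i)
    std::printf("u(%.3f) = %.6f\n", i * h, u[i]);
  return 0;
}
@endcode

The variants discussed in the patch differ only in how A u_D is evaluated within the
matrix-free framework (reading Dirichlet-constrained entries via read_dof_values_plain(),
or through a second AffineConstraints object), not in this underlying algebra.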