From a563e91a05aa1000aeb7976eb88a28c4dfba6cb5 Mon Sep 17 00:00:00 2001
From: Matthias Maier
Date: Wed, 26 Feb 2020 10:41:15 -0600
Subject: [PATCH] Address some more review comments

---
 examples/step-69/doc/intro.dox | 20 ++--
 examples/step-69/doc/results.dox | 38 +++----
 examples/step-69/step-69.cc | 171 ++++++++++++++-----------------
 3 files changed, 107 insertions(+), 122 deletions(-)

diff --git a/examples/step-69/doc/intro.dox b/examples/step-69/doc/intro.dox
index e654c5c0ac..0c9544bc4d 100644
--- a/examples/step-69/doc/intro.dox
+++ b/examples/step-69/doc/intro.dox
@@ -237,16 +237,16 @@ spaces $\pmb{\mathbb{V}}_h := \{\mathbb{V}_h\}^{d+2}$. Let $\mathbf{u}_h
 spaces in our notation. Vector-valued finite element spaces are natural
 for variational formulations of PDE systems (e.g. Navier-Stokes). In
 such context, the interactions that have to be computed describe
-"interactions between DOFs": with proper renumbering of the
-vector-valued DoFHandler (i.e. initialized with an FESystem) it is possible to
-compute the block-matrices (required in order to advance the solution) with
-relative ease. However, in the context of time-explicit collocation-like
-schemes (such as finite differences and/or the scheme presented in this
-tutorial): the interactions that have to be computed can be informally described
-as "interactions between nodes" (not between DOFs). In addition we don't need
-to compute matrices in order to advance the solution. This leaves very
-little reasons to use vector-valued finite element spaces both in
-theory and/or practice.
+interactions between DOFs: with proper renumbering of the
+vector-valued DoFHandler (i.e. initialized with an FESystem) it is possible
+to compute the block-matrices (required in order to advance the solution)
+with relative ease. However, the interactions that have to be computed in
+the context of time-explicit collocation-type schemes (such as finite
+differences and/or the scheme presented in this tutorial) are
+better described as interactions between nodes (not between DOFs).
+In addition, in our case we do not solve a linear equation in order to
+advance the solution. This leaves very little reason to use vector-valued
+finite element spaces in either theory or practice.

 We will use the usual Lagrange finite elements: let $\{\mathbf{x}_i\}_{i \in
 \mathcal{V}}$ denote the set of all support points (see @ref GlossSupport "this
diff --git a/examples/step-69/doc/results.dox b/examples/step-69/doc/results.dox
index b72a09cd3a..3123f6987a 100644
--- a/examples/step-69/doc/results.dox
+++ b/examples/step-69/doc/results.dox
@@ -120,26 +120,28 @@ So, we give the first-order method a second chance and run it with about

 TimeLoop::output(t = 4.00006, checkpoint = 1)

+[...]
+
 +------------------------------------------------------------------------+------------+------------+
-| Total CPU time elapsed since start | 3.34e+05s | |
+| Total wallclock time elapsed since start | 6.75e+03s | |
 | | | |
-| Section | no. calls | CPU time | % of total |
+| Section | no. calls | wall time | % of total |
 +------------------------------------------------------------+-----------+------------+------------+
-| discretization - setup | 1 | 1.96s | 0% |
-| offline_data - assemble lumped mass matrix, and c_ij | 1 | 10.6s | 0% |
-| offline_data - compute |c_ij|, and n_ij | 1 | 0.208s | 0% |
-| offline_data - create sparsity pattern and set up matrices | 1 | 0.38s | 0% |
-| offline_data - distribute dofs | 1 | 1.36s | 0% |
-| offline_data - fix slip boundary c_ij | 1 | 2.52s | 0% |
-| schlieren_postprocessor - compute schlieren plot | 201 | 24.3s | 0% |
-| schlieren_postprocessor - prepare scratch space | 1 | 0.008s | 0% |
-| time_loop - setup scratch space | 1 | 1.48s | 0% |
-| time_loop - stalled output | 200 | 0.004s | 0% |
-| time_step - 1 compute d_ij | 70216 | 5.79e+04s | 17% |
-| time_step - 2 compute d_ii, and tau_max | 70216 | 5.78e+03s | 1.7% |
-| time_step - 3 perform update | 70216 | 2.51e+04s | 7.5% |
-| time_step - 4 fix boundary states | 70216 | 56.8s | 0% |
-| time_step - prepare scratch space | 1 | 0.028s | 0% |
+| discretization - setup | 1 | 1.97s | 0% |
+| offline_data - assemble lumped mass matrix, and c_ij | 1 | 1.19s | 0% |
+| offline_data - compute |c_ij|, and n_ij | 1 | 0.0172s | 0% |
+| offline_data - create sparsity pattern and set up matrices | 1 | 0.413s | 0% |
+| offline_data - distribute dofs | 1 | 1.05s | 0% |
+| offline_data - fix slip boundary c_ij | 1 | 0.252s | 0% |
+| schlieren_postprocessor - compute schlieren plot | 201 | 1.82s | 0% |
+| schlieren_postprocessor - prepare scratch space | 1 | 0.000497s | 0% |
+| time_loop - setup scratch space | 1 | 1.45s | 0% |
+| time_loop - stalled output | 200 | 0.00342s | 0% |
+| time_step - 1 compute d_ij | 70216 | 4.38e+03s | 65% |
+| time_step - 2 compute d_ii, and tau_max | 70216 | 419s | 6.2% |
+| time_step - 3 perform update | 70216 | 1.87e+03s | 28% |
+| time_step - 4 fix boundary states | 70216 | 24s | 0.36% |
+| time_step - prepare scratch space | 1 | 0.0227s | 0% |
 +------------------------------------------------------------+-----------+------------+------------+

 @endverbatim
@@ -148,4 +150,4 @@ And with the following result:

 That's substantially better, although of course at the price of having run
-the code for roughly 6 hours on 16 cores.
+the code for roughly 2 hours on 16 cores.
diff --git a/examples/step-69/step-69.cc b/examples/step-69/step-69.cc
index 8888d17e7d..9b60e7b915 100644
--- a/examples/step-69/step-69.cc
+++ b/examples/step-69/step-69.cc
@@ -676,8 +676,6 @@ namespace Step69
 dof_handler.initialize(discretization->triangulation, discretization->finite_element);

-    DoFRenumbering::Cuthill_McKee(dof_handler);
-
 locally_owned = dof_handler.locally_owned_dofs();
 n_locally_owned = locally_owned.n_elements();

@@ -699,11 +697,12 @@
 // explanation. We avoid using a distributed matrix class (as for
 // example provided by Trilinos or PETSc) and instead rely on deal.II's
 // own SparseMatrix object to store the local part of all matrices.
- // This design decision is motivated by the fact that we actually never
- // perform a matrix-vector multiplication. Instead, we will have to
- // perform nonlinear updates while iterating over (the local part) of a
- // connectivity stencil; a task for which deal.II own SparsityPattern
- // is specificially optimized for.
+ // This design decision is motivated by the fact that (a) we actually
+ // never perform a matrix-vector multiplication, and (b) we can always
+ // assemble the local part of a matrix exclusively on a given MPI
+ // rank. Instead, we will compute nonlinear updates while iterating
+ // over (the local part of) a connectivity stencil; a task for which
+ // deal.II's own SparsityPattern is specifically optimized.
 //
 // This design consideration has a caveat, though. What makes the
 // deal.II SparseMatrix class fast is the n_locally_relevant$)$. That capability can be
- // invoked by Utilities::MPI::Partitioner::global_to_local() function.
+ // invoked by the Utilities::MPI::Partitioner::global_to_local() function.
 // Once the sparsity pattern is created using local indices, all that
 // is left to do is to ensure that (when implementing our scatter and
 // gather auxiliary functions) we always access elements of a
@@ -739,7 +739,7 @@ namespace Step69
 // We have to create the "local" sparsity pattern by hand. We
 // therefore loop over all locally owned and ghosted cells (see @ref
 // GlossArtificialCell) and extract the (global)
- // dof_indices associated to the cell DOFs and renumber
+ // dof_indices associated with the cell DOFs and renumber
 // them using partitioner->global_to_local(index).
 //
 // @note In the case of a locally owned dof, such renumbering consist
@@ -747,7 +747,7 @@
 // will become a number in the integer interval
 // $[0,$n_locally_owned$)$. However, in the case of a
 // ghosted dof (i.e. not locally owned) the situation is quite
- // different, since the global indices associated to ghosted DOFs will
+ // different, since the global indices associated with ghosted DOFs will
 // not be (in general) a contiguous set of integers.

 DynamicSparsityPattern dsp(n_locally_relevant, n_locally_relevant);
@@ -759,6 +759,8 @@ namespace Step69
 if (cell->is_artificial())
 continue;

+ /* We transform the set of global dof indices on the cell to the
+ * corresponding "local" index range on the MPI process: */
 cell->get_dof_indices(dof_indices);
 std::transform(dof_indices.begin(),
 dof_indices.end(),
@@ -767,6 +769,8 @@ namespace Step69
 return partitioner->global_to_local(index);
 });

+ /* And simply add, for each dof, a coupling to all other "local"
+ * dofs on the cell: */
 for (const auto dof : dof_indices)
 dsp.add_entries(dof, dof_indices.begin(), dof_indices.end());
 }
@@ -783,7 +787,7 @@ namespace Step69
 }

 // This concludes the setup of the DoFHandler and SparseMatrix objects.
- // Next, we have to assemble various matrices. We next define a number of
+ // Next, we have to assemble various matrices. We define a number of
 // helper functions and data structures in an anonymous namespace.

 namespace
@@ -855,7 +859,7 @@ namespace Step69
 // that we need one matrix per space dimension to store the
 // $\mathbf{c}_{ij}$ vectors. Similar observation follows for the
 // matrix $\mathbf{n}_{ij}$. The purpose of
- // gather_get_entry() is to retrieve those entries a store
+ // gather_get_entry() is to retrieve those entries and store
 // them into a Tensor<1, dim> for our convenience.
template
@@ -874,7 +878,7 @@ namespace Step69
 // functionality of gather_get_entry() and
 // gather() is very much the same, but their context is
 // different: the function gather() does not rely on an
- // iterator (that actually knows the value pointed) but rather on the
+ // iterator (that actually knows the value pointed to) but rather on the
 // indices (i,l) of the entry in order to retrieve its
 // actual value. We should expect gather() to be slightly
 // more expensive than gather_get_entry(). The use of
@@ -887,7 +891,7 @@ namespace Step69
 // matrices) is in general unacceptably expensive. Here is where we might
 // want to keep an eye on complexity: we want this operation to have
 // constant complexity, which is the case of the current implementation
- // using deal.ii matrices.
+ // using deal.II matrices.

 template
 DEAL_II_ALWAYS_INLINE inline Tensor<1, k>
@@ -920,7 +924,7 @@ namespace Step69
 // Tensor<1,problem_dimension>, and the last argument
 // which represents a index of the global object. This function will be
 // primarily used to write the updated nodal values, stored as
- // Tensor<1,problem_dimension>, into the global object.
+ // Tensor<1,problem_dimension>, into the global objects.

 template
 DEAL_II_ALWAYS_INLINE inline void
@@ -937,13 +941,14 @@ namespace Step69
 // \frac{\mathbf{c}_{ij}}{|\mathbf{c}_{ij}|}$, and the boundary normals
 // $\boldsymbol{\nu}_i$.
 //
- // In order to exploit thread parallelization we use WorkStream approach
- // detailed in the @ref threads Parallel computing with multiple processors
- // accessing shared memory. As customary this requires
+ // In order to exploit thread parallelization we use the WorkStream approach
+ // detailed in the @ref threads "Parallel computing with multiple
+ // processors accessing shared memory". As customary this requires
 // definition of
 // - Scratch data (i.e. input info required to carry out computations): in
 // this case it is scratch_data.
- // - The worker: in the case it is local_assemble_system() that
+ // - The worker: in our case this is the local_assemble_system()
+ // function that
 // actually computes the local (i.e. current cell) contributions from the
 // scratch data.
 // - A copy data: a struct that contains all the local assembly
 // copy_local_to_global() in charge of actually coping these
 // local contributions into the global objects (matrices and/or vectors)
 //
- // Most the following lines are spent in the definition of the worker
+ // Most of the following lines are spent in the definition of the worker
 // local_assemble_system() and the copy data routine
 // copy_local_to_global(). There is not much to say about the
 // WorkStream framework since the vast majority of ideas are reasonably
@@ -963,14 +968,14 @@ namespace Step69
 //
 // @f{align*}
 // \widehat{\boldsymbol{\nu}}_i :=
- // \frac{\boldsymbol{\nu}_i}{|\boldsymbol{\nu}_i|} \ \text{where}
+ // \frac{\boldsymbol{\nu}_i}{|\boldsymbol{\nu}_i|} \ \text{ where }
 // \boldsymbol{\nu}_i := \sum_{T \subset \text{supp}(\phi_i)}
 // \sum_{F \subset \partial T \cap \partial \Omega}
 // \sum_{\mathbf{x}_{q,F}} \nu(\mathbf{x}_{q,F})
- // \phi_i(\mathbf{x}_{q,F})
+ // \phi_i(\mathbf{x}_{q,F}).
 // @f}
 //
- // here $T$ denotes elements,
+ // Here $T$ denotes elements,
 // $\text{supp}(\phi_i)$ the support of the shape function $\phi_i$,
 // $F$ are faces of the element $T$, and $\mathbf{x}_{q,F}$
 // are quadrature points on such face. Note that this formula for
@@ -1011,35 +1016,29 @@ namespace Step69
 const auto local_assemble_system = [&](const auto &cell,
 auto & scratch,
 auto & copy) {
- auto &is_artificial = copy.is_artificial;
- auto &local_dof_indices = copy.local_dof_indices;
- auto &local_boundary_normal_map = copy.local_boundary_normal_map;
- auto &cell_lumped_mass_matrix = copy.cell_lumped_mass_matrix;
- auto &cell_cij_matrix = copy.cell_cij_matrix;
-
- is_artificial = cell->is_artificial();
- if (is_artificial)
+ copy.is_artificial = cell->is_artificial();
+ if (copy.is_artificial)
 return;

- local_boundary_normal_map.clear();
- cell_lumped_mass_matrix.reinit(dofs_per_cell, dofs_per_cell);
- for (auto &matrix : cell_cij_matrix)
+ copy.local_boundary_normal_map.clear();
+ copy.cell_lumped_mass_matrix.reinit(dofs_per_cell, dofs_per_cell);
+ for (auto &matrix : copy.cell_cij_matrix)
 matrix.reinit(dofs_per_cell, dofs_per_cell);

 const auto &fe_values = scratch.reinit(cell);

- local_dof_indices.resize(dofs_per_cell);
- cell->get_dof_indices(local_dof_indices);
+ copy.local_dof_indices.resize(dofs_per_cell);
+ cell->get_dof_indices(copy.local_dof_indices);

- std::transform(local_dof_indices.begin(),
- local_dof_indices.end(),
- local_dof_indices.begin(),
+ std::transform(copy.local_dof_indices.begin(),
+ copy.local_dof_indices.end(),
+ copy.local_dof_indices.begin(),
 [&](auto index) { return partitioner->global_to_local(index); });

 // We compute the local contributions for the lumped mass matrix
- // entries m_i and and vectors c_ij in the usual fashion:
+ // entries $m_i$ and vectors $c_{ij}$ in the usual fashion:
 for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
 {
 const auto JxW = fe_values.JxW(q_point);
@@ -1049,13 +1048,13 @@ namespace Step69
 const auto value_JxW = fe_values.shape_value(j, q_point) * JxW;
 const auto grad_JxW = fe_values.shape_grad(j, q_point) * JxW;

- cell_lumped_mass_matrix(j, j) += value_JxW;
+ copy.cell_lumped_mass_matrix(j, j) += value_JxW;

 for (unsigned int i = 0; i < dofs_per_cell; ++i)
 {
 const auto value = fe_values.shape_value(i, q_point);

 for (unsigned int d = 0; d < dim; ++d)
- cell_cij_matrix[d](i, j) += (value * grad_JxW)[d];
+ copy.cell_cij_matrix[d](i, j) += value * grad_JxW[d];

 } /* i */
 } /* j */
@@ -1088,7 +1087,7 @@ namespace Step69
 // contribution right here, we have to take it "as is",
 // store it and pass it to the copy data routine. The
 // proper normalization requires an additional loop on
- // nodes.
+ // nodes. This is done in the copy function below.
Tensor<1, dim> normal; if (id == free_slip) { @@ -1097,7 +1096,7 @@ namespace Step69 fe_face_values.shape_value(j, q); } - const auto index = local_dof_indices[j]; + const auto index = copy.local_dof_indices[j]; Point position; const auto global_index = partitioner->local_to_global(index); @@ -1105,11 +1104,14 @@ namespace Step69 v < GeometryInfo::vertices_per_cell; ++v) if (cell->vertex_dof_index(v, 0) == global_index) - position = cell->vertex(v); + { + position = cell->vertex(v); + break; + } const auto old_id = - std::get<1>(local_boundary_normal_map[index]); - local_boundary_normal_map[index] = + std::get<1>(copy.local_boundary_normal_map[index]); + copy.local_boundary_normal_map[index] = std::make_tuple(normal, std::max(old_id, id), position); } } @@ -1118,36 +1120,26 @@ namespace Step69 // Last, we provide a copy_local_to_global function as required for // the WorkStream const auto copy_local_to_global = [&](const auto ©) { - const auto &is_artificial = copy.is_artificial; - const auto &local_dof_indices = copy.local_dof_indices; - const auto &local_boundary_normal_map = copy.local_boundary_normal_map; - const auto &cell_lumped_mass_matrix = copy.cell_lumped_mass_matrix; - const auto &cell_cij_matrix = copy.cell_cij_matrix; - - if (is_artificial) + if (copy.is_artificial) return; - for (const auto &it : local_boundary_normal_map) + for (const auto &it : copy.local_boundary_normal_map) { - auto &normal = std::get<0>(boundary_normal_map[it.first]); - auto &id = std::get<1>(boundary_normal_map[it.first]); - auto &position = std::get<2>(boundary_normal_map[it.first]); - - const auto &new_normal = std::get<0>(it.second); - const auto &new_id = std::get<1>(it.second); - const auto &new_position = std::get<2>(it.second); - - normal += new_normal; - id = std::max(id, new_id); - position = new_position; + std::get<0>(boundary_normal_map[it.first]) += + std::get<0>(it.second); + std::get<1>(boundary_normal_map[it.first]) = + std::max(std::get<1>(boundary_normal_map[it.first]), + std::get<1>(it.second)); + std::get<2>(boundary_normal_map[it.first]) = std::get<2>(it.second); } - lumped_mass_matrix.add(local_dof_indices, cell_lumped_mass_matrix); + lumped_mass_matrix.add(copy.local_dof_indices, + copy.cell_lumped_mass_matrix); for (int k = 0; k < dim; ++k) { - cij_matrix[k].add(local_dof_indices, cell_cij_matrix[k]); - nij_matrix[k].add(local_dof_indices, cell_cij_matrix[k]); + cij_matrix[k].add(copy.local_dof_indices, copy.cell_cij_matrix[k]); + nij_matrix[k].add(copy.local_dof_indices, copy.cell_cij_matrix[k]); } }; @@ -1322,28 +1314,23 @@ namespace Step69 const auto local_assemble_system = [&](const auto &cell, auto & scratch, auto & copy) { - auto &is_artificial = copy.is_artificial; - auto &local_dof_indices = copy.local_dof_indices; - - auto &cell_cij_matrix = copy.cell_cij_matrix; - - is_artificial = cell->is_artificial(); - if (is_artificial) + copy.is_artificial = cell->is_artificial(); + if (copy.is_artificial) return; - for (auto &matrix : cell_cij_matrix) + for (auto &matrix : copy.cell_cij_matrix) matrix.reinit(dofs_per_cell, dofs_per_cell); - local_dof_indices.resize(dofs_per_cell); - cell->get_dof_indices(local_dof_indices); - std::transform(local_dof_indices.begin(), - local_dof_indices.end(), - local_dof_indices.begin(), + copy.local_dof_indices.resize(dofs_per_cell); + cell->get_dof_indices(copy.local_dof_indices); + std::transform(copy.local_dof_indices.begin(), + copy.local_dof_indices.end(), + copy.local_dof_indices.begin(), [&](auto index) { return 
partitioner->global_to_local(index); }); - for (auto &matrix : cell_cij_matrix) + for (auto &matrix : copy.cell_cij_matrix) matrix = 0.; for (auto f : GeometryInfo::face_indices()) @@ -1373,8 +1360,8 @@ namespace Step69 f)) continue; - const auto &normal_j = - std::get<0>(boundary_normal_map[local_dof_indices[j]]); + const auto &normal_j = std::get<0>( + boundary_normal_map[copy.local_dof_indices[j]]); const auto value_JxW = fe_face_values.shape_value(j, q) * JxW; @@ -1385,7 +1372,7 @@ namespace Step69 /* This is the correction of the boundary c_ij */ for (unsigned int d = 0; d < dim; ++d) - cell_cij_matrix[d](i, j) += + copy.cell_cij_matrix[d](i, j) += (normal_j[d] - normal_q[d]) * (value * value_JxW); } /* i */ } /* j */ @@ -1394,15 +1381,11 @@ namespace Step69 }; const auto copy_local_to_global = [&](const auto ©) { - const auto &is_artificial = copy.is_artificial; - const auto &local_dof_indices = copy.local_dof_indices; - const auto &cell_cij_matrix = copy.cell_cij_matrix; - - if (is_artificial) + if (copy.is_artificial) return; for (int k = 0; k < dim; ++k) - cij_matrix[k].add(local_dof_indices, cell_cij_matrix[k]); + cij_matrix[k].add(copy.local_dof_indices, copy.cell_cij_matrix[k]); }; WorkStream::run(dof_handler.begin_active(), -- 2.39.5
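A note for reviewers on the local index translation that the sparsity pattern hunk above relies on: partitioner->global_to_local() maps a global dof index either into the contiguous range [0, n_locally_owned) (for locally owned dofs) or onto a slot past that range (for ghosted dofs, whose global indices are in general not contiguous). The following self-contained sketch mimics that behavior with a hypothetical TinyPartitioner class; it is a simplified stand-in (one contiguous owned range, ghosts numbered in order of first appearance), not deal.II's actual Utilities::MPI::Partitioner.

#include <cstddef>
#include <iostream>
#include <unordered_map>

// Hypothetical stand-in for the global-to-local renumbering: locally
// owned indices map onto [0, n_owned), ghost indices are appended past
// that range in the order they are first encountered.
class TinyPartitioner
{
public:
  TinyPartitioner(const std::size_t first_owned, const std::size_t n_owned)
    : first_owned(first_owned)
    , n_owned(n_owned)
  {}

  std::size_t global_to_local(const std::size_t global_index)
  {
    if (global_index >= first_owned && global_index < first_owned + n_owned)
      return global_index - first_owned; // contiguous locally owned range

    // Ghosted dof: hand out the next free local slot on first sight.
    const auto it =
      ghosts.emplace(global_index, n_owned + ghosts.size()).first;
    return it->second;
  }

private:
  const std::size_t first_owned;
  const std::size_t n_owned;
  std::unordered_map<std::size_t, std::size_t> ghosts;
};

int main()
{
  // Pretend this MPI rank owns the global dofs [100, 104) and touches
  // the ghosted dofs 7 and 42 while looping over its cells:
  TinyPartitioner partitioner(100, 4);
  for (const std::size_t index : {100, 101, 7, 103, 42, 7})
    std::cout << index << " -> " << partitioner.global_to_local(index) << "\n";
}

With local indices of this kind, the DynamicSparsityPattern in the patch can be sized as a plain (n_locally_relevant x n_locally_relevant) object, which is what makes storing the local part of every matrix in an ordinary SparseMatrix viable.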
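Similarly, for the gather_get_entry()/gather()/scatter() helpers touched above: the essential point is that a nodal state is spread over several component objects (one matrix or vector per component) and is read or written at a known entry position in each of them, in constant time per node. A minimal, self-contained illustration of that access pattern, using std::vector and std::array as stand-ins for the deal.II matrix, vector, and Tensor classes (the real gather_get_entry() additionally walks a SparseMatrix row through its iterator):

#include <array>
#include <cstddef>
#include <iostream>
#include <vector>

// gather(): collect the i-th entry of each of the k component vectors
// into a single k-component nodal value.
template <std::size_t k>
std::array<double, k> gather(const std::array<std::vector<double>, k> &U,
                             const std::size_t i)
{
  std::array<double, k> result{};
  for (std::size_t d = 0; d < k; ++d)
    result[d] = U[d][i];
  return result;
}

// scatter(): write a k-component nodal value back into the k global
// component vectors at position i.
template <std::size_t k>
void scatter(std::array<std::vector<double>, k> &U,
             const std::array<double, k> &value,
             const std::size_t i)
{
  for (std::size_t d = 0; d < k; ++d)
    U[d][i] = value[d];
}

int main()
{
  std::array<std::vector<double>, 2> U = {std::vector<double>{1., 2., 3.},
                                          std::vector<double>{4., 5., 6.}};

  auto nodal_value = gather(U, 1); // reads {2., 5.}
  nodal_value[0] *= 10.;           // nonlinear update of the nodal state ...
  scatter(U, nodal_value, 1);      // ... written back entry by entry

  std::cout << U[0][1] << " " << U[1][1] << "\n"; // prints "20 5"
}

Both operations touch a fixed number of entries per node, which is the constant-complexity access the comments in the patch insist on.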
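Finally, regarding the switch from local aliases to explicit copy.* accesses in local_assemble_system() and copy_local_to_global(): the pattern behind both lambdas is that a worker writes only into its private copy-data object, while a single serialized copy routine folds those contributions into the global objects; the boundary normals, in particular, are merely accumulated there and normalized in a later loop. A much-reduced serial sketch of that division of labor, with made-up per-cell data in place of the real finite element assembly (WorkStream::run() would invoke the worker concurrently and the copy routine one cell at a time):

#include <iostream>
#include <map>

// Per-cell results: the only thing the worker is allowed to write to.
struct CopyData
{
  bool is_artificial = false;
  std::map<int, double> local_boundary_normal_map; // 1D "normal" per dof
};

int main()
{
  std::map<int, double> boundary_normal_map; // the shared global object

  // Worker: computes the local (per-cell) contributions into `copy`.
  const auto local_assemble_system = [](const int cell, CopyData &copy) {
    copy.is_artificial = (cell % 5 == 4); // pretend every 5th cell is
    copy.local_boundary_normal_map.clear();
    copy.local_boundary_normal_map[cell % 3] = 1.0; // made-up contribution
  };

  // Copy routine: the only place that touches global state, hence run
  // in a serialized fashion; normals are accumulated here and would be
  // normalized in a subsequent loop over all boundary dofs.
  const auto copy_local_to_global = [&](const CopyData &copy) {
    if (copy.is_artificial)
      return;
    for (const auto &it : copy.local_boundary_normal_map)
      boundary_normal_map[it.first] += it.second;
  };

  for (int cell = 0; cell < 10; ++cell) // stand-in for WorkStream::run()
    {
      CopyData copy;
      local_assemble_system(cell, copy);
      copy_local_to_global(copy);
    }

  for (const auto &entry : boundary_normal_map)
    std::cout << "dof " << entry.first << ": " << entry.second << "\n";
}

Writing through copy.* directly, as the patch now does, makes this ownership boundary visible at every use site, at the cost of slightly longer lines.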