From 65de7e071cba4d13ee42d5b993cb0b6890269211 Mon Sep 17 00:00:00 2001
From: "Ignacio Tomas (-EXP)"
Date: Wed, 12 Feb 2020 16:36:44 -0700
Subject: [PATCH] More polishing

---
 examples/step-69/doc/intro.dox |  20 ++--
 examples/step-69/step-69.cc    | 188 +++++++++++++++++----------
 2 files changed, 107 insertions(+), 101 deletions(-)

diff --git a/examples/step-69/doc/intro.dox b/examples/step-69/doc/intro.dox
index 6196338e54..ba3a649081 100644
--- a/examples/step-69/doc/intro.dox
+++ b/examples/step-69/doc/intro.dox
@@ -83,7 +83,7 @@ $\mathbb{f}(\mathbf{u})$ is defined as
 where $\mathbb{I} \in \mathbb{R}^{d \times d}$ is the identity matrix and
 $\otimes$ denotes the tensor product. Here, we have introduced the pressure
 $p$ that, in general, is defined by a closed-form equation of state.
-In this tutorial we limit the discussion to the class of polytropic 
+In this tutorial we limit the discussion to the class of polytropic
 ideal gases for which the pressure is given by
 @f{align*}
 p = p(\textbf{u}) := (\gamma -1) \Big(E -
@@ -181,7 +181,7 @@ $s_h(\cdot,\cdot)_{L^2(\Omega)}$ is some linear stabilization method
 instance @cite GuermondErn2004 Chapter 5 and references therein). Most
 time-dependent discretization approaches described in the deal.II tutorials
 are based on such a (semi-discrete) variational approach. Fundamentally,
-from an analysis perspective, variational discretizations are conceived 
+from an analysis perspective, variational discretizations are conceived
 to provide some notion of global (integral) stability, meaning an estimate
 of the form
@@ -234,7 +234,7 @@ Let $\mathbb{V}_h$ be scalar-valued finite dimensional space
 spanned by a basis $\{\phi_i\}_{i \in \mathcal{V}}$ where:
 $\phi_i:\Omega \rightarrow \mathbb{R}$ and $\mathcal{V}$ is the set of
 all indices (nonnegative integers) identifying each scalar Degree of
 Freedom (DOF) in the mesh.
-Therefore a scalar finite element functiona $u_h \in \mathbb{V}_h$ it can
+Therefore a scalar finite element function $u_h \in \mathbb{V}_h$ can
 be written as $u_h = \sum_{i \in \mathcal{V}} U_i \phi_i$ with $U_i \in
 \mathbb{R}$. We introduce the notation for vector-valued approximation
 spaces $\pmb{\mathbb{V}}_h := \{\mathbb{V}_h\}^{d+2}$. Let $\mathbf{u}_h
@@ -280,9 +280,9 @@ The definition of $\lambda_{\text{max}} (\mathbf{U},\mathbf{V},
 definition in order to focus first on some algorithmic and implementational
 questions. We note that
 - $m_i$ and $\mathbf{c}_{ij}$ do not evolve in time (provided we keep the
-  discretization fixed). It thus makes sense to assemble these 
-  matrices/vectors once in a so called offline computation and reuse 
-  them in every time step. They are part of what we are going to call 
+  discretization fixed). It thus makes sense to assemble these
+  matrices/vectors once in a so-called offline computation and reuse
+  them in every time step. They are part of what we are going to call
   off-line data.
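  As a rough illustration only (all names in this sketch, such as
  dof_handler, fe_values, lumped_mass and c_ij,
  are hypothetical; the actual interface is discussed in the step-69.cc
  documentation below), such an offline assembly could look as follows:
  @code
  // Assemble the lumped mass entries m_i and the vectors c_ij once,
  // before time stepping starts: both depend only on the mesh and the
  // finite element space, never on the current state U^n.
  for (const auto &cell : dof_handler.active_cell_iterators())
    {
      fe_values.reinit(cell);
      cell->get_dof_indices(dof_indices);
      for (unsigned int q = 0; q < n_q_points; ++q)
        for (unsigned int i = 0; i < dofs_per_cell; ++i)
          {
            // m_i = sum_q phi_i(x_q) w_q
            lumped_mass[dof_indices[i]] +=
              fe_values.shape_value(i, q) * fe_values.JxW(q);
            // c_ij = sum_q phi_i(x_q) grad(phi_j)(x_q) w_q; here c_ij is
            // assumed to be some (hypothetical) container of rank-1 tensors.
            for (unsigned int j = 0; j < dofs_per_cell; ++j)
              c_ij[dof_indices[i]][dof_indices[j]] +=
                fe_values.shape_value(i, q) * fe_values.shape_grad(j, q) *
                fe_values.JxW(q);
          }
    }
  @endcode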
 - At every time step we have to evaluate $\mathbb{f}(\mathbf{U}_j^{n})$ and
   $d_{ij} := \max \{ \lambda_{\text{max}}
@@ -298,9 +298,9 @@ $t_n$:
 @f{align*}
 &\textbf{for } i \in \mathcal{V} \\
-&\ \ \ \ \{\mathbf{c}_{ij}\}_{j \in \mathcal{I}(i)} \leftarrow 
+&\ \ \ \ \{\mathbf{c}_{ij}\}_{j \in \mathcal{I}(i)} \leftarrow
 \texttt{gather_cij_vectors} (\textbf{c}, \mathcal{I}(i)) \\
-&\ \ \ \ \{\textbf{U}_j^n\}_{j \in \mathcal{I}(i)} \leftarrow 
+&\ \ \ \ \{\textbf{U}_j^n\}_{j \in \mathcal{I}(i)} \leftarrow
 \texttt{gather_state_vectors} (\textbf{U}^n, \mathcal{I}(i)) \\
 &\ \ \ \ \ \textbf{U}_i^{n+1} \leftarrow \mathbf{U}_i^{n} \\
 &\ \ \ \ \textbf{for } j \in \mathcal{I}(i) \\
@@ -319,7 +319,7 @@ We note here that:
 containing all the vectors $\mathbf{c}_{ij}$ and all the states
 $\mathbf{U}_j^n$ respectively.
 - $\texttt{gather_cij_vectors}$, $\texttt{gather_state_vectors}$, and
-$\texttt{scatter_updated_state}$ are hypothetical implementations that 
+$\texttt{scatter_updated_state}$ are hypothetical implementations that
 either collect (from) or write (into) global matrices and vectors.
 - Note that if we assume a Cartesian mesh in two space dimensions,
 first-order polynomial space $\mathbb{Q}^1$, and that
@@ -328,7 +328,7 @@ of the domain ) then:
 $\{\textbf{U}_j^n\}_{j \in \mathcal{I}(i)}$ should contain nine state-vectors
 (i.e. all the states in the patch/macro element associated to the shape
 function $\phi_i$).
 This is one of the major differences with the usual cell-based loop where
 the gather functionality (encoded in
-FEValuesBase.get_function_values() in the case of deal.ii) only 
+FEValuesBase::get_function_values() in the case of deal.II) only
 collects values for the local cell (just a subset of the patch).

 The actual implementation will deviate from the above code in one key aspect:
diff --git a/examples/step-69/step-69.cc b/examples/step-69/step-69.cc
index ea5f3e926e..daf47dfb17 100644
--- a/examples/step-69/step-69.cc
+++ b/examples/step-69/step-69.cc
@@ -101,10 +101,10 @@
 // functions. For the sake of brevity, we refrain from that approach,
 // though.
 //
-// We also note that the vast majority of classes is derived from 
-// ParameterAcceptor. This facilitates the population of all the global 
-// parameters into a single (global) ParameterHandler. More explanations 
-// about the use inheritance from ParameterAcceptor as a global subscription 
+// We also note that the vast majority of classes are derived from
+// ParameterAcceptor. This facilitates the population of all the global
+// parameters into a single (global) ParameterHandler. More explanations
+// about the use of inheritance from ParameterAcceptor as a global subscription
 // mechanism can be found in Step-59.

 namespace Step69
@@ -171,8 +171,8 @@
   //
   // The class OfflineData contains pretty much all components
   // of the discretization that do not evolve in time, in particular, the
-  // DoFHandler, SparsityPattern, boundary maps, the lumped mass, 
-  // $\mathbf{c}_{ij}$ and $\mathbf{n}_{ij}$ matrices. Here, the term 
+  // DoFHandler, SparsityPattern, boundary maps, the lumped mass,
+  // $\mathbf{c}_{ij}$ and $\mathbf{n}_{ij}$ matrices. Here, the term
   // offline refers to the fact that all the class
   // members of OfflineData have well-defined values
   // independent of the current time step. This means that they can be
@@ -366,14 +366,14 @@
   // that was introduced in the discussion above.
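   //
   // Schematically, and only as an illustration (the names in this sketch
   // are simplified stand-ins; the actual member functions are described
   // next), a single forward Euler update of node i reads:
   //
   // @code
   // const auto U_i     = gather(U, i); // state at node i
   // auto       U_i_new = U_i;
   // for (auto jt = sparsity.begin(i); jt != sparsity.end(i); ++jt)
   //   {
   //     const auto j    = jt->column();
   //     const auto U_j  = gather(U, j);
   //     const auto c_ij = gather_get_entry(cij_matrix, jt);
   //     const auto d_ij = get_entry(dij_matrix, jt);
   //     const auto f_j  = ProblemDescription<dim>::f(U_j);
   //     for (unsigned int k = 0; k < problem_dimension; ++k)
   //       U_i_new[k] +=
   //         tau / m_i * (-(f_j[k] * c_ij) + d_ij * (U_j[k] - U_i[k]));
   //   }
   // scatter(temp, U_i_new, i);
   // @endcode
   //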
   // The main method of the
   // TimeStep class is step(vector_type &U, double
   // t) that takes a reference to a state vector U and
-  // a time point t (as input arguments) computes the updated 
-  // solution, stores it in the vector temp, swaps its contents 
-  // with the vector U, and returns the chosen step-size 
+  // a time point t (as input arguments), computes the updated
+  // solution, stores it in the vector temp, swaps its contents
+  // with the vector U, and returns the chosen step-size
   // $\tau$.
   //
   // The other important method is prepare() which primarily
   // sets the proper partition and sparsity pattern for the temporary
-  // vector temp and the matrix dij_matrix 
+  // vector temp and the matrix dij_matrix,
   // respectively.

   template <int dim>
@@ -747,19 +747,20 @@
   // contiguous local index range. But this is precisely the type of
   // index manipulation we want to avoid in our assembly loops.
   //
-  // The Utilities::MPI::Partitioner already implements the translation from
-  // a global index range to a contiguous local (per MPI rank) index
-  // range (we don't have to reinvent the wheel). We just need to use that
-  // translation capability (once and only once) in order to create a
-  // "local" sparsity pattern for
-  // the contiguous index range $[0,$n_locally_relevant$)$. That
-  // capability can be invoked by
-  // Utilities::MPI::Partitioner::global_to_local()
-  // function. All that is left to do is to ensure that, when implementing
-  // our scatter and gather auxiliary functions, we always access
-  // elements of a distributed vector by a call to
-  // LinearAlgebra::distributed::Vector::local_element(). That way we avoid
-  // index translations altogether and operate exclusively with local indices.
+  // The Utilities::MPI::Partitioner already implements the translation
+  // from a global index range to a contiguous local (per MPI rank) index
+  // range: we don't have to reinvent the wheel. We just need to use that
+  // translation capability (once and only once) in order to create a
+  // "local" sparsity pattern for the contiguous index range
+  // $[0,$n_locally_relevant$)$. That capability can be
+  // invoked by the Utilities::MPI::Partitioner::global_to_local() function.
+  // Once the sparsity pattern is created using local indices, all that
+  // is left to do is to ensure that (when implementing our scatter and
+  // gather auxiliary functions) we always access elements of a
+  // distributed vector by a call to
+  // LinearAlgebra::distributed::Vector::local_element(). This way we
+  // avoid index translations altogether and operate exclusively with
+  // local indices.

   {
     TimerOutput::Scope t(
@@ -903,13 +904,21 @@
     // the individual components (i,l) of a matrix. The
     // functionality of gather_get_entry and
     // gather is very much the same, but their context is
-    // different: the function gather is meant to be used in
-    // exceptional/limited number of cases. The reader should be aware that
-    // accessing an arbitrary (i,l) entry of a matrix (say for
-    // instance Trilinos or PETSc matrices) is very expensive. Here is
-    // where we might want to keep an eye on complexity: we want this
-    // operation to have constant complexity (and that's the case of this
-    // implementation using deal.ii matrices).
+    // different: the function gather does not rely on an
+    // iterator (that actually knows the value it points to) but rather on
+    // the indices (i,l) of the entry in order to retrieve its
+    // actual value.
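+    // Schematically (with simplified signatures, purely as an
+    // illustration):
+    //
+    // @code
+    // // Iterator-based access: jt already points at the entry.
+    // const auto c_ij = gather_get_entry(cij_matrix, jt);
+    //
+    // // Index-based access: the entry has to be located via the index
+    // // pair (i, l) first.
+    // const auto c_il = gather(cij_matrix, i, l);
+    // @endcode
+    //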
+    // We should expect gather to be slightly
+    // more expensive than gather_get_entry. The use of
+    // gather will be limited to the task of computing the
+    // algebraic viscosity $d_{ij}$ in the particular case when
+    // both $i$ and $j$ lie at the boundary.
+    //
+    // @note The reader should be aware that accessing an arbitrary
+    // (i,l) entry of a matrix (say for instance Trilinos or PETSc
+    // matrices) is in general unacceptably expensive. Here is where we
+    // might want to keep an eye on complexity: we want this operation to
+    // have constant complexity, which is the case for the current
+    // implementation using deal.II matrices.

    template <typename T1, std::size_t k, typename T2>
    DEAL_II_ALWAYS_INLINE inline Tensor<1, k>
@@ -923,8 +932,8 @@
    // gather (second interface): this second function
    // signature having two input arguments will be used to gather the
-    // state at a node i and return Tensor<1,
-    // problem_dimension> for our convenience.
+    // state at a node i and return it as a
+    // Tensor<1,problem_dimension> for our convenience.

    template <typename T1, std::size_t k, typename T2>
    DEAL_II_ALWAYS_INLINE inline Tensor<1, k>
    gather(const std::array<T1, k> &U,
@@ -937,13 +946,12 @@
    }

    // scatter: this function has three input arguments, the
-    // first one is meant to be a global object (say a locally owned
-    // vector), the second argument which could be a
+    // first one is meant to be a "global object" (say a locally owned or
+    // locally relevant vector), the second argument which could be a
    // Tensor<1,problem_dimension>, and the last argument
    // which represents an index of the global object. This function will be
    // primarily used to write the updated nodal values, stored as
-    // Tensor<1,problem_dimension>, into the globally owned
-    // vector.
+    // Tensor<1,problem_dimension>, into the global object.

    template <typename T1, std::size_t k1, typename T2, typename T3>
    DEAL_II_ALWAYS_INLINE inline void
@@ -961,8 +969,8 @@
    // $\boldsymbol{\nu}_i$.
    //
    // In order to exploit thread parallelization we use the WorkStream approach
-    // detailed in the @ref threads "Parallel computing with multiple processors
-    // accessing shared memory". As customary this requires
+    // detailed in the @ref threads Parallel computing with multiple processors
+    // accessing shared memory. As is customary, this requires the
    // definition of
    // - Scratch data (i.e. input info required to carry out computations): in
    // this case it is scratch_data.
@@ -982,11 +990,11 @@
    // well-documented in Step-9, Step-13 and Step-32 among others.
    //
    // Finally, assuming that $\mathbf{x}_i$ is a support point at the boundary,
-    // the normals are defined as
+    // the (nodal) normals are defined using averaging:
    //
    // $\widehat{\boldsymbol{\nu}}_i :=
    // \frac{\boldsymbol{\nu}_i}{|\boldsymbol{\nu}_i|}$ where
-    // $\boldsymbol{\nu}_i := \sum_{T \in \text{supp}(\phi_i)}
+    // $\boldsymbol{\nu}_i := \sum_{T \subset \text{supp}(\phi_i)}
    // \sum_{F \subset \partial T \cap \partial \Omega}
    // \sum_{\mathbf{x}_{q,F}} \nu(\mathbf{x}_{q,F})
    // \phi_i(\mathbf{x}_{q,F})$
@@ -994,9 +1002,10 @@
    // here $T$ denotes elements,
    // $\text{supp}(\phi_i)$ the support of the shape function $\phi_i$,
    // $F$ are faces of the element $T$, and $\mathbf{x}_{q,F}$
-    // are quadrature points on such face.
-    // Other more sophisticated definitions for $\nu_i$ are
-    // possible but none of them have much influence in theory or practice.
+    // are quadrature points on such face. Note that this formula for
+    // $\widehat{\boldsymbol{\nu}}_i$ is nothing but a form of
+    // weighted averaging.
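+    //
+    // In code, the accumulation behind this formula can be sketched as
+    // follows (hypothetical names; the actual loop is part of
+    // assemble() below):
+    //
+    // @code
+    // for (unsigned int q = 0; q < n_face_q_points; ++q)
+    //   nu_i += fe_face_values.normal_vector(q) *
+    //           fe_face_values.shape_value(i, q);
+    // // ... and once all boundary faces have contributed:
+    // nu_i /= nu_i.norm();
+    // @endcode
+    //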
+    // Other more sophisticated definitions for $\nu_i$
+    // are possible but none of them have much influence in theory or practice.

    template <int dim>
    void OfflineData<dim>::assemble()
@@ -1082,7 +1091,7 @@
            } /* q */

            /* Now we have to compute the boundary normals. Note that the
-               following loop does not actually do much unless the the element
+               following loop does not do much unless the element
               has faces on the boundary of the domain */
            for (unsigned int f = 0; f < GeometryInfo<dim>::faces_per_cell; ++f)
              {
@@ -1208,15 +1217,14 @@
    // be used for our node-loops. This functionality requires four input
    // arguments which we explain in detail (for the specific case of our
    // thread-parallel node loops):
-    // - The iterator indices.begin() points to
-    // to a row index.
+    // - The iterator indices.begin() points to a row index.
    // - The iterator indices.end() points to a numerically higher
    // row index.
    // - The function on_subranges(i1,i2) (where i1
    // and i2 define a sub-range within the range spanned by
    // the end and begin iterators defined in the two previous bullets)
    // applies the operation for every iterator in such a subrange. We may as well
-    // call on_subranges the worker.
+    // call on_subranges the "worker".
    // - Grainsize: minimum number of iterators (in this case representing
    // rows) processed by each thread. We decided on a minimum of 4096
    // rows.
@@ -1426,11 +1434,10 @@

  // In this section we describe the implementation of the class members of
  // the ProblemDescription class. Most of the code here is
-  // specific for compressible Euler's equations with an ideal gas law.
-  //
+  // specific to the compressible Euler equations with an ideal gas law.
  // If we wanted to re-purpose Step-69 for a different conservation law
-  // (say for instance the shallow water equation) most of the
-  // implementation of this class would have to change. Most of the other
+  // (say, for instance, the shallow water equation) most of the
+  // implementation of this class would have to change. But most of the other
  // classes (in particular those defining loop structures) would
  // remain unchanged.
  //
@@ -1505,12 +1512,12 @@
  // advanced discussion about it in this tutorial. In this portion of the
  // documentation we will limit ourselves to sketching the main functionality
  // of our implementation functions and point to specific academic
-  // references in order to help (the interested) reader to trace the
+  // references in order to help the (interested) reader trace the
  // source (and proper mathematical justification) of these ideas.
  //
  // In general, obtaining a sharp guaranteed upper-bound on the maximum
  // wavespeed requires solving a quite expensive scalar nonlinear problem.
-  // This is typically with an iterative solver. In order to simplify the
+  // This is typically done with an iterative solver. In order to simplify the
  // presentation in this example step we decided not to include such an
  // iterative scheme. Instead, we will just use an initial guess for
  // an upper bound on the maximum wavespeed. More precisely,
@@ -1522,17 +1529,15 @@
  // approximation for the intermediate pressure $p^*$, see for instance
  // Equation (4.46), page 128 in @cite Toro2009.
  //
-  // The estimate returned by lambda_max_two_rarefaction is in
-  // general quite sharp and is generally sufficient as an upper bound for
-  // our purposes.
-  // However, for some specific situations (in particular
-  // when one of states is close to vacuum conditions) such an estimate
-  // will be overly pessimistic.
-  //
-  // That's why we used a second estimate to avoid this degeneracy that
-  // will be invoked by a call to the function
-  // lambda_max_expansion. The most important function here is
-  // compute_lambda_max which takes the minimum between the
-  // estimates returned by lambda_max_two_rarefaction and
+  // The estimate returned by lambda_max_two_rarefaction
+  // is guaranteed to be an upper bound; it is in general quite sharp and
+  // overall sufficient for our purposes. However, for some specific
+  // situations (in particular when one of the states is close to vacuum
+  // conditions) such an estimate will be overly pessimistic. That's why
+  // we use a second estimate, invoked by a call to the function
+  // lambda_max_expansion, to avoid this degeneracy. The most
+  // important function here is compute_lambda_max which takes
+  // the minimum between the estimates returned by
+  // lambda_max_two_rarefaction and
  // lambda_max_expansion.
  //
  // We start again by defining a couple of helper functions:
@@ -1591,11 +1596,11 @@
  // primitive state $[\rho, u, p, a]$ and a given pressure $p^\ast$
  // @cite GuermondPopov2016 Eqn. (3.7):
  // @f{align*}
-  //   \lambda^- = u - a\,\sqrt{1 + \frac{\gamma+1}{2\gamma} *
+  //   \lambda^- = u - a\,\sqrt{1 + \frac{\gamma+1}{2\gamma}
  //   \left(\frac{p^\ast-p}{p}\right)_+}
  // @f}
-  // Here, the $+$ sign in the subscript of the parenthesis denotes the
-  // positive part of the given number.
+  // Here, $(\cdot)_{+}$ denotes the positive part of the given
+  // argument.

  DEAL_II_ALWAYS_INLINE inline double
  lambda1_minus(const std::array<double, 4> &riemann_data,
@@ -1615,7 +1620,7 @@

  // Analogously @cite GuermondPopov2016 Eqn. (3.8):
  // @f{align*}
-  //   \lambda^+ = u + a\,\sqrt{1 + \frac{\gamma+1}{2\gamma} *
+  //   \lambda^+ = u + a\,\sqrt{1 + \frac{\gamma+1}{2\gamma}
  //   \left(\frac{p^\ast-p}{p}\right)_+}
  // @f}

@@ -1669,7 +1674,7 @@
      return std::max(positive_part(lambda3), negative_part(lambda1));
    }

-  // We compute a second upper bound of the maximal wavespeed that is in
+  // We compute the second upper bound of the maximal wavespeed that is, in
  // general, not as sharp as the two-rarefaction estimate. But it will
  // save the day in the context of near vacuum conditions when the
  // two-rarefaction approximation might attain extreme values:
@@ -1694,8 +1699,8 @@
    }
  } // namespace

-  // The is the main function that we are going to call in order to compute
-  // $\lambda_{\text{max}} (\mathbf{U}_i^{n},\mathbf{U}_j^{n},
+  // The following is the main function that we are going to call in order to
+  // compute $\lambda_{\text{max}} (\mathbf{U}_i^{n},\mathbf{U}_j^{n},
  // \textbf{n}_{ij})$. We simply compute both maximal wavespeed estimates
  // and return the minimum.

@@ -1742,8 +1747,8 @@

  // @sect4{Initial values}

-  // As a last preparatory step before we discuss the implementation of the
-  // forward Euler scheme is to quickly implement the InitialValues class.
+  // The last preparatory step, before we discuss the implementation of the
+  // forward Euler scheme, is to briefly implement the InitialValues class.
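  //
  // As a quick preview of how this class will be used (schematically;
  // initial_state is the std::function member discussed
  // below): given the position x_i of a support point of the mesh, the
  // corresponding nodal initial state is obtained by
  //
  // @code
  // const auto U_i = initial_values.initial_state(x_i, /*t =*/0.);
  // @endcode
  //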
  // In the constructor we initialize all parameters with default values,
  // declare all parameters for the ParameterAcceptor class and connect the
@@ -1782,7 +1787,7 @@
  // default values for the two private members
  // initial_direction and initial_1d_state and
  // added them to the parameter list. But we have not defined an
-  // implementation for the only public member that we really care about,
+  // implementation of the only public member that we really care about,
  // which is initial_state (the function that we are going to
  // call to actually evaluate the initial solution at the mesh nodes).
  //
@@ -1806,9 +1811,9 @@
    static constexpr auto gamma = ProblemDescription<dim>::gamma;

    // The following lambda function translates a given primitive 1d state
-    // (density $rho$, velocity $u$, and pressure $p$) into a conserved nD
-    // state (density $rho$, momentum $\textbf{m}$, and total energy $E$).
-    // Note that we
+    // (density $\rho$, velocity $u$, and pressure $p$) into a
+    // conserved n-dimensional state (density $\rho$, momentum
+    // $\mathbf{m}$, and total energy $E$). Note that we
    // capture
    // the this pointer and thus access to
    // initial_direction by value.
@@ -1930,41 +1935,41 @@
    // \mathbf{U}_j^{n}, \textbf{n}_{ij}) = \lambda_{\text{max}}
    // (\mathbf{U}_j^{n}, \mathbf{U}_i^{n}, \textbf{n}_{ji})$ do not
    // necessarily hold true. The only mathematically safe solution for this
-    // dilemma is to compute both of them and take the maximum.
+    // dilemma is to compute both $d_{ij}$ and $d_{ji}$ and
+    // take the maximum.
    //
-    // The computation of $\lambda_{\text{max}}$ is quite expensive. In
-    // order to save some computing time we exploit the fact that the
-    // computing local wavenumbers is symmetric (provided that not both
-    // $\mathbf{x}_i$ and $\mathbf{x}_j$ lie on the boundary) as outlined
-    // above: We only compute the upper-triangular entries of $d_{ij}$ and
-    // copy the corresponding entries to the lower-triangular counterpart.
+    // Overall, the computation of $d_{ij}$ is quite expensive. In
+    // order to save some computing time we exploit the fact that the viscosity
+    // matrix has to be symmetric (as mentioned above): we only compute
+    // the upper-triangular entries of $d_{ij}$ and copy the
+    // corresponding entries to the lower-triangular counterpart.
    //
    // We use again parallel::apply_to_subranges for thread-parallel for
    // loops. Pretty much all the ideas for parallel traversal that we
    // introduced when discussing the assembly of the matrix
    // norm_matrix and the normalization of
-    // nij_matrix agove are used here again.
+    // nij_matrix above are used here again.

    {
      TimerOutput::Scope time(computing_timer, "time_step - 1 compute d_ij");

      // We define again a "worker" function on_subranges that
-      // computes the viscosity d_{ij} for a subrange [i1, i2) of column
+      // computes the viscosity $d_{ij}$ for a subrange [i1, i2) of row
      // indices:
      const auto on_subranges = [&](auto i1, const auto i2) {
        for (const auto i : boost::make_iterator_range(i1, i2))
          {
            const auto U_i = gather(U, i);

-            // For a given column index i we iterate over the column of the
+            // For a given row index i we iterate over the columns of the
            // sparsity pattern from sparsity.begin(i) to
            // sparsity.end(i):
            for (auto jt = sparsity.begin(i); jt != sparsity.end(i); ++jt)
              {
                const auto j = jt->column();

-                // We only compute d_ij if j < i (upper triangular entries)
-                // and later copy the values over to d_ji.
+                // We only compute $d_{ij}$ if $j < i$ (upper triangular
+                // entries) and later copy the values over to $d_{ji}$.
                if (j >= i)
                  continue;
@@ -1979,7 +1984,8 @@
                double d = norm * lambda_max;

                // If both support points happen to be at the boundary we
-                // have to compute d_ji as well and then take max(d_ij,d_ji):
+                // have to compute $d_{ji}$ as well and then take
+                // $\max(d_{ij},d_{ji})$:
                if (boundary_normal_map.count(i) != 0 &&
                    boundary_normal_map.count(j) != 0)
                  {
@@ -2119,7 +2125,7 @@
      // both equations are algebraically equivalent (they will produce the
      // same numerical values). We favor this second formula since it has
      // natural cancellation properties that might help avoid numerical
-      // instabilities.
+      // artifacts.
      {
        TimerOutput::Scope time(computing_timer,
                                "time_step - 3 perform update");
-- 
2.39.5