publisher={Springer Science \& Business Media}
}
+% ------------------------------------
+% Step 69
+% ------------------------------------
+
+@article {GuermondPopov2016,
+ AUTHOR = {Guermond, Jean-Luc and Popov, Bojan},
+ TITLE = {Invariant domains and first-order continuous finite element
+ approximation for hyperbolic systems},
+ JOURNAL = {SIAM J. Numer. Anal.},
+ FJOURNAL = {SIAM Journal on Numerical Analysis},
+ VOLUME = {54},
+ YEAR = {2016},
+ NUMBER = {4},
+ PAGES = {2466--2489},
+ ISSN = {0036-1429},
+ DOI = {10.1137/16M1074291},
+}
+
+@article {GuermondEtAl2018,
+ AUTHOR = {Guermond, Jean-Luc and Nazarov, Murtazo and Popov, Bojan and
+ Tomas, Ignacio},
+ TITLE = {Second-order invariant domain preserving approximation of the
+ {E}uler equations using convex limiting},
+ JOURNAL = {SIAM J. Sci. Comput.},
+ FJOURNAL = {SIAM Journal on Scientific Computing},
+ VOLUME = {40},
+ YEAR = {2018},
+ NUMBER = {5},
+ PAGES = {A3211--A3239},
+ ISSN = {1064-8275},
+ DOI = {10.1137/17M1149961},
+}
+
+@book {GuermondErn2004,
+ AUTHOR = {Ern, Alexandre and Guermond, Jean-Luc},
+ TITLE = {Theory and practice of finite elements},
+ SERIES = {Applied Mathematical Sciences},
+ VOLUME = {159},
+ PUBLISHER = {Springer-Verlag, New York},
+ YEAR = {2004},
+ PAGES = {xiv+524},
+ ISBN = {0-387-20574-8},
+ DOI = {10.1007/978-1-4757-4355-5},
+}
+
+@article {Brooks1982,
+ AUTHOR = {Brooks, Alexander N. and Hughes, Thomas J. R.},
+ TITLE = {Streamline upwind/{P}etrov-{G}alerkin formulations for
+ convection dominated flows with particular emphasis on the
+ incompressible {N}avier-{S}tokes equations},
+ NOTE = {FENOMECH '81, Part I (Stuttgart, 1981)},
+ JOURNAL = {Comput. Methods Appl. Mech. Engrg.},
+ FJOURNAL = {Computer Methods in Applied Mechanics and Engineering},
+ VOLUME = {32},
+ YEAR = {1982},
+ NUMBER = {1-3},
+ PAGES = {199--259},
+ ISSN = {0045-7825},
+ DOI = {10.1016/0045-7825(82)90071-8},
+}
+
+@article {Johnson1986,
+ AUTHOR = {Johnson, C. and Pitk\"{a}ranta, J.},
+ TITLE = {An analysis of the discontinuous {G}alerkin method for a
+ scalar hyperbolic equation},
+ JOURNAL = {Math. Comp.},
+ FJOURNAL = {Mathematics of Computation},
+ VOLUME = {46},
+ YEAR = {1986},
+ NUMBER = {173},
+ PAGES = {1--26},
+ ISSN = {0025-5718},
+ DOI = {10.2307/2008211},
+}
+
+@inbook{Rainald2008,
+author = {L{\"o}hner, Rainald},
+publisher = {John Wiley \& Sons, Ltd},
+isbn = {9780470989746},
+title = {Edge-Based Compressible Flow Solvers},
+booktitle = {Applied Computational Fluid Dynamics Techniques},
+chapter = {10},
+pages = {187--200},
+doi = {10.1002/9780470989746.ch10},
+year = {2008},
+}
+
+% ------------------------------------
+% Step 71
+% ------------------------------------
+
+@article{Brenner2005,
+ doi = {10.1007/s10915-004-4135-7},
+ url = {https://doi.org/10.1007/s10915-004-4135-7},
+ year = {2005},
+ month = jun,
+ publisher = {Springer Science and Business Media {LLC}},
+ volume = {22-23},
+ number = {1-3},
+ pages = {83--118},
+ author = {Susanne C. Brenner and Li-Yeng Sung},
+  title = {{$C^0$} Interior Penalty Methods for Fourth Order Elliptic Boundary Value Problems on Polygonal Domains},
+ journal = {Journal of Scientific Computing}
+}
+
+
+@incollection{Brenner2011,
+ doi = {10.1007/978-3-642-23914-4_2},
+ url = {https://doi.org/10.1007/978-3-642-23914-4_2},
+ year = {2011},
+ publisher = {Springer Berlin Heidelberg},
+ pages = {79--147},
+ author = {Susanne C. Brenner},
+  title = {{$C^0$} Interior Penalty Methods},
+  series = {Lecture Notes in Computational Science and Engineering},
+  volume = {85},
+  booktitle = {Frontiers in Numerical Analysis -- Durham 2010}
+}
+
+@article{Engel2002,
+ doi = {10.1016/s0045-7825(02)00286-4},
+ url = {https://doi.org/10.1016/s0045-7825(02)00286-4},
+ year = {2002},
+ month = jul,
+ publisher = {Elsevier {BV}},
+ volume = {191},
+ number = {34},
+ pages = {3669--3750},
+ author = {G. Engel and K. Garikipati and T.J.R. Hughes and M.G. Larson and L. Mazzei and R.L. Taylor},
+ title = {Continuous/discontinuous finite element approximations of fourth-order elliptic problems in structural and continuum mechanics with applications to thin beams and plates, and strain gradient elasticity},
+ journal = {Computer Methods in Applied Mechanics and Engineering}
+}
+
+@article{Brenner2009,
+ doi = {10.1093/imanum/drn057},
+ url = {https://doi.org/10.1093/imanum/drn057},
+ year = {2009},
+ month = mar,
+ publisher = {Oxford University Press ({OUP})},
+ volume = {30},
+ number = {3},
+ pages = {777--798},
+  author = {S. C. Brenner and T. Gudi and L.-Y. Sung},
+  title = {An a posteriori error estimator for a quadratic {$C^0$}-interior penalty method for the biharmonic problem},
+ journal = {{IMA} Journal of Numerical Analysis}
+}
+
+@article{Wells2007,
+ doi = {10.1016/j.cma.2007.03.008},
+ url = {https://doi.org/10.1016/j.cma.2007.03.008},
+ year = {2007},
+ month = jul,
+ publisher = {Elsevier {BV}},
+ volume = {196},
+ number = {35-36},
+ pages = {3370--3380},
+ author = {Garth N. Wells and Nguyen Tien Dung},
+  title = {A {$C^0$} discontinuous {G}alerkin formulation for {K}irchhoff plates},
+ journal = {Computer Methods in Applied Mechanics and Engineering}
+}
+
+
% ------------------------------------
% References used elsewhere
where $\mathbb{I} \in \mathbb{R}^{d \times d}$ is the identity matrix and
$\otimes$ denotes the tensor product. Here, we have introduced the pressure
-$p$ that, in general, is defined by an closed-form equation of state.
-For the tutorial we limit the discussion to the class of polytropic ideal gases
+$p$ that, in general, is defined by a closed-form equation of state.
+For this tutorial we limit the discussion to the class of polytropic ideal gases
for which the pressure is given by
@f{align*}
-p = p(\textbf{u}) := (\gamma -1) \Big(E - \frac{\|\textbf{m}\|^2}{2\,\rho}
+p = p(\textbf{u}) := (\gamma -1) \Big(E - \frac{\|\textbf{m}\|^2}{2\,\rho}
\Big),
@f}
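+As a concrete preview of how this formula turns into code (the actual
+implementation appears in <code>ProblemDescription<dim>::pressure()</code>
+further down in the program), a minimal stand-alone sketch could read:
+@code
+template <int dim>
+double pressure(const double          rho,
+                const Tensor<1, dim> &m,
+                const double          E,
+                const double          gamma)
+{
+  return (gamma - 1.) * (E - 0.5 * m.norm_square() / rho);
+}
+@endcode
+This sketch is for illustration only; it assumes nothing beyond the formula
+above and deal.II's Tensor class.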
<h4>Variational versus collocation-type discretizations</h4>
-Following Step-9, Step-12, and Step-33, at this point it might look tempting
+Following Step-9, Step-12, and Step-33, at this point it might look tempting
to base a discretization of Euler's equations on a (semi-discrete) variational
formulation:
Here, $\mathbb{V}_h$ is an appropriate finite element space, and
$s_h(\cdot,\cdot)_{L^2(\Omega)}$ is some linear stabilization method
-(possibly complemented with some ad-hoc shock-capturing technique, see for
+(possibly complemented with some ad-hoc shock-capturing technique, see for
instance @cite GuermondErn2004 Chapter 5 and references therein). Most
time-dependent discretization approaches described in the deal.II tutorials
are based on such a (semi-discrete) variational approach. Fundamentally,
dependent) energy-norm. Variational discretizations of hyperbolic
conservation laws have been very popular since the mid-1980s, in
particular combined with SUPG-type stabilization and/or upwinding
-techniques (see the early work of @cite Brooks1982 and @cite Johnson1986). They
-have proven to be some of the best approaches for simulations in the subsonic
+techniques (see the early work of @cite Brooks1982 and @cite Johnson1986). They
+have proven to be some of the best approaches for simulations in the subsonic
shockless regime and similarly benign regimes.
-However, in the transonic and supersonic regime, and shock-hydrodynamics
-applications the use of variational schemes might be questionable. In fact, at
-the time of this writing, most shock-hydrodynamics codes are still firmly
-grounded on finite volumes methods. The main reason for failure of variational
-schemes in such extreme regimes is the lack of pointwise stability. This stems
-from the fact that <i>a priori</i> bounds on integrated quantities (e.g.
-integrals of moments) have in general no implications on pointwise properties
-of the solution. While some of these problems might be alleviated by the
-(perpetual) chase of the right shock capturing scheme, finite difference-like
+However, in the transonic and supersonic regimes, and in shock-hydrodynamics
+applications, the use of variational schemes might be questionable. In fact, at
+the time of this writing, most shock-hydrodynamics codes are still firmly
+grounded in finite volume methods. The main reason variational schemes fail in
+such extreme regimes is the lack of pointwise stability. This stems from the
+fact that <i>a priori</i> bounds on integrated quantities (e.g. integrals of
+moments) in general have no implications for pointwise properties of the
+solution. While some of these problems might be alleviated by the (perpetual)
+chase for the right shock-capturing scheme, finite difference-like
and finite volume schemes still have an edge in many regards.
In this tutorial step we therefore depart from variational schemes. We will
\mathbb{R}^{d+2}$ and $\phi_i$ is a scalar-valued shape function.
<b>Note.</b>
-For simplicity we will consider the usual Lagrange finite elements. In such
-context $\{\mathbf{x}_i\}_{i \in \mathcal{V}}$ be the set of all "support
-points" (see @ref GlossSupport "this glossary entry") where $\mathbf{x}_i \in
-\mathbb{R}^d$. Then each integer index $i \in \mathcal{V}$
-uniquely identifies a support point $\mathbf{x}_i$ and/or scalar-valued shape
+For simplicity we will consider the usual Lagrange finite elements. In this
+context, let $\{\mathbf{x}_i\}_{i \in \mathcal{V}}$ denote the set of all
+"support points" (see @ref GlossSupport "this glossary entry"), where
+$\mathbf{x}_i \in \mathbb{R}^d$. Then each integer index $i \in \mathcal{V}$
+uniquely identifies a support point $\mathbf{x}_i$, as well as a scalar-valued shape
function $\phi_i$.
With this notation we can define the scheme as
\mathrm{d}\mathbf{x}$ (note that $\mathbf{c}_{ij}\in \mathbb{R}^d$)
- $\mathcal{I}(i) := \{j \in \mathcal{V} \ | \ \mathbf{c}_{ij} \not \equiv
\boldsymbol{0}\} \cup \{i\}$. We will refer to $\mathcal{I}(i)$ as the
- "stencil" (or adjacency list) at the support point $i$.
+ "stencil" (or adjacency list) at the support point $i$.
- $\mathbb{f}(\mathbf{U}_j^{n})$ is just the flux $\mathbb{f}$ of the
hyperbolic system evaluated at the state $\mathbf{U}_j^{n}$ stored at the
support point $j$.
(\mathbf{U}_i^{n},\mathbf{U}_j^{n}, \textbf{n}_{ij}),
\lambda_{\text{max}} (\mathbf{U}_j^{n}, \mathbf{U}_i^{n},
\textbf{n}_{ji}) \} \|\mathbf{c}_{ij}\|_{\ell^2} $
-
+
Before we start with the description of the implementation of this scheme, it
-is worth saying a thing or two about the "assembly" of this system. Consider
+is worth saying a thing or two about the "assembly" of this system. Consider
for instance a hypothetical pseudo-code, illustrating
a possible strategy to compute the solution $\textbf{U}^{n+1}$:
@f{align*}
&\textbf{For } i \in \mathcal{V} \\
-&\ \ \ \ \{\mathbf{c}_{ij}\}_{j \in \mathcal{I}(i)} :=
+&\ \ \ \ \{\mathbf{c}_{ij}\}_{j \in \mathcal{I}(i)} :=
\texttt{gather\_cij\_vectors}(\textbf{c}, \mathcal{I}(i)) \\
&\ \ \ \ \{\textbf{U}_j^n\}_{j \in \mathcal{I}(i)} :=
\texttt{gather\_state\_vectors}(\textbf{U}^n, \mathcal{I}(i)) \\
@f}
We note here that:
- This "assembly" does not require any form of quadrature or cell-loops.
-- Here $\textbf{c}$ and $\textbf{U}^n$ are a global matrix and a global vector
-containing all the vectors $\mathbf{c}_{ij}$ and all the states
+- Here $\textbf{c}$ and $\textbf{U}^n$ are a global matrix and a global vector
+containing all the vectors $\mathbf{c}_{ij}$ and all the states
$\mathbf{U}_j^n$, respectively.
- $\texttt{gather\_cij\_vectors}$ and $\texttt{gather\_state\_vectors}$ are
hypothetical implementations that collect (from global matrices and vectors)
only the quantities required to compute the update at the node $i$ (see the
sketch after this list).
- Note that if we assume a Cartesian mesh in two space
-dimensions, first-order polynomial space $\mathbb{Q}^1$, and that
-$\mathbf{x}_i$ is an interior node (i.e. $\mathbf{x}_i$ is not on the boundary
-of the domain ) then: $\{\textbf{U}_j^n\}_{j \in \mathcal{I}(i)}$ should contain
-nine state-vectors (i.e. all the states in the patch/macro element associated to
-the shape function $\phi_i$). This is one of the major differences with the
-usual cell-based loop where the gather functionality (encoded in
+dimensions, the first-order polynomial space $\mathbb{Q}^1$, and that
+$\mathbf{x}_i$ is an interior node (i.e. $\mathbf{x}_i$ is not on the boundary
+of the domain), then $\{\textbf{U}_j^n\}_{j \in \mathcal{I}(i)}$ contains
+nine state-vectors (i.e. all the states in the patch/macro element associated
+with the shape function $\phi_i$). This is one of the major differences from
+the usual cell-based loop, where the gather functionality (encoded in
FEValuesBase<dim, spacedim>::get_function_values()) only collects values for the
local cell (just a subset of the patch).
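+To make the gather step of the pseudo-code concrete, here is a minimal
+sketch. The names <code>state_type</code>, <code>GlobalVector</code>, and
+<code>get_state()</code> are hypothetical placeholders; the actual
+implementation described below works directly on the sparsity graph instead:
+@code
+/* Hypothetical sketch: collect the states U_j for all indices j in the
+   stencil I(i); state_type stands for the (dim + 2)-dimensional state. */
+template <typename GlobalVector, typename state_type>
+std::vector<state_type>
+gather_state_vectors(const GlobalVector &             U,
+                     const std::vector<unsigned int> &stencil)
+{
+  std::vector<state_type> result;
+  result.reserve(stencil.size());
+  for (const unsigned int j : stencil)
+    result.push_back(get_state<state_type>(U, j)); /* hypothetical helper */
+  return result;
+}
+@endcode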
more historical references).
This pseudo-code was introduced only to prepare the reader for
-what is going to be presented in the in the next section. The
+what is going to be presented in the next section. The
actual implementation described in the next section is somewhat different from
what is described in the pseudo-code but shares the same core mentality: we do
not loop on cells but rather we loop on the edges of the sparsity graph (hence
*/
// @sect3{Include files}
-// The set of include files is quite standard. The most intriguing part at this
-// point in time is that: either though this code is a "thread and mpi parallel"
+// The set of include files is quite standard. The most intriguing part
+// is that, even though this code is "thread and MPI parallel",
// we are using neither Trilinos nor PETSc vectors. We are actually using deal.II's
// distributed vectors <code>la_parallel_vector.h</code> and the regular deal.II
// sparse matrices <code>sparse_matrix.h</code>
// pointed by the iterator <code>it</code> of <code>matrix</code>. Here is
// where we might want to keep an eye on complexity: we want this operation
// to have constant complexity (which is the case for this implementation).
- // Note also that the return argument (<code>Matrix::value_type</code>) is
+  // Note also that the return type (<code>Matrix::value_type</code>) is
// going to be (in general) a double.
// - <code>set_entry</code>: it sets <code>value</code> at the entry
// pointed by the iterator <code>it</code> of <code>matrix</code>.
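+  //
+  // As a usage sketch combining these two helpers with deal.II's row
+  // iterators, one could, for instance, scale all entries of a given row
+  // <code>row</code> of <code>matrix</code> as follows:
+  // @code
+  //   for (auto it = matrix.begin(row); it != matrix.end(row); ++it)
+  //     set_entry(matrix, it, 2. * get_entry(matrix, it));
+  // @endcode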
// <code>OfflineData<dim>::assemble()</code> which (in short)
// computes the lumped mass entries $m_i$, the vectors $\mathbf{c}_{ij}$,
// the vector $\mathbf{n}_{ij} = \frac{\mathbf{c}_{ij}}{|\mathbf{c}_{ij}|}$,
- // and the boundary normals. The information about boundary normals is
- // collected into the map <code>BoundaryNormalMap</code>: which maps the
- // global index of the DOF/node into the tuple
- // $\{\text{normal}, \text{boundary id},\text{position} \}$.
+ // and the boundary normals $\boldsymbol{\nu}_i$.
//
// In order to exploit thread parallelization we use the WorkStream approach
// detailed in the @ref threads "Parallel computing with multiple processors
// - The worker: in this case it is <code>local_assemble_system</code> that
// actually computes the local (i.e. current cell) contributions.
// - A copy data: a struct that contains all the local assembly
- // contributions, in this case called <code>CopyData<dim>()</code>.
+ // contributions, in this case <code>CopyData<dim>()</code>.
// - A copy data routine: in this case it is
// <code>copy_local_to_global</code>, in charge of actually copying these
// local contributions into the global objects (matrices and/or vectors)
//
// Most of the following lines are spent in the definition of the worker
- // <code>local_assemble_system</code> and the copy routine
+ // <code>local_assemble_system</code> and the copy data routine
// <code>copy_local_to_global</code>. There is not much to say about the
// WorkStream framework since the vast majority of ideas are reasonably
// well-documented in Step-9, Step-13 and Step-32 among others.
+ //
+ // Finally the boundary normals are defined as
+ // $\widehat{\boldsymbol{\nu}}_i =
+ // \frac{\boldsymbol{\nu}_i}{|\boldsymbol{\nu}_i|}$ where
+ // $\boldsymbol{\nu}_i = \sum_{F \subset \text{supp}(\phi_i)}
+ // \sum_{\mathbf{x}_{q,F}} \nu(\mathbf{x}_{q,F})
+  // \phi_i(\mathbf{x}_{q,F})$. Here $F \subset \partial \Omega$ denotes
+ // faces of elements at the boundary of the domain, and $\mathbf{x}_{q,F}$
+ // are quadrature points on such face.
+  // Other more sophisticated definitions for $\boldsymbol{\nu}_i$ are
+ // possible but none of them have much influence in theory or practice.
+ // We remind the reader that <code>CopyData</code> includes the class member
+ // <code>local_boundary_normal_map</code> in order to store these local
+ // contributions for the boundary map.
template <int dim>
void OfflineData<dim>::assemble()
return partitioner->global_to_local(index);
});
+        /* We compute the local contributions for the lumped mass
+           matrix entries m_i and the vectors c_ij */
for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
{
const auto JxW = fe_values.JxW(q_point);
} /* for j */
} /* for q */
+          /* Now we have to compute the boundary normals. Note that the
+             following loop does not do much unless the faces of the
+             cell are actually on the boundary of the domain */
for (unsigned int f = 0; f < GeometryInfo<dim>::faces_per_cell; ++f)
{
const auto face = cell->face(f);
if (!discretization->finite_element.has_support_on_face(j, f))
continue;
+                /* Note that "normal" will only represent the contributions
+                   from one of the faces in the support of the shape
+                   function \phi_j. So we cannot normalize this local
+                   contribution right here: we have to take it "as is" and
+                   pass it to the copy data routine. */
Tensor<1, dim> normal;
if (id == Boundary::slip)
{
std::get<1>(local_boundary_normal_map[index]);
local_boundary_normal_map[index] =
std::make_tuple(normal, std::max(old_id, id), position);
- } /* j */
- } /* f */
+ } /* done with the loop on shape functions */
+ } /* done with the loop on faces */
}; /* done with the definition of the worker */
/* This is the copy data routine for WorkStream */
// contains just a copy of the matrix <code>cij_matrix</code>.
// That's not what we really
// want: we have to normalize its entries. In addition, we have not even
- // touched the entries of the matrix <code>norm_matrix</code> yet. We would
- // like to exploit thread paralellization in order to carry out such
- // operations, but WorkStream executes parallel cell-loops, so it might not
- // the right tool. We want to execute node-loops: we
- // want to visit every node $i$ in the mesh/sparsity graph, and for every
- // such node we want to visit to every $j$ such that
- // $\mathbf{c}_{ij} \not \equiv 0$. From an algebraic point of view, this is
- // equivalent to: visiting every row in the matrix (equivalently sparsity
+ // touched the entries of the matrix <code>norm_matrix</code> yet, and the
+ // vectors stored in the map
+ // <code>OfflineData<dim>::BoundaryNormalMap</code> are not normalized.
+ //
+  // In principle, this is just offline data; it doesn't make much sense
+  // to over-optimize its computation, since its cost will get amortized
+  // over the many time steps that we are going to run. However,
+  // computing/storing the entries of the matrix
+  // <code>norm_matrix</code> and the normalization of <code>nij_matrix</code>
+  // are a perfect opportunity to illustrate thread-parallel node-loops:
+  // - We want to visit every node $i$ in the mesh/sparsity graph,
+  // - and for every such node we want to visit every $j$ such that
+  //   $\mathbf{c}_{ij} \not \equiv 0$.
+  //
+  // From an algebraic point of view, this is equivalent to visiting
+  // every row in the matrix (equivalently sparsity
// pattern) and for each one of these rows execute a loop on the columns.
// Node-loops are a core theme of this tutorial step (see the pseudo-code
- // in the introduction).
+ // in the introduction) that will repeat over and over again. That's why
+ // this is the right time to introduce them.
//
// We have the thread parallelization capability
// parallel::apply_to_subranges that is somewhat more general than the
- // WorkStream framework, an in particular it can be used for our node-loops.
+ // WorkStream framework. In particular, it can be used for our
+ // node-loops.
// This functionality requires four input arguments:
// - A begin iterator: <code>indices.begin()</code>
// - An end iterator: <code>indices.end()</code>
// of the previous two bullets. The function <code>f(i1,i2)</code> is
// called <code>on_subranges</code> in this example. It applies an
// operation for every "abstract element" in the subrange. In this case
- // each "element" is a row rows of the sparsity pattern.
+ // each "element" is a row of the sparsity pattern.
// - Grainsize: the minimum number of "elements" (in this case rows)
//   processed by each thread. We decided on a minimum of 4096 rows.
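+  //
+  // Putting these four ingredients together, the resulting call (which
+  // appears again in the code below) looks like:
+  // @code
+  //   parallel::apply_to_subranges(indices.begin(),
+  //                                indices.end(),
+  //                                on_subranges,
+  //                                4096);
+  // @endcode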
// attempting to write the same entry (we do not need a scheduler). This
// advantage appears to be a particular characteristic of edge-based finite
// element schemes when they are properly implemented.
-
- // boost::irange
+ //
+  // Finally, we normalize the vectors stored in
+  // <code>OfflineData<dim>::BoundaryNormalMap</code>. This operation has
+  // not been thread parallelized as it would not illustrate any important
+  // concept.
{
TimerOutput::Scope t(computing_timer,
"offline_data - compute |c_ij|, and n_ij");
+      /* Here [i1,i2) denotes a subrange of rows */
const auto on_subranges = [&](auto i1, const auto i2) {
for (; i1 < i2; ++i1)
{
on_subranges,
4096);
+      /* Finally, we normalize the normals at the boundary. As explained
+         above, this loop is not thread parallelized. */
for (auto &it : boundary_normal_map)
{
auto &[normal, id, _] = it.second;
}
}
- // Placeholder here.
+  // In order to implement reflecting boundary conditions
+  // $\mathbf{m} \cdot \boldsymbol{\nu}_i = 0$ (or equivalently $\mathbf{v}
+  // \cdot \boldsymbol{\nu}_i = 0$) the vectors $\mathbf{c}_{ij}$ at the
+  // boundary have to be modified as:
+  //
+  // $\mathbf{c}_{ij} += \int_{\partial \Omega}
+  // (\boldsymbol{\nu}_j - \boldsymbol{\nu}(s)) \phi_i(s) \phi_j(s) \,
+  // \mathrm{d}s$
+  //
+  // Otherwise we will not be able to claim conservation. The ideas repeat
+  // themselves: we use WorkStream to compute this correction; most
+  // of the following code is about the definition of the worker
+  // <code>local_assemble_system</code>.
{
TimerOutput::Scope t(computing_timer,
{
const auto value = fe_face_values.shape_value(i, q);
+              /* This is the correction to c_ij at the boundary */
for (unsigned int d = 0; d < dim; ++d)
cell_cij_matrix[d](i, j) +=
(normal_j[d] - normal_q[d]) * (value * value_JxW);
} /* j */
} /* q */
} /* f */
- };
+ }; /* Done with the definition of the worker */
const auto copy_local_to_global = [&](const auto ©) {
const auto &is_artificial = copy.is_artificial;
}
} /* assemble() */
- // Placeholder here.
+ // At this point we are very much done with anything related to offline data.
+ //
+ // Now we define the implementation of <code>momentum</code>,
+ // <code>internal_energy</code>, <code>pressure</code>,
+ // <code>speed_of_sound</code>, and <code>f</code> (the flux of the system).
+  // The functionality of each of these functions is self-explanatory from
+  // its name.
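+  //
+  // For reference (all of this is visible in the function bodies below):
+  // given the state $\mathbf{u} = [\rho,\mathbf{m},E]^\top$, the internal
+  // energy is $\varepsilon = E - \tfrac{\|\mathbf{m}\|^2}{2\rho}$, the
+  // pressure is $p = (\gamma - 1)\,\varepsilon$, and the speed of sound is
+  // $a = \sqrt{\gamma \, p / \rho}$.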
template <int dim>
DEAL_II_ALWAYS_INLINE inline dealii::Tensor<1, dim>
return result;
}
- // Placeholder here.
-
template <int dim>
DEAL_II_ALWAYS_INLINE inline double
ProblemDescription<dim>::internal_energy(const rank1_type U)
return E - 0.5 * m.norm_square() / rho;
}
- // Placeholder here.
-
template <int dim>
DEAL_II_ALWAYS_INLINE inline double
ProblemDescription<dim>::pressure(const rank1_type U)
return (gamma - 1.) * internal_energy(U);
}
- // Placeholder here.
-
-
template <int dim>
DEAL_II_ALWAYS_INLINE inline double
ProblemDescription<dim>::speed_of_sound(const rank1_type U)
return std::sqrt(gamma * p / rho);
}
- // Placeholder here.
-
template <int dim>
DEAL_II_ALWAYS_INLINE inline typename ProblemDescription<dim>::rank2_type
ProblemDescription<dim>::f(const rank1_type U)
return result;
}
- // Placeholder here.
+ // The following function, <code>riemann_data_from_state</code>, takes the
+  // full state $\mathbf{u} = [\rho,\mathbf{m},E]^\top$ and computes a
+  // "projected state" defined as
+ //
+ // $\widetilde{\mathbf{u}} = [\rho,
+ // \mathbf{m} - (\mathbf{m}\cdot \mathbf{n}_{ij})\mathbf{n}_{ij},
+ // E - \tfrac{(\mathbf{m}\cdot \mathbf{n}_{ij})^2}{2\rho} ]^\top$
+ //
+  // Projected states appear naturally when attempting to compute the
+  // maximum wavespeed of Riemann problems.
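+  //
+  // Note that, by construction, the projected momentum satisfies
+  // $\widetilde{\mathbf{m}} \cdot \mathbf{n}_{ij} = 0$, i.e. the projection
+  // removes the momentum component in the direction $\mathbf{n}_{ij}$.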
namespace
{
return std::max(std::abs(u_i), std::abs(u_j)) + 5. * std::max(a_i, a_j);
}
- } // namespace
+ } /* End of namespace dedicated to the computation of the maximum wavespeed */
// Placeholder here.