From 07a764a928faeda2aa7355a3943871d6aa1e770d Mon Sep 17 00:00:00 2001
From: David Wells
Date: Wed, 15 Apr 2020 14:52:27 -0400
Subject: [PATCH] step-8: use MathJax in some formulas.

Also use <code> where appropriate for typesetting variables.
---
 examples/step-8/step-8.cc | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/examples/step-8/step-8.cc b/examples/step-8/step-8.cc
index cdd873336f..32dabe4e87 100644
--- a/examples/step-8/step-8.cc
+++ b/examples/step-8/step-8.cc
@@ -352,9 +352,9 @@ namespace Step8
           for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
             {
               cell_matrix(i, j) +=
-                // The first term is (lambda d_i u_i, d_j v_j) + (mu d_i
-                // u_j, d_j v_i). Note that
-                // shape_grad(i,q_point) returns the
+                // The first term is $(\lambda \partial_i u_i, \partial_j
+                // v_j) + (\mu \partial_i u_j, \partial_j v_i)$. Note
+                // that shape_grad(i,q_point) returns the
                 // gradient of the only nonzero component of the i-th
                 // shape function at quadrature point q_point. The
                 // component comp(i) of the gradient, which
@@ -371,17 +371,17 @@ namespace Step8
                    fe_values.shape_grad(j, q_point)[component_i] * //
                    mu_values[q_point])                             //
                   +                                                //
-                  // The second term is (mu nabla u_i, nabla v_j). We
-                  // need not access a specific component of the
-                  // gradient, since we only have to compute the scalar
-                  // product of the two gradients, of which an
-                  // overloaded version of the operator* takes care, as
-                  // in previous examples.
+                  // The second term is $(\mu \nabla u_i, \nabla
+                  // v_j)$. We need not access a specific component of
+                  // the gradient, since we only have to compute the
+                  // scalar product of the two gradients, of which an
+                  // overloaded version of <code>operator*</code> takes
+                  // care, as in previous examples.
                   //
-                  // Note that by using the ?: operator, we only do this
-                  // if comp(i) equals comp(j), otherwise a zero is
-                  // added (which will be optimized away by the
-                  // compiler).
+                  // Note that by using the <code>?:</code> operator, we only
+                  // do this if <code>component_i</code> equals
+                  // <code>component_j</code>, otherwise a zero is added
+                  // (which will be optimized away by the compiler).
                   ((component_i == component_j) ?        //
                      (fe_values.shape_grad(i, q_point) * //
                       fe_values.shape_grad(j, q_point) * //
-- 
2.39.5
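
For reference, here is a sketch, in the same MathJax notation the patch introduces, of the full per-entry update that the rewritten comments describe, i.e. what the code shown in the two hunks accumulates. The symbols $\varphi_i$ (the only nonzero scalar component of the $i$-th vector-valued shape function) and $c_i$ (the index of that component, component_i in the code) are notation introduced for this note, not names from the patch:

  cell_matrix(i, j) += $\bigl[\lambda\,\partial_{c_i}\varphi_i\,\partial_{c_j}\varphi_j \;+\; \mu\,\partial_{c_j}\varphi_i\,\partial_{c_i}\varphi_j \;+\; \delta_{c_i c_j}\,\mu\,\nabla\varphi_i\cdot\nabla\varphi_j\bigr]\,$ fe_values.JxW(q_point)

summed over all quadrature points. The Kronecker delta $\delta_{c_i c_j}$ is exactly what the <code>?:</code> expression realizes: the third (gradient-gradient) term is added only when component_i equals component_j, otherwise a zero contributes.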