From: Reza Rastak
Date: Wed, 24 Apr 2019 04:53:22 +0000 (-0700)
Subject: Mathematical explanation for the functions within DerivativeForm is improved
X-Git-Tag: v9.1.0-rc1~173^2~3
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=5c0521332333bd9c3505d55106b1f397373b70e2;p=dealii.git

Mathematical explanation for the functions within DerivativeForm is improved
---

diff --git a/include/deal.II/base/derivative_form.h b/include/deal.II/base/derivative_form.h
index c2ad3441dc..cd0f43a16c 100644
--- a/include/deal.II/base/derivative_form.h
+++ b/include/deal.II/base/derivative_form.h
@@ -21,24 +21,26 @@ DEAL_II_NAMESPACE_OPEN
 
 /**
- * This class represents the (tangential) derivatives of a function $ f:
+ * This class represents the (tangential) derivatives of a function $ \mathbf F:
  * {\mathbb R}^{\text{dim}} \rightarrow {\mathbb R}^{\text{spacedim}}$. Such
  * functions are always used to map the reference dim-dimensional cell into
  * spacedim-dimensional space. For such objects, the first derivative of the
  * function is a linear map from ${\mathbb R}^{\text{dim}}$ to ${\mathbb
  * R}^{\text{spacedim}}$, i.e., it can be represented as a matrix in ${\mathbb
  * R}^{\text{spacedim}\times \text{dim}}$. This makes sense since one would
- * represent the first derivative, $\nabla f(\mathbf x)$ with $\mathbf x\in
+ * represent the first derivative, $\nabla \mathbf F(\mathbf x)$ with $\mathbf
+ * x\in
  * {\mathbb R}^{\text{dim}}$, in such a way that the directional derivative in
  * direction $\mathbf d\in {\mathbb R}^{\text{dim}}$ so that
  * @f{align*}{
- *   \nabla f(\mathbf x) \mathbf d
+ *   \nabla \mathbf F(\mathbf x) \mathbf d
  *   = \lim_{\varepsilon\rightarrow 0}
- *   \frac{f(\mathbf x + \varepsilon \mathbf d) - f(\mathbf x)}{\varepsilon},
+ *   \frac{\mathbf F(\mathbf x + \varepsilon \mathbf d) - \mathbf F(\mathbf
+ * x)}{\varepsilon},
  * @f}
- * i.e., one needs to be able to multiply the matrix $\nabla f(\mathbf x)$ by
- * a vector in ${\mathbb R}^{\text{dim}}$, and the result is a difference of
- * function values, which are in ${\mathbb R}^{\text{spacedim}}$.
+ * i.e., one needs to be able to multiply the matrix $\nabla \mathbf F(\mathbf
+ * x)$ by a vector in ${\mathbb R}^{\text{dim}}$, and the result is a difference
+ * of function values, which are in ${\mathbb R}^{\text{spacedim}}$.
  * Consequently, the matrix must be of size $\text{spacedim}\times\text{dim}$.
  *
  * Similarly, the second derivative is a bilinear map from ${\mathbb
@@ -89,19 +91,19 @@ public:
   operator=(const Tensor<1, dim, Number> &);
 
   /**
-   * Converts a DerivativeForm <order, dim, 1> to Tensor<order+1, dim, Number>.
-   * In particular, if order==1 and the derivative is the Jacobian of F, then
-   * Tensor[i] = grad(F^i).
+   * Converts a DerivativeForm <order, dim, 1> to Tensor<order+1, dim,
+   * Number>. In particular, if order == 1 and the derivative is the Jacobian of
+   * $\mathbf F(\mathbf x)$, then Tensor[i] = $\nabla F_i(\mathbf x)$.
    */
   operator Tensor<order + 1, dim, Number>() const;
 
   /**
-   * Converts a DerivativeForm <1, dim, 1> to Tensor<1,dim,Number>.
+   * Converts a DerivativeForm<1, dim, 1, Number> to Tensor<1, dim, Number>.
    */
   operator Tensor<1, dim, Number>() const;
 
   /**
-   * Return the transpose of a rectangular DerivativeForm, that is to say
+   * Return the transpose of a rectangular DerivativeForm,
    * viewed as a two dimensional matrix.
    */
   DerivativeForm<1, spacedim, dim, Number>
@@ -109,14 +111,15 @@ public:
 
   /**
    * Compute the Frobenius norm of this form, i.e., the expression
-   * $\sqrt{\sum_{ij} |DF_{ij}|^2}$.
+   * $\sqrt{\sum_{ij} |DF_{ij}|^2} =
+   * \sqrt{\sum_{ij} |\frac{\partial F_i}{\partial x_j}|^2}$.
    */
   typename numbers::NumberTraits<Number>::real_type
   norm() const;
 
   /**
    * Compute the volume element associated with the jacobian of the
-   * transformation F. That is to say if $DF$ is square, it computes
+   * transformation $\mathbf F$. That is to say if $DF$ is square, it computes
    * $\det(DF)$, in case DF is not square returns $\sqrt{\det(DF^T * DF)}$.
    */
   Number
@@ -124,11 +127,12 @@ public:
 
   /**
    * Assuming that the current object stores the Jacobian of a mapping
-   * $F$, then the current function computes the covariant form
-   * of the derivative, namely $(\nabla F)G^{-1}$, where $G = (\nabla
-   * F)^{T}*(\nabla F)$. If $\nabla F$ is a square matrix (i.e., $F:
+   * $\mathbf F$, then the current function computes the covariant form
+   * of the derivative, namely $(\nabla \mathbf F) {\mathbf G}^{-1}$, where
+   * $\mathbf G = (\nabla \mathbf F)^{T}*(\nabla \mathbf F)$. If $\nabla \mathbf
+   * F$ is a square matrix (i.e., $\mathbf F:
    * {\mathbb R}^n \mapsto {\mathbb R}^n$), then this function
-   * simplifies to computing $\nabla F^{-T}$.
+   * simplifies to computing $\nabla {\mathbf F}^{-T}$.
    */
   DerivativeForm<1, dim, spacedim, Number>
   covariant_form() const;
@@ -366,50 +370,76 @@ DerivativeForm::memory_consumption()
 
 
 /**
- * One of the uses of DerivativeForm is to apply it as a transformation. This
- * is what this function does. If @p DF is DerivativeForm<1,dim,1> it computes
- * $DF * T$, if @p DF is DerivativeForm<1,dim,rank> it computes $T*DF^{T}$.
+ * One of the uses of DerivativeForm is to apply it as a linear transformation.
+ * This function returns $\nabla \mathbf F(\mathbf x) \Delta \mathbf x$, which
+ * approximates the change in $\mathbf F(\mathbf x)$ when $\mathbf x$ is changed
+ * by the amount $\Delta \mathbf x$
+ * @f[
+ *   \nabla \mathbf F(\mathbf x) \; \Delta \mathbf x
+ *   \approx
+ *   \mathbf F(\mathbf x + \Delta \mathbf x) - \mathbf F(\mathbf x).
+ * @f]
+ * The transformation corresponds to
+ * @f[
+ *   [\text{result}]_{i_1,\dots,i_k} = \sum_{j}
+ *     \left[\nabla \mathbf F(\mathbf x)\right]_{i_1,\dots,i_k, j}
+ *     \Delta x_j
+ * @f]
+ * in index notation and corresponds to
+ * $[\Delta \mathbf x] [\nabla \mathbf F(\mathbf x)]^T$ in matrix notation.
  *
  * @relatesalso DerivativeForm
- * @author Sebastian Pauletti, 2011
+ * @author Sebastian Pauletti, 2011, Reza Rastak, 2019
  */
 template <int dim, int spacedim, typename Number>
 inline Tensor<1, spacedim, Number>
-apply_transformation(const DerivativeForm<1, dim, spacedim, Number> &DF,
-                     const Tensor<1, dim, Number> & T)
+apply_transformation(const DerivativeForm<1, dim, spacedim, Number> &grad_F,
+                     const Tensor<1, dim, Number> & d_x)
 {
   Tensor<1, spacedim, Number> dest;
   for (unsigned int i = 0; i < spacedim; ++i)
-    dest[i] = DF[i] * T;
+    dest[i] = grad_F[i] * d_x;
   return dest;
 }
 
 
 /**
- * Similar to previous apply_transformation. It computes $T*DF^{T}$.
+ * Similar to the previous apply_transformation().
+ * Each row of the result corresponds to one of the rows of @p D_X transformed
+ * by @p grad_F, equivalent to $\text{D\_X} * \text{grad\_F}^T$ in matrix notation.
  *
  * @relatesalso DerivativeForm
- * @author Sebastian Pauletti, 2011
+ * @author Sebastian Pauletti, 2011, Reza Rastak, 2019
  */
 // rank=2
 template <int dim, int spacedim, typename Number>
 inline DerivativeForm<1, spacedim, dim>
-apply_transformation(const DerivativeForm<1, dim, spacedim, Number> &DF,
-                     const Tensor<2, dim, Number> & T)
+apply_transformation(const DerivativeForm<1, dim, spacedim, Number> &grad_F,
+                     const Tensor<2, dim, Number> & D_X)
 {
   DerivativeForm<1, spacedim, dim> dest;
   for (unsigned int i = 0; i < dim; ++i)
-    dest[i] = apply_transformation(DF, T[i]);
+    dest[i] = apply_transformation(grad_F, D_X[i]);
   return dest;
 }
 
 
 /**
- * Similar to previous apply_transformation. It computes $DF2*DF1^{T}$
+ * Similar to the previous apply_transformation(). In matrix notation, it
+ * computes $DF2 * DF1^{T}$. Moreover, the result of this operation $\mathbf A$
+ * can be interpreted as a metric tensor in
+ * ${\mathbb R}^\text{spacedim}$ which corresponds to the Euclidean metric
+ * tensor in
+ * ${\mathbb R}^\text{dim}$. For every pair of vectors
+ * $\mathbf u, \mathbf v \in {\mathbb R}^\text{spacedim}$, we have:
+ * @f[
+ *   \mathbf u \cdot \mathbf A \mathbf v =
+ *   \text{DF2}^{-1}(\mathbf u) \cdot \text{DF1}^{-1}(\mathbf v)
+ * @f]
  *
  * @relatesalso DerivativeForm
- * @author Sebastian Pauletti, 2011
+ * @author Sebastian Pauletti, 2011, Reza Rastak, 2019
  */
 template <int dim, int spacedim, typename Number>
 inline Tensor<2, spacedim, Number>
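
To make the documented interface concrete, the following stand-alone C++ sketch (not part of the patch) shows how a DerivativeForm might be filled and passed to apply_transformation(), and how determinant() and norm() relate to the formulas in the comments above. It assumes a working deal.II installation; the Jacobian entries are made up purely for illustration.

#include <deal.II/base/derivative_form.h>
#include <deal.II/base/tensor.h>

#include <iostream>

using namespace dealii;

int main()
{
  // Jacobian of a map F : R^2 -> R^3 (dim = 2, spacedim = 3); row i of the
  // DerivativeForm holds grad F_i. The entries are arbitrary example values.
  DerivativeForm<1, 2, 3> DF;
  DF[0][0] = 1.0;  DF[0][1] = 0.5;
  DF[1][0] = 0.0;  DF[1][1] = 2.0;
  DF[2][0] = -1.0; DF[2][1] = 0.25;

  // A small step dx in the reference (dim-dimensional) space.
  Tensor<1, 2> dx;
  dx[0] = 0.01;
  dx[1] = -0.02;

  // First-order change of F, i.e. (grad F) * dx, as documented for
  // apply_transformation() above.
  const Tensor<1, 3> dF = apply_transformation(DF, dx);

  // Volume element sqrt(det(DF^T DF)) and Frobenius norm, matching the
  // documentation of determinant() and norm().
  std::cout << "|dF|           = " << dF.norm() << '\n'
            << "volume element = " << DF.determinant() << '\n'
            << "Frobenius norm = " << DF.norm() << std::endl;
}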