https://gitweb.dealii.org/ - dealii.git/commitdiff
Major revamp of the finite difference approximation of derivatives. Allow to approxim...
author    Wolfgang Bangerth <bangerth@math.tamu.edu>
Mon, 24 Jul 2000 07:46:03 +0000 (07:46 +0000)
committer Wolfgang Bangerth <bangerth@math.tamu.edu>
Mon, 24 Jul 2000 07:46:03 +0000 (07:46 +0000)
git-svn-id: https://svn.dealii.org/trunk@3193 0785d39b-7218-0410-832d-ea1e28bc413d

deal.II/deal.II/include/numerics/derivative_approximation.h
deal.II/deal.II/include/numerics/gradient_estimator.h
deal.II/deal.II/source/numerics/derivative_approximation.cc
deal.II/deal.II/source/numerics/gradient_estimator.cc

index 4dafb4685f4141fc876c6239b5e04340b5b7798a..bdff13656b27cede5bf201da0e82c514ef573cdc 100644 (file)
 
 
 /**
- * This class computes a cell-wise estimate of the gradient by taking
- * difference quotients between neighboring cells. This is a rather
- * simple but efficient form to get an error indicator, since it can
- * be computed with relatively little numerical effort and yet gives a
- * reasonable approximation.
+ * This class computes a cell-wise approximation of the norm of a
+ * derivative of a finite element field by taking difference quotients
+ * between neighboring cells. This is a rather simple but efficient
+ * form to get an error indicator, since it can be computed with
+ * relatively little numerical effort and yet gives a reasonable
+ * approximation.
  *
  * The way the difference quotients are computed on cell $K$ is the
- * following: let $K'$ be a neighboring cell, and let
- * $y_{K'}=x_{K'}-x_K$ be the distance vector between the centers of
- * the two cells, then
+ * following (here described for the approximation of the gradient of
+ * a finite element field, but see below for higher derivatives): let
+ * $K'$ be a neighboring cell, and let $y_{K'}=x_{K'}-x_K$ be the
+ * distance vector between the centers of the two cells, then
  *   $ \frac{u_h(x_{K'}) - u_h(x_K)}{ \|y_{K'}\| }$
  * is an approximation of the directional derivative
  *   $ \nabla u(x_K) \cdot \frac{y_{K'}}{ \|y_{K'}\| }.$
 * Multiplying this approximation by $\frac{y_{K'}}{\|y_{K'}\|}$ and
 * summing over all neighbors $K'$, we obtain
 *   $ \sum_{K'} \left( \frac{y_{K'}}{ \|y_{K'}\|}
 *                      \frac{y_{K'}^T}{ \|y_{K'}\| } \right) \nabla u(x_K)
 *     \approx
 *     \sum_{K'} \left( \frac{y_{K'}}{ \|y_{K'}\|}
 *                      \frac{u_h(x_{K'}) - u_h(x_K)}{ \|y_{K'}\| }  \right).$
  *
  * Thus, if the matrix
- *   $ Y =  \sum_{K'} \left( \frac{y_{K'}}{ \|y_{K'}\|} 
+ *   $ Y =  \sum_{K'} \left( \frac{y_{K'}}{\|y_{K'}\|} 
  *                           \frac{y_{K'}^T}{ \|y_{K'}\| } \right)$ is
  * regular (which is the case when the vectors $y_{K'}$ to all neighbors span
  * the whole space), we can obtain an approximation to the true gradient by
  *   $ \nabla u(x_K)
  *     \approx
- *     Y^{-1} \sum_{K'} \left( \frac{y_{K'}}{ \|y_{K'}\|} 
- *                             \frac{u_h(x_{K'}) - u_h(x_K)}{ \|y_{K'}\| }  \right).$
+ *     Y^{-1} \sum_{K'} \left( \frac{y_{K'}}{\|y_{K'}\|} 
+ *                             \frac{u_h(x_{K'}) - u_h(x_K)}{ \|y_{K'}\| }
+ *                      \right).$
  * This is a quantity that is easily computed. The value returned for
- * each cell when calling the main function of this class is the $l_2$
- * norm of this approximation to the gradient. To make this a useful
- * quantity, you may want to scale each element by the correct power
- * of the respective cell size.
+ * each cell when calling the @p{approximate_gradient} function of
+ * this class is the $l_2$ norm of this approximation to the
+ * gradient. To make this a useful quantity, you may want to scale
+ * each element by the correct power of the respective cell size.
  *
  * The computation of this quantity must fail if a cell has only
  * neighbors for which the direction vectors do not span the whole
  * gradients themselves.
  *
  *
- * @sect2{Refinement indicators based on the gradients}
+ * @sect2{Approximation of higher derivatives}
  *
- * If you would like to base a refinement criterion upon this
- * approximation of the gradient, you will have to scale the results
+ * Following the same reasoning as above, approximations to higher
+ * derivatives can be computed as well. For example, the
+ * tensor of second derivatives is approximated by the formula
+ *   $ \nabla^2 u(x_K)
+ *     \approx
+ *     Y^{-1}
+ *     \sum_{K'}
+ *        \left(
+ *           \frac{y_{K'}}{\|y_{K'}\|} \otimes
+ *           \frac{\nabla u_h(x_{K'}) - \nabla u_h(x_K)}{ \|y_{K'}\| }
+ *        \right),
+ *   $ 
+ * where $\otimes$ denotes the outer product of two vectors. Note that
+ * unlike the true tensor of second derivatives, its approximation is
+ * not necessarily symmetric. This is due to the fact that in the
+ * derivation, it is not clear whether we shall consider as projected
+ * second derivative the term $\nabla^2 u y_{KK'}$ or $y_{KK'}^T
+ * \nabla^2 u$. Depending on which choice we take, we obtain one
+ * approximation of the tensor of second derivatives or its
+ * transpose. To avoid this ambiguity, we take as the result the
+ * symmetrized form, which is the mean value of the approximation and
+ * its transpose.
+ *
+ * The returned value on each cell is the spectral norm of the
+ * approximated tensor of second derivatives, i.e. the largest
+ * eigenvalue by absolute value. This equals the largest curvature of
+ * the finite element field at each cell, and the spectral norm is the
+ * matrix norm associated to the $l_2$ vector norm.
+ *
+ * Derivatives of even higher order than the second can be obtained
+ * along the same lines as described above.
+ *
+ *
+ * @sect2{Refinement indicators based on the derivatives}
+ *
+ * If you would like to base a refinement criterion upon these
+ * approximations of the derivatives, you will have to scale the results
  * of this class by an appropriate power of the mesh width. For
  * example, since
 * $\|u-u_h\|^2_{L_2} \le C h^2 \|\nabla u\|^2_{L_2}$, it might be the
 * right thing to scale the indicators as $\eta_K = h \|\nabla u\|_K$,
 * i.e. $\eta_K = h^{1+d/2} \|\nabla u\|_{\infty;K}$, i.e. the right
  * power is $1+d/2$.
  *
+ * Likewise, for the second derivative, one should choose a power of
+ * the mesh size $h$ one higher than for the gradient.
+ *
+ *
+ * @sect2{Implementation}
+ *
+ * The formulae for the computation of approximations to the gradient
+ * and to the tensor of second derivatives shown above are very much
+ * alike. The basic difference is that in one case the finite
+ * difference quotient is a scalar, while in the other case it is a
+ * vector. For higher derivatives, this would be a tensor of even
+ * higher rank. We then have to form the outer product of this
+ * difference quotient with the distance vector $y_{KK'}$, symmetrize
+ * it, contract it with the matrix $Y^{-1}$ and compute its norm. To
+ * make the implementation simpler and to allow for code reuse, all
+ * these operations that are dependent on the actual order of the
+ * derivatives to be approximated, as well as the computation of the
+ * quantities entering the difference quotient, have been separated
+ * into auxiliary nested classes (named @p{Gradient} and
+ * @p{SecondDerivative}), and the main algorithm is simply passed one
+ * of these data types and asks it to perform the order-dependent
+ * operations. The main framework that is independent of this, such as
+ * finding all active neighbors or setting up the matrix $Y$, is done
+ * in the main function @p{approximate}.
+ *
+ * Due to this mode of operation, the class may easily be extended to
+ * higher order derivatives than are presently implemented. Basically,
+ * only an additional class along the lines of the derivative
+ * descriptor classes @p{Gradient} and @p{SecondDerivative} has to be
+ * implemented, with the respective typedefs and functions replaced by
+ * the appropriate analogues for the derivative that is to be
+ * approximated.
+ *
  * @author Wolfgang Bangerth, 2000
  */
-class GradientEstimator 
+class DerivativeApproximation
 {
   public:
                                     /**
-                                     * This is the main function that
-                                     * does what is announced in the
-                                     * general documentation of this
-                                     * class. Pass it the DoF handler
-                                     * object that describes the
-                                     * finite element field, a nodal
-                                     * value vector, and receive the
-                                     * cell-wise norm of the
+                                     * This function is used to
+                                     * obtain an approximation of the
+                                     * gradient. Pass it the DoF
+                                     * handler object that describes
+                                     * the finite element field, a
+                                     * nodal value vector, and
+                                     * receive the cell-wise
+                                     * Euclidean norm of the
                                      * approximated gradient.
                                      */
     template <int dim>
-    static void estimate (const DoFHandler<dim> &dof,
+    static void
+    approximate_gradient (const DoFHandler<dim> &dof,
                          const Vector<double>  &solution,
-                         Vector<float>         &error_per_cell);
+                         Vector<float>         &derivative_norm);
 
+                                    /**
+                                     * This function is the analogue
+                                     * to the one above, computing
+                                     * finite difference
+                                     * approximations of the tensor
+                                     * of second derivatives. Pass it
+                                     * the DoF handler object that
+                                     * describes the finite element
+                                     * field, a nodal value vector,
+                                     * and receive the cell-wise
+                                     * spectral norm of the
+                                     * approximated tensor of second
+                                     * derivatives. The spectral norm
+                                     * is the matrix norm associated
+                                     * to the $l_2$ vector norm.
+                                     */
+    template <int dim>
+    static void
+    approximate_second_derivative (const DoFHandler<dim> &dof,
+                                  const Vector<double>  &solution,
+                                  Vector<float>         &derivative_norm);
+    
                                     /**
                                      * Exception
                                      */
@@ -121,6 +215,174 @@ class GradientEstimator
     DeclException0 (ExcInsufficientDirections);
 
   private:
+
+                                    /**
+                                     * The following class is used to
+                                     * describe the data needed to
+                                     * compute the finite difference
+                                     * approximation to the gradient
+                                     * on a cell. See the general
+                                     * documentation of this class
+                                     * for more information on
+                                     * implementational details.
+                                     *
+                                     * @author Wolfgang Bangerth, 2000
+                                     */
+    template <int dim>
+    class Gradient 
+    {
+      public:
+                                        /**
+                                         * Declare which data fields have
+                                         * to be updated for the function
+                                         * @p{get_projected_derivative}
+                                         * to work.
+                                         */
+       static const UpdateFlags update_flags = update_values;
+
+                                        /**
+                                         * Declare the data type which
+                                         * holds the derivative described
+                                         * by this class.
+                                         */
+       typedef Tensor<1,dim> Derivative;
+
+                                        /**
+                                         * Likewise declare the data type
+                                         * that holds the derivative
+                                         * projected onto a certain
+                                         * direction.
+                                         */
+       typedef double        ProjectedDerivative;
+
+                                        /**
+                                         * Given an @p{FEValues} object
+                                         * initialized to a cell, and a
+                                         * solution vector, extract the
+                                         * desired derivative at the
+                                         * first quadrature point (which
+                                         * is the only one, as we only
+                                         * evaluate the finite element
+                                         * field at the center of each
+                                         * cell).
+                                         */
+       static ProjectedDerivative
+       get_projected_derivative (const FEValues<dim>  &fe_values,
+                                 const Vector<double> &solution);
+    
+                                        /**
+                                         * Return the norm of the
+                                         * derivative object. Here, for
+                                         * the gradient, we choose the
+                                         * Euclidean norm of the gradient
+                                         * vector.
+                                         */
+       static double derivative_norm (const Derivative &d);
+
+                                        /**
+                                         * If for the present derivative
+                                         * order, symmetrization of the
+                                         * derivative tensor is
+                                         * necessary, then do so on the
+                                         * argument.
+                                         *
+                                         * For the first derivatives, no
+                                         * such thing is necessary, so
+                                         * this function is a no-op.
+                                         */
+       static void symmetrize (Derivative &derivative_tensor);
+    };
+
+
+
+                                    /**
+                                     * The following class is used to
+                                     * describe the data needed to
+                                     * compute the finite difference
+                                     * approximation to the second
+                                     * derivatives on a cell. See the
+                                     * general documentation of this
+                                     * class for more information on
+                                     * implementational details.
+                                     *
+                                     * @author Wolfgang Bangerth, 2000
+                                     */
+    template <int dim>
+    class SecondDerivative
+    {
+      public:
+                                        /**
+                                         * Declare which data fields have
+                                         * to be updated for the function
+                                         * @p{get_projected_derivative}
+                                         * to work.
+                                         */
+       static const UpdateFlags update_flags = update_gradients;
+
+                                        /**
+                                         * Declare the data type which
+                                         * holds the derivative described
+                                         * by this class.
+                                         */
+       typedef Tensor<2,dim> Derivative;
+
+                                        /**
+                                         * Likewise declare the data type
+                                         * that holds the derivative
+                                         * projected onto a certain
+                                         * direction.
+                                         */
+       typedef Tensor<1,dim> ProjectedDerivative;
+
+                                        /**
+                                         * Given an @p{FEValues} object
+                                         * initialized to a cell, and a
+                                         * solution vector, extract the
+                                         * desired derivative at the
+                                         * first quadrature point (which
+                                         * is the only one, as we only
+                                         * evaluate the finite element
+                                         * field at the center of each
+                                         * cell).
+                                         */
+       static ProjectedDerivative
+       get_projected_derivative (const FEValues<dim>  &fe_values,
+                                 const Vector<double> &solution);
+       
+                                        /**
+                                         * Return the norm of the
+                                         * derivative object. Here, for
+                                         * the (symmetric) tensor of
+                                         * second derivatives, we choose
+                                         * the eigenvalue with largest
+                                         * absolute value, which is
+                                         * the matrix norm associated to
+                                         * the $l_2$ norm of vectors. It
+                                         * is also the largest value of
+                                         * the curvature of the solution.
+                                         */
+       static double derivative_norm (const Derivative &d);
+
+                                        /**
+                                         * If for the present derivative
+                                         * order, symmetrization of the
+                                         * derivative tensor is
+                                         * necessary, then do so on the
+                                         * argument.
+                                         *
+                                         * For the second derivatives,
+                                         * each entry of the tensor is
+                                         * set to the mean of its value
+                                         * and the value of the transpose
+                                         * element.
+                                         *
+                                         * Note that this function
+                                         * actually modifies its
+                                         * argument.
+                                         */
+       static void symmetrize (Derivative &derivative_tensor);
+    };
+    
                                     /**
                                      * Convenience typedef denoting
                                      * the range of indices on which
@@ -130,15 +392,37 @@ class GradientEstimator
     typedef pair<unsigned int,unsigned int> IndexInterval;
 
                                     /**
-                                     * Compute the error estimator on
-                                     * the cells in the range given
-                                     * by the third parameter.
+                                     * Kind of the main function of
+                                     * this class. It is called by
+                                     * the public entry points to
+                                     * this class with the correct
+                                     * template first argument and
+                                     * then simply calls the
+                                     * @p{approximate} function,
+                                     * after setting up several
+                                     * threads and doing some
+                                     * administration that is
+                                     * independent of the actual
+                                     * derivative to be computed.
                                      */
-    template <int dim>
-    static void estimate_threaded (const DoFHandler<dim> &dof,
-                                  const Vector<double>  &solution,
-                                  const IndexInterval   &index_interval,
-                                  Vector<float>         &error_per_cell);    
+    template <class DerivativeDescription, int dim>
+    static void
+    approximate_derivative (const DoFHandler<dim> &dof,
+                           const Vector<double>  &solution,
+                           Vector<float>         &derivative_norm);
+
+                                    /**
+                                     * Compute the derivative
+                                     * approximation on the cells in
+                                     * the range given by the third
+                                     * parameter.
+                                     */
+    template <class DerivativeDescription, int dim>
+    static void
+    approximate (const DoFHandler<dim> &dof,
+                const Vector<double>  &solution,
+                const IndexInterval   &index_interval,
+                            Vector<float>         &derivative_norm);    
 };
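
As a quick illustration of the interface declared above, here is a minimal usage sketch. It assumes a DoFHandler and a solution vector that have already been set up elsewhere; the helper function name and the reinit() sizing are illustrative assumptions, while the two approximation calls are the entry points declared in this patch.

#include <numerics/derivative_approximation.h>
#include <dofs/dof_handler.h>
#include <grid/tria.h>
#include <lac/vector.h>

// Hypothetical helper: compute both cell-wise derivative indicators
// for a scalar finite element field.
template <int dim>
void compute_derivative_indicators (const DoFHandler<dim> &dof_handler,
                                    const Vector<double>  &solution,
                                    Vector<float>         &gradient_indicator,
                                    Vector<float>         &second_derivative_indicator)
{
  // one entry per active cell, as asserted inside approximate_derivative()
  gradient_indicator.reinit (dof_handler.get_tria().n_active_cells());
  second_derivative_indicator.reinit (dof_handler.get_tria().n_active_cells());

  // cell-wise Euclidean norm of the approximated gradient
  DerivativeApproximation::approximate_gradient (dof_handler,
                                                 solution,
                                                 gradient_indicator);

  // cell-wise spectral norm of the approximated tensor of second derivatives
  DerivativeApproximation::approximate_second_derivative (dof_handler,
                                                          solution,
                                                          second_derivative_indicator);

  // As the class documentation notes, one would still scale each entry by
  // an appropriate power of the local mesh width (e.g. h^{1+d/2} for the
  // gradient) before using it as a refinement criterion.
}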
 
 
index ed250e198151904a70fa25a3bac92c70a8a8e8d7..5b6db2d9eba4c675e7b439bc201d7fbb7f8bf478 100644 (file)
@@ -13,6 +13,8 @@
 
 
 #include <base/quadrature_lib.h>
+#include <base/thread_management.h>
+#include <base/multithread_info.h>
 #include <lac/vector.h>
 #include <grid/tria_iterator.h>
 #include <dofs/dof_accessor.h>
 #include <fe/fe_values.h>
 #include <numerics/gradient_estimator.h>
 
-#ifdef DEAL_II_USE_MT
-#  include <base/thread_management.h>
-#  include <base/multithread_info.h>
-#endif
+
+template <typename T>
+static T sqr (const T t)
+{
+  return t*t;
+};
+
+
+
+
+
+template <int dim>
+inline
+typename DerivativeApproximation::Gradient<dim>::ProjectedDerivative
+DerivativeApproximation::Gradient<dim>::
+get_projected_derivative (const FEValues<dim>  &fe_values,
+                         const Vector<double> &solution) 
+{
+  vector<ProjectedDerivative> values (1);
+  fe_values.get_function_values (solution, values);
+  return values[0];
+};
+
+
+
+template <int dim>
+inline
+double
+DerivativeApproximation::Gradient<dim>::derivative_norm (const Derivative &d)
+{
+  double s = 0;
+  for (unsigned int i=0; i<dim; ++i)
+    s += d[i]*d[i];
+  return sqrt(s);
+};
+
+
+
+template <int dim>
+inline
+void
+DerivativeApproximation::Gradient<dim>::symmetrize (Derivative &)
+{
+                                  // nothing to do here
+};
+
+
+
+template <int dim>
+inline
+typename DerivativeApproximation::SecondDerivative<dim>::ProjectedDerivative
+DerivativeApproximation::SecondDerivative<dim>::
+get_projected_derivative (const FEValues<dim>  &fe_values,
+                         const Vector<double> &solution) 
+{
+  vector<ProjectedDerivative> values (1);
+  fe_values.get_function_grads (solution, values);
+  return values[0];
+};
+
+
+
+template <>
+inline
+double
+DerivativeApproximation::SecondDerivative<1>::
+derivative_norm (const Derivative &d)
+{
+  return fabs (d[0][0]);
+};
+
+
+
+template <>
+inline
+double
+DerivativeApproximation::SecondDerivative<2>::
+derivative_norm (const Derivative &d)
+{
+                                  // note that d should be a
+                                  // symmetric 2x2 tensor, so the
+                                  // eigenvalues are:
+                                  //
+                                  // 1/2(a+b\pm\sqrt((a-b)^2+4c^2))
+                                  //
+                                  // where d_11=a, d_22=b,
+                                  // d_12=d_21=c
+  const double radicand = sqr(d[0][0] - d[1][1]) + 4*sqr(d[0][1]);
+  const double eigenvalues[2]
+    = { 0.5*(d[0][0] + d[1][1] + sqrt(radicand)),
+       0.5*(d[0][0] + d[1][1] - sqrt(radicand))  };
+  
+  return max (fabs (eigenvalues[0]),
+             fabs (eigenvalues[1]));
+};
+
+
+
+template <int dim>
+inline
+void
+DerivativeApproximation::SecondDerivative<dim>::symmetrize (Derivative &d)
+{
+                                  // symmetrize non-diagonal entries
+  for (unsigned int i=0; i<dim; ++i)
+    for (unsigned int j=i+1; j<dim; ++j)
+      {
+       const double s = (d[i][j] + d[j][i]) / 2;
+       d[i][j] = d[j][i] = s;
+      };
+};
+
+
+
+
+template <int dim>
+void 
+DerivativeApproximation::
+approximate_gradient (const DoFHandler<dim> &dof_handler,
+                     const Vector<double>  &solution,
+                     Vector<float>         &derivative_norm)
+{
+  approximate_derivative<Gradient<dim>,dim> (dof_handler,
+                                            solution,
+                                            derivative_norm);
+};
 
 
 
 template <int dim>
 void 
-GradientEstimator::estimate (const DoFHandler<dim> &dof_handler,
-                            const Vector<double>  &solution,
-                            Vector<float>         &error_per_cell)
+DerivativeApproximation::
+approximate_second_derivative (const DoFHandler<dim> &dof_handler,
+                              const Vector<double>  &solution,
+                              Vector<float>         &derivative_norm)
 {
-  Assert (error_per_cell.size() == dof_handler.get_tria().n_active_cells(),
-         ExcInvalidVectorLength (error_per_cell.size(),
+  approximate_derivative<SecondDerivative<dim>,dim> (dof_handler,
+                                                    solution,
+                                                    derivative_norm);
+};
+
+
+
+template <class DerivativeDescription, int dim>
+void 
+DerivativeApproximation::
+approximate_derivative (const DoFHandler<dim> &dof_handler,
+                       const Vector<double>  &solution,
+                       Vector<float>         &derivative_norm)
+{
+  Assert (derivative_norm.size() == dof_handler.get_tria().n_active_cells(),
+         ExcInvalidVectorLength (derivative_norm.size(),
                                  dof_handler.get_tria().n_active_cells()));
   Assert (dof_handler.get_fe().n_components() == 1,
          ExcInternalError());
 
-#ifdef DEAL_II_USE_MT
   const unsigned int n_threads = multithread_info.n_default_threads;
   vector<IndexInterval> index_intervals
     = Threads::split_interval (0, dof_handler.get_tria().n_active_cells(),
@@ -48,32 +186,28 @@ GradientEstimator::estimate (const DoFHandler<dim> &dof_handler,
   Threads::ThreadManager thread_manager;
   for (unsigned int i=0; i<n_threads; ++i)
     Threads::spawn (thread_manager,
-                   Threads::encapsulate (&GradientEstimator::
-                                         template estimate_threaded<dim>)
-                   .collect_args (dof_handler, solution, index_intervals[i],
-                                  error_per_cell));
+                   Threads::encapsulate
+                   (&DerivativeApproximation::
+                    template approximate<DerivativeDescription,dim>)
+                   .collect_args (dof_handler, solution,
+                                  index_intervals[i],
+                                  derivative_norm));
   thread_manager.wait ();
-  
-#else
-  estimate_threaded (dof_handler, solution,
-                    make_pair(0U, dof_handler.get_tria().n_active_cells()),
-                    error_per_cell);
-#endif
 };
 
 
 
-template <int dim>
+template <class DerivativeDescription, int dim>
 void 
-GradientEstimator::estimate_threaded (const DoFHandler<dim> &dof_handler,
+DerivativeApproximation::approximate (const DoFHandler<dim> &dof_handler,
                                      const Vector<double>  &solution,
                                      const IndexInterval   &index_interval,
-                                     Vector<float>         &error_per_cell)
+                                     Vector<float>         &derivative_norm)
 {
   QMidpoint<dim> midpoint_rule;
   FEValues<dim>  fe_midpoint_value (dof_handler.get_fe(),
                                    midpoint_rule,
-                                   UpdateFlags(update_values |
+                                   UpdateFlags(DerivativeDescription::update_flags |
                                                update_q_points));
   
                                   // matrix Y=sum_i y_i y_i^T
@@ -83,9 +217,10 @@ GradientEstimator::estimate_threaded (const DoFHandler<dim> &dof_handler,
                                   // respective entries in the output
                                   // vector:
   Vector<float>::iterator
-    error_on_this_cell = error_per_cell.begin() + index_interval.first;
+    derivative_norm_on_this_cell
+    = derivative_norm.begin() + index_interval.first;
   
-  DoFHandler<dim>::active_cell_iterator cell, endc;
+  typename DoFHandler<dim>::active_cell_iterator cell, endc;
   cell = endc = dof_handler.begin_active();
                                   // (static_cast to avoid warnings
                                   // about unsigned always >=0)
@@ -96,25 +231,30 @@ GradientEstimator::estimate_threaded (const DoFHandler<dim> &dof_handler,
                                   // active neighbors of a cell
                                   // reserve the maximal number of
                                   // active neighbors
-  vector<DoFHandler<dim>::active_cell_iterator> active_neighbors;
+  vector<typename DoFHandler<dim>::active_cell_iterator> active_neighbors;
   active_neighbors.reserve (GeometryInfo<dim>::faces_per_cell *
                            GeometryInfo<dim>::subfaces_per_face);
 
-  for (; cell!=endc; ++cell, ++error_on_this_cell)
+  for (; cell!=endc; ++cell, ++derivative_norm_on_this_cell)
     {
       Y.clear ();
-                                      // vector g=sum_i y_i (f(x+y_i)-f(x))/|y_i|
-      Tensor<1,dim> projected_gradient;
+                                      // vector
+                                      // g=sum_i y_i (f(x+y_i)-f(x))/|y_i|
+                                      // or related type for higher
+                                      // derivatives
+      typename DerivativeDescription::Derivative projected_derivative;
 
                                       // reinit fe values object...
       fe_midpoint_value.reinit (cell);
 
                                       // ...and get the value of the
-                                      // solution...
-      vector<double> this_midpoint_value(1);
-      fe_midpoint_value.get_function_values (solution, this_midpoint_value);
-                                      // ...and the place where it lives
-      Point<dim> this_center = fe_midpoint_value.quadrature_point(0);
+                                      // projected derivative...
+      const typename DerivativeDescription::ProjectedDerivative
+       this_midpoint_value
+       = DerivativeDescription::get_projected_derivative (fe_midpoint_value,
+                                                   solution);
+                                      // ...and the place where it lives
+      const Point<dim> this_center = fe_midpoint_value.quadrature_point(0);
 
       
                                       // loop over all neighbors and
@@ -133,7 +273,8 @@ GradientEstimator::estimate_threaded (const DoFHandler<dim> &dof_handler,
       for (unsigned int n=0; n<GeometryInfo<dim>::faces_per_cell; ++n)
        if (! cell->at_boundary(n))
          {
-           DoFHandler<dim>::cell_iterator neighbor = cell->neighbor(n);
+           typename DoFHandler<dim>::cell_iterator
+             neighbor = cell->neighbor(n);
            if (neighbor->active())
              active_neighbors.push_back (neighbor);
            else
@@ -157,7 +298,8 @@ GradientEstimator::estimate_threaded (const DoFHandler<dim> &dof_handler,
                                                 // present cell
                if (dim == 1)
                  {
-                   DoFHandler<dim>::cell_iterator neighbor_child = neighbor;
+                   typename DoFHandler<dim>::cell_iterator
+                     neighbor_child = neighbor;
                    while (neighbor_child->has_children())
                      neighbor_child = neighbor_child->child (n==0 ? 1 : 0);
                    
@@ -181,11 +323,11 @@ GradientEstimator::estimate_threaded (const DoFHandler<dim> &dof_handler,
                                       // now loop over all active
                                       // neighbors and collect the
                                       // data we need
-      typename vector<DoFHandler<dim>::active_cell_iterator>::const_iterator
+      typename vector<typename DoFHandler<dim>::active_cell_iterator>::const_iterator
        neighbor_ptr = active_neighbors.begin();
       for (; neighbor_ptr!=active_neighbors.end(); ++neighbor_ptr)
        {
-         const DoFHandler<dim>::active_cell_iterator
+         const typename DoFHandler<dim>::active_cell_iterator
            neighbor = *neighbor_ptr;
            
                                           // reinit fe values object...
@@ -193,10 +335,14 @@ GradientEstimator::estimate_threaded (const DoFHandler<dim> &dof_handler,
          
                                           // ...and get the value of the
                                           // solution...
-         vector<double> neighbor_midpoint_value(1);
-         fe_midpoint_value.get_function_values (solution, this_midpoint_value);
+         const typename DerivativeDescription::ProjectedDerivative
+           neighbor_midpoint_value
+           = DerivativeDescription::get_projected_derivative (fe_midpoint_value,
+                                                       solution);
+         
                                           // ...and the place where it lives
-         Point<dim> neighbor_center = fe_midpoint_value.quadrature_point(0);
+         const Point<dim>
+           neighbor_center = fe_midpoint_value.quadrature_point(0);
          
          
                                           // vector for the
@@ -208,6 +354,13 @@ GradientEstimator::estimate_threaded (const DoFHandler<dim> &dof_handler,
          const double distance = sqrt(y.square());
                                           // normalize y
          y /= distance;
+                                          // *** note that unlike in
+                                          // the docs, y denotes the
+                                          // normalized vector
+                                          // connecting the centers
+                                          // of the two cells, rather
+                                          // than the normal
+                                          // difference! ***
          
                                           // add up the
                                           // contribution of
@@ -216,13 +369,20 @@ GradientEstimator::estimate_threaded (const DoFHandler<dim> &dof_handler,
            for (unsigned int j=0; j<dim; ++j)
              Y[i][j] += y[i] * y[j];
          
-                                          // the update the sum
+                                          // then update the sum
                                           // of difference
                                           // quotients
-         projected_gradient += (neighbor_midpoint_value[0] -
-                                this_midpoint_value[0]) /
-                               distance *
-                               y;
+         typename DerivativeDescription::ProjectedDerivative
+           projected_finite_difference
+           = (neighbor_midpoint_value -
+              this_midpoint_value);
+         projected_finite_difference /= distance;
+         
+         typename DerivativeDescription::Derivative projected_derivative_update;
+         outer_product (projected_derivative_update,
+                        y,
+                        projected_finite_difference);
+         projected_derivative += projected_derivative_update;
        };
 
                                       // can we determine an
@@ -237,14 +397,17 @@ GradientEstimator::estimate_threaded (const DoFHandler<dim> &dof_handler,
       AssertThrow (determinant(Y) != 0,
                   ExcInsufficientDirections());
 
+                                      // first symmetrize g
+      DerivativeDescription::symmetrize (projected_derivative);
+      
                                        // compute Y^-1 g
-      Point<dim> gradient;
+      typename DerivativeDescription::Derivative derivative;
       Tensor<2,dim> Y_inverse = invert(Y);
       
-                                       // compute Y^-1 g
-      contract (gradient, Y_inverse, projected_gradient);
+      contract (derivative, Y_inverse, projected_derivative);
 
-      *error_on_this_cell = sqrt(gradient.square());
+      *derivative_norm_on_this_cell
+       = DerivativeDescription::derivative_norm (derivative);
     };
 };
 
@@ -254,9 +417,17 @@ GradientEstimator::estimate_threaded (const DoFHandler<dim> &dof_handler,
 // explicit instantiations
 template
 void 
-GradientEstimator::estimate (const DoFHandler<deal_II_dimension> &dof_handler,
-                            const Vector<double>  &solution,
-                            Vector<float>         &error_per_cell);
+DerivativeApproximation::
+approximate_gradient (const DoFHandler<deal_II_dimension> &dof_handler,
+                     const Vector<double>  &solution,
+                     Vector<float>         &derivative_norm);
+
+template
+void 
+DerivativeApproximation::
+approximate_second_derivative (const DoFHandler<deal_II_dimension> &dof_handler,
+                              const Vector<double>  &solution,
+                              Vector<float>         &derivative_norm);
 
 
 
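The hunks above carry the core of the new scheme into the source file: for every active cell, the code accumulates the matrix Y = sum_i y_i y_i^T and the difference-quotient vector g = sum_i y_i (f(x+y_i)-f(x))/|y_i| over all active neighbors (with y normalized before it enters Y, as the in-code note points out), then recovers the approximate derivative as Y^{-1} g and stores a scalar norm of it per cell. The following is a minimal standalone sketch of that computation for the gradient case in 2-D, written with plain arrays and made-up sample data instead of the deal.II Point/Tensor classes and cell iterators:

#include <cmath>
#include <cstdio>

int main ()
{
  // hypothetical midpoint value and center of the present cell
  const double cell_value     = 1.0;
  const double cell_center[2] = { 0.5, 0.5 };

  // hypothetical midpoint values and centers of four active neighbors
  const double neighbor_values[4]     = { 1.2, 0.9, 1.1, 0.8 };
  const double neighbor_centers[4][2] = { {  1.5, 0.5 }, { -0.5,  0.5 },
                                          {  0.5, 1.5 }, {  0.5, -0.5 } };

  double Y[2][2] = { { 0, 0 }, { 0, 0 } };   // Y = sum_i y_i y_i^T
  double g[2]    = { 0, 0 };                 // g = sum_i y_i (f(x+y_i)-f(x))/|y_i|

  for (int n = 0; n < 4; ++n)
    {
      double y[2] = { neighbor_centers[n][0] - cell_center[0],
                      neighbor_centers[n][1] - cell_center[1] };
      const double distance = std::sqrt (y[0]*y[0] + y[1]*y[1]);
      // normalize y, as in the committed code
      y[0] /= distance;
      y[1] /= distance;

      for (int i = 0; i < 2; ++i)
        for (int j = 0; j < 2; ++j)
          Y[i][j] += y[i] * y[j];

      const double dq = (neighbor_values[n] - cell_value) / distance;
      g[0] += y[0] * dq;
      g[1] += y[1] * dq;
    }

  // invert the 2x2 matrix Y and compute the approximate gradient Y^{-1} g
  const double det = Y[0][0]*Y[1][1] - Y[0][1]*Y[1][0];
  const double gradient[2] = { ( Y[1][1]*g[0] - Y[0][1]*g[1]) / det,
                               (-Y[1][0]*g[0] + Y[0][0]*g[1]) / det };

  std::printf ("approximate |grad u| = %g\n",
               std::sqrt (gradient[0]*gradient[0] + gradient[1]*gradient[1]));
}

With the four axis-aligned neighbors of the sample data, Y is twice the identity, so this example reduces to a plain central difference in each coordinate direction.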
index ed250e198151904a70fa25a3bac92c70a8a8e8d7..5b6db2d9eba4c675e7b439bc201d7fbb7f8bf478 100644 (file)
@@ -13,6 +13,8 @@
 
 
 #include <base/quadrature_lib.h>
+#include <base/thread_management.h>
+#include <base/multithread_info.h>
 #include <lac/vector.h>
 #include <grid/tria_iterator.h>
 #include <dofs/dof_accessor.h>
 #include <fe/fe_values.h>
 #include <numerics/gradient_estimator.h>
 
-#ifdef DEAL_II_USE_MT
-#  include <base/thread_management.h>
-#  include <base/multithread_info.h>
-#endif
+
+template <typename T>
+static T sqr (const T t)
+{
+  return t*t;
+};
+
+
+
+
+
+template <int dim>
+inline
+typename DerivativeApproximation::Gradient<dim>::ProjectedDerivative
+DerivativeApproximation::Gradient<dim>::
+get_projected_derivative (const FEValues<dim>  &fe_values,
+                         const Vector<double> &solution) 
+{
+  vector<ProjectedDerivative> values (1);
+  fe_values.get_function_values (solution, values);
+  return values[0];
+};
+
+
+
+template <int dim>
+inline
+double
+DerivativeApproximation::Gradient<dim>::derivative_norm (const Derivative &d)
+{
+  double s = 0;
+  for (unsigned int i=0; i<dim; ++i)
+    s += d[i]*d[i];
+  return sqrt(s);
+};
+
+
+
+template <int dim>
+inline
+void
+DerivativeApproximation::Gradient<dim>::symmetrize (Derivative &)
+{
+                                  // nothing to do here
+};
+
+
+
+template <int dim>
+inline
+typename DerivativeApproximation::SecondDerivative<dim>::ProjectedDerivative
+DerivativeApproximation::SecondDerivative<dim>::
+get_projected_derivative (const FEValues<dim>  &fe_values,
+                         const Vector<double> &solution) 
+{
+  vector<ProjectedDerivative> values (1);
+  fe_values.get_function_grads (solution, values);
+  return values[0];
+};
+
+
+
+template <>
+inline
+double
+DerivativeApproximation::SecondDerivative<1>::
+derivative_norm (const Derivative &d)
+{
+  return fabs (d[0][0]);
+};
+
+
+
+template <>
+inline
+double
+DerivativeApproximation::SecondDerivative<2>::
+derivative_norm (const Derivative &d)
+{
+                                  // note that d should be a
+                                  // symmetric 2x2 tensor, so the
+                                  // eigenvalues are:
+                                  //
+                                  // 1/2(a+b\pm\sqrt((a-b)^2+4c^2))
+                                  //
+                                  // if the d_11=a, d_22=b,
+                                  // d_12=d_21=c
+  const double radicand = sqr(d[0][0] - d[1][1]) + 4*sqr(d[0][1]);
+  const double eigenvalues[2]
+    = { 0.5*(d[0][0] + d[1][1] + sqrt(radicand)),
+       0.5*(d[0][0] + d[1][1] - sqrt(radicand))  };
+  
+  return max (fabs (eigenvalues[0]),
+             fabs (eigenvalues[1]));
+};
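The dim==2 specialization above returns the spectral norm of the approximated second derivative, i.e. the eigenvalue of largest modulus. For a symmetric tensor with entries $d_{11}=a$, $d_{22}=b$, $d_{12}=d_{21}=c$, as in the preceding comment, the characteristic polynomial yields exactly the expression coded above:

  $\det \begin{pmatrix} a-\lambda & c \\ c & b-\lambda \end{pmatrix}
     = \lambda^2 - (a+b)\,\lambda + (ab - c^2) = 0
   \;\Longrightarrow\;
   \lambda_{1,2} = \frac{1}{2}\left(a+b \pm \sqrt{(a-b)^2 + 4c^2}\right),
   \qquad
   \|d\| = \max\bigl(|\lambda_1|,\,|\lambda_2|\bigr).$

Since the radicand is a sum of squares, both eigenvalues are real, as they must be for the symmetrized tensor.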
+
+
+
+template <int dim>
+inline
+void
+DerivativeApproximation::SecondDerivative<dim>::symmetrize (Derivative &d)
+{
+                                  // symmetrize non-diagonal entries
+  for (unsigned int i=0; i<dim; ++i)
+    for (unsigned int j=i+1; j<dim; ++j)
+      {
+       const double s = (d[i][j] + d[j][i]) / 2;
+       d[i][j] = d[j][i] = s;
+      };
+};
+
+
+
+
+template <int dim>
+void 
+DerivativeApproximation::
+approximate_gradient (const DoFHandler<dim> &dof_handler,
+                     const Vector<double>  &solution,
+                     Vector<float>         &derivative_norm)
+{
+  approximate_derivative<Gradient<dim>,dim> (dof_handler,
+                                            solution,
+                                            derivative_norm);
+};
 
 
 
 template <int dim>
 void 
-GradientEstimator::estimate (const DoFHandler<dim> &dof_handler,
-                            const Vector<double>  &solution,
-                            Vector<float>         &error_per_cell)
+DerivativeApproximation::
+approximate_second_derivative (const DoFHandler<dim> &dof_handler,
+                              const Vector<double>  &solution,
+                              Vector<float>         &derivative_norm)
 {
-  Assert (error_per_cell.size() == dof_handler.get_tria().n_active_cells(),
-         ExcInvalidVectorLength (error_per_cell.size(),
+  approximate_derivative<SecondDerivative<dim>,dim> (dof_handler,
+                                                    solution,
+                                                    derivative_norm);
+};
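Both public functions above are thin wrappers that forward to the approximate_derivative template defined next, with the derivative description class (Gradient or SecondDerivative) chosen as template argument to select what is sampled at the cell midpoints and how the result is reduced to a scalar. A stripped-down, standalone sketch of this dispatch-by-description pattern, using hypothetical names and none of the deal.II types:

#include <cstdio>

// two "description" classes select the concrete behaviour...
struct GradientDescription
{
  static const char *name () { return "gradient"; }
};

struct SecondDerivativeDescription
{
  static const char *name () { return "second derivative"; }
};

// ...while a single driver template implements the common loop structure
template <class DerivativeDescription>
void approximate_derivative ()
{
  std::printf ("approximating the %s\n", DerivativeDescription::name ());
}

void approximate_gradient ()          { approximate_derivative<GradientDescription> (); }
void approximate_second_derivative () { approximate_derivative<SecondDerivativeDescription> (); }

int main ()
{
  approximate_gradient ();
  approximate_second_derivative ();
}

The committed code additionally passes the space dimension and the handler, solution, and output vectors, but the compile-time selection works the same way.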
+
+
+
+template <class DerivativeDescription, int dim>
+void 
+DerivativeApproximation::
+approximate_derivative (const DoFHandler<dim> &dof_handler,
+                       const Vector<double>  &solution,
+                       Vector<float>         &derivative_norm)
+{
+  Assert (derivative_norm.size() == dof_handler.get_tria().n_active_cells(),
+         ExcInvalidVectorLength (derivative_norm.size(),
                                  dof_handler.get_tria().n_active_cells()));
   Assert (dof_handler.get_fe().n_components() == 1,
          ExcInternalError());
 
-#ifdef DEAL_II_USE_MT
   const unsigned int n_threads = multithread_info.n_default_threads;
   vector<IndexInterval> index_intervals
     = Threads::split_interval (0, dof_handler.get_tria().n_active_cells(),
@@ -48,32 +186,28 @@ GradientEstimator::estimate (const DoFHandler<dim> &dof_handler,
   Threads::ThreadManager thread_manager;
   for (unsigned int i=0; i<n_threads; ++i)
     Threads::spawn (thread_manager,
-                   Threads::encapsulate (&GradientEstimator::
-                                         template estimate_threaded<dim>)
-                   .collect_args (dof_handler, solution, index_intervals[i],
-                                  error_per_cell));
+                   Threads::encapsulate
+                   (&DerivativeApproximation::
+                    template approximate<DerivativeDescription,dim>)
+                   .collect_args (dof_handler, solution,
+                                  index_intervals[i],
+                                  derivative_norm));
   thread_manager.wait ();
-  
-#else
-  estimate_threaded (dof_handler, solution,
-                    make_pair(0U, dof_handler.get_tria().n_active_cells()),
-                    error_per_cell);
-#endif
 };
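The hunk above removes the DEAL_II_USE_MT preprocessor branches: the range of active cell indices is now always split into n_default_threads intervals and one worker is spawned per interval through the Threads interface. A rough standalone sketch of the same split-and-spawn pattern, written here with std::thread rather than the deal.II thread management classes (the cell count and the worker body are placeholders), might look as follows:

#include <thread>
#include <vector>
#include <utility>
#include <cstddef>

// hypothetical worker processing the half-open index range [first, last)
void process_range (std::size_t first, std::size_t last)
{
  for (std::size_t i = first; i < last; ++i)
    { /* ... approximate the derivative on cell i ... */ }
}

int main ()
{
  const std::size_t  n_cells   = 1000;   // e.g. the number of active cells
  const unsigned int n_threads = 4;

  // split [0, n_cells) into n_threads intervals of nearly equal size
  std::vector<std::pair<std::size_t, std::size_t> > intervals;
  for (unsigned int t = 0; t < n_threads; ++t)
    intervals.push_back (std::make_pair (n_cells *  t      / n_threads,
                                         n_cells * (t + 1) / n_threads));

  // spawn one worker per interval, then wait for all of them
  std::vector<std::thread> workers;
  for (unsigned int t = 0; t < n_threads; ++t)
    workers.emplace_back (process_range,
                          intervals[t].first, intervals[t].second);
  for (std::thread &w : workers)
    w.join ();                            // plays the role of thread_manager.wait()
}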
 
 
 
-template <int dim>
+template <class DerivativeDescription, int dim>
 void 
-GradientEstimator::estimate_threaded (const DoFHandler<dim> &dof_handler,
+DerivativeApproximation::approximate (const DoFHandler<dim> &dof_handler,
                                      const Vector<double>  &solution,
                                      const IndexInterval   &index_interval,
-                                     Vector<float>         &error_per_cell)
+                                     Vector<float>         &derivative_norm)
 {
   QMidpoint<dim> midpoint_rule;
   FEValues<dim>  fe_midpoint_value (dof_handler.get_fe(),
                                    midpoint_rule,
-                                   UpdateFlags(update_values |
+                                   UpdateFlags(DerivativeDescription::update_flags |
                                                update_q_points));
   
                                   // matrix Y=sum_i y_i y_i^T
@@ -83,9 +217,10 @@ GradientEstimator::estimate_threaded (const DoFHandler<dim> &dof_handler,
                                   // respective entries in the output
                                   // vector:
   Vector<float>::iterator
-    error_on_this_cell = error_per_cell.begin() + index_interval.first;
+    derivative_norm_on_this_cell
+    = derivative_norm.begin() + index_interval.first;
   
-  DoFHandler<dim>::active_cell_iterator cell, endc;
+  typename DoFHandler<dim>::active_cell_iterator cell, endc;
   cell = endc = dof_handler.begin_active();
                                   // (static_cast to avoid warnings
                                   // about unsigned always >=0)
@@ -96,25 +231,30 @@ GradientEstimator::estimate_threaded (const DoFHandler<dim> &dof_handler,
                                   // active neighbors of a cell
                                   // reserve the maximal number of
                                   // active neighbors
-  vector<DoFHandler<dim>::active_cell_iterator> active_neighbors;
+  vector<typename DoFHandler<dim>::active_cell_iterator> active_neighbors;
   active_neighbors.reserve (GeometryInfo<dim>::faces_per_cell *
                            GeometryInfo<dim>::subfaces_per_face);
 
-  for (; cell!=endc; ++cell, ++error_on_this_cell)
+  for (; cell!=endc; ++cell, ++derivative_norm_on_this_cell)
     {
       Y.clear ();
-                                      // vector g=sum_i y_i (f(x+y_i)-f(x))/|y_i|
-      Tensor<1,dim> projected_gradient;
+                                      // vector
+                                      // g=sum_i y_i (f(x+y_i)-f(x))/|y_i|
+                                      // or related type for higher
+                                      // derivatives
+      typename DerivativeDescription::Derivative projected_derivative;
 
                                       // reinit fe values object...
       fe_midpoint_value.reinit (cell);
 
                                       // ...and get the value of the
-                                      // solution...
-      vector<double> this_midpoint_value(1);
-      fe_midpoint_value.get_function_values (solution, this_midpoint_value);
-                                      // ...and the place where it lives
-      Point<dim> this_center = fe_midpoint_value.quadrature_point(0);
+                                      // projected derivative...
+      const typename DerivativeDescription::ProjectedDerivative
+       this_midpoint_value
+       = DerivativeDescription::get_projected_derivative (fe_midpoint_value,
+                                                   solution);
+                                      // ...and the place where it lives
+      const Point<dim> this_center = fe_midpoint_value.quadrature_point(0);
 
       
                                       // loop over all neighbors and
@@ -133,7 +273,8 @@ GradientEstimator::estimate_threaded (const DoFHandler<dim> &dof_handler,
       for (unsigned int n=0; n<GeometryInfo<dim>::faces_per_cell; ++n)
        if (! cell->at_boundary(n))
          {
-           DoFHandler<dim>::cell_iterator neighbor = cell->neighbor(n);
+           typename DoFHandler<dim>::cell_iterator
+             neighbor = cell->neighbor(n);
            if (neighbor->active())
              active_neighbors.push_back (neighbor);
            else
@@ -157,7 +298,8 @@ GradientEstimator::estimate_threaded (const DoFHandler<dim> &dof_handler,
                                                 // present cell
                if (dim == 1)
                  {
-                   DoFHandler<dim>::cell_iterator neighbor_child = neighbor;
+                   typename DoFHandler<dim>::cell_iterator
+                     neighbor_child = neighbor;
                    while (neighbor_child->has_children())
                      neighbor_child = neighbor_child->child (n==0 ? 1 : 0);
                    
@@ -181,11 +323,11 @@ GradientEstimator::estimate_threaded (const DoFHandler<dim> &dof_handler,
                                       // now loop over all active
                                       // neighbors and collect the
                                       // data we need
-      typename vector<DoFHandler<dim>::active_cell_iterator>::const_iterator
+      typename vector<typename DoFHandler<dim>::active_cell_iterator>::const_iterator
        neighbor_ptr = active_neighbors.begin();
       for (; neighbor_ptr!=active_neighbors.end(); ++neighbor_ptr)
        {
-         const DoFHandler<dim>::active_cell_iterator
+         const typename DoFHandler<dim>::active_cell_iterator
            neighbor = *neighbor_ptr;
            
                                           // reinit fe values object...
@@ -193,10 +335,14 @@ GradientEstimator::estimate_threaded (const DoFHandler<dim> &dof_handler,
          
                                           // ...and get the value of the
                                           // solution...
-         vector<double> neighbor_midpoint_value(1);
-         fe_midpoint_value.get_function_values (solution, this_midpoint_value);
+         const typename DerivativeDescription::ProjectedDerivative
+           neighbor_midpoint_value
+           = DerivativeDescription::get_projected_derivative (fe_midpoint_value,
+                                                       solution);
+         
                                           // ...and the place where it lives
-         Point<dim> neighbor_center = fe_midpoint_value.quadrature_point(0);
+         const Point<dim>
+           neighbor_center = fe_midpoint_value.quadrature_point(0);
          
          
                                           // vector for the
@@ -208,6 +354,13 @@ GradientEstimator::estimate_threaded (const DoFHandler<dim> &dof_handler,
          const double distance = sqrt(y.square());
                                           // normalize y
          y /= distance;
+                                          // *** note that unlike in
+                                          // the docs, y denotes the
+                                          // normalized vector
+                                          // connecting the centers
+                                          // of the two cells, rather
+                                          // than the normal
+                                          // difference! ***
          
                                           // add up the
                                           // contribution of
@@ -216,13 +369,20 @@ GradientEstimator::estimate_threaded (const DoFHandler<dim> &dof_handler,
            for (unsigned int j=0; j<dim; ++j)
              Y[i][j] += y[i] * y[j];
          
-                                          // the update the sum
+                                          // then update the sum
                                           // of difference
                                           // quotients
-         projected_gradient += (neighbor_midpoint_value[0] -
-                                this_midpoint_value[0]) /
-                               distance *
-                               y;
+         typename DerivativeDescription::ProjectedDerivative
+           projected_finite_difference
+           = (neighbor_midpoint_value -
+              this_midpoint_value);
+         projected_finite_difference /= distance;
+         
+         typename DerivativeDescription::Derivative projected_derivative_update;
+         outer_product (projected_derivative_update,
+                        y,
+                        projected_finite_difference);
+         projected_derivative += projected_derivative_update;
        };
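Inside the neighbor loop above, outer_product forms the per-neighbor update from the normalized direction y and the scaled difference of projected derivatives. For the gradient approximation the projected difference is a scalar, so the "outer product" is just a rescaling of y; for the second-derivative approximation it is the difference of midpoint gradients, and the result is a rank-2 tensor. The two cases, sketched with plain 2-D arrays and hypothetical helper names rather than the deal.II Tensor overloads:

const int dim = 2;

// gradient case: update_i = y_i * s, where s = (u(x_K') - u(x_K)) / |y|
void outer_product_with_scalar (double update[dim],
                                const double y[dim],
                                const double s)
{
  for (int i = 0; i < dim; ++i)
    update[i] = y[i] * s;
}

// second-derivative case: update_ij = y_i * v_j, where v is the scaled
// difference of the midpoint gradients on the two cells
void outer_product_with_vector (double update[dim][dim],
                                const double y[dim],
                                const double v[dim])
{
  for (int i = 0; i < dim; ++i)
    for (int j = 0; j < dim; ++j)
      update[i][j] = y[i] * v[j];
}

Summing these per-neighbor updates gives the quantity g that is symmetrized and multiplied by Y^{-1} further down.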
 
                                       // can we determine an
@@ -237,14 +397,17 @@ GradientEstimator::estimate_threaded (const DoFHandler<dim> &dof_handler,
       AssertThrow (determinant(Y) != 0,
                   ExcInsufficientDirections());
 
+                                      // first symmetrize g
+      DerivativeDescription::symmetrize (projected_derivative);
+      
                                        // compute Y^-1 g
-      Point<dim> gradient;
+      typename DerivativeDescription::Derivative derivative;
       Tensor<2,dim> Y_inverse = invert(Y);
       
-                                       // compute Y^-1 g
-      contract (gradient, Y_inverse, projected_gradient);
+      contract (derivative, Y_inverse, projected_derivative);
 
-      *error_on_this_cell = sqrt(gradient.square());
+      *derivative_norm_on_this_cell
+       = DerivativeDescription::derivative_norm (derivative);
     };
 };
 
@@ -254,9 +417,17 @@ GradientEstimator::estimate_threaded (const DoFHandler<dim> &dof_handler,
 // explicit instantiations
 template
 void 
-GradientEstimator::estimate (const DoFHandler<deal_II_dimension> &dof_handler,
-                            const Vector<double>  &solution,
-                            Vector<float>         &error_per_cell);
+DerivativeApproximation::
+approximate_gradient (const DoFHandler<deal_II_dimension> &dof_handler,
+                     const Vector<double>  &solution,
+                     Vector<float>         &derivative_norm);
+
+template
+void 
+DerivativeApproximation::
+approximate_second_derivative (const DoFHandler<deal_II_dimension> &dof_handler,
+                              const Vector<double>  &solution,
+                              Vector<float>         &derivative_norm);
 
 
 

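Finally, a short usage sketch for the two instantiated entry points. The include paths are guessed from this commit's file list and the headers appearing in the sources above, so treat them as assumptions rather than verified paths; the calls themselves follow the instantiated signatures:

#include <numerics/derivative_approximation.h>
#include <dofs/dof_handler.h>     // assumed location of DoFHandler
#include <lac/vector.h>

template <int dim>
void compute_derivative_indicators (const DoFHandler<dim> &dof_handler,
                                    const Vector<double>  &solution)
{
  // one entry per active cell, as checked by the Assert in approximate_derivative()
  Vector<float> gradient_norm          (dof_handler.get_tria().n_active_cells());
  Vector<float> second_derivative_norm (dof_handler.get_tria().n_active_cells());

  DerivativeApproximation::approximate_gradient (dof_handler,
                                                 solution,
                                                 gradient_norm);
  DerivativeApproximation::approximate_second_derivative (dof_handler,
                                                          solution,
                                                          second_derivative_norm);

  // gradient_norm and second_derivative_norm now hold, cell by cell, the
  // l2 norm of the approximated gradient and the spectral norm of the
  // approximated second derivative, e.g. for use as refinement indicators
}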