Add norm() and norm_square()
author    Wolfgang Bangerth <bangerth@math.tamu.edu>
          Wed, 6 Apr 2005 15:24:33 +0000 (15:24 +0000)
committer Wolfgang Bangerth <bangerth@math.tamu.edu>
          Wed, 6 Apr 2005 15:24:33 +0000 (15:24 +0000)
git-svn-id: https://svn.dealii.org/trunk@10388 0785d39b-7218-0410-832d-ea1e28bc413d

deal.II/base/include/base/tensor.h
deal.II/base/include/base/tensor_base.h
deal.II/doc/news/changes.html

index 05898ca5ccef098b4e58d725ba200beb2808e44f..9e1fca1fc6951bbeb3ca16c87f110b859004d2a3 100644
--- a/deal.II/base/include/base/tensor.h
+++ b/deal.II/base/include/base/tensor.h
@@ -157,6 +157,27 @@ class Tensor
                                      * entries of a tensor.
                                      */
     Tensor<rank_,dim>   operator - () const;
+
+                                     /**
+                                      * Return the Frobenius-norm of a tensor,
+                                      * i.e. the square root of the sum of
+                                      * squares of all entries.
+                                      */
+    double norm () const;
+
+                                     /**
+                                      * Return the square of the
+                                      * Frobenius-norm of a tensor,
+                                      * i.e. the sum of squares of
+                                      * all entries.
+                                      *
+                                      * This function mainly exists
+                                      * because it makes computing the
+                                      * norm simpler recursively, but
+                                      * may also be useful in other
+                                      * contexts.
+                                      */
+    double norm_square () const;
     
                                     /**
                                      * Fill a vector with all tensor elements.
@@ -167,7 +188,7 @@ class Tensor
                                      * usual in C++, the rightmost
                                      * index of the tensor marches fastest.
                                      */
-    void unroll(Vector<double> & result) const;
+    void unroll (Vector<double> & result) const;
 
 
                                     /**
@@ -394,6 +415,29 @@ Tensor<rank_,dim>::operator - () const
 }
 
 
+
+template <int rank_, int dim>
+inline
+double Tensor<rank_,dim>::norm () const
+{
+  return std::sqrt (norm_square());
+}
+
+
+
+template <int rank_, int dim>
+inline
+double Tensor<rank_,dim>::norm_square () const
+{
+  double s = 0;
+  for (unsigned int i=0; i<dim; ++i)
+    s += subtensor[i].norm_square();
+
+  return s;
+}
+
+
+
 template <int rank_, int dim>
 inline
 void Tensor<rank_,dim>::clear ()
index b66ce4be3e6b2b77605d509d5bb2f5ea0033c826..fe2078e8c3010235472b933b8e26135f0f053d50 100644
--- a/deal.II/base/include/base/tensor_base.h
+++ b/deal.II/base/include/base/tensor_base.h
@@ -220,6 +220,32 @@ class Tensor<1,dim>
                                      */
     Tensor<1,dim>   operator - () const;
     
+                                     /**
+                                      * Return the Frobenius-norm of a
+                                      * tensor, i.e. the square root
+                                      * of the sum of squares of all
+                                      * entries. For the present case
+                                      * of rank-1 tensors, this equals
+                                      * the usual
+                                      * <tt>l<sub>2</sub></tt> norm of
+                                      * the vector.
+                                      */
+    double norm () const;
+
+                                     /**
+                                      * Return the square of the
+                                      * Frobenius-norm of a tensor,
+                                      * i.e. the sum of squares of
+                                      * all entries.
+                                      *
+                                      * This function mainly exists
+                                      * because it makes computing the
+                                      * norm simpler recursively, but
+                                      * may also be useful in other
+                                      * contexts.
+                                      */
+    double norm_square () const;
+
                                     /**
                                      * Reset all values to zero.
                                      *
@@ -637,6 +663,28 @@ Tensor<1,dim> Tensor<1,dim>::operator - () const
 
 
 
+template <int dim>
+inline
+double Tensor<1,dim>::norm () const
+{
+  return std::sqrt (norm_square());
+}
+
+
+
+template <int dim>
+inline
+double Tensor<1,dim>::norm_square () const
+{
+  double s = 0;
+  for (unsigned int i=0; i<dim; ++i)
+    s += values[i] * values[i];
+
+  return s;
+}
+
+
+
 template <int dim>
 inline
 void Tensor<1,dim>::clear ()
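(Illustration, not part of the commit.) The two hunks above implement the norm recursively: Tensor<rank_,dim>::norm_square() sums the squared norms of its rank-(rank_-1) subtensors, and the recursion bottoms out in the rank-1 specialization, which sums the squares of its entries. A minimal standalone sketch of the same computation for a rank-2, dim=2 tensor, using plain arrays rather than the deal.II classes:

#include <cmath>
#include <iostream>

int main()
{
  const unsigned int dim = 2;
  // Rank-2 tensor with entries 1..4; its rank-1 subtensors are the two rows.
  const double t[dim][dim] = {{1., 2.}, {3., 4.}};

  // norm_square(): sum of the rows' squared l2 norms, i.e. the sum of
  // squares of all entries: 1 + 4 + 9 + 16 = 30.
  double s = 0;
  for (unsigned int i = 0; i < dim; ++i)
    for (unsigned int j = 0; j < dim; ++j)
      s += t[i][j] * t[i][j];

  // norm(): the Frobenius norm, sqrt(30) ~ 5.4772.
  std::cout << s << ' ' << std::sqrt(s) << std::endl;
  return 0;
}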
index 9ef311e67f370d7a8259e3bd628ed6e207b61101..edb0f4d40abb5effdd68c05823883133abd30140 100644
--- a/deal.II/doc/news/changes.html
+++ b/deal.II/doc/news/changes.html
@@ -159,6 +159,14 @@ inconvenience this causes.
 <h3>base</h3>
 
 <ol>  
+  <li> <p>
+       New: The <code class="class">Tensor</code> classes now
+       have member functions that compute the Frobenius norm and its
+       square. 
+       <br>
+       (WB, 2005/04/06)
+       </p>
+
   <li> <p>
        New: The <code class="member">DataOutBase</code> class now
        allows to write data in a new intermediate format that

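(Illustration, not part of the commit.) A usage sketch of the new member functions; the include path is an assumption based on the 2005 source layout and may differ:

#include <base/tensor.h>   // assumption: deal.II include path circa 2005

#include <iostream>

int main()
{
  // Rank-1 tensor (a vector): norm() is the usual l2 norm.
  Tensor<1,3> v;
  v[0] = 3.; v[1] = 4.; v[2] = 0.;

  // Rank-2 tensor: norm() is the Frobenius norm.
  Tensor<2,2> t;
  t[0][0] = 1.; t[0][1] = 2.;
  t[1][0] = 3.; t[1][1] = 4.;

  std::cout << v.norm()        << std::endl   // 5
            << t.norm_square() << std::endl;  // 30
  return 0;
}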