https://gitweb.dealii.org/ - dealii.git/commitdiff
Classical Gram-Schmidt for block vectors (14605/head)
author Peter Munch <peterrmuench@gmail.com>
Wed, 21 Dec 2022 19:38:58 +0000 (20:38 +0100)
committer Peter Munch <peterrmuench@gmail.com>
Tue, 14 Mar 2023 10:45:50 +0000 (11:45 +0100)
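
The classical Gram-Schmidt orthogonalization kernels in SolverGMRES were so far specialized for LinearAlgebra::distributed::Vector. This patch generalizes them to LinearAlgebra::distributed::BlockVector as well: a new trait, is_dealii_compatible_distributed_vector, selects the vectorized code path, and small n_blocks()/block() helpers let the same kernels loop over the blocks of a block vector, treating a non-block vector as a single block. A test covering both vector types is added.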
include/deal.II/lac/solver_gmres.h
tests/lac/solver_gmres_01.cc
tests/lac/solver_gmres_01.output

index b38032116deac4e70fbb2edcfd5da45f49a581df..c386a075532167cd4f627ea481224747310fa743 100644 (file)
@@ -24,6 +24,7 @@
 #include <deal.II/base/subscriptor.h>
 #include <deal.II/base/vectorization.h>
 
+#include <deal.II/lac/block_vector_base.h>
 #include <deal.II/lac/full_matrix.h>
 #include <deal.II/lac/householder.h>
 #include <deal.II/lac/lapack_full_matrix.h>
@@ -730,7 +731,109 @@ namespace internal
 {
   namespace SolverGMRESImplementation
   {
-    template <class VectorType>
+    template <typename VectorType, typename Enable = void>
+    struct is_dealii_compatible_distributed_vector;
+
+    template <typename VectorType>
+    struct is_dealii_compatible_distributed_vector<
+      VectorType,
+      typename std::enable_if<!internal::is_block_vector<VectorType>>::type>
+    {
+      static constexpr bool value = std::is_same<
+        VectorType,
+        LinearAlgebra::distributed::Vector<typename VectorType::value_type,
+                                           MemorySpace::Host>>::value;
+    };
+
+
+
+    template <typename VectorType>
+    struct is_dealii_compatible_distributed_vector<
+      VectorType,
+      typename std::enable_if<internal::is_block_vector<VectorType>>::type>
+    {
+      static constexpr bool value = std::is_same<
+        typename VectorType::BlockType,
+        LinearAlgebra::distributed::Vector<typename VectorType::value_type,
+                                           MemorySpace::Host>>::value;
+    };
+
+
+
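    // Illustration (not part of the patch) of how the trait evaluates for
    // some common deal.II vector types; the types are real, the asserts are
    // only a sketch:
    //
    //   static_assert(is_dealii_compatible_distributed_vector<
    //                   LinearAlgebra::distributed::Vector<double>>::value, "");
    //   static_assert(is_dealii_compatible_distributed_vector<
    //                   LinearAlgebra::distributed::BlockVector<double>>::value, "");
    //   static_assert(!is_dealii_compatible_distributed_vector<
    //                   Vector<double>>::value, ""); // serial vector: generic path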
+    template <typename VectorType,
+              std::enable_if_t<!IsBlockVector<VectorType>::value, VectorType>
+                * = nullptr>
+    unsigned int
+    n_blocks(const VectorType &)
+    {
+      return 1;
+    }
+
+
+
+    template <typename VectorType,
+              std::enable_if_t<IsBlockVector<VectorType>::value, VectorType> * =
+                nullptr>
+    unsigned int
+    n_blocks(const VectorType &vector)
+    {
+      return vector.n_blocks();
+    }
+
+
+
+    template <typename VectorType,
+              std::enable_if_t<!IsBlockVector<VectorType>::value, VectorType>
+                * = nullptr>
+    VectorType &
+    block(VectorType &vector, const unsigned int b)
+    {
+      AssertDimension(b, 0);
+      (void)b;
+      return vector;
+    }
+
+
+
+    template <typename VectorType,
+              std::enable_if_t<!IsBlockVector<VectorType>::value, VectorType>
+                * = nullptr>
+    const VectorType &
+    block(const VectorType &vector, const unsigned int b)
+    {
+      AssertDimension(b, 0);
+      (void)b;
+      return vector;
+    }
+
+
+
+    template <typename VectorType,
+              std::enable_if_t<IsBlockVector<VectorType>::value, VectorType> * =
+                nullptr>
+    typename VectorType::BlockType &
+    block(VectorType &vector, const unsigned int b)
+    {
+      return vector.block(b);
+    }
+
+
+
+    template <typename VectorType,
+              std::enable_if_t<IsBlockVector<VectorType>::value, VectorType> * =
+                nullptr>
+    const typename VectorType::BlockType &
+    block(const VectorType &vector, const unsigned int b)
+    {
+      return vector.block(b);
+    }
+
+
+
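    // Sketch (assumption, not part of the patch): with n_blocks() and
    // block(), a kernel is written once and works for plain and block
    // vectors alike, e.g. a dot product over the locally owned range:
    //
    //   template <typename VectorType>
    //   double local_dot(const VectorType &v, const VectorType &w)
    //   {
    //     double sum = 0.;
    //     for (unsigned int b = 0; b < n_blocks(v); ++b)
    //       for (unsigned int j = 0; j < block(v, b).locally_owned_size(); ++j)
    //         sum += block(v, b).local_element(j) * block(w, b).local_element(j);
    //     return sum;
    //   }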
+    template <class VectorType,
+              std::enable_if_t<
+                !is_dealii_compatible_distributed_vector<VectorType>::value,
+                VectorType> * = nullptr>
     void
     Tvmult_add(const unsigned int dim,
                const VectorType & vv,
@@ -744,73 +847,83 @@ namespace internal
 
 
 
-    template <class Number>
+    template <class VectorType,
+              std::enable_if_t<
+                is_dealii_compatible_distributed_vector<VectorType>::value,
+                VectorType> * = nullptr>
     void
-    Tvmult_add(
-      const unsigned int                                                   dim,
-      const LinearAlgebra::distributed::Vector<Number, MemorySpace::Host> &vv,
-      const internal::SolverGMRESImplementation::TmpVectors<
-        LinearAlgebra::distributed::Vector<Number, MemorySpace::Host>>
-        &             orthogonal_vectors,
-      Vector<double> &h)
+    Tvmult_add(const unsigned int dim,
+               const VectorType & vv,
+               const internal::SolverGMRESImplementation::TmpVectors<VectorType>
+                 &             orthogonal_vectors,
+               Vector<double> &h)
     {
-      unsigned int j = 0;
-
-      if (dim <= 128)
+      for (unsigned int b = 0; b < n_blocks(vv); ++b)
         {
-          // optimized path
-          static constexpr unsigned int n_lanes =
-            VectorizedArray<double>::size();
+          unsigned int j = 0;
 
-          VectorizedArray<double> hs[128];
-          for (unsigned int d = 0; d < dim; ++d)
-            hs[d] = 0.0;
+          if (dim <= 128)
+            {
+              // optimized path
+              static constexpr unsigned int n_lanes =
+                VectorizedArray<double>::size();
 
-          unsigned int c = 0;
+              VectorizedArray<double> hs[128];
+              for (unsigned int d = 0; d < dim; ++d)
+                hs[d] = 0.0;
 
-          for (; c < vv.locally_owned_size() / n_lanes / 4;
-               ++c, j += n_lanes * 4)
-            for (unsigned int i = 0; i < dim; ++i)
-              {
-                VectorizedArray<double> vvec[4];
-                for (unsigned int k = 0; k < 4; ++k)
-                  vvec[k].load(vv.begin() + j + k * n_lanes);
+              unsigned int c = 0;
 
-                for (unsigned int k = 0; k < 4; ++k)
+              for (; c < block(vv, b).locally_owned_size() / n_lanes / 4;
+                   ++c, j += n_lanes * 4)
+                for (unsigned int i = 0; i < dim; ++i)
                   {
-                    VectorizedArray<double> temp;
-                    temp.load(orthogonal_vectors[i].begin() + j + k * n_lanes);
-                    hs[i] += temp * vvec[k];
+                    VectorizedArray<double> vvec[4];
+                    for (unsigned int k = 0; k < 4; ++k)
+                      vvec[k].load(block(vv, b).begin() + j + k * n_lanes);
+
+                    for (unsigned int k = 0; k < 4; ++k)
+                      {
+                        VectorizedArray<double> temp;
+                        temp.load(block(orthogonal_vectors[i], b).begin() + j +
+                                  k * n_lanes);
+                        hs[i] += temp * vvec[k];
+                      }
                   }
-              }
 
-          c *= 4;
-          for (; c < vv.locally_owned_size() / n_lanes; ++c, j += n_lanes)
-            for (unsigned int i = 0; i < dim; ++i)
-              {
-                VectorizedArray<double> vvec, temp;
-                vvec.load(vv.begin() + j);
-                temp.load(orthogonal_vectors[i].begin() + j);
-                hs[i] += temp * vvec;
-              }
+              c *= 4;
+              for (; c < block(vv, b).locally_owned_size() / n_lanes;
+                   ++c, j += n_lanes)
+                for (unsigned int i = 0; i < dim; ++i)
+                  {
+                    VectorizedArray<double> vvec, temp;
+                    vvec.load(block(vv, b).begin() + j);
+                    temp.load(block(orthogonal_vectors[i], b).begin() + j);
+                    hs[i] += temp * vvec;
+                  }
 
-          for (unsigned int i = 0; i < dim; ++i)
-            for (unsigned int v = 0; v < n_lanes; ++v)
-              h(i) += hs[i][v];
-        }
+              for (unsigned int i = 0; i < dim; ++i)
+                for (unsigned int v = 0; v < n_lanes; ++v)
+                  h(i) += hs[i][v];
+            }
 
-      // remainder loop of optimized path or non-optimized path (if
-      // dim>128)
-      for (; j < vv.locally_owned_size(); ++j)
-        for (unsigned int i = 0; i < dim; ++i)
-          h(i) += orthogonal_vectors[i].local_element(j) * vv.local_element(j);
+          // remainder loop of optimized path or non-optimized path (if
+          // dim>128)
+          for (; j < block(vv, b).locally_owned_size(); ++j)
+            for (unsigned int i = 0; i < dim; ++i)
+              h(i) += block(orthogonal_vectors[i], b).local_element(j) *
+                      block(vv, b).local_element(j);
+        }
 
       Utilities::MPI::sum(h, MPI_COMM_WORLD, h);
     }
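    // The kernel above and the one below together form one classical
    // Gram-Schmidt sweep: Tvmult_add accumulates all projection coefficients
    //   h(i) = (orthogonal_vectors[i], vv),   i = 0, ..., dim - 1,
    // in a single pass over the data, and substract_and_norm then computes
    //   vv <- vv - sum_i h(i) * orthogonal_vectors[i]
    // and returns ||vv||_2. Both kernels now run block by block, so block
    // vectors reuse the same vectorized inner loops.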
 
 
 
-    template <class VectorType>
+    template <class VectorType,
+              std::enable_if_t<
+                !is_dealii_compatible_distributed_vector<VectorType>::value,
+                VectorType> * = nullptr>
     double
     substract_and_norm(
       const unsigned int dim,
@@ -829,84 +942,97 @@ namespace internal
 
 
 
-    template <class Number>
+    template <class VectorType,
+              std::enable_if_t<
+                is_dealii_compatible_distributed_vector<VectorType>::value,
+                VectorType> * = nullptr>
     double
     substract_and_norm(
       const unsigned int dim,
-      const internal::SolverGMRESImplementation::TmpVectors<
-        LinearAlgebra::distributed::Vector<Number, MemorySpace::Host>>
+      const internal::SolverGMRESImplementation::TmpVectors<VectorType>
         &                   orthogonal_vectors,
       const Vector<double> &h,
-      LinearAlgebra::distributed::Vector<Number, MemorySpace::Host> &vv)
+      VectorType &          vv)
     {
       static constexpr unsigned int n_lanes = VectorizedArray<double>::size();
 
-      double                  norm_vv_temp            = 0.0;
-      VectorizedArray<double> norm_vv_temp_vectorized = 0.0;
+      double norm_vv_temp = 0.0;
 
-      unsigned int j = 0;
-      unsigned int c = 0;
-      for (; c < vv.locally_owned_size() / n_lanes / 4; ++c, j += n_lanes * 4)
+      for (unsigned int b = 0; b < n_blocks(vv); ++b)
         {
-          VectorizedArray<double> temp[4];
+          VectorizedArray<double> norm_vv_temp_vectorized = 0.0;
 
-          for (unsigned int k = 0; k < 4; ++k)
-            temp[k].load(vv.begin() + j + k * n_lanes);
-
-          for (unsigned int i = 0; i < dim; ++i)
+          unsigned int j = 0;
+          unsigned int c = 0;
+          for (; c < block(vv, b).locally_owned_size() / n_lanes / 4;
+               ++c, j += n_lanes * 4)
             {
-              const double factor = h(i);
+              VectorizedArray<double> temp[4];
+
               for (unsigned int k = 0; k < 4; ++k)
+                temp[k].load(block(vv, b).begin() + j + k * n_lanes);
+
+              for (unsigned int i = 0; i < dim; ++i)
                 {
-                  VectorizedArray<double> vec;
-                  vec.load(orthogonal_vectors[i].begin() + j + k * n_lanes);
-                  temp[k] -= factor * vec;
+                  const double factor = h(i);
+                  for (unsigned int k = 0; k < 4; ++k)
+                    {
+                      VectorizedArray<double> vec;
+                      vec.load(block(orthogonal_vectors[i], b).begin() + j +
+                               k * n_lanes);
+                      temp[k] -= factor * vec;
+                    }
                 }
-            }
-
-          for (unsigned int k = 0; k < 4; ++k)
-            temp[k].store(vv.begin() + j + k * n_lanes);
 
-          norm_vv_temp_vectorized += (temp[0] * temp[0] + temp[1] * temp[1]) +
-                                     (temp[2] * temp[2] + temp[3] * temp[3]);
-        }
+              for (unsigned int k = 0; k < 4; ++k)
+                temp[k].store(block(vv, b).begin() + j + k * n_lanes);
 
-      c *= 4;
-      for (; c < vv.locally_owned_size() / n_lanes; ++c, j += n_lanes)
-        {
-          VectorizedArray<double> temp;
-          temp.load(vv.begin() + j);
+              norm_vv_temp_vectorized +=
+                (temp[0] * temp[0] + temp[1] * temp[1]) +
+                (temp[2] * temp[2] + temp[3] * temp[3]);
+            }
 
-          for (unsigned int i = 0; i < dim; ++i)
+          c *= 4;
+          for (; c < block(vv, b).locally_owned_size() / n_lanes;
+               ++c, j += n_lanes)
             {
-              VectorizedArray<double> vec;
-              vec.load(orthogonal_vectors[i].begin() + j);
-              temp -= h(i) * vec;
-            }
+              VectorizedArray<double> temp;
+              temp.load(block(vv, b).begin() + j);
 
-          temp.store(vv.begin() + j);
+              for (unsigned int i = 0; i < dim; ++i)
+                {
+                  VectorizedArray<double> vec;
+                  vec.load(block(orthogonal_vectors[i], b).begin() + j);
+                  temp -= h(i) * vec;
+                }
 
-          norm_vv_temp_vectorized += temp * temp;
-        }
+              temp.store(block(vv, b).begin() + j);
+
+              norm_vv_temp_vectorized += temp * temp;
+            }
 
-      for (unsigned int v = 0; v < n_lanes; ++v)
-        norm_vv_temp += norm_vv_temp_vectorized[v];
+          for (unsigned int v = 0; v < n_lanes; ++v)
+            norm_vv_temp += norm_vv_temp_vectorized[v];
 
-      for (; j < vv.locally_owned_size(); ++j)
-        {
-          double temp = vv.local_element(j);
-          for (unsigned int i = 0; i < dim; ++i)
-            temp -= h(i) * orthogonal_vectors[i].local_element(j);
-          vv.local_element(j) = temp;
+          for (; j < block(vv, b).locally_owned_size(); ++j)
+            {
+              double temp = block(vv, b).local_element(j);
+              for (unsigned int i = 0; i < dim; ++i)
+                temp -= h(i) * block(orthogonal_vectors[i], b).local_element(j);
+              block(vv, b).local_element(j) = temp;
 
-          norm_vv_temp += temp * temp;
+              norm_vv_temp += temp * temp;
+            }
         }
 
       return std::sqrt(Utilities::MPI::sum(norm_vv_temp, MPI_COMM_WORLD));
     }
 
 
-    template <class VectorType>
+    template <class VectorType,
+              std::enable_if_t<
+                !is_dealii_compatible_distributed_vector<VectorType>::value,
+                VectorType> * = nullptr>
     double
     sadd_and_norm(VectorType &      v,
                   const double      factor_a,
@@ -918,32 +1044,38 @@ namespace internal
     }
 
 
-    template <class Number>
+    template <class VectorType,
+              std::enable_if_t<
+                is_dealii_compatible_distributed_vector<VectorType>::value,
+                VectorType> * = nullptr>
     double
-    sadd_and_norm(
-      LinearAlgebra::distributed::Vector<Number, MemorySpace::Host> &v,
-      const double                                                   factor_a,
-      const LinearAlgebra::distributed::Vector<Number, MemorySpace::Host> &b,
-      const double factor_b)
+    sadd_and_norm(VectorType &      v,
+                  const double      factor_a,
+                  const VectorType &w,
+                  const double      factor_b)
     {
       double norm = 0;
 
-      for (unsigned int j = 0; j < v.locally_owned_size(); ++j)
-        {
-          const double temp =
-            v.local_element(j) * factor_a + b.local_element(j) * factor_b;
+      for (unsigned int b = 0; b < n_blocks(v); ++b)
+        for (unsigned int j = 0; j < block(v, b).locally_owned_size(); ++j)
+          {
+            const double temp = block(v, b).local_element(j) * factor_a +
+                                block(w, b).local_element(j) * factor_b;
 
-          v.local_element(j) = temp;
+            block(v, b).local_element(j) = temp;
 
-          norm += temp * temp;
-        }
+            norm += temp * temp;
+          }
 
       return std::sqrt(Utilities::MPI::sum(norm, MPI_COMM_WORLD));
     }
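    // Likewise, sadd_and_norm computes v <- factor_a * v + factor_b * w and
    // returns the global l2 norm of the result in a single pass; the block
    // variant simply repeats the scalar loop for every block.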
 
 
 
-    template <class VectorType>
+    template <class VectorType,
+              std::enable_if_t<
+                !is_dealii_compatible_distributed_vector<VectorType>::value,
+                VectorType> * = nullptr>
     void
     add(VectorType &          p,
         const unsigned int    dim,
@@ -963,23 +1095,26 @@ namespace internal
 
 
 
-    template <class Number>
+    template <class VectorType,
+              std::enable_if_t<
+                is_dealii_compatible_distributed_vector<VectorType>::value,
+                VectorType> * = nullptr>
     void
-    add(LinearAlgebra::distributed::Vector<Number, MemorySpace::Host> &p,
-        const unsigned int                                             dim,
-        const Vector<double> &                                         h,
-        const internal::SolverGMRESImplementation::TmpVectors<
-          LinearAlgebra::distributed::Vector<Number, MemorySpace::Host>>
+    add(VectorType &          p,
+        const unsigned int    dim,
+        const Vector<double> &h,
+        const internal::SolverGMRESImplementation::TmpVectors<VectorType>
           &        tmp_vectors,
         const bool zero_out)
     {
-      for (unsigned int j = 0; j < p.locally_owned_size(); ++j)
-        {
-          double temp = zero_out ? 0 : p.local_element(j);
-          for (unsigned int i = 0; i < dim; ++i)
-            temp += tmp_vectors[i].local_element(j) * h(i);
-          p.local_element(j) = temp;
-        }
+      for (unsigned int b = 0; b < n_blocks(p); ++b)
+        for (unsigned int j = 0; j < block(p, b).locally_owned_size(); ++j)
+          {
+            double temp = zero_out ? 0 : block(p, b).local_element(j);
+            for (unsigned int i = 0; i < dim; ++i)
+              temp += block(tmp_vectors[i], b).local_element(j) * h(i);
+            block(p, b).local_element(j) = temp;
+          }
     }
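
To summarize the dispatch: vector types for which the trait is false (for example the serial Vector class or the PETSc and Trilinos wrappers) keep using the generic overloads, while LinearAlgebra::distributed::Vector and LinearAlgebra::distributed::BlockVector share the vectorized kernels, processed block by block.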
 
 
index 586982087dfc4a1177a0b99d1815a87a1fd8f575..fb7ccad9662a2a7c9a9c131d57492a2e6926a886 100644 (file)
@@ -18,6 +18,7 @@
 
 
 #include <deal.II/lac/diagonal_matrix.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
 #include <deal.II/lac/la_parallel_vector.h>
 #include <deal.II/lac/precondition.h>
 #include <deal.II/lac/solver_gmres.h>
@@ -39,6 +40,21 @@ struct MyDiagonalMatrix
     dst.scale(diagonal);
   }
 
+  void
+  vmult(LinearAlgebra::distributed::BlockVector<double> &      dst,
+        const LinearAlgebra::distributed::BlockVector<double> &src) const
+  {
+    dst = src;
+    for (unsigned int b = 0; b < src.n_blocks(); ++b)
+      dst.block(b).scale(diagonal);
+  }
+
+  unsigned int
+  m() const
+  {
+    return diagonal.size();
+  }
+
   const LinearAlgebra::distributed::Vector<double> &diagonal;
 };
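
  // Annotation (not part of the patch): the block-vector vmult applies the
  // same diagonal to each block, and the test below uses a single block, so
  // the block solves are expected to reproduce the scalar residual history.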
 
@@ -55,38 +71,75 @@ monitor_norm(const unsigned int iteration,
 }
 
 
+
+SolverControl::State
+monitor_norm_block(const unsigned int iteration,
+                   const double       check_value,
+                   const LinearAlgebra::distributed::BlockVector<double> &)
+{
+  deallog << "   estimated residual at iteration " << iteration << ": "
+          << check_value << std::endl;
+  return SolverControl::success;
+}
+
+
 int
 main()
 {
   initlog();
 
   // Create diagonal matrix with entries between 1 and 30
-  DiagonalMatrix<LinearAlgebra::distributed::Vector<double>> unit_matrix;
-  unit_matrix.get_vector().reinit(30);
-  unit_matrix.get_vector() = 1.0;
+  LinearAlgebra::distributed::Vector<double> matrix_entries_(30);
+  matrix_entries_ = 1.0;
+  MyDiagonalMatrix unit_matrix(matrix_entries_);
 
   LinearAlgebra::distributed::Vector<double> matrix_entries(unit_matrix.m());
   for (unsigned int i = 0; i < unit_matrix.m(); ++i)
     matrix_entries(i) = i + 1;
   MyDiagonalMatrix matrix(matrix_entries);
 
-  LinearAlgebra::distributed::Vector<double> rhs(unit_matrix.m()),
-    sol(unit_matrix.m());
+  LinearAlgebra::distributed::Vector<double> rhs(unit_matrix.m());
+  LinearAlgebra::distributed::Vector<double> sol(unit_matrix.m());
   rhs = 1.;
 
-  deallog << "Solve with PreconditionIdentity: " << std::endl;
-  SolverControl control(40, 1e-4);
-  SolverGMRES<LinearAlgebra::distributed::Vector<double>>::AdditionalData data3(
-    8);
-  data3.orthogonalization_strategy =
-    SolverGMRES<LinearAlgebra::distributed::Vector<double>>::AdditionalData::
-      OrthogonalizationStrategy::classical_gram_schmidt;
-  SolverGMRES<LinearAlgebra::distributed::Vector<double>> solver(control,
-                                                                 data3);
-  solver.connect(&monitor_norm);
-  solver.solve(matrix, sol, rhs, PreconditionIdentity());
-
-  deallog << "Solve with diagonal preconditioner: " << std::endl;
-  sol = 0;
-  solver.solve(matrix, sol, rhs, unit_matrix);
+  {
+    deallog << "Solve with PreconditionIdentity: " << std::endl;
+    SolverControl control(40, 1e-4);
+    SolverGMRES<LinearAlgebra::distributed::Vector<double>>::AdditionalData
+      data3(8);
+    data3.orthogonalization_strategy =
+      SolverGMRES<LinearAlgebra::distributed::Vector<double>>::AdditionalData::
+        OrthogonalizationStrategy::classical_gram_schmidt;
+    SolverGMRES<LinearAlgebra::distributed::Vector<double>> solver(control,
+                                                                   data3);
+    solver.connect(&monitor_norm);
+    solver.solve(matrix, sol, rhs, PreconditionIdentity());
+
+    deallog << "Solve with diagonal preconditioner: " << std::endl;
+    sol = 0;
+    solver.solve(matrix, sol, rhs, unit_matrix);
+  }
+
+  {
+    LinearAlgebra::distributed::BlockVector<double> rhs_(1);
+    LinearAlgebra::distributed::BlockVector<double> sol_(1);
+    rhs_.block(0) = rhs;
+    sol_.block(0) = sol;
+
+    deallog << "Solve with PreconditionIdentity: " << std::endl;
+    SolverControl control(40, 1e-4);
+    SolverGMRES<LinearAlgebra::distributed::BlockVector<double>>::AdditionalData
+      data3(8);
+    data3.orthogonalization_strategy =
+      SolverGMRES<LinearAlgebra::distributed::BlockVector<double>>::
+        AdditionalData::OrthogonalizationStrategy::classical_gram_schmidt;
+    SolverGMRES<LinearAlgebra::distributed::BlockVector<double>> solver(control,
+                                                                        data3);
+    solver.connect(&monitor_norm_block);
+    solver.solve(matrix, sol_, rhs_, PreconditionIdentity());
+
+    deallog << "Solve with diagonal preconditioner: " << std::endl;
+    sol_ = 0;
+    solver.solve(matrix, sol_, rhs_, unit_matrix);
+  }
 }
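
The new expected output below appends the two block-vector solves to the existing scalar results: the first solve starts from sol_, which was seeded with the already converged scalar solution, so it terminates at iteration 0 with the final scalar residual 9.20930e-05; after resetting sol_ = 0, the preconditioned solve reproduces the scalar residual history line by line.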
index fcf658979375bb1d1c391b7b3d2fb566b38900c4..2d729f866d554628edbcbf72f8279148b3e0e3a1 100644 (file)
@@ -87,3 +87,51 @@ DEAL:GMRES::   estimated residual at iteration 33: 0.000183614
 DEAL:GMRES::   estimated residual at iteration 34: 0.000129847
 DEAL:GMRES::Convergence step 35 value 9.20930e-05
 DEAL:GMRES::   estimated residual at iteration 35: 9.20930e-05
+DEAL::Solve with PreconditionIdentity: 
+DEAL:GMRES::Starting value 9.20930e-05
+DEAL:GMRES::Convergence step 0 value 9.20930e-05
+DEAL:GMRES::   estimated residual at iteration 0: 9.20930e-05
+DEAL::Solve with diagonal preconditioner: 
+DEAL:GMRES::Starting value 5.47723
+DEAL:GMRES::   estimated residual at iteration 0: 5.47723
+DEAL:GMRES::   estimated residual at iteration 1: 2.67042
+DEAL:GMRES::   estimated residual at iteration 2: 1.70538
+DEAL:GMRES::   estimated residual at iteration 3: 1.20190
+DEAL:GMRES::   estimated residual at iteration 4: 0.884567
+DEAL:GMRES::   estimated residual at iteration 5: 0.662215
+DEAL:GMRES::   estimated residual at iteration 6: 0.496462
+DEAL:GMRES::   estimated residual at iteration 6: 0.496462
+DEAL:GMRES::   estimated residual at iteration 7: 0.419909
+DEAL:GMRES::   estimated residual at iteration 8: 0.339229
+DEAL:GMRES::   estimated residual at iteration 9: 0.260767
+DEAL:GMRES::   estimated residual at iteration 10: 0.189115
+DEAL:GMRES::   estimated residual at iteration 11: 0.126467
+DEAL:GMRES::   estimated residual at iteration 12: 0.0736025
+DEAL:GMRES::   estimated residual at iteration 12: 0.0736025
+DEAL:GMRES::   estimated residual at iteration 13: 0.0505275
+DEAL:GMRES::   estimated residual at iteration 14: 0.0360998
+DEAL:GMRES::   estimated residual at iteration 15: 0.0253697
+DEAL:GMRES::   estimated residual at iteration 16: 0.0184200
+DEAL:GMRES::   estimated residual at iteration 17: 0.0142333
+DEAL:GMRES::   estimated residual at iteration 18: 0.0116662
+DEAL:GMRES::   estimated residual at iteration 18: 0.0116662
+DEAL:GMRES::   estimated residual at iteration 19: 0.00989870
+DEAL:GMRES::   estimated residual at iteration 20: 0.00800703
+DEAL:GMRES::   estimated residual at iteration 21: 0.00604107
+DEAL:GMRES::   estimated residual at iteration 22: 0.00431736
+DEAL:GMRES::   estimated residual at iteration 23: 0.00304828
+DEAL:GMRES::   estimated residual at iteration 24: 0.00203679
+DEAL:GMRES::   estimated residual at iteration 24: 0.00203679
+DEAL:GMRES::   estimated residual at iteration 25: 0.00141669
+DEAL:GMRES::   estimated residual at iteration 26: 0.00101755
+DEAL:GMRES::   estimated residual at iteration 27: 0.000724887
+DEAL:GMRES::   estimated residual at iteration 28: 0.000535249
+DEAL:GMRES::   estimated residual at iteration 29: 0.000423665
+DEAL:GMRES::   estimated residual at iteration 30: 0.000358414
+DEAL:GMRES::   estimated residual at iteration 30: 0.000358414
+DEAL:GMRES::   estimated residual at iteration 31: 0.000307521
+DEAL:GMRES::   estimated residual at iteration 32: 0.000247031
+DEAL:GMRES::   estimated residual at iteration 33: 0.000183614
+DEAL:GMRES::   estimated residual at iteration 34: 0.000129847
+DEAL:GMRES::Convergence step 35 value 9.20930e-05
+DEAL:GMRES::   estimated residual at iteration 35: 9.20930e-05
