https://gitweb.dealii.org/ - dealii.git/commitdiff
Added support for PETSc in MGTransferPrebuilt, internal::MatrixSelector (mg_transfer... 6106/head
author Alexander Knieps <alexanderrobom@web.de>
Sun, 4 Mar 2018 19:52:34 +0000 (20:52 +0100)
committer Alexander Knieps <alexanderrobom@web.de>
Mon, 21 May 2018 16:19:29 +0000 (18:19 +0200)
cmake/config/template-arguments.in
doc/news/changes/minor/20180521AlexanderKnieps [new file with mode: 0644]
include/deal.II/multigrid/mg_transfer.h
include/deal.II/multigrid/mg_transfer.templates.h
source/lac/petsc_parallel_sparse_matrix.cc
source/multigrid/mg_level_global_transfer.inst.in
source/multigrid/mg_transfer_prebuilt.cc
tests/multigrid/transfer_04a.cc
tests/multigrid/transfer_04b.cc [new file with mode: 0644]
tests/multigrid/transfer_04b.with_trilinos=true.mpirun=1.debug.output [new file with mode: 0644]
tests/multigrid/transfer_04b.with_trilinos=true.mpirun=2.debug.output [new file with mode: 0644]

diff --git a/cmake/config/template-arguments.in b/cmake/config/template-arguments.in
index fc9466d78d37a092098f1a072333707fd6ca513c..a41532a2962173a2d011b7722d3cfe9fd28804aa 100644 (file)
@@ -168,6 +168,7 @@ VECTORS_WITH_MATRIX := { Vector<double>;
 
                          @DEAL_II_EXPAND_TRILINOS_MPI_VECTOR@;
                          @DEAL_II_EXPAND_EPETRA_VECTOR@;
+                         @DEAL_II_EXPAND_PETSC_MPI_VECTOR@;
                        }
 
 // Matrices
diff --git a/doc/news/changes/minor/20180521AlexanderKnieps b/doc/news/changes/minor/20180521AlexanderKnieps
new file mode 100644 (file)
index 0000000..be296b5
--- /dev/null
@@ -0,0 +1,5 @@
+Improved: MGTransferPrebuilt now supports
+PETScWrappers::MPI::Vector and
+PETScWrappers::MPI::SparseMatrix.
+<br>
+(Alexander Knieps, 2018/05/21)
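For orientation, here is a minimal usage sketch of the combination described in this changelog entry, condensed from the new test tests/multigrid/transfer_04b.cc further below. It assumes a DoFHandler "dofh" and a parallel::distributed::Triangulation "tr" that were created with construct_multigrid_hierarchy and on which distribute_mg_dofs() has been called, as in that test; the snippet itself is illustrative only and not part of the commit.

#include <deal.II/base/mg_level_object.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/multigrid/mg_transfer.h>

using namespace dealii;

template <int dim>
void run_prebuilt_transfer(const DoFHandler<dim>                           &dofh,
                           const parallel::distributed::Triangulation<dim> &tr)
{
  typedef PETScWrappers::MPI::Vector vector_t;

  // Build the level prolongation matrices; with this commit the PETSc
  // vector/matrix pair is a valid choice for the VectorType template argument.
  MGTransferPrebuilt<vector_t> transfer;
  transfer.build_matrices(dofh);

  // One PETSc vector per level, sized by the locally owned level DoFs.
  MGLevelObject<vector_t> u(0, tr.n_global_levels() - 1);
  for (unsigned int level = u.min_level(); level <= u.max_level(); ++level)
    u[level].reinit(dofh.locally_owned_mg_dofs(level), tr.get_communicator());

  // Collect the level vectors into a single global vector.
  vector_t v;
  v.reinit(dofh.locally_owned_dofs(), tr.get_communicator());
  transfer.copy_from_mg(dofh, v, u);
}

copy_to_mg() and copy_from_mg_add() are instantiated for PETScWrappers::MPI::Vector as well (see the change to mg_level_global_transfer.inst.in below) and work the same way in the opposite direction.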
diff --git a/include/deal.II/multigrid/mg_transfer.h b/include/deal.II/multigrid/mg_transfer.h
index 2608dedbf805bf6373226b9a5d0ed2f2bf04512a..4067f4716bc3cd4d749011dc3b454e6a5feefd87 100644 (file)
@@ -24,6 +24,8 @@
 #include <deal.II/lac/block_sparsity_pattern.h>
 #include <deal.II/lac/trilinos_sparse_matrix.h>
 #include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
 
 #include <deal.II/lac/vector_memory.h>
 
@@ -47,6 +49,8 @@ namespace internal
     typedef ::dealii::SparsityPattern Sparsity;
     typedef ::dealii::SparseMatrix<typename VectorType::value_type> Matrix;
 
+    static const bool requires_distributed_sparsity_pattern = false;
+
     template <typename SparsityPatternType, typename DoFHandlerType>
     static void reinit(Matrix &matrix, Sparsity &sparsity, int level, const SparsityPatternType &sp, const DoFHandlerType &)
     {
@@ -63,6 +67,8 @@ namespace internal
     typedef ::dealii::TrilinosWrappers::SparsityPattern Sparsity;
     typedef ::dealii::TrilinosWrappers::SparseMatrix Matrix;
 
+    static const bool requires_distributed_sparsity_pattern = false;
+
     template <typename SparsityPatternType, typename DoFHandlerType>
     static void reinit(Matrix &matrix, Sparsity &, int level, const SparsityPatternType &sp, DoFHandlerType &dh)
     {
@@ -86,6 +92,8 @@ namespace internal
     typedef ::dealii::TrilinosWrappers::SparsityPattern Sparsity;
     typedef ::dealii::TrilinosWrappers::SparseMatrix Matrix;
 
+    static const bool requires_distributed_sparsity_pattern = false;
+
     template <typename SparsityPatternType, typename DoFHandlerType>
     static void reinit(Matrix &matrix, Sparsity &, int level, const SparsityPatternType &sp, DoFHandlerType &dh)
     {
@@ -109,6 +117,8 @@ namespace internal
     typedef ::dealii::TrilinosWrappers::SparsityPattern Sparsity;
     typedef ::dealii::TrilinosWrappers::SparseMatrix Matrix;
 
+    static const bool requires_distributed_sparsity_pattern = false;
+
     template <typename SparsityPatternType, typename DoFHandlerType>
     static void reinit(Matrix &matrix, Sparsity &, int level, const SparsityPatternType &sp, DoFHandlerType &dh)
     {
@@ -133,6 +143,8 @@ namespace internal
     typedef ::dealii::SparsityPattern Sparsity;
     typedef ::dealii::SparseMatrix<Number> Matrix;
 
+    static const bool requires_distributed_sparsity_pattern = false;
+
     template <typename SparsityPatternType, typename DoFHandlerType>
     static void reinit(Matrix &, Sparsity &, int, const SparsityPatternType &, const DoFHandlerType &)
     {
@@ -143,6 +155,33 @@ namespace internal
   };
 
 #endif
+
+#ifdef DEAL_II_WITH_PETSC
+  template <>
+  struct MatrixSelector<dealii::PETScWrappers::MPI::Vector>
+  {
+    typedef ::dealii::DynamicSparsityPattern Sparsity;
+    typedef ::dealii::PETScWrappers::MPI::SparseMatrix Matrix;
+
+    static const bool requires_distributed_sparsity_pattern = true;
+
+    template <typename SparsityPatternType, typename DoFHandlerType>
+    static void reinit(Matrix &matrix, Sparsity &, int level, const SparsityPatternType &sp, const DoFHandlerType &dh)
+    {
+      const parallel::Triangulation<DoFHandlerType::dimension,DoFHandlerType::space_dimension> *dist_tria =
+        dynamic_cast<const parallel::Triangulation<DoFHandlerType::dimension,DoFHandlerType::space_dimension>*>
+        (&(dh.get_triangulation()));
+      MPI_Comm communicator = dist_tria != nullptr ?
+                              dist_tria->get_communicator() :
+                              MPI_COMM_SELF;
+      // Reinit PETSc matrix
+      matrix.reinit(dh.locally_owned_mg_dofs(level+1),
+                    dh.locally_owned_mg_dofs(level),
+                    sp, communicator);
+    }
+
+  };
+#endif
 }
 
 /*
diff --git a/include/deal.II/multigrid/mg_transfer.templates.h b/include/deal.II/multigrid/mg_transfer.templates.h
index 49c72ebc4133dd4162aa9746e9e69b9b17448100..1130d794b9383fb613b2c3e41ff56fadf1477d67 100644 (file)
@@ -19,6 +19,7 @@
 
 #include <deal.II/lac/trilinos_vector.h>
 #include <deal.II/lac/trilinos_epetra_vector.h>
+#include <deal.II/lac/petsc_parallel_vector.h>
 #include <deal.II/lac/sparse_matrix.h>
 #include <deal.II/lac/vector_view.h>
 #include <deal.II/grid/tria_iterator.h>
@@ -152,6 +153,31 @@ namespace
       }
   }
 #endif
+
+#ifdef DEAL_II_WITH_PETSC
+  /**
+   * Adjust vectors on all levels to correct size.  Here, we just count the
+   * numbers of degrees of freedom on each level and @p reinit each level
+   * vector to this length.
+   */
+  template <int dim, int spacedim>
+  void
+  reinit_vector (const dealii::DoFHandler<dim,spacedim> &mg_dof,
+                 const std::vector<unsigned int> &,
+                 MGLevelObject<PETScWrappers::MPI::Vector> &v)
+  {
+    const dealii::parallel::Triangulation<dim,spacedim> *tria =
+      (dynamic_cast<const parallel::Triangulation<dim,spacedim>*>
+       (&mg_dof.get_triangulation()));
+    AssertThrow(tria!=nullptr, ExcMessage("multigrid with parallel PETSc vectors only works with a parallel Triangulation!"));
+
+    for (unsigned int level=v.min_level();
+         level<=v.max_level(); ++level)
+      {
+        v[level].reinit(mg_dof.locally_owned_mg_dofs(level), tria->get_communicator());
+      }
+  }
+#endif
 }
 
 
diff --git a/source/lac/petsc_parallel_sparse_matrix.cc b/source/lac/petsc_parallel_sparse_matrix.cc
index 08ac98f6dfe3670fc3e3bb2242f3bca52e3c7ad9..85f34f07e47536c16c10aea897dbac41ee1dafa0 100644 (file)
@@ -382,7 +382,6 @@ namespace PETScWrappers
       //if (preset_nonzero_locations == true)
       if (local_rows.n_elements()>0)
         {
-          Assert(local_columns.n_elements()>0, ExcInternalError());
           // MatMPIAIJSetPreallocationCSR
           // can be used to allocate the sparsity
           // pattern of a matrix
diff --git a/source/multigrid/mg_level_global_transfer.inst.in b/source/multigrid/mg_level_global_transfer.inst.in
index d9adbd8cee5438fcd8242a6d7fdc2e63f26921e4..d67f14254b7c914867c787576a8a4f2e23bb6bbe 100644 (file)
@@ -81,4 +81,16 @@ for(deal_II_dimension : DIMENSIONS)
     MGLevelGlobalTransfer<TrilinosWrappers::MPI::Vector>::copy_from_mg_add (const DoFHandler<deal_II_dimension>&, TrilinosWrappers::MPI::Vector&,
             const MGLevelObject<TrilinosWrappers::MPI::Vector>&) const;
 #endif
+
+#ifdef DEAL_II_WITH_PETSC
+    template void
+    MGLevelGlobalTransfer<PETScWrappers::MPI::Vector>::copy_to_mg (
+        const DoFHandler<deal_II_dimension>&, MGLevelObject<PETScWrappers::MPI::Vector>&, const PETScWrappers::MPI::Vector&) const;
+    template void
+    MGLevelGlobalTransfer<PETScWrappers::MPI::Vector>::copy_from_mg (const DoFHandler<deal_II_dimension>&, PETScWrappers::MPI::Vector&,
+            const MGLevelObject<PETScWrappers::MPI::Vector>&) const;
+    template void
+    MGLevelGlobalTransfer<PETScWrappers::MPI::Vector>::copy_from_mg_add (const DoFHandler<deal_II_dimension>&, PETScWrappers::MPI::Vector&,
+            const MGLevelObject<PETScWrappers::MPI::Vector>&) const;
+#endif
 }
diff --git a/source/multigrid/mg_transfer_prebuilt.cc b/source/multigrid/mg_transfer_prebuilt.cc
index dffa44fdb70bcb6ba7a387d0581f70883091481b..b2b0f6f3312464e1f1365105dc5c2eba05123a6b 100644 (file)
@@ -27,6 +27,7 @@
 #include <deal.II/lac/sparse_matrix.h>
 #include <deal.II/lac/block_sparse_matrix.h>
 #include <deal.II/lac/dynamic_sparsity_pattern.h>
+#include <deal.II/lac/sparsity_tools.h>
 #include <deal.II/grid/tria.h>
 #include <deal.II/grid/tria_iterator.h>
 #include <deal.II/dofs/dof_tools.h>
@@ -236,6 +237,41 @@ void MGTransferPrebuilt<VectorType>::build_matrices
               }
           }
 
+#ifdef DEAL_II_WITH_MPI
+      if (internal::MatrixSelector<VectorType>::requires_distributed_sparsity_pattern)
+        {
+          // Since PETSc matrices do not offer the functionality to fill up
+          // incomplete sparsity patterns on their own, the sparsity pattern
+          // must be manually distributed.
+
+          // Retrieve communicator from triangulation if it is parallel
+          const parallel::Triangulation<dim,spacedim> *dist_tria =
+            dynamic_cast<const parallel::Triangulation<dim,spacedim>*>
+            (&(mg_dof.get_triangulation()));
+
+          MPI_Comm communicator = dist_tria != nullptr ?
+                                  dist_tria->get_communicator() :
+                                  MPI_COMM_SELF;
+
+          // Compute # of locally owned MG dofs / processor for distribution
+          const std::vector<::dealii::IndexSet> &locally_owned_mg_dofs_per_processor = mg_dof.locally_owned_mg_dofs_per_processor(level+1);
+          std::vector<::dealii::types::global_dof_index> n_locally_owned_mg_dofs_per_processor(locally_owned_mg_dofs_per_processor.size(), 0);
+
+          for (size_t index = 0; index < n_locally_owned_mg_dofs_per_processor.size(); ++index)
+            {
+              n_locally_owned_mg_dofs_per_processor[index] = locally_owned_mg_dofs_per_processor[index].n_elements();
+            }
+
+          // Distribute sparsity pattern
+          ::dealii::SparsityTools::distribute_sparsity_pattern(
+            dsp,
+            n_locally_owned_mg_dofs_per_processor,
+            communicator,
+            dsp.row_index_set()
+          );
+        }
+#endif
+
       internal::MatrixSelector<VectorType>::reinit(*prolongation_matrices[level],
                                                    *prolongation_sparsities[level],
                                                    level,
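For orientation, a condensed sketch of the sequence implemented above, stripped of the multigrid bookkeeping: the locally assembled DynamicSparsityPattern is first completed across MPI ranks with SparsityTools::distribute_sparsity_pattern(), and only then handed to the PETSc matrix via the same reinit() call that internal::MatrixSelector<PETScWrappers::MPI::Vector> performs. The helper name distribute_and_reinit and its parameters (index sets of locally owned rows and columns, per-rank row counts, communicator) are illustrative and not part of the commit.

#include <deal.II/base/index_set.h>
#include <deal.II/base/types.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
#include <deal.II/lac/sparsity_tools.h>

#include <vector>

using namespace dealii;

void distribute_and_reinit(DynamicSparsityPattern                     &dsp,
                           const std::vector<types::global_dof_index> &rows_per_rank,
                           const IndexSet                             &locally_owned_rows,
                           const IndexSet                             &locally_owned_columns,
                           const MPI_Comm                              communicator,
                           PETScWrappers::MPI::SparseMatrix           &matrix)
{
  // Ship every entry that sits in a row owned by another rank to that rank,
  // so each process holds complete rows before PETSc sees the pattern.
  SparsityTools::distribute_sparsity_pattern(dsp,
                                             rows_per_rank,
                                             communicator,
                                             dsp.row_index_set());

  // The same reinit that MatrixSelector<PETScWrappers::MPI::Vector> calls,
  // here with generic row/column index sets instead of level DoF sets.
  matrix.reinit(locally_owned_rows, locally_owned_columns, dsp, communicator);
}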
diff --git a/tests/multigrid/transfer_04a.cc b/tests/multigrid/transfer_04a.cc
index b4ec2728aafeabe4ceaf72d279d271227df6ab33..3ca8048876a6197136f0ce00861208c83792083f 100644 (file)
@@ -14,7 +14,7 @@
 // ---------------------------------------------------------------------
 
 
-// check mg transfer in parallel
+// check mg transfer in parallel for trilinos vectors
 
 #include "../tests.h"
 #include <deal.II/base/function.h>
diff --git a/tests/multigrid/transfer_04b.cc b/tests/multigrid/transfer_04b.cc
new file mode 100644 (file)
index 0000000..dcc0a62
--- /dev/null
@@ -0,0 +1,182 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2006 - 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+// check mg transfer in parallel for PETSc vectors
+
+#include "../tests.h"
+#include <deal.II/base/function.h>
+#include <deal.II/lac/vector.h>
+#include <deal.II/lac/block_vector.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/dofs/function_map.h>
+#include <deal.II/fe/fe_dgq.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_raviart_thomas.h>
+#include <deal.II/fe/fe_system.h>
+#include <deal.II/dofs/dof_accessor.h>
+#include <deal.II/multigrid/mg_tools.h>
+#include <deal.II/multigrid/mg_transfer.h>
+#include <deal.II/multigrid/mg_constrained_dofs.h>
+#include <deal.II/numerics/data_out.h>
+#include <deal.II/lac/petsc_parallel_vector.h>
+
+#include <algorithm>
+
+using namespace std;
+
+template <int dim>
+void setup_tria(parallel::distributed::Triangulation<dim> &tr)
+{
+  GridGenerator::hyper_cube(tr);
+  tr.refine_global(1);
+
+  for (typename parallel::distributed::Triangulation<dim>::active_cell_iterator cell = tr.begin_active();
+       cell != tr.end(); ++cell)
+    {
+      if (cell->id().to_string() != "0_1:3")
+        cell->set_refine_flag();
+    }
+  tr.execute_coarsening_and_refinement();
+
+  for (typename parallel::distributed::Triangulation<dim>::active_cell_iterator cell = tr.begin_active();
+       cell != tr.end(); ++cell)
+    {
+      if (cell->id().to_string() == "0_2:00"
+          || cell->id().to_string() == "0_2:01"
+          || cell->id().to_string() == "0_2:02")
+        cell->set_refine_flag();
+    }
+  tr.execute_coarsening_and_refinement();
+
+
+  for (typename parallel::distributed::Triangulation<dim>::cell_iterator cell = tr.begin();
+       cell != tr.end(); ++cell)
+    {
+      deallog << "cell=" << cell->id()
+              << " level_subdomain_id=" << cell->level_subdomain_id()
+              << std::endl;
+    }
+}
+
+
+
+
+
+
+
+
+
+template <int dim>
+void check_fe(FiniteElement<dim> &fe)
+{
+  deallog << fe.get_name() << std::endl;
+
+  parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD,
+                                               Triangulation<dim>::limit_level_difference_at_vertices,
+                                               parallel::distributed::Triangulation<dim>::construct_multigrid_hierarchy);
+  setup_tria(tr);
+
+  if (false)
+    {
+      DataOut<dim> data_out;
+      Vector<float> subdomain (tr.n_active_cells());
+      for (unsigned int i=0; i<subdomain.size(); ++i)
+        subdomain(i) = tr.locally_owned_subdomain();
+      data_out.attach_triangulation (tr);
+      data_out.add_data_vector (subdomain, "subdomain");
+      data_out.build_patches (0);
+      const std::string filename = ("solution." +
+                                    Utilities::int_to_string
+                                    (tr.locally_owned_subdomain(), 4) +
+                                    ".vtu");
+      std::ofstream output (filename.c_str());
+      data_out.write_vtu (output);
+
+    }
+
+
+  Functions::ZeroFunction<dim> zero;
+  typename FunctionMap<dim>::type fmap;
+  fmap.insert(std::make_pair(0, &zero));
+
+  DoFHandler<dim> dofh(tr);
+  dofh.distribute_dofs(fe);
+  dofh.distribute_mg_dofs(fe);
+  typedef PETScWrappers::MPI::Vector vector_t;
+  {
+
+
+
+  }
+  MGTransferPrebuilt<vector_t> transfer;
+  transfer.build_matrices(dofh);
+  transfer.print_indices(deallog.get_file_stream());
+
+  MGLevelObject<vector_t> u(0, tr.n_global_levels()-1);
+  for (unsigned int level=u.min_level(); level<=u.max_level(); ++level)
+    {
+      u[level].reinit(dofh.locally_owned_mg_dofs(level), MPI_COMM_WORLD);
+      for (unsigned int i=0; i<dofh.locally_owned_mg_dofs(level).n_elements(); ++i)
+        {
+          unsigned int index = dofh.locally_owned_mg_dofs(level).nth_index_in_set(i);
+          u[level][index] = 1;//1000+level*100+index;
+        }
+      u[level].compress(VectorOperation::insert);
+    }
+
+  vector_t v;
+  v.reinit(dofh.locally_owned_dofs(), MPI_COMM_WORLD);
+  v = 0.;
+  transfer.copy_from_mg(dofh, v, u);
+
+  {
+    for (unsigned int i=0; i<dofh.locally_owned_dofs().n_elements(); ++i)
+      {
+        unsigned int index = dofh.locally_owned_dofs().nth_index_in_set(i);
+        deallog << v[index] << " ";
+      }
+  }
+  //v.print(deallog.get_file_stream());
+  deallog << "ok" << std::endl;
+}
+
+
+template <int dim>
+void check()
+{
+  FE_Q<dim> q1(1);
+  FE_Q<dim> q2(2);
+//  FE_DGQ<dim> dq1(1);
+
+  FESystem<dim> s1(q1, 2, q2,1);
+
+  check_fe(q1);
+  //  check_fe(q2);
+  //check_fe(s1);
+}
+
+int main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+  MPILogInitAll log;
+
+  check<2> ();
+  //check<3> ();
+}
diff --git a/tests/multigrid/transfer_04b.with_trilinos=true.mpirun=1.debug.output b/tests/multigrid/transfer_04b.with_trilinos=true.mpirun=1.debug.output
new file mode 100644 (file)
index 0000000..86ad6eb
--- /dev/null
@@ -0,0 +1,75 @@
+
+DEAL:0::FE_Q<2>(1)
+DEAL:0::cell=0_0: level_subdomain_id=0
+DEAL:0::cell=0_1:0 level_subdomain_id=0
+DEAL:0::cell=0_1:1 level_subdomain_id=0
+DEAL:0::cell=0_1:2 level_subdomain_id=0
+DEAL:0::cell=0_1:3 level_subdomain_id=0
+DEAL:0::cell=0_2:00 level_subdomain_id=0
+DEAL:0::cell=0_2:01 level_subdomain_id=0
+DEAL:0::cell=0_2:02 level_subdomain_id=0
+DEAL:0::cell=0_2:03 level_subdomain_id=0
+DEAL:0::cell=0_2:10 level_subdomain_id=0
+DEAL:0::cell=0_2:11 level_subdomain_id=0
+DEAL:0::cell=0_2:12 level_subdomain_id=0
+DEAL:0::cell=0_2:13 level_subdomain_id=0
+DEAL:0::cell=0_2:20 level_subdomain_id=0
+DEAL:0::cell=0_2:21 level_subdomain_id=0
+DEAL:0::cell=0_2:22 level_subdomain_id=0
+DEAL:0::cell=0_2:23 level_subdomain_id=0
+DEAL:0::cell=0_3:000 level_subdomain_id=0
+DEAL:0::cell=0_3:001 level_subdomain_id=0
+DEAL:0::cell=0_3:002 level_subdomain_id=0
+DEAL:0::cell=0_3:003 level_subdomain_id=0
+DEAL:0::cell=0_3:010 level_subdomain_id=0
+DEAL:0::cell=0_3:011 level_subdomain_id=0
+DEAL:0::cell=0_3:012 level_subdomain_id=0
+DEAL:0::cell=0_3:013 level_subdomain_id=0
+DEAL:0::cell=0_3:020 level_subdomain_id=0
+DEAL:0::cell=0_3:021 level_subdomain_id=0
+DEAL:0::cell=0_3:022 level_subdomain_id=0
+DEAL:0::cell=0_3:023 level_subdomain_id=0
+copy_indices[1]        0       3
+copy_indices[1]        1       5
+copy_indices[1]        2       7
+copy_indices[1]        3       8
+copy_indices[2]        0       8
+copy_indices[2]        1       14
+copy_indices[2]        2       20
+copy_indices[2]        4       3
+copy_indices[2]        5       5
+copy_indices[2]        6       7
+copy_indices[2]        7       4
+copy_indices[2]        8       9
+copy_indices[2]        9       10
+copy_indices[2]        10      11
+copy_indices[2]        11      12
+copy_indices[2]        12      13
+copy_indices[2]        13      6
+copy_indices[2]        14      15
+copy_indices[2]        15      16
+copy_indices[2]        16      17
+copy_indices[2]        17      18
+copy_indices[2]        18      19
+copy_indices[3]        4       8
+copy_indices[3]        5       14
+copy_indices[3]        6       20
+copy_indices[3]        7       11
+copy_indices[3]        13      18
+copy_indices[3]        19      0
+copy_indices[3]        20      1
+copy_indices[3]        21      2
+copy_indices[3]        22      3
+copy_indices[3]        23      4
+copy_indices[3]        24      5
+copy_indices[3]        25      6
+copy_indices[3]        26      7
+copy_indices[3]        27      9
+copy_indices[3]        28      10
+copy_indices[3]        29      12
+copy_indices[3]        30      13
+copy_indices[3]        31      15
+copy_indices[3]        32      16
+copy_indices[3]        33      17
+copy_indices[3]        34      19
+DEAL:0::1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 ok
diff --git a/tests/multigrid/transfer_04b.with_trilinos=true.mpirun=2.debug.output b/tests/multigrid/transfer_04b.with_trilinos=true.mpirun=2.debug.output
new file mode 100644 (file)
index 0000000..c9a3860
--- /dev/null
@@ -0,0 +1,109 @@
+
+DEAL:0::FE_Q<2>(1)
+DEAL:0::cell=0_0: level_subdomain_id=0
+DEAL:0::cell=0_1:0 level_subdomain_id=0
+DEAL:0::cell=0_1:1 level_subdomain_id=1
+DEAL:0::cell=0_1:2 level_subdomain_id=1
+DEAL:0::cell=0_1:3 level_subdomain_id=1
+DEAL:0::cell=0_2:00 level_subdomain_id=0
+DEAL:0::cell=0_2:01 level_subdomain_id=0
+DEAL:0::cell=0_2:02 level_subdomain_id=0
+DEAL:0::cell=0_2:03 level_subdomain_id=1
+DEAL:0::cell=0_2:10 level_subdomain_id=1
+DEAL:0::cell=0_2:11 level_subdomain_id=4294967294
+DEAL:0::cell=0_2:12 level_subdomain_id=1
+DEAL:0::cell=0_2:13 level_subdomain_id=4294967294
+DEAL:0::cell=0_2:20 level_subdomain_id=1
+DEAL:0::cell=0_2:21 level_subdomain_id=1
+DEAL:0::cell=0_2:22 level_subdomain_id=4294967294
+DEAL:0::cell=0_2:23 level_subdomain_id=4294967294
+DEAL:0::cell=0_3:000 level_subdomain_id=0
+DEAL:0::cell=0_3:001 level_subdomain_id=0
+DEAL:0::cell=0_3:002 level_subdomain_id=0
+DEAL:0::cell=0_3:003 level_subdomain_id=0
+DEAL:0::cell=0_3:010 level_subdomain_id=0
+DEAL:0::cell=0_3:011 level_subdomain_id=0
+DEAL:0::cell=0_3:012 level_subdomain_id=0
+DEAL:0::cell=0_3:013 level_subdomain_id=0
+DEAL:0::cell=0_3:020 level_subdomain_id=0
+DEAL:0::cell=0_3:021 level_subdomain_id=0
+DEAL:0::cell=0_3:022 level_subdomain_id=0
+DEAL:0::cell=0_3:023 level_subdomain_id=0
+copy_indices[2]        8       3
+copy_indices[2]        11      4
+copy_indices[2]        14      5
+copy_indices[2]        18      6
+copy_indices[2]        20      7
+copy_indices[3]        0       0
+copy_indices[3]        1       1
+copy_indices[3]        2       2
+copy_indices[3]        3       3
+copy_indices[3]        4       4
+copy_indices[3]        5       5
+copy_indices[3]        6       6
+copy_indices[3]        7       7
+copy_indices[3]        8       8
+copy_indices[3]        9       9
+copy_indices[3]        10      10
+copy_indices[3]        11      11
+copy_indices[3]        12      12
+copy_indices[3]        13      13
+copy_indices[3]        14      14
+copy_indices[3]        15      15
+copy_indices[3]        16      16
+copy_indices[3]        17      17
+copy_indices[3]        18      18
+copy_indices[3]        19      19
+copy_indices[3]        20      20
+copy_ifrom  [1]        21      3
+DEAL:0::1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 ok
+
+DEAL:1::FE_Q<2>(1)
+DEAL:1::cell=0_0: level_subdomain_id=0
+DEAL:1::cell=0_1:0 level_subdomain_id=0
+DEAL:1::cell=0_1:1 level_subdomain_id=1
+DEAL:1::cell=0_1:2 level_subdomain_id=1
+DEAL:1::cell=0_1:3 level_subdomain_id=1
+DEAL:1::cell=0_2:00 level_subdomain_id=0
+DEAL:1::cell=0_2:01 level_subdomain_id=0
+DEAL:1::cell=0_2:02 level_subdomain_id=0
+DEAL:1::cell=0_2:03 level_subdomain_id=1
+DEAL:1::cell=0_2:10 level_subdomain_id=1
+DEAL:1::cell=0_2:11 level_subdomain_id=1
+DEAL:1::cell=0_2:12 level_subdomain_id=1
+DEAL:1::cell=0_2:13 level_subdomain_id=1
+DEAL:1::cell=0_2:20 level_subdomain_id=1
+DEAL:1::cell=0_2:21 level_subdomain_id=1
+DEAL:1::cell=0_2:22 level_subdomain_id=1
+DEAL:1::cell=0_2:23 level_subdomain_id=1
+DEAL:1::cell=0_3:000 level_subdomain_id=4294967294
+DEAL:1::cell=0_3:001 level_subdomain_id=4294967294
+DEAL:1::cell=0_3:002 level_subdomain_id=4294967294
+DEAL:1::cell=0_3:003 level_subdomain_id=0
+DEAL:1::cell=0_3:010 level_subdomain_id=4294967294
+DEAL:1::cell=0_3:011 level_subdomain_id=0
+DEAL:1::cell=0_3:012 level_subdomain_id=0
+DEAL:1::cell=0_3:013 level_subdomain_id=0
+DEAL:1::cell=0_3:020 level_subdomain_id=4294967294
+DEAL:1::cell=0_3:021 level_subdomain_id=0
+DEAL:1::cell=0_3:022 level_subdomain_id=0
+DEAL:1::cell=0_3:023 level_subdomain_id=0
+copy_indices[1]        22      5
+copy_indices[1]        23      7
+copy_indices[1]        24      8
+copy_indices[2]        21      8
+copy_indices[2]        22      14
+copy_indices[2]        23      20
+copy_indices[2]        25      9
+copy_indices[2]        26      10
+copy_indices[2]        27      11
+copy_indices[2]        28      12
+copy_indices[2]        29      13
+copy_indices[2]        30      15
+copy_indices[2]        31      16
+copy_indices[2]        32      17
+copy_indices[2]        33      18
+copy_indices[2]        34      19
+copy_ito    [1]        21      3
+DEAL:1::1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 ok
+
