https://gitweb.dealii.org/ - dealii.git/commitdiff
add distribute_mg_dofs for shared::Triangulation 5114/head
author     tcclevenger <tcleven@clemson.edu>
           Tue, 19 Sep 2017 23:19:14 +0000 (19:19 -0400)
committer  tcclevenger <tcleven@clemson.edu>
           Tue, 10 Oct 2017 18:43:20 +0000 (14:43 -0400)
16 files changed:
doc/news/changes/major/20171010ConradClevenger [new file with mode: 0644]
include/deal.II/distributed/shared_tria.h
include/deal.II/distributed/tria.h
include/deal.II/distributed/tria_base.h
include/deal.II/multigrid/mg_transfer.templates.h
source/distributed/shared_tria.cc
source/distributed/tria.cc
source/distributed/tria_base.cc
source/dofs/dof_handler_policy.cc
source/multigrid/mg_transfer_internal.cc
tests/multigrid/step-50_02.cc [new file with mode: 0644]
tests/multigrid/step-50_02.with_mpi=true.with_trilinos=true.mpirun=3.output [new file with mode: 0644]
tests/sharedtria/tria_mg_dof_01.cc [new file with mode: 0644]
tests/sharedtria/tria_mg_dof_01.mpirun=3.with_p4est=true.output [new file with mode: 0644]
tests/sharedtria/tria_mg_dof_02.cc [new file with mode: 0644]
tests/sharedtria/tria_mg_dof_02.mpirun=3.output [new file with mode: 0644]

diff --git a/doc/news/changes/major/20171010ConradClevenger b/doc/news/changes/major/20171010ConradClevenger
new file mode 100644 (file)
index 0000000..0db855b
--- /dev/null
@@ -0,0 +1,3 @@
+New: The function distribute_mg_dofs has been written for parallel::shared::Triangulation. This allows geometric multigrid computations on adaptively refined meshes that use a shared triangulation, with the possibility of a user-defined partitioning of both the active and the level cells.
+<br>
+(Conrad Clevenger, 2017/10/10)
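
As a brief illustration of the new capability, here is a minimal usage sketch modeled on the tests added by this commit (tests/multigrid/step-50_02.cc and tests/sharedtria/tria_mg_dof_01.cc); the mesh, element, and refinement are placeholders, and MPI is assumed to have been initialized via Utilities::MPI::MPI_InitFinalize:

    #include <deal.II/base/index_set.h>
    #include <deal.II/distributed/shared_tria.h>
    #include <deal.II/dofs/dof_handler.h>
    #include <deal.II/fe/fe_q.h>
    #include <deal.II/grid/grid_generator.h>
    #include <deal.II/grid/tria.h>

    using namespace dealii;

    void example_shared_mg_dofs ()
    {
      // A shared triangulation that also builds the level (multigrid) hierarchy;
      // artificial cells must be allowed for multigrid computations.
      parallel::shared::Triangulation<2>
      tria (MPI_COMM_WORLD,
            Triangulation<2>::limit_level_difference_at_vertices,
            /*allow_artificial_cells=*/true,
            parallel::shared::Triangulation<2>::Settings
            (parallel::shared::Triangulation<2>::partition_zorder |
             parallel::shared::Triangulation<2>::construct_multigrid_hierarchy));

      GridGenerator::hyper_cube (tria);
      tria.refine_global (3);

      FE_Q<2>        fe (1);
      DoFHandler<2>  dof_handler (tria);
      dof_handler.distribute_dofs (fe);
      dof_handler.distribute_mg_dofs ();  // newly supported for shared::Triangulation

      // per-level index sets of locally owned DoFs, e.g. for level matrices/vectors
      for (unsigned int lvl = 0; lvl < tria.n_global_levels (); ++lvl)
        {
          const IndexSet owned_level_dofs = dof_handler.locally_owned_mg_dofs (lvl);
          (void)owned_level_dofs;
        }
    }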
diff --git a/include/deal.II/distributed/shared_tria.h b/include/deal.II/distributed/shared_tria.h
index 20d865ea79a2074447671f3335042cf61f036f3a..930b5797a90454d6ed620fabe8361eccf37b0133 100644 (file)
@@ -262,6 +262,16 @@ namespace parallel
        */
       const std::vector<types::subdomain_id> &get_true_subdomain_ids_of_cells() const;
 
+      /**
+       * Return a vector of length Triangulation::n_cells(level) where each
+       * element stores the level subdomain id of the owner of this cell. For
+       * locally owned and ghost cells the elements simply equal the cells'
+       * level subdomain ids, but they are also correct for artificial cells,
+       * which do not store who the owner of the cell is in their
+       * level_subdomain_id field.
+       */
+      const std::vector<types::subdomain_id> &get_true_level_subdomain_ids_of_cells(const unsigned int level) const;
+
       /**
        * Return allow_artificial_cells , namely true if artificial cells are
        * allowed.
@@ -270,6 +280,12 @@ namespace parallel
 
     private:
 
+      /**
+       * Override the function to update the number cache so we can fill data
+       * like @p level_ghost_owners.
+       */
+      virtual void update_number_cache ();
+
       /**
        * Settings
        */
@@ -288,16 +304,27 @@ namespace parallel
 
       /**
        * A vector containing subdomain IDs of cells obtained by partitioning
-       * using METIS. In case allow_artificial_cells is false, this vector is
+       * using either zorder, METIS, or a user-defined partitioning scheme.
+       * In case allow_artificial_cells is false, this vector is
        * consistent with IDs stored in cell->subdomain_id() of the
        * triangulation class. When allow_artificial_cells is true, cells which
        * are artificial will have cell->subdomain_id() == numbers::artificial;
        *
-       * The original parition information is stored to allow using sequential
+       * The original partition information is stored to allow using sequential
        * DoF distribution and partitioning functions with semi-artificial
        * cells.
        */
       std::vector<types::subdomain_id> true_subdomain_ids_of_cells;
+
+      /**
+       * A vector containing level subdomain IDs of cells obtained by partitioning
+       * each level.
+       *
+       * The original partition information is stored to allow using sequential
+       * DoF distribution and partitioning functions with semi-artificial
+       * cells.
+       */
+      std::vector<std::vector<types::subdomain_id>> true_level_subdomain_ids_of_cells;
     };
 
     template <int dim, int spacedim>
@@ -342,6 +369,11 @@ namespace parallel
        */
       const std::vector<types::subdomain_id> &get_true_subdomain_ids_of_cells() const;
 
+      /**
+       * A dummy function that returns an empty vector.
+       */
+      const std::vector<types::subdomain_id> &get_true_level_subdomain_ids_of_cells(const unsigned int level) const;
+
       /**
        * A dummy function which always returns true.
        */
@@ -352,6 +384,11 @@ namespace parallel
        * A dummy vector.
        */
       std::vector<types::subdomain_id> true_subdomain_ids_of_cells;
+
+      /**
+       * A dummy vector.
+       */
+      std::vector<types::subdomain_id> true_level_subdomain_ids_of_cells;
     };
   }
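
For orientation, a hedged sketch of how the new get_true_level_subdomain_ids_of_cells() accessor declared above can be used; the helper below is illustrative only and assumes the usual deal.II headers (shared_tria.h, logstream.h). On a shared triangulation with artificial cells, cell->level_subdomain_id() reads numbers::artificial_subdomain_id for cells outside the locally relevant halo, whereas the returned vector always records the true owner:

    template <int dim>
    void print_true_level_owners (const parallel::shared::Triangulation<dim> &tria)
    {
      for (unsigned int lvl = 0; lvl < tria.n_levels (); ++lvl)
        {
          const std::vector<types::subdomain_id> &true_ids
            = tria.get_true_level_subdomain_ids_of_cells (lvl);

          unsigned int index = 0;
          for (auto cell = tria.begin (lvl); cell != tria.end (lvl); ++cell, ++index)
            // level_subdomain_id() may be numbers::artificial_subdomain_id here,
            // but true_ids[index] always stores the rank that owns the cell
            if (cell->level_subdomain_id () == numbers::artificial_subdomain_id)
              deallog << "cell " << cell->id ()
                      << " true level owner: " << true_ids[index] << std::endl;
        }
    }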
 
diff --git a/include/deal.II/distributed/tria.h b/include/deal.II/distributed/tria.h
index ce465beeeaf1d1653837effe27051c08b3222cd6..499bb5bbfcbdb997cf8c4609ff4825ffcf9a43ae 100644 (file)
@@ -712,7 +712,6 @@ namespace parallel
       /**
        * Override the function to update the number cache so we can fill data
        * like @p level_ghost_owners.
-       *
        */
       virtual void update_number_cache ();
 
diff --git a/include/deal.II/distributed/tria_base.h b/include/deal.II/distributed/tria_base.h
index 74ccda87142579f068ef48d0023cfd50beb72127..c1781f529ff61893111070f5eba66597420235b7 100644 (file)
@@ -219,6 +219,11 @@ namespace parallel
      * Update the number_cache variable after mesh creation or refinement.
      */
     virtual void update_number_cache ();
+
+    /**
+     * Store MPI ranks of level ghost owners of this processor on all levels.
+     */
+    void fill_level_ghost_owners ();
   };
 
 } // namespace parallel
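
As a usage note (a hedged sketch, not part of this patch): after DoFHandler::distribute_mg_dofs() has been called, update_number_cache() and hence the new fill_level_ghost_owners() have run, and the cached set can be read back through the level_ghost_owners() accessor of parallel::Triangulation — that accessor name is assumed here to be the existing public getter for this number cache entry:

    template <int dim>
    void report_level_ghost_owners (const parallel::Triangulation<dim> &tria)
    {
      // one set per process: the ranks owning level ghost cells that neighbor
      // this process's locally owned level cells
      for (const types::subdomain_id owner : tria.level_ghost_owners ())
        deallog << "rank "
                << Utilities::MPI::this_mpi_process (tria.get_communicator ())
                << " has level ghost cells owned by rank " << owner << std::endl;
    }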
diff --git a/include/deal.II/multigrid/mg_transfer.templates.h b/include/deal.II/multigrid/mg_transfer.templates.h
index d0858185ce47b322d3efd6a3ec36f000e1bb5cfb..2346866b7ba2e95d723a258daacee1b115d10182 100644 (file)
@@ -140,20 +140,16 @@ namespace
                  const std::vector<unsigned int> &,
                  MGLevelObject<TrilinosWrappers::MPI::Vector> &v)
   {
-    const dealii::parallel::distributed::Triangulation<dim,spacedim> *tria =
-      (dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim>*>
+    const dealii::parallel::Triangulation<dim,spacedim> *tria =
+      (dynamic_cast<const parallel::Triangulation<dim,spacedim>*>
        (&mg_dof.get_triangulation()));
-    AssertThrow(tria!=nullptr, ExcMessage("multigrid with Trilinos vectors only works with distributed Triangulation!"));
+    AssertThrow(tria!=nullptr, ExcMessage("multigrid with Trilinos vectors only works with a parallel Triangulation!"));
 
-#ifdef DEAL_II_WITH_P4EST
     for (unsigned int level=v.min_level();
          level<=v.max_level(); ++level)
       {
         v[level].reinit(mg_dof.locally_owned_mg_dofs(level), tria->get_communicator());
       }
-#else
-    (void)v;
-#endif
   }
 #endif
 }
diff --git a/source/distributed/shared_tria.cc b/source/distributed/shared_tria.cc
index 91f77b48d433d418313da44f29f4d0b1a71ee5ac..681554f27d8c765455fd93bb2539f615257f2008 100644 (file)
@@ -111,10 +111,14 @@ namespace parallel
           // loop over all cells in multigrid hierarchy and mark artificial:
           if (settings & construct_multigrid_hierarchy)
             {
+              true_level_subdomain_ids_of_cells.resize(this->n_levels());
+
               std::function<bool (const typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator &)> predicate
                 = IteratorFilters::LocallyOwnedLevelCell();
               for (unsigned int lvl=0; lvl<this->n_levels(); ++lvl)
                 {
+                  true_level_subdomain_ids_of_cells[lvl].resize(this->n_cells(lvl));
+
                   const std::vector<typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator>
                   level_halo_layer_vector = GridTools::compute_cell_halo_layer_on_level (*this, predicate, lvl);
                   std::set<typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator>
@@ -123,8 +127,11 @@ namespace parallel
                   typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator
                   cell = this->begin(lvl),
                   endc = this->end(lvl);
-                  for (; cell != endc; cell++)
+                  for (unsigned int index=0; cell != endc; cell++, index++)
                     {
+                      // Store the true level subdomain ID before it may be reset to artificial below
+                      true_level_subdomain_ids_of_cells[lvl][index] = cell->level_subdomain_id();
+
                       // for active cells, we must have knowledge of level subdomain ids of
                       // all neighbors to our subdomain, not just neighbors on the same level.
                       // if the cell's subdomain id was not set to artificial above, we will
@@ -227,6 +234,15 @@ namespace parallel
 
 
 
+    template <int dim, int spacedim>
+    const std::vector<types::subdomain_id> &
+    Triangulation<dim,spacedim>::get_true_level_subdomain_ids_of_cells(const unsigned int level) const
+    {
+      return true_level_subdomain_ids_of_cells[level];
+    }
+
+
+
     template <int dim, int spacedim>
     void
     Triangulation<dim,spacedim>::execute_coarsening_and_refinement ()
@@ -274,6 +290,18 @@ namespace parallel
       this->update_number_cache ();
     }
 
+
+
+    template <int dim, int spacedim>
+    void
+    Triangulation<dim,spacedim>::
+    update_number_cache ()
+    {
+      parallel::Triangulation<dim,spacedim>::update_number_cache();
+
+      if (settings & construct_multigrid_hierarchy)
+        parallel::Triangulation<dim,spacedim>::fill_level_ghost_owners();
+    }
   }
 }
 
@@ -301,6 +329,15 @@ namespace parallel
       return true_subdomain_ids_of_cells;
     }
 
+
+
+    template <int dim, int spacedim>
+    const std::vector<unsigned int> &
+    Triangulation<dim,spacedim>::get_true_level_subdomain_ids_of_cells(const unsigned int) const
+    {
+      Assert (false, ExcNotImplemented());
+      return true_level_subdomain_ids_of_cells;
+    }
   }
 }
 
diff --git a/source/distributed/tria.cc b/source/distributed/tria.cc
index 20bcca1b72d386ba864ee55a3f45306f82596f6f..b8c7a375be812732ab920e7838038cc3a0db2b85 100644 (file)
@@ -1899,6 +1899,8 @@ namespace parallel
       return dealii::internal::p4est::functions<dim>::checksum (parallel_forest);
     }
 
+
+
     template <int dim, int spacedim>
     void
     Triangulation<dim,spacedim>::
@@ -1906,76 +1908,12 @@ namespace parallel
     {
       parallel::Triangulation<dim,spacedim>::update_number_cache();
 
-      if (this->n_levels() == 0)
-        return;
-
       if (settings & construct_multigrid_hierarchy)
-        {
-          // find level ghost owners
-          for (typename Triangulation<dim,spacedim>::cell_iterator
-               cell = this->begin();
-               cell != this->end();
-               ++cell)
-            if (cell->level_subdomain_id() != numbers::artificial_subdomain_id
-                && cell->level_subdomain_id() != this->locally_owned_subdomain())
-              this->number_cache.level_ghost_owners.insert(cell->level_subdomain_id());
-
-#ifdef DEBUG
-          // Check that level_ghost_owners is symmetric by sending a message
-          // to everyone
-          {
-            int ierr = MPI_Barrier(this->mpi_communicator);
-            AssertThrowMPI(ierr);
-
-            // important: preallocate to avoid (re)allocation:
-            std::vector<MPI_Request> requests (this->number_cache.level_ghost_owners.size());
-            int dummy = 0;
-            unsigned int req_counter = 0;
-
-            for (std::set<types::subdomain_id>::iterator it = this->number_cache.level_ghost_owners.begin();
-                 it != this->number_cache.level_ghost_owners.end();
-                 ++it, ++req_counter)
-              {
-                Assert (typeid(types::subdomain_id)
-                        == typeid(unsigned int),
-                        ExcNotImplemented());
-                ierr = MPI_Isend(&dummy, 1, MPI_UNSIGNED,
-                                 *it, 9001, this->mpi_communicator,
-                                 &requests[req_counter]);
-                AssertThrowMPI(ierr);
-              }
-
-            for (std::set<types::subdomain_id>::iterator it = this->number_cache.level_ghost_owners.begin();
-                 it != this->number_cache.level_ghost_owners.end();
-                 ++it)
-              {
-                Assert (typeid(types::subdomain_id)
-                        == typeid(unsigned int),
-                        ExcNotImplemented());
-                unsigned int dummy;
-                ierr = MPI_Recv(&dummy, 1, MPI_UNSIGNED,
-                                *it, 9001, this->mpi_communicator,
-                                MPI_STATUS_IGNORE);
-                AssertThrowMPI(ierr);
-              }
-
-            if (requests.size() > 0)
-              {
-                ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
-                AssertThrowMPI(ierr);
-              }
-
-            ierr = MPI_Barrier(this->mpi_communicator);
-            AssertThrowMPI(ierr);
-          }
-#endif
-
-          Assert(this->number_cache.level_ghost_owners.size() < Utilities::MPI::n_mpi_processes(this->mpi_communicator),
-                 ExcInternalError());
-        }
+        parallel::Triangulation<dim,spacedim>::fill_level_ghost_owners();
     }
 
 
+
     template <int dim, int spacedim>
     typename dealii::internal::p4est::types<dim>::tree *
     Triangulation<dim,spacedim>::
diff --git a/source/distributed/tria_base.cc b/source/distributed/tria_base.cc
index 940ffe07069cf699b8217817b2e566a0e64da6f5..222b32d898bdf1f178c0c835208657172d169580 100644 (file)
@@ -215,6 +215,80 @@ namespace parallel
                          static_cast<types::global_dof_index>(0));
     number_cache.n_global_levels = Utilities::MPI::max(this->n_levels(), this->mpi_communicator);
   }
+
+
+
+  template <int dim, int spacedim>
+  void
+  Triangulation<dim,spacedim>::fill_level_ghost_owners ()
+  {
+    number_cache.level_ghost_owners.clear ();
+
+    if (this->n_levels() > 0)
+      {
+        // find level ghost owners
+        for (typename Triangulation<dim,spacedim>::cell_iterator
+             cell = this->begin();
+             cell != this->end();
+             ++cell)
+          if (cell->level_subdomain_id() != numbers::artificial_subdomain_id
+              && cell->level_subdomain_id() != this->locally_owned_subdomain())
+            this->number_cache.level_ghost_owners.insert(cell->level_subdomain_id());
+
+#ifdef DEBUG
+        // Check that level_ghost_owners is symmetric by sending a message
+        // to everyone
+        {
+          int ierr = MPI_Barrier(this->mpi_communicator);
+          AssertThrowMPI(ierr);
+
+          // important: preallocate to avoid (re)allocation:
+          std::vector<MPI_Request> requests (this->number_cache.level_ghost_owners.size());
+          int dummy = 0;
+          unsigned int req_counter = 0;
+
+          for (std::set<types::subdomain_id>::iterator it = this->number_cache.level_ghost_owners.begin();
+               it != this->number_cache.level_ghost_owners.end();
+               ++it, ++req_counter)
+            {
+              Assert (typeid(types::subdomain_id)
+                      == typeid(unsigned int),
+                      ExcNotImplemented());
+              ierr = MPI_Isend(&dummy, 1, MPI_UNSIGNED,
+                               *it, 9001, this->mpi_communicator,
+                               &requests[req_counter]);
+              AssertThrowMPI(ierr);
+            }
+
+          for (std::set<types::subdomain_id>::iterator it = this->number_cache.level_ghost_owners.begin();
+               it != this->number_cache.level_ghost_owners.end();
+               ++it)
+            {
+              Assert (typeid(types::subdomain_id)
+                      == typeid(unsigned int),
+                      ExcNotImplemented());
+              unsigned int dummy;
+              ierr = MPI_Recv(&dummy, 1, MPI_UNSIGNED,
+                              *it, 9001, this->mpi_communicator,
+                              MPI_STATUS_IGNORE);
+              AssertThrowMPI(ierr);
+            }
+
+          if (requests.size() > 0)
+            {
+              ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+              AssertThrowMPI(ierr);
+            }
+
+          ierr = MPI_Barrier(this->mpi_communicator);
+          AssertThrowMPI(ierr);
+        }
+#endif
+
+        Assert(this->number_cache.level_ghost_owners.size() < Utilities::MPI::n_mpi_processes(this->mpi_communicator),
+               ExcInternalError());
+      }
+  }
 #else
   template <int dim, int spacedim>
   void
diff --git a/source/dofs/dof_handler_policy.cc b/source/dofs/dof_handler_policy.cc
index 3386327657df9e14bfea3732a6285a43b2452f81..fd78e7cd92a9667225704a2b321d9907e6bd1cc5 100644 (file)
@@ -2414,6 +2414,68 @@ namespace internal
 
           return subdomain_association;
         }
+
+
+        /**
+         * Level subdomain association. Similar to the above function, but
+         * for level (multigrid) meshes. This function assigns DoFs on
+         * subdomain boundaries in the same way as
+         * parallel::distributed::Triangulation (to the processor with the
+         * smallest subdomain id) instead of the coin-flip method above.
+         */
+        template <class DoFHandlerType>
+        std::vector<types::subdomain_id>
+        get_dof_level_subdomain_association (const DoFHandlerType          &dof_handler,
+                                             const types::global_dof_index  n_dofs_on_level,
+                                             const unsigned int             n_procs,
+                                             const unsigned int             level)
+        {
+          (void)n_procs;
+          std::vector<types::subdomain_id> level_subdomain_association (n_dofs_on_level,
+              numbers::invalid_subdomain_id);
+          std::vector<types::global_dof_index> local_dof_indices;
+          local_dof_indices.reserve (DoFTools::max_dofs_per_cell(dof_handler));
+
+          // loop over all cells and record which subdomain a DoF belongs to.
+          // interface DoFs go to the processor with the smaller subdomain id
+          typename DoFHandlerType::cell_iterator
+          cell = dof_handler.begin(level),
+          endc = dof_handler.end(level);
+          for (; cell!=endc; ++cell)
+            {
+              // get the owner of the cell; note that we have made sure above that
+              // all cells are either locally owned or ghosts (not artificial), so
+              // this call will always yield the true owner
+              const types::subdomain_id level_subdomain_id = cell->level_subdomain_id();
+              const unsigned int dofs_per_cell = cell->get_fe().dofs_per_cell;
+              local_dof_indices.resize (dofs_per_cell);
+              cell->get_mg_dof_indices (local_dof_indices);
+
+              // set level subdomain ids. if dofs already have their values set then
+              // they must be on partition interfaces. in that case assign them to the
+              // processor with the smaller subdomain id.
+              for (unsigned int i=0; i<dofs_per_cell; ++i)
+                if (level_subdomain_association[local_dof_indices[i]] ==
+                    numbers::invalid_subdomain_id)
+                  level_subdomain_association[local_dof_indices[i]] = level_subdomain_id;
+                else if (level_subdomain_association[local_dof_indices[i]] > level_subdomain_id)
+                  {
+                    level_subdomain_association[local_dof_indices[i]] = level_subdomain_id;
+                  }
+            }
+
+          Assert (std::find (level_subdomain_association.begin(),
+                             level_subdomain_association.end(),
+                             numbers::invalid_subdomain_id)
+                  == level_subdomain_association.end(),
+                  ExcInternalError());
+
+          Assert (*std::max_element (level_subdomain_association.begin(),
+                                     level_subdomain_association.end())
+                  < n_procs,
+                  ExcInternalError());
+
+          return level_subdomain_association;
+        }
       }
 
 
@@ -2581,12 +2643,168 @@ namespace internal
       ParallelShared<DoFHandlerType>::
       distribute_mg_dofs () const
       {
-        // this is not currently implemented; the algorithm should work
-        // as above, though: first call the sequential numbering
-        // algorithm, then re-enumerate subdomain-wise
-        Assert(false, ExcNotImplemented());
+        const unsigned int dim      = DoFHandlerType::dimension;
+        const unsigned int spacedim = DoFHandlerType::space_dimension;
 
-        return std::vector<NumberCache>();
+        const parallel::shared::Triangulation<dim, spacedim> *tr =
+          (dynamic_cast<const parallel::shared::Triangulation<dim, spacedim>*> (&this->dof_handler->get_triangulation()));
+        Assert(tr != nullptr, ExcInternalError());
+
+        const unsigned int n_procs = Utilities::MPI::n_mpi_processes(tr->get_communicator());
+        const unsigned int n_levels = tr->n_global_levels();
+
+        std::vector<NumberCache> number_caches;
+        number_caches.reserve(n_levels);
+
+        // We create an index set for each level
+        for (unsigned int lvl=0; lvl<n_levels; ++lvl)
+          {
+            // If the underlying shared::Tria allows artificial cells,
+            // then save the current set of level subdomain ids, and set
+            // the level subdomain ids to the "true" owner of each cell;
+            // we restore these flags later.
+            // Note: allow_artificial_cells is currently enforced for
+            //       MG computations.
+            std::vector<types::subdomain_id> saved_level_subdomain_ids;
+            saved_level_subdomain_ids.resize(tr->n_cells(lvl));
+            {
+              typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator
+              cell = this->dof_handler->get_triangulation().begin(lvl),
+              endc = this->dof_handler->get_triangulation().end(lvl);
+
+              const std::vector<types::subdomain_id> &true_level_subdomain_ids
+                = tr->get_true_level_subdomain_ids_of_cells(lvl);
+
+              for (unsigned int index=0; cell != endc; ++cell, ++index)
+                {
+                  saved_level_subdomain_ids[index] = cell->level_subdomain_id();
+                  cell->set_level_subdomain_id(true_level_subdomain_ids[index]);
+                }
+            }
+
+            // Next let the sequential algorithm do its magic. it is going to
+            // enumerate DoFs on all cells on the given level, regardless of owner
+            const types::global_dof_index n_dofs_on_level =
+              Implementation::distribute_dofs_on_level(numbers::invalid_subdomain_id,
+                                                       *this->dof_handler,
+                                                       lvl);
+
+            // then re-enumerate them based on their level subdomain association.
+            // for this, we first have to identify for each current DoF
+            // index which subdomain they belong to. ideally, we would
+            // like to call DoFRenumbering::subdomain_wise(), but
+            // because the NumberCache of the current DoFHandler is not
+            // fully set up yet, we can't quite do that. also, that
+            // function has to deal with other kinds of triangulations as
+            // well, whereas we here know what kind of triangulation
+            // we have and can simplify the code accordingly
+            std::vector<types::global_dof_index> new_dof_indices (n_dofs_on_level,
+                                                                  numbers::invalid_dof_index);
+            {
+              // first get the association of each dof with a subdomain and
+              // determine the total number of subdomain ids used
+              const std::vector<types::subdomain_id> level_subdomain_association
+                = get_dof_level_subdomain_association (*this->dof_handler,
+                                                       n_dofs_on_level, n_procs, lvl);
+
+              // then renumber the subdomains by first looking at those belonging
+              // to subdomain 0, then those of subdomain 1, etc. note that the
+              // algorithm is stable, i.e. if two dofs i,j have i<j and belong to
+              // the same subdomain, then they will be in this order also after
+              // reordering
+              types::global_dof_index next_free_index = 0;
+              for (types::subdomain_id level_subdomain=0; level_subdomain<n_procs; ++level_subdomain)
+                for (types::global_dof_index i=0; i<n_dofs_on_level; ++i)
+                  if (level_subdomain_association[i] == level_subdomain)
+                    {
+                      Assert (new_dof_indices[i] == numbers::invalid_dof_index,
+                              ExcInternalError());
+                      new_dof_indices[i] = next_free_index;
+                      ++next_free_index;
+                    }
+
+              // we should have numbered all dofs
+              Assert (next_free_index == n_dofs_on_level, ExcInternalError());
+              Assert (std::find (new_dof_indices.begin(), new_dof_indices.end(),
+                                 numbers::invalid_dof_index)
+                      == new_dof_indices.end(),
+                      ExcInternalError());
+            }
+
+            // finally do the renumbering. we can use the sequential
+            // version of the function because we do things on all
+            // cells and all cells have their subdomain ids and DoFs
+            // correctly set
+            Implementation::renumber_mg_dofs (new_dof_indices, IndexSet(0),
+                                              *this->dof_handler, lvl, true);
+
+            // update the number cache. for this, we first have to find the level subdomain
+            // association for each DoF again following renumbering, from which we
+            // can then compute the IndexSets of locally owned DoFs for all processors.
+            // all other fields then follow from this
+            //
+            // given the way we enumerate degrees of freedom, the locally owned
+            // ranges must all be contiguous and consecutive. this makes filling
+            // the IndexSets cheap. an assertion at the top verifies that this
+            // assumption is true
+            const std::vector<types::subdomain_id> level_subdomain_association
+              = get_dof_level_subdomain_association (*this->dof_handler,
+                                                     n_dofs_on_level, n_procs, lvl);
+
+            for (unsigned int i=1; i<n_dofs_on_level; ++i)
+              Assert (level_subdomain_association[i] >= level_subdomain_association[i-1],
+                      ExcInternalError());
+
+            std::vector<IndexSet> locally_owned_dofs_per_processor (n_procs,
+                                                                    IndexSet(n_dofs_on_level));
+            {
+              // we know that the set of subdomain indices is contiguous from
+              // the assertion above; find the start and end index for each
+              // processor, taking into account that sometimes a processor
+              // may not in fact have any DoFs at all. we do the latter
+              // by just identifying contiguous ranges of level_subdomain_ids
+              // and filling IndexSets for those subdomains; subdomains
+              // that don't appear will lead to IndexSets that are simply
+              // never touched and remain empty as initialized above.
+              unsigned int start_index = 0;
+              unsigned int end_index   = 0;
+              while (start_index < n_dofs_on_level)
+                {
+                  while ((end_index) < n_dofs_on_level &&
+                         (level_subdomain_association[end_index] == level_subdomain_association[start_index]))
+                    ++end_index;
+
+                  // we've now identified a range of same indices. set that
+                  // range in the corresponding IndexSet
+                  if (end_index > start_index)
+                    {
+                      const unsigned int level_subdomain_owner = level_subdomain_association[start_index];
+                      locally_owned_dofs_per_processor[level_subdomain_owner]
+                      .add_range (start_index, end_index);
+                    }
+
+                  // then move on to thinking about the next range
+                  start_index = end_index;
+                }
+            }
+
+            // finally, restore current level subdomain ids
+            {
+              typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator
+              cell = this->dof_handler->get_triangulation().begin(lvl),
+              endc = this->dof_handler->get_triangulation().end(lvl);
+
+              for (unsigned int index=0; cell != endc; ++cell, ++index)
+                cell->set_level_subdomain_id(saved_level_subdomain_ids[index]);
+
+              // add NumberCache for current level
+              number_caches.emplace_back (NumberCache(locally_owned_dofs_per_processor,
+                                                      this->dof_handler->get_triangulation()
+                                                      .locally_owned_subdomain ()));
+            }
+          }
+
+        return number_caches;
       }
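
The heart of ParallelShared::distribute_mg_dofs() above is a stable, subdomain-wise renumbering of the sequentially enumerated level DoFs, after which every processor's DoFs form one contiguous range. A stripped-down sketch of that renumbering step, independent of the deal.II types used above (function and variable names are illustrative only):

    #include <cassert>
    #include <vector>

    // Given, for every DoF on a level, the rank that owns it, compute new indices
    // so that rank 0's DoFs come first, then rank 1's, and so on. The renumbering
    // is stable: two DoFs owned by the same rank keep their relative order.
    std::vector<unsigned int>
    subdomain_wise_renumbering (const std::vector<unsigned int> &owner_of_dof,
                                const unsigned int               n_ranks)
    {
      const unsigned int n_dofs = owner_of_dof.size ();
      std::vector<unsigned int> new_index (n_dofs, n_dofs); // n_dofs acts as "invalid"

      unsigned int next_free = 0;
      for (unsigned int rank = 0; rank < n_ranks; ++rank)
        for (unsigned int i = 0; i < n_dofs; ++i)
          if (owner_of_dof[i] == rank)
            new_index[i] = next_free++;

      assert (next_free == n_dofs); // every DoF received exactly one new index
      return new_index;
    }

Because ranks are visited in increasing order and the inner loop preserves the original ordering, the owned index ranges come out contiguous and consecutive, which is what makes the IndexSet construction at the end of the new function cheap.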
 
 
diff --git a/source/multigrid/mg_transfer_internal.cc b/source/multigrid/mg_transfer_internal.cc
index b9cb4fb7d155507d2e2c59c8c37fb01ad7f093f1..69b6c14204b080166fcad670a9045bc7284f2ac7 100644 (file)
@@ -154,10 +154,10 @@ namespace internal
             }
         }
 
-      const dealii::parallel::distributed::Triangulation<dim,spacedim> *tria =
-        (dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim>*>
+      const dealii::parallel::Triangulation<dim,spacedim> *tria =
+        (dynamic_cast<const parallel::Triangulation<dim,spacedim>*>
          (&mg_dof.get_triangulation()));
-      AssertThrow(send_data_temp.size()==0 || tria!=nullptr, ExcMessage("parallel Multigrid only works with a distributed Triangulation!"));
+      AssertThrow(send_data_temp.size()==0 || tria!=nullptr, ExcMessage("We should only be sending information with a parallel Triangulation!"));
 
 #ifdef DEAL_II_WITH_MPI
       if (tria)
diff --git a/tests/multigrid/step-50_02.cc b/tests/multigrid/step-50_02.cc
new file mode 100644 (file)
index 0000000..f150a09
--- /dev/null
@@ -0,0 +1,578 @@
+/* ---------------------------------------------------------------------
+ *
+ * Copyright (C) 2003 - 2017 by the deal.II authors
+ *
+ * This file is part of the deal.II library.
+ *
+ * The deal.II library is free software; you can use it, redistribute
+ * it, and/or modify it under the terms of the GNU Lesser General
+ * Public License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ * The full text of the license can be found in the file LICENSE at
+ * the top level of the deal.II distribution.
+ *
+ * ---------------------------------------------------------------------
+
+ *
+ *
+ */
+
+// Working version of step-50 with a parallel::shared::Triangulation
+
+#include "../tests.h"
+
+
+#include <deal.II/base/quadrature_lib.h>
+#include <deal.II/base/function.h>
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/conditional_ostream.h>
+
+#include <deal.II/lac/constraint_matrix.h>
+#include <deal.II/lac/vector.h>
+#include <deal.II/lac/full_matrix.h>
+#include <deal.II/lac/sparse_matrix.h>
+#include <deal.II/lac/solver_cg.h>
+#include <deal.II/lac/solver_gmres.h>
+#include <deal.II/lac/precondition.h>
+
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/grid/grid_refinement.h>
+#include <deal.II/grid/tria_boundary_lib.h>
+
+#include <deal.II/dofs/dof_accessor.h>
+#include <deal.II/dofs/dof_tools.h>
+
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_values.h>
+
+#include <deal.II/numerics/vector_tools.h>
+#include <deal.II/numerics/data_out.h>
+#include <deal.II/numerics/error_estimator.h>
+
+#include <deal.II/base/index_set.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/distributed/shared_tria.h>
+#include <deal.II/distributed/grid_refinement.h>
+
+#include <deal.II/multigrid/mg_constrained_dofs.h>
+#include <deal.II/multigrid/multigrid.h>
+#include <deal.II/multigrid/mg_transfer.h>
+#include <deal.II/multigrid/mg_tools.h>
+#include <deal.II/multigrid/mg_coarse.h>
+#include <deal.II/multigrid/mg_smoother.h>
+#include <deal.II/multigrid/mg_matrix.h>
+
+
+#include <deal.II/lac/generic_linear_algebra.h>
+
+
+namespace LA
+{
+  using namespace dealii::LinearAlgebraTrilinos;
+}
+
+#include <iostream>
+#include <fstream>
+#include <sstream>
+
+namespace Step50
+{
+  using namespace dealii;
+
+
+
+  template <int dim>
+  class LaplaceProblem
+  {
+  public:
+    LaplaceProblem (const unsigned int deg);
+    void run ();
+
+  private:
+    void setup_system ();
+    void assemble_system ();
+    void assemble_multigrid ();
+    void solve ();
+    void refine_grid ();
+
+    parallel::shared::Triangulation<dim> triangulation;
+    FE_Q<dim>                            fe;
+    DoFHandler<dim>                      mg_dof_handler;
+
+    typedef LA::MPI::SparseMatrix matrix_t;
+    typedef LA::MPI::Vector vector_t;
+
+    matrix_t system_matrix;
+
+    IndexSet locally_relevant_set;
+
+    ConstraintMatrix constraints;
+
+    vector_t solution;
+    vector_t system_rhs;
+
+    const unsigned int degree;
+
+    MGLevelObject<matrix_t> mg_matrices;
+    MGLevelObject<matrix_t> mg_interface_matrices;
+    MGConstrainedDoFs       mg_constrained_dofs;
+  };
+
+
+
+
+
+  template <int dim>
+  class Coefficient : public Function<dim>
+  {
+  public:
+    Coefficient () : Function<dim>() {}
+
+    virtual double value (const Point<dim>   &p,
+                          const unsigned int  component = 0) const;
+
+    virtual void value_list (const std::vector<Point<dim> > &points,
+                             std::vector<double>            &values,
+                             const unsigned int              component = 0) const;
+  };
+
+
+
+  template <int dim>
+  double Coefficient<dim>::value (const Point<dim> &p,
+                                  const unsigned int) const
+  {
+    if (p.square() < 0.5*0.5)
+      return 5;
+    else
+      return 1;
+  }
+
+
+
+  template <int dim>
+  void Coefficient<dim>::value_list (const std::vector<Point<dim> > &points,
+                                     std::vector<double>            &values,
+                                     const unsigned int              component) const
+  {
+    (void)component;
+    const unsigned int n_points = points.size();
+
+    Assert (values.size() == n_points,
+            ExcDimensionMismatch (values.size(), n_points));
+
+    Assert (component == 0,
+            ExcIndexRange (component, 0, 1));
+
+    for (unsigned int i=0; i<n_points; ++i)
+      values[i] = Coefficient<dim>::value (points[i]);
+  }
+
+
+  template <int dim>
+  LaplaceProblem<dim>::LaplaceProblem (const unsigned int degree)
+    :
+    triangulation (MPI_COMM_WORLD,
+                   typename Triangulation<dim>::MeshSmoothing
+                   (Triangulation<dim>::limit_level_difference_at_vertices), true,
+                   // parallel::shared::Triangulation<dim>::Settings::partition_custom_signal),
+                   typename parallel::shared::Triangulation<dim>::Settings
+                   (parallel::shared::Triangulation<dim>::partition_zorder |
+                    parallel::shared::Triangulation<dim>::construct_multigrid_hierarchy)),
+    fe (degree),
+    mg_dof_handler (triangulation),
+    degree(degree)
+  {}
+
+
+
+  template <int dim>
+  void LaplaceProblem<dim>::setup_system ()
+  {
+    mg_dof_handler.distribute_dofs (fe);
+    mg_dof_handler.distribute_mg_dofs ();
+
+    DoFTools::extract_locally_relevant_dofs (mg_dof_handler,
+                                             locally_relevant_set);
+
+    solution.reinit(mg_dof_handler.locally_owned_dofs(), MPI_COMM_WORLD);
+    system_rhs.reinit(mg_dof_handler.locally_owned_dofs(), MPI_COMM_WORLD);
+
+    constraints.reinit (locally_relevant_set);
+    DoFTools::make_hanging_node_constraints (mg_dof_handler, constraints);
+
+    std::set<types::boundary_id>         dirichlet_boundary_ids;
+    typename FunctionMap<dim>::type      dirichlet_boundary;
+    Functions::ConstantFunction<dim>                    homogeneous_dirichlet_bc (1.0);
+    dirichlet_boundary_ids.insert(0);
+    dirichlet_boundary[0] = &homogeneous_dirichlet_bc;
+    VectorTools::interpolate_boundary_values (mg_dof_handler,
+                                              dirichlet_boundary,
+                                              constraints);
+    constraints.close ();
+
+    DynamicSparsityPattern dsp(mg_dof_handler.n_dofs(), mg_dof_handler.n_dofs());
+    DoFTools::make_sparsity_pattern (mg_dof_handler, dsp, constraints);
+    system_matrix.reinit (mg_dof_handler.locally_owned_dofs(), dsp, MPI_COMM_WORLD, true);
+
+
+    mg_constrained_dofs.clear();
+    mg_constrained_dofs.initialize(mg_dof_handler);
+    mg_constrained_dofs.make_zero_boundary_constraints(mg_dof_handler, dirichlet_boundary_ids);
+
+
+    const unsigned int n_levels = triangulation.n_global_levels();
+
+    mg_interface_matrices.resize(0, n_levels-1);
+    mg_interface_matrices.clear_elements ();
+    mg_matrices.resize(0, n_levels-1);
+    mg_matrices.clear_elements ();
+
+    for (unsigned int level=0; level<n_levels; ++level)
+      {
+        DynamicSparsityPattern dsp(mg_dof_handler.n_dofs(level),
+                                   mg_dof_handler.n_dofs(level));
+        MGTools::make_sparsity_pattern(mg_dof_handler, dsp, level);
+
+        mg_matrices[level].reinit(mg_dof_handler.locally_owned_mg_dofs(level),
+                                  mg_dof_handler.locally_owned_mg_dofs(level),
+                                  dsp,
+                                  MPI_COMM_WORLD, true);
+
+        mg_interface_matrices[level].reinit(mg_dof_handler.locally_owned_mg_dofs(level),
+                                            mg_dof_handler.locally_owned_mg_dofs(level),
+                                            dsp,
+                                            MPI_COMM_WORLD, true);
+      }
+  }
+
+
+
+  template <int dim>
+  void LaplaceProblem<dim>::assemble_system ()
+  {
+    const QGauss<dim>  quadrature_formula(degree+1);
+
+    FEValues<dim> fe_values (fe, quadrature_formula,
+                             update_values    |  update_gradients |
+                             update_quadrature_points  |  update_JxW_values);
+
+    const unsigned int   dofs_per_cell = fe.dofs_per_cell;
+    const unsigned int   n_q_points    = quadrature_formula.size();
+
+    FullMatrix<double>   cell_matrix (dofs_per_cell, dofs_per_cell);
+    Vector<double>       cell_rhs (dofs_per_cell);
+
+    std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
+
+    const Coefficient<dim> coefficient;
+    std::vector<double>    coefficient_values (n_q_points);
+
+    typename DoFHandler<dim>::active_cell_iterator
+    cell = mg_dof_handler.begin_active(),
+    endc = mg_dof_handler.end();
+    for (; cell!=endc; ++cell)
+      if (cell->is_locally_owned())
+        {
+          cell_matrix = 0;
+          cell_rhs = 0;
+
+          fe_values.reinit (cell);
+
+          coefficient.value_list (fe_values.get_quadrature_points(),
+                                  coefficient_values);
+
+          for (unsigned int q_point=0; q_point<n_q_points; ++q_point)
+            for (unsigned int i=0; i<dofs_per_cell; ++i)
+              {
+                for (unsigned int j=0; j<dofs_per_cell; ++j)
+                  cell_matrix(i,j) += (coefficient_values[q_point] *
+                                       fe_values.shape_grad(i,q_point) *
+                                       fe_values.shape_grad(j,q_point) *
+                                       fe_values.JxW(q_point));
+
+                cell_rhs(i) += (fe_values.shape_value(i,q_point) *
+                                10.0 *
+                                fe_values.JxW(q_point));
+              }
+
+          cell->get_dof_indices (local_dof_indices);
+          constraints.distribute_local_to_global (cell_matrix, cell_rhs,
+                                                  local_dof_indices,
+                                                  system_matrix, system_rhs);
+        }
+
+    system_matrix.compress(VectorOperation::add);
+    system_rhs.compress(VectorOperation::add);
+  }
+
+
+
+  template <int dim>
+  void LaplaceProblem<dim>::assemble_multigrid ()
+  {
+    QGauss<dim>  quadrature_formula(1+degree);
+
+    FEValues<dim> fe_values (fe, quadrature_formula,
+                             update_values   | update_gradients |
+                             update_quadrature_points | update_JxW_values);
+
+    const unsigned int   dofs_per_cell   = fe.dofs_per_cell;
+    const unsigned int   n_q_points      = quadrature_formula.size();
+
+    FullMatrix<double>   cell_matrix (dofs_per_cell, dofs_per_cell);
+
+    std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
+
+    const Coefficient<dim> coefficient;
+    std::vector<double>    coefficient_values (n_q_points);
+
+
+
+    std::vector<ConstraintMatrix> boundary_constraints (triangulation.n_global_levels());
+    ConstraintMatrix empty_constraints;
+    for (unsigned int level=0; level<triangulation.n_global_levels(); ++level)
+      {
+        IndexSet dofset;
+        DoFTools::extract_locally_relevant_level_dofs (mg_dof_handler, level, dofset);
+        boundary_constraints[level].reinit(dofset);
+        boundary_constraints[level].add_lines (mg_constrained_dofs.get_refinement_edge_indices(level));
+        boundary_constraints[level].add_lines (mg_constrained_dofs.get_boundary_indices(level));
+
+        boundary_constraints[level].close ();
+      }
+
+    typename DoFHandler<dim>::cell_iterator cell = mg_dof_handler.begin(),
+                                            endc = mg_dof_handler.end();
+
+    for (; cell!=endc; ++cell)
+      if (cell->level_subdomain_id()==triangulation.locally_owned_subdomain())
+        {
+          cell_matrix = 0;
+          fe_values.reinit (cell);
+
+          coefficient.value_list (fe_values.get_quadrature_points(),
+                                  coefficient_values);
+
+          for (unsigned int q_point=0; q_point<n_q_points; ++q_point)
+            for (unsigned int i=0; i<dofs_per_cell; ++i)
+              for (unsigned int j=0; j<dofs_per_cell; ++j)
+                cell_matrix(i,j) += (coefficient_values[q_point] *
+                                     fe_values.shape_grad(i,q_point) *
+                                     fe_values.shape_grad(j,q_point) *
+                                     fe_values.JxW(q_point));
+
+          cell->get_mg_dof_indices (local_dof_indices);
+
+          boundary_constraints[cell->level()]
+          .distribute_local_to_global (cell_matrix,
+                                       local_dof_indices,
+                                       mg_matrices[cell->level()]);
+
+
+          const IndexSet &interface_dofs_on_level
+            = mg_constrained_dofs.get_refinement_edge_indices(cell->level());
+          const unsigned int lvl = cell->level();
+
+          for (unsigned int i=0; i<dofs_per_cell; ++i)
+            for (unsigned int j=0; j<dofs_per_cell; ++j)
+              if (interface_dofs_on_level.is_element(local_dof_indices[i])   // at_refinement_edge(i)
+                  &&
+                  !interface_dofs_on_level.is_element(local_dof_indices[j])   // !at_refinement_edge(j)
+                  &&
+                  (
+                    (!mg_constrained_dofs.is_boundary_index(lvl, local_dof_indices[i])
+                     &&
+                     !mg_constrained_dofs.is_boundary_index(lvl, local_dof_indices[j])
+                    ) // ( !boundary(i) && !boundary(j) )
+                    ||
+                    (
+                      mg_constrained_dofs.is_boundary_index(lvl, local_dof_indices[i])
+                      &&
+                      local_dof_indices[i]==local_dof_indices[j]
+                    ) // ( boundary(i) && boundary(j) && i==j )
+                  )
+                 )
+                {
+                }
+              else
+                {
+                  cell_matrix(i,j) = 0;
+                }
+
+
+          empty_constraints
+          .distribute_local_to_global (cell_matrix,
+                                       local_dof_indices,
+                                       mg_interface_matrices[cell->level()]);
+        }
+
+    for (unsigned int i=0; i<triangulation.n_global_levels(); ++i)
+      {
+        mg_matrices[i].compress(VectorOperation::add);
+        mg_interface_matrices[i].compress(VectorOperation::add);
+      }
+  }
+
+
+
+
+  template <int dim>
+  void LaplaceProblem<dim>::solve ()
+  {
+    MGTransferPrebuilt<vector_t> mg_transfer(mg_constrained_dofs);
+    mg_transfer.build_matrices(mg_dof_handler);
+
+    matrix_t &coarse_matrix = mg_matrices[0];
+
+    SolverControl coarse_solver_control (1000, 1e-10, false, false);
+    SolverCG<vector_t> coarse_solver(coarse_solver_control);
+    PreconditionIdentity id;
+    MGCoarseGridIterativeSolver<vector_t, SolverCG<vector_t>, matrix_t, PreconditionIdentity>
+    coarse_grid_solver(coarse_solver, coarse_matrix, id);
+
+    typedef LA::MPI::PreconditionJacobi Smoother;
+    MGSmootherPrecondition<matrix_t, Smoother, vector_t> mg_smoother;
+    mg_smoother.initialize(mg_matrices, Smoother::AdditionalData(0.5));
+    mg_smoother.set_steps(2);
+
+    mg::Matrix<vector_t> mg_matrix(mg_matrices);
+    mg::Matrix<vector_t> mg_interface_up(mg_interface_matrices);
+    mg::Matrix<vector_t> mg_interface_down(mg_interface_matrices);
+
+    Multigrid<vector_t > mg(mg_matrix,
+                            coarse_grid_solver,
+                            mg_transfer,
+                            mg_smoother,
+                            mg_smoother);
+    mg.set_edge_matrices(mg_interface_down, mg_interface_up);
+
+    PreconditionMG<dim, vector_t, MGTransferPrebuilt<vector_t> >
+    preconditioner(mg_dof_handler, mg, mg_transfer);
+
+
+    SolverControl solver_control (500, 1e-8*system_rhs.l2_norm(), false);
+    SolverCG<vector_t> solver (solver_control);
+
+    if (false)
+      {
+        /*
+         TrilinosWrappers::PreconditionAMG prec;
+
+         TrilinosWrappers::PreconditionAMG::AdditionalData Amg_data;
+         Amg_data.elliptic = true;
+         Amg_data.higher_order_elements = true;
+         Amg_data.smoother_sweeps = 2;
+         Amg_data.aggregation_threshold = 0.02;
+
+         prec.initialize (system_matrix,
+                          Amg_data);
+         solver.solve (system_matrix, solution, system_rhs, prec);
+        */
+      }
+    else
+      {
+        solver.solve (system_matrix, solution, system_rhs,
+                      preconditioner);
+      }
+    deallog << "   CG converged in " << solver_control.last_step() << " iterations." << std::endl;
+
+    constraints.distribute (solution);
+  }
+
+
+
+  template <int dim>
+  void LaplaceProblem<dim>::refine_grid ()
+  {
+    for (typename Triangulation<dim>::active_cell_iterator cell=triangulation.begin_active(); cell != triangulation.end(); ++cell)
+      for (unsigned int v=0; v<GeometryInfo<dim>::vertices_per_cell; ++v)
+        if (cell->vertex(v)[0] <= 0.5 && cell->vertex(v)[1] <= 0.5)
+          cell->set_refine_flag();
+    triangulation.execute_coarsening_and_refinement ();
+  }
+
+
+
+  template <int dim>
+  void LaplaceProblem<dim>::run ()
+  {
+    for (unsigned int cycle=0; cycle<2; ++cycle)
+      {
+        deallog << "Cycle " << cycle << ':' << std::endl;
+
+        if (cycle == 0)
+          {
+            GridGenerator::hyper_cube (triangulation);
+            triangulation.refine_global (5);
+          }
+        else
+          refine_grid ();
+
+        deallog << "   Number of active cells:       "
+                << triangulation.n_global_active_cells()
+                << std::endl;
+
+        setup_system ();
+
+        deallog << "   Number of degrees of freedom: "
+                << mg_dof_handler.n_dofs()
+                << " (by level: ";
+        for (unsigned int level=0; level<triangulation.n_global_levels(); ++level)
+          deallog << mg_dof_handler.n_dofs(level)
+                  << (level == triangulation.n_global_levels()-1
+                      ? ")" : ", ");
+        deallog << std::endl;
+
+        assemble_system ();
+        assemble_multigrid ();
+
+        solve ();
+      }
+  }
+}
+
+
+int main (int argc, char *argv[])
+{
+  dealii::Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+  mpi_initlog(true);
+
+  try
+    {
+      using namespace dealii;
+      using namespace Step50;
+
+      LaplaceProblem<2> laplace_problem(1/*degree*/);
+      laplace_problem.run ();
+    }
+  catch (std::exception &exc)
+    {
+      std::cerr << std::endl << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+      std::cerr << "Exception on processing: " << std::endl
+                << exc.what() << std::endl
+                << "Aborting!" << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+    }
+  catch (...)
+    {
+      std::cerr << std::endl << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+      std::cerr << "Unknown exception!" << std::endl
+                << "Aborting!" << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+      throw;
+    }
+
+  return 0;
+}
diff --git a/tests/multigrid/step-50_02.with_mpi=true.with_trilinos=true.mpirun=3.output b/tests/multigrid/step-50_02.with_mpi=true.with_trilinos=true.mpirun=3.output
new file mode 100644 (file)
index 0000000..02d1ad5
--- /dev/null
@@ -0,0 +1,13 @@
+
+DEAL::Cycle 0:
+DEAL::   Number of active cells:       1024
+DEAL::   Number of degrees of freedom: 1089 (by level: 4, 9, 25, 81, 289, 1089)
+DEAL:cg::Starting value 29.7769
+DEAL:cg::Convergence step 7 value 8.10686e-08
+DEAL::   CG converged in 7 iterations.
+DEAL::Cycle 1:
+DEAL::   Number of active cells:       1891
+DEAL::   Number of degrees of freedom: 1990 (by level: 4, 9, 25, 81, 289, 1089, 1225)
+DEAL:cg::Starting value 41.0662
+DEAL:cg::Convergence step 7 value 2.21064e-07
+DEAL::   CG converged in 7 iterations.
diff --git a/tests/sharedtria/tria_mg_dof_01.cc b/tests/sharedtria/tria_mg_dof_01.cc
new file mode 100644 (file)
index 0000000..948091b
--- /dev/null
@@ -0,0 +1,148 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2008 - 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+// distribute mg dofs on a shared and a distributed mesh and compare.
+// Note: this doesn't pass for all meshes since, for some complicated refinement
+//       schemes, cells may be traversed in a different order in a distributed
+//       triangulation as opposed to a shared triangulation.
+
+#include "../tests.h"
+#include <deal.II/base/tensor.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/distributed/shared_tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/dofs/dof_accessor.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/fe/fe_system.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_dgq.h>
+
+template <int dim>
+void compare_meshes (DoFHandler<dim> &shared_dof_handler, DoFHandler<dim> &distributed_dof_handler)
+{
+  FE_Q<dim> fe (2);
+
+  shared_dof_handler.distribute_dofs(fe);
+  shared_dof_handler.distribute_mg_dofs();
+
+  distributed_dof_handler.distribute_dofs(fe);
+  distributed_dof_handler.distribute_mg_dofs();
+
+  unsigned int n_levels = distributed_dof_handler.get_triangulation().n_global_levels();
+  for (unsigned int lvl=0; lvl<n_levels; ++lvl)
+    {
+      IndexSet shared_dofs = shared_dof_handler.locally_owned_mg_dofs(lvl);
+      IndexSet distributed_dofs = distributed_dof_handler.locally_owned_mg_dofs(lvl);
+      Assert(shared_dofs==distributed_dofs, ExcInternalError());
+
+      typename DoFHandler<dim>::cell_iterator
+      cell = distributed_dof_handler.begin(lvl),
+      endc = distributed_dof_handler.end(lvl);
+      for (; cell!=endc; ++cell)
+        {
+          if (cell->level_subdomain_id()==numbers::artificial_subdomain_id)
+            continue;
+
+          typename Triangulation<dim>::cell_iterator
+          tria_shared_cell = cell->id().to_cell(shared_dof_handler.get_triangulation());
+          typename DoFHandler<dim>::cell_iterator dof_shared_cell(&shared_dof_handler.get_triangulation(),
+                                                                  tria_shared_cell->level(),
+                                                                  tria_shared_cell->index(),
+                                                                  &shared_dof_handler);
+
+          std::vector<types::global_dof_index> distributed_cell_dofs(fe.dofs_per_cell);
+          std::vector<types::global_dof_index> shared_cell_dofs(fe.dofs_per_cell);
+          cell->get_mg_dof_indices(distributed_cell_dofs);
+          dof_shared_cell->get_mg_dof_indices(shared_cell_dofs);
+          for (unsigned int i=0; i<fe.dofs_per_cell; ++i)
+            Assert(distributed_cell_dofs[i]==shared_cell_dofs[i], ExcInternalError());
+        }
+    }
+}
+
+
+
+template <int dim>
+void test()
+{
+  parallel::shared::Triangulation<dim> shared_tria(MPI_COMM_WORLD,typename Triangulation<dim>::MeshSmoothing
+                                                   (Triangulation<dim>::limit_level_difference_at_vertices), true,
+                                                   typename parallel::shared::Triangulation<dim>::Settings
+                                                   (parallel::shared::Triangulation<dim>::partition_zorder |
+                                                    parallel::shared::Triangulation<dim>::construct_multigrid_hierarchy));
+  DoFHandler<dim> shared_dof_handler(shared_tria);
+
+  parallel::distributed::Triangulation<dim> distributed_tria(MPI_COMM_WORLD,Triangulation<dim>::
+                                                             limit_level_difference_at_vertices,
+                                                             parallel::distributed::Triangulation<dim>::construct_multigrid_hierarchy);
+  DoFHandler<dim> distributed_dof_handler(distributed_tria);
+
+  GridGenerator::hyper_cube(shared_tria);
+  shared_tria.refine_global(2);
+  for (typename Triangulation<dim>::active_cell_iterator cell=shared_tria.begin_active(); cell != shared_tria.end(); ++cell)
+    {
+      if (dim==1)
+        if (cell->center()[0] < 0.5)
+          cell->set_refine_flag();
+      if (dim==2)
+        if (cell->center()[0] < 0.5 && cell->center()[1] < 0.5)
+          cell->set_refine_flag();
+      if (dim==3)
+        if (cell->center()[0] < 0.5 && cell->center()[1] && cell->center()[2] < 0.5)
+          cell->set_refine_flag();
+    }
+  shared_tria.execute_coarsening_and_refinement();
+
+  GridGenerator::hyper_cube(distributed_tria);
+  distributed_tria.refine_global(2);
+  for (typename Triangulation<dim>::active_cell_iterator cell=distributed_tria.begin_active(); cell != distributed_tria.end(); ++cell)
+    {
+      if (dim==1)
+        if (cell->is_locally_owned() && cell->center()[0] < 0.5)
+          cell->set_refine_flag();
+      if (dim==2)
+        if (cell->is_locally_owned() && cell->center()[0] < 0.5 && cell->center()[1] < 0.5)
+          cell->set_refine_flag();
+      if (dim==3)
+        if (cell->is_locally_owned() && cell->center()[0] < 0.5 && cell->center()[1] < 0.5 && cell->center()[2] < 0.5)
+          cell->set_refine_flag();
+    }
+  distributed_tria.execute_coarsening_and_refinement();
+
+  compare_meshes(shared_dof_handler,distributed_dof_handler);
+}
+
+int main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+  MPILogInitAll all;
+
+  deallog.push("2d");
+  test<2>();
+  deallog.pop();
+  deallog.push("3d");
+  test<3>();
+  deallog.pop();
+
+  deallog << "OK" << std::endl;
+}
+
diff --git a/tests/sharedtria/tria_mg_dof_01.mpirun=3.with_p4est=true.output b/tests/sharedtria/tria_mg_dof_01.mpirun=3.with_p4est=true.output
new file mode 100644 (file)
index 0000000..3342d14
--- /dev/null
@@ -0,0 +1,8 @@
+
+DEAL:0::OK
+
+DEAL:1::OK
+
+
+DEAL:2::OK
+
diff --git a/tests/sharedtria/tria_mg_dof_02.cc b/tests/sharedtria/tria_mg_dof_02.cc
new file mode 100644 (file)
index 0000000..7978bd5
--- /dev/null
@@ -0,0 +1,121 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2008 - 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+// distribute mg dofs for a shared triangulation and
+// check level_ghost_owners is set properly
+
+#include "../tests.h"
+#include <deal.II/base/tensor.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/distributed/shared_tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/dofs/dof_accessor.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/fe/fe_system.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_dgq.h>
+
+template <int dim>
+void write_dof_data (DoFHandler<dim> &dof_handler)
+{
+  FE_Q<dim> fe (2);
+
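+  // Enumerate the active DoFs and then the level (multigrid) DoFs; the
+  // latter is the functionality added for shared triangulations in this patch.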
+  dof_handler.distribute_dofs(fe);
+  dof_handler.distribute_mg_dofs();
+
+  unsigned int n_levels = dof_handler.get_triangulation().n_global_levels();
+  for (unsigned int lvl=0; lvl<n_levels; ++lvl)
+    {
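+      // Print the locally owned level DoF index set of every processor,
+      // followed by the level DoF indices of each non-artificial cell.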
+      std::vector<IndexSet> dof_index_per_proc = dof_handler.locally_owned_mg_dofs_per_processor(lvl);
+      for (unsigned int i=0; i<dof_index_per_proc.size(); ++i)
+        dof_index_per_proc[i].print(deallog);
+
+
+      typename DoFHandler<dim>::cell_iterator
+      cell = dof_handler.begin(lvl),
+      endc = dof_handler.end(lvl);
+      for (; cell!=endc; ++cell)
+        {
+          if (cell->level_subdomain_id()==numbers::artificial_subdomain_id)
+            continue;
+
+          std::vector<types::global_dof_index> local_mg_dof_indices(fe.dofs_per_cell);
+          cell->get_mg_dof_indices(local_mg_dof_indices);
+          deallog << "proc " << Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) << ", "
+                  << "cell " << cell->id() << ", mg_dof_indices: ";
+          for (unsigned int i=0; i<fe.dofs_per_cell; ++i)
+            deallog << local_mg_dof_indices[i] << " ";
+          deallog << std::endl;
+
+        }
+    }
+}
+
+
+
+template <int dim>
+void test()
+{
+  parallel::shared::Triangulation<dim> tria(MPI_COMM_WORLD,typename Triangulation<dim>::MeshSmoothing
+                                            (Triangulation<dim>::limit_level_difference_at_vertices), true,
+                                            typename parallel::shared::Triangulation<dim>::Settings
+                                            (parallel::shared::Triangulation<dim>::partition_zorder |
+                                             parallel::shared::Triangulation<dim>::construct_multigrid_hierarchy));
+
+  DoFHandler<dim> dof_handler(tria);
+
+  GridGenerator::hyper_cube(tria,-1,1);
+
+  tria.refine_global(1);
+
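+  // Refine the left half of the domain (in 3d, the quarter with x<0 and y<0)
+  // so that the level hierarchy and the level ghost layers are nontrivial.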
+  for (typename Triangulation<dim>::active_cell_iterator cell=tria.begin_active(); cell != tria.end(); ++cell)
+    {
+      if (dim==2)
+        if (cell->center()[0] < 0)
+          cell->set_refine_flag();
+      if (dim==3)
+        if (cell->center()[0] < 0 && cell->center()[1] < 0)
+          cell->set_refine_flag();
+    }
+  tria.execute_coarsening_and_refinement();
+
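+  // Print the set of processors that own level ghost cells of this process.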
+  deallog << "proc " << Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) << ", level_ghost_owners: ";
+  for (std::set<types::subdomain_id>::iterator it=tria.level_ghost_owners().begin(); it!=tria.level_ghost_owners().end(); ++it)
+    deallog << *it << " ";
+  deallog << std::endl;
+
+  write_dof_data(dof_handler);
+}
+
+int main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+  MPILogInitAll all;
+
+  deallog.push("2d");
+  test<2>();
+  deallog.pop();
+  deallog.push("3d");
+  test<3>();
+  deallog.pop();
+}
+
diff --git a/tests/sharedtria/tria_mg_dof_02.mpirun=3.output b/tests/sharedtria/tria_mg_dof_02.mpirun=3.output
new file mode 100644 (file)
index 0000000..f6720e9
--- /dev/null
@@ -0,0 +1,155 @@
+
+DEAL:0:2d::proc 0, level_ghost_owners: 1 2 
+DEAL:0:2d::{[0,8]}
+DEAL:0:2d::{}
+DEAL:0:2d::{}
+DEAL:0:2d::proc 0, cell 0_0:, mg_dof_indices: 0 1 2 3 4 5 6 7 8 
+DEAL:0:2d::{[0,8]}
+DEAL:0:2d::{[9,14]}
+DEAL:0:2d::{[15,24]}
+DEAL:0:2d::proc 0, cell 0_1:0, mg_dof_indices: 0 1 2 3 4 5 6 7 8 
+DEAL:0:2d::proc 0, cell 0_1:1, mg_dof_indices: 1 9 3 10 5 11 12 13 14 
+DEAL:0:2d::proc 0, cell 0_1:2, mg_dof_indices: 2 3 15 16 17 18 7 19 20 
+DEAL:0:2d::proc 0, cell 0_1:3, mg_dof_indices: 3 10 16 21 18 22 13 23 24 
+DEAL:0:2d::{[0,24]}
+DEAL:0:2d::{}
+DEAL:0:2d::{[25,44]}
+DEAL:0:2d::proc 0, cell 0_2:00, mg_dof_indices: 0 1 2 3 4 5 6 7 8 
+DEAL:0:2d::proc 0, cell 0_2:01, mg_dof_indices: 1 9 3 10 5 11 12 13 14 
+DEAL:0:2d::proc 0, cell 0_2:02, mg_dof_indices: 2 3 15 16 17 18 7 19 20 
+DEAL:0:2d::proc 0, cell 0_2:03, mg_dof_indices: 3 10 16 21 18 22 13 23 24 
+DEAL:0:2d::proc 0, cell 0_2:20, mg_dof_indices: 15 16 25 26 27 28 19 29 30 
+DEAL:0:2d::proc 0, cell 0_2:21, mg_dof_indices: 16 21 26 31 28 32 23 33 34 
+DEAL:0:3d::proc 0, level_ghost_owners: 1 2 
+DEAL:0:3d::{[0,26]}
+DEAL:0:3d::{}
+DEAL:0:3d::{}
+DEAL:0:3d::proc 0, cell 0_0:, mg_dof_indices: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 
+DEAL:0:3d::{[0,26]}
+DEAL:0:3d::{[27,74]}
+DEAL:0:3d::{[75,124]}
+DEAL:0:3d::proc 0, cell 0_1:0, mg_dof_indices: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 
+DEAL:0:3d::proc 0, cell 0_1:1, mg_dof_indices: 1 27 3 28 5 29 7 30 9 31 32 33 13 34 35 36 17 37 19 38 21 39 40 41 42 43 44 
+DEAL:0:3d::proc 0, cell 0_1:2, mg_dof_indices: 2 3 45 46 6 7 47 48 49 50 11 51 52 53 15 54 18 19 55 56 57 58 23 59 60 61 62 
+DEAL:0:3d::proc 0, cell 0_1:3, mg_dof_indices: 3 28 46 63 7 30 48 64 50 65 33 66 53 67 36 68 19 38 56 69 58 70 41 71 72 73 74 
+DEAL:0:3d::proc 0, cell 0_1:4, mg_dof_indices: 4 5 6 7 75 76 77 78 12 13 14 15 79 80 81 82 83 84 85 86 87 88 89 90 25 91 92 
+DEAL:0:3d::proc 0, cell 0_1:5, mg_dof_indices: 5 29 7 30 76 93 78 94 13 34 35 36 80 95 96 97 84 98 86 99 88 100 101 102 43 103 104 
+DEAL:0:3d::proc 0, cell 0_1:6, mg_dof_indices: 6 7 47 48 77 78 105 106 52 53 15 54 107 108 82 109 85 86 110 111 112 113 90 114 61 115 116 
+DEAL:0:3d::proc 0, cell 0_1:7, mg_dof_indices: 7 30 48 64 78 94 106 117 53 67 36 68 108 118 97 119 86 99 111 120 113 121 102 122 73 123 124 
+DEAL:0:3d::{[0,124]}
+DEAL:0:3d::{}
+DEAL:0:3d::{[125,224]}
+DEAL:0:3d::proc 0, cell 0_2:00, mg_dof_indices: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 
+DEAL:0:3d::proc 0, cell 0_2:01, mg_dof_indices: 1 27 3 28 5 29 7 30 9 31 32 33 13 34 35 36 17 37 19 38 21 39 40 41 42 43 44 
+DEAL:0:3d::proc 0, cell 0_2:02, mg_dof_indices: 2 3 45 46 6 7 47 48 49 50 11 51 52 53 15 54 18 19 55 56 57 58 23 59 60 61 62 
+DEAL:0:3d::proc 0, cell 0_2:03, mg_dof_indices: 3 28 46 63 7 30 48 64 50 65 33 66 53 67 36 68 19 38 56 69 58 70 41 71 72 73 74 
+DEAL:0:3d::proc 0, cell 0_2:04, mg_dof_indices: 4 5 6 7 75 76 77 78 12 13 14 15 79 80 81 82 83 84 85 86 87 88 89 90 25 91 92 
+DEAL:0:3d::proc 0, cell 0_2:05, mg_dof_indices: 5 29 7 30 76 93 78 94 13 34 35 36 80 95 96 97 84 98 86 99 88 100 101 102 43 103 104 
+DEAL:0:3d::proc 0, cell 0_2:06, mg_dof_indices: 6 7 47 48 77 78 105 106 52 53 15 54 107 108 82 109 85 86 110 111 112 113 90 114 61 115 116 
+DEAL:0:3d::proc 0, cell 0_2:07, mg_dof_indices: 7 30 48 64 78 94 106 117 53 67 36 68 108 118 97 119 86 99 111 120 113 121 102 122 73 123 124 
+DEAL:0:3d::proc 0, cell 0_2:40, mg_dof_indices: 75 76 77 78 125 126 127 128 79 80 81 82 129 130 131 132 133 134 135 136 137 138 139 140 91 141 142 
+DEAL:0:3d::proc 0, cell 0_2:41, mg_dof_indices: 76 93 78 94 126 143 128 144 80 95 96 97 130 145 146 147 134 148 136 149 138 150 151 152 103 153 154 
+DEAL:0:3d::proc 0, cell 0_2:42, mg_dof_indices: 77 78 105 106 127 128 155 156 107 108 82 109 157 158 132 159 135 136 160 161 162 163 140 164 115 165 166 
+DEAL:0:3d::proc 0, cell 0_2:43, mg_dof_indices: 78 94 106 117 128 144 156 167 108 118 97 119 158 168 147 169 136 149 161 170 163 171 152 172 123 173 174 
+
+DEAL:1:2d::proc 1, level_ghost_owners: 0 2 
+DEAL:1:2d::{[0,8]}
+DEAL:1:2d::{}
+DEAL:1:2d::{}
+DEAL:1:2d::proc 1, cell 0_0:, mg_dof_indices: 0 1 2 3 4 5 6 7 8 
+DEAL:1:2d::{[0,8]}
+DEAL:1:2d::{[9,14]}
+DEAL:1:2d::{[15,24]}
+DEAL:1:2d::proc 1, cell 0_1:0, mg_dof_indices: 0 1 2 3 4 5 6 7 8 
+DEAL:1:2d::proc 1, cell 0_1:1, mg_dof_indices: 1 9 3 10 5 11 12 13 14 
+DEAL:1:2d::proc 1, cell 0_1:2, mg_dof_indices: 2 3 15 16 17 18 7 19 20 
+DEAL:1:2d::proc 1, cell 0_1:3, mg_dof_indices: 3 10 16 21 18 22 13 23 24 
+DEAL:1:2d::{[0,24]}
+DEAL:1:2d::{}
+DEAL:1:2d::{[25,44]}
+DEAL:1:2d::proc 1, cell 0_2:01, mg_dof_indices: 1 9 3 10 5 11 12 13 14 
+DEAL:1:2d::proc 1, cell 0_2:03, mg_dof_indices: 3 10 16 21 18 22 13 23 24 
+DEAL:1:2d::proc 1, cell 0_2:21, mg_dof_indices: 16 21 26 31 28 32 23 33 34 
+DEAL:1:3d::proc 1, level_ghost_owners: 0 2 
+DEAL:1:3d::{[0,26]}
+DEAL:1:3d::{}
+DEAL:1:3d::{}
+DEAL:1:3d::proc 1, cell 0_0:, mg_dof_indices: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 
+DEAL:1:3d::{[0,26]}
+DEAL:1:3d::{[27,74]}
+DEAL:1:3d::{[75,124]}
+DEAL:1:3d::proc 1, cell 0_1:0, mg_dof_indices: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 
+DEAL:1:3d::proc 1, cell 0_1:1, mg_dof_indices: 1 27 3 28 5 29 7 30 9 31 32 33 13 34 35 36 17 37 19 38 21 39 40 41 42 43 44 
+DEAL:1:3d::proc 1, cell 0_1:2, mg_dof_indices: 2 3 45 46 6 7 47 48 49 50 11 51 52 53 15 54 18 19 55 56 57 58 23 59 60 61 62 
+DEAL:1:3d::proc 1, cell 0_1:3, mg_dof_indices: 3 28 46 63 7 30 48 64 50 65 33 66 53 67 36 68 19 38 56 69 58 70 41 71 72 73 74 
+DEAL:1:3d::proc 1, cell 0_1:4, mg_dof_indices: 4 5 6 7 75 76 77 78 12 13 14 15 79 80 81 82 83 84 85 86 87 88 89 90 25 91 92 
+DEAL:1:3d::proc 1, cell 0_1:5, mg_dof_indices: 5 29 7 30 76 93 78 94 13 34 35 36 80 95 96 97 84 98 86 99 88 100 101 102 43 103 104 
+DEAL:1:3d::proc 1, cell 0_1:6, mg_dof_indices: 6 7 47 48 77 78 105 106 52 53 15 54 107 108 82 109 85 86 110 111 112 113 90 114 61 115 116 
+DEAL:1:3d::proc 1, cell 0_1:7, mg_dof_indices: 7 30 48 64 78 94 106 117 53 67 36 68 108 118 97 119 86 99 111 120 113 121 102 122 73 123 124 
+DEAL:1:3d::{[0,124]}
+DEAL:1:3d::{}
+DEAL:1:3d::{[125,224]}
+DEAL:1:3d::proc 1, cell 0_2:01, mg_dof_indices: 1 27 3 28 5 29 7 30 9 31 32 33 13 34 35 36 17 37 19 38 21 39 40 41 42 43 44 
+DEAL:1:3d::proc 1, cell 0_2:02, mg_dof_indices: 2 3 45 46 6 7 47 48 49 50 11 51 52 53 15 54 18 19 55 56 57 58 23 59 60 61 62 
+DEAL:1:3d::proc 1, cell 0_2:03, mg_dof_indices: 3 28 46 63 7 30 48 64 50 65 33 66 53 67 36 68 19 38 56 69 58 70 41 71 72 73 74 
+DEAL:1:3d::proc 1, cell 0_2:05, mg_dof_indices: 5 29 7 30 76 93 78 94 13 34 35 36 80 95 96 97 84 98 86 99 88 100 101 102 43 103 104 
+DEAL:1:3d::proc 1, cell 0_2:06, mg_dof_indices: 6 7 47 48 77 78 105 106 52 53 15 54 107 108 82 109 85 86 110 111 112 113 90 114 61 115 116 
+DEAL:1:3d::proc 1, cell 0_2:07, mg_dof_indices: 7 30 48 64 78 94 106 117 53 67 36 68 108 118 97 119 86 99 111 120 113 121 102 122 73 123 124 
+DEAL:1:3d::proc 1, cell 0_2:41, mg_dof_indices: 76 93 78 94 126 143 128 144 80 95 96 97 130 145 146 147 134 148 136 149 138 150 151 152 103 153 154 
+DEAL:1:3d::proc 1, cell 0_2:42, mg_dof_indices: 77 78 105 106 127 128 155 156 107 108 82 109 157 158 132 159 135 136 160 161 162 163 140 164 115 165 166 
+DEAL:1:3d::proc 1, cell 0_2:43, mg_dof_indices: 78 94 106 117 128 144 156 167 108 118 97 119 158 168 147 169 136 149 161 170 163 171 152 172 123 173 174 
+
+
+DEAL:2:2d::proc 2, level_ghost_owners: 0 1 
+DEAL:2:2d::{[0,8]}
+DEAL:2:2d::{}
+DEAL:2:2d::{}
+DEAL:2:2d::proc 2, cell 0_0:, mg_dof_indices: 0 1 2 3 4 5 6 7 8 
+DEAL:2:2d::{[0,8]}
+DEAL:2:2d::{[9,14]}
+DEAL:2:2d::{[15,24]}
+DEAL:2:2d::proc 2, cell 0_1:0, mg_dof_indices: 0 1 2 3 4 5 6 7 8 
+DEAL:2:2d::proc 2, cell 0_1:1, mg_dof_indices: 1 9 3 10 5 11 12 13 14 
+DEAL:2:2d::proc 2, cell 0_1:2, mg_dof_indices: 2 3 15 16 17 18 7 19 20 
+DEAL:2:2d::proc 2, cell 0_1:3, mg_dof_indices: 3 10 16 21 18 22 13 23 24 
+DEAL:2:2d::{[0,24]}
+DEAL:2:2d::{}
+DEAL:2:2d::{[25,44]}
+DEAL:2:2d::proc 2, cell 0_2:02, mg_dof_indices: 2 3 15 16 17 18 7 19 20 
+DEAL:2:2d::proc 2, cell 0_2:03, mg_dof_indices: 3 10 16 21 18 22 13 23 24 
+DEAL:2:2d::proc 2, cell 0_2:20, mg_dof_indices: 15 16 25 26 27 28 19 29 30 
+DEAL:2:2d::proc 2, cell 0_2:21, mg_dof_indices: 16 21 26 31 28 32 23 33 34 
+DEAL:2:2d::proc 2, cell 0_2:22, mg_dof_indices: 25 26 35 36 37 38 29 39 40 
+DEAL:2:2d::proc 2, cell 0_2:23, mg_dof_indices: 26 31 36 41 38 42 33 43 44 
+DEAL:2:3d::proc 2, level_ghost_owners: 0 1 
+DEAL:2:3d::{[0,26]}
+DEAL:2:3d::{}
+DEAL:2:3d::{}
+DEAL:2:3d::proc 2, cell 0_0:, mg_dof_indices: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 
+DEAL:2:3d::{[0,26]}
+DEAL:2:3d::{[27,74]}
+DEAL:2:3d::{[75,124]}
+DEAL:2:3d::proc 2, cell 0_1:0, mg_dof_indices: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 
+DEAL:2:3d::proc 2, cell 0_1:1, mg_dof_indices: 1 27 3 28 5 29 7 30 9 31 32 33 13 34 35 36 17 37 19 38 21 39 40 41 42 43 44 
+DEAL:2:3d::proc 2, cell 0_1:2, mg_dof_indices: 2 3 45 46 6 7 47 48 49 50 11 51 52 53 15 54 18 19 55 56 57 58 23 59 60 61 62 
+DEAL:2:3d::proc 2, cell 0_1:3, mg_dof_indices: 3 28 46 63 7 30 48 64 50 65 33 66 53 67 36 68 19 38 56 69 58 70 41 71 72 73 74 
+DEAL:2:3d::proc 2, cell 0_1:4, mg_dof_indices: 4 5 6 7 75 76 77 78 12 13 14 15 79 80 81 82 83 84 85 86 87 88 89 90 25 91 92 
+DEAL:2:3d::proc 2, cell 0_1:5, mg_dof_indices: 5 29 7 30 76 93 78 94 13 34 35 36 80 95 96 97 84 98 86 99 88 100 101 102 43 103 104 
+DEAL:2:3d::proc 2, cell 0_1:6, mg_dof_indices: 6 7 47 48 77 78 105 106 52 53 15 54 107 108 82 109 85 86 110 111 112 113 90 114 61 115 116 
+DEAL:2:3d::proc 2, cell 0_1:7, mg_dof_indices: 7 30 48 64 78 94 106 117 53 67 36 68 108 118 97 119 86 99 111 120 113 121 102 122 73 123 124 
+DEAL:2:3d::{[0,124]}
+DEAL:2:3d::{}
+DEAL:2:3d::{[125,224]}
+DEAL:2:3d::proc 2, cell 0_2:04, mg_dof_indices: 4 5 6 7 75 76 77 78 12 13 14 15 79 80 81 82 83 84 85 86 87 88 89 90 25 91 92 
+DEAL:2:3d::proc 2, cell 0_2:05, mg_dof_indices: 5 29 7 30 76 93 78 94 13 34 35 36 80 95 96 97 84 98 86 99 88 100 101 102 43 103 104 
+DEAL:2:3d::proc 2, cell 0_2:06, mg_dof_indices: 6 7 47 48 77 78 105 106 52 53 15 54 107 108 82 109 85 86 110 111 112 113 90 114 61 115 116 
+DEAL:2:3d::proc 2, cell 0_2:07, mg_dof_indices: 7 30 48 64 78 94 106 117 53 67 36 68 108 118 97 119 86 99 111 120 113 121 102 122 73 123 124 
+DEAL:2:3d::proc 2, cell 0_2:40, mg_dof_indices: 75 76 77 78 125 126 127 128 79 80 81 82 129 130 131 132 133 134 135 136 137 138 139 140 91 141 142 
+DEAL:2:3d::proc 2, cell 0_2:41, mg_dof_indices: 76 93 78 94 126 143 128 144 80 95 96 97 130 145 146 147 134 148 136 149 138 150 151 152 103 153 154 
+DEAL:2:3d::proc 2, cell 0_2:42, mg_dof_indices: 77 78 105 106 127 128 155 156 107 108 82 109 157 158 132 159 135 136 160 161 162 163 140 164 115 165 166 
+DEAL:2:3d::proc 2, cell 0_2:43, mg_dof_indices: 78 94 106 117 128 144 156 167 108 118 97 119 158 168 147 169 136 149 161 170 163 171 152 172 123 173 174 
+DEAL:2:3d::proc 2, cell 0_2:44, mg_dof_indices: 125 126 127 128 175 176 177 178 129 130 131 132 179 180 181 182 183 184 185 186 187 188 189 190 141 191 192 
+DEAL:2:3d::proc 2, cell 0_2:45, mg_dof_indices: 126 143 128 144 176 193 178 194 130 145 146 147 180 195 196 197 184 198 186 199 188 200 201 202 153 203 204 
+DEAL:2:3d::proc 2, cell 0_2:46, mg_dof_indices: 127 128 155 156 177 178 205 206 157 158 132 159 207 208 182 209 185 186 210 211 212 213 190 214 165 215 216 
+DEAL:2:3d::proc 2, cell 0_2:47, mg_dof_indices: 128 144 156 167 178 194 206 217 158 168 147 169 208 218 197 219 186 199 211 220 213 221 202 222 173 223 224 
+
