https://gitweb.dealii.org/ - dealii.git/commitdiff
Introduce parallel::shared::Triangulation
author    Denis Davydov <davydden@gmail.com>
          Wed, 12 Aug 2015 11:47:33 +0000 (13:47 +0200)
committer Denis Davydov <davydden@gmail.com>
          Fri, 14 Aug 2015 11:31:01 +0000 (13:31 +0200)
It encapsulates the distribution of cells among processors and in the majority of cases
behaves like its distributed triangulation counterpart.
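
For orientation, a minimal usage sketch of the new class (not part of this
commit; assumes a deal.II build configured with MPI and METIS):

    #include <deal.II/base/mpi.h>
    #include <deal.II/distributed/shared_tria.h>
    #include <deal.II/grid/grid_generator.h>

    int main (int argc, char *argv[])
    {
      dealii::Utilities::MPI::MPI_InitFinalize mpi_init (argc, argv, 1);

      // Every process stores the complete mesh; each cell carries the
      // subdomain id of the process that "owns" it.
      dealii::parallel::shared::Triangulation<2> tria (MPI_COMM_WORLD);
      dealii::GridGenerator::hyper_cube (tria);
      tria.refine_global (3); // repartitions automatically after refinement

      // Inherited from the new parallel::Triangulation base class:
      const unsigned int n_owned = tria.n_locally_owned_active_cells ();
      (void)n_owned;
      return 0;
    }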

31 files changed:
doc/news/changes.h
include/deal.II/distributed/shared_tria.h [new file with mode: 0644]
include/deal.II/distributed/tria.h
include/deal.II/distributed/tria_base.h [new file with mode: 0644]
include/deal.II/dofs/dof_handler_policy.h
include/deal.II/dofs/dof_tools.h
include/deal.II/grid/tria_accessor.templates.h
source/distributed/CMakeLists.txt
source/distributed/shared_tria.cc [new file with mode: 0644]
source/distributed/shared_tria.inst.in [new file with mode: 0644]
source/distributed/tria.cc
source/distributed/tria_base.cc [new file with mode: 0644]
source/distributed/tria_base.inst.in [new file with mode: 0644]
source/dofs/dof_handler.cc
source/dofs/dof_handler_policy.cc
source/dofs/dof_handler_policy.inst.in
source/dofs/dof_renumbering.cc
source/dofs/dof_tools.cc
source/dofs/dof_tools.inst.in
source/hp/dof_handler.cc
tests/CMakeLists.txt
tests/sharedtria/CMakeLists.txt [new file with mode: 0644]
tests/sharedtria/dof_01.cc [new file with mode: 0644]
tests/sharedtria/dof_01.with_metis=true.mpirun=3.output [new file with mode: 0644]
tests/sharedtria/dof_01.with_metis=true.output [new file with mode: 0644]
tests/sharedtria/dof_02.cc [new file with mode: 0644]
tests/sharedtria/dof_02.with_metis=true.mpirun=3.output [new file with mode: 0644]
tests/sharedtria/dof_02.with_metis=true.output [new file with mode: 0644]
tests/sharedtria/tria_01.cc [new file with mode: 0644]
tests/sharedtria/tria_01.with_metis=true.mpirun=3.output [new file with mode: 0644]
tests/sharedtria/tria_01.with_metis=true.output [new file with mode: 0644]

diff --git a/doc/news/changes.h b/doc/news/changes.h
index bd6c4f94fce3eb30e373cb6e004f0dbd1ac8c478..ff44c44573687301b594e7fd21687ad0c12a497f 100644 (file)
@@ -66,6 +66,15 @@ inconvenience this causes.
 
 
 <ol>
+  <li> New: The parallel::shared::Triangulation class extends the
+  Triangulation class to automatically partition the triangulation when
+  run with MPI. Functionality common to parallel::shared::Triangulation
+  and parallel::distributed::Triangulation is grouped in the parent
+  class parallel::Triangulation.
+  <br>
+  (Denis Davydov, 2015/08/14)
+  </li>
+
   <li> New: The online documentation of all functions now includes
   links to the file and line where that function is implemented. Both
   are clickable to provide immediate access to the source code of a
diff --git a/include/deal.II/distributed/shared_tria.h b/include/deal.II/distributed/shared_tria.h
new file mode 100644 (file)
index 0000000..9d8a676
--- /dev/null
@@ -0,0 +1,150 @@
+// ---------------------------------------------------------------------
+// $Id: tria.h 32739 2014-04-08 16:39:47Z denis.davydov $
+//
+// Copyright (C) 2008 - 2013 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#ifndef __deal2__distributed__shared_tria_h
+#define __deal2__distributed__shared_tria_h
+
+
+#include <deal.II/base/config.h>
+#include <deal.II/base/subscriptor.h>
+#include <deal.II/base/smartpointer.h>
+#include <deal.II/base/template_constraints.h>
+#include <deal.II/grid/tria.h>
+
+#include <deal.II/distributed/tria_base.h>
+
+#include <deal.II/base/std_cxx1x/function.h>
+#include <deal.II/base/std_cxx1x/tuple.h>
+
+#include <set>
+#include <vector>
+#include <list>
+#include <utility>
+
+#ifdef DEAL_II_WITH_MPI
+#  include <mpi.h>
+#endif
+
+
+DEAL_II_NAMESPACE_OPEN
+
+template <int, int> class Triangulation;
+
+
+namespace parallel
+{
+
+#ifdef DEAL_II_WITH_MPI
+
+
+  namespace shared
+  {
+
+    /**
+     * This class is an extension of the dealii::Triangulation class that
+     * automatically partitions the triangulation when run with MPI.
+     * In contrast to parallel::distributed::Triangulation, the entire mesh
+     * is stored on each processor. However, cells are labeled according to
+     * the id of the processor which "owns" them. The partitioning happens
+     * automatically upon creation and refinement of the mesh, by calling
+     * dealii::GridTools::partition_triangulation() (which in turn uses METIS).
+     * This enables distributing DoFs among processors and therefore
+     * splitting matrices and vectors across processors.
+     * The usage of this class is demonstrated in step-18.
+     *
+     * @author Denis Davydov, 2015
+     * @ingroup distributed
+     */
+    template <int dim, int spacedim = dim>
+    class Triangulation : public dealii::parallel::Triangulation<dim,spacedim>
+    {
+    public:
+      typedef typename dealii::Triangulation<dim,spacedim>::active_cell_iterator active_cell_iterator;
+      typedef typename dealii::Triangulation<dim,spacedim>::cell_iterator        cell_iterator;
+
+      /**
+       * Constructor.
+       */
+      Triangulation (MPI_Comm mpi_communicator,
+                     const typename dealii::Triangulation<dim,spacedim>::MeshSmoothing smooth_grid =
+                       (dealii::Triangulation<dim,spacedim>::none));
+
+      /**
+       * Destructor.
+       */
+      virtual ~Triangulation ();
+
+      /**
+       * Coarsen and refine the mesh according to the refinement and
+       * coarsening flags currently set.
+       *
+       * This function is equivalent to the one in the dealii::Triangulation
+       * base class, with the addition of calling dealii::GridTools::partition_triangulation() at the end.
+       */
+      virtual void execute_coarsening_and_refinement ();
+
+      /**
+       * Create a triangulation.
+       *
+       * This function also partitions the triangulation based on the
+       * MPI communicator provided to the constructor.
+       */
+      virtual void create_triangulation (const std::vector< Point< spacedim > > &vertices,
+                                         const std::vector< CellData< dim > > &cells,
+                                         const SubCellData &subcelldata);
+
+    };
+  }
+#else
+
+  namespace shared
+  {
+
+    /**
+     * Dummy class the compiler chooses for parallel shared
+     * triangulations if we didn't actually configure deal.II with the
+     * MPI library. The existence of this class allows us to refer
+     * to parallel::shared::Triangulation objects throughout the
+     * library even if MPI is disabled.
+     *
+     * Since the constructor of this class is private, no such objects
+     * can actually be created if we don't have MPI available.
+     */
+    template <int dim, int spacedim = dim>
+    class Triangulation : public dealii::parallel::Triangulation<dim,spacedim>
+    {
+    private:
+      /**
+       * Constructor.
+       */
+      Triangulation ();
+    public:
+
+      /**
+       * Destructor.
+       */
+      virtual ~Triangulation ();
+
+    };
+  }
+
+
+#endif
+}
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
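
(A sketch of the adaptivity path declared above, for illustration only:
tria is assumed to be the parallel::shared::Triangulation<2> from the
sketch near the top of this page.)

    // Flag some cells and refine. execute_coarsening_and_refinement()
    // re-runs GridTools::partition_triangulation() afterwards, so the
    // subdomain ids stay consistent on every process.
    for (dealii::parallel::shared::Triangulation<2>::active_cell_iterator
           cell = tria.begin_active(); cell != tria.end(); ++cell)
      if (cell->center()[0] < 0.5)
        cell->set_refine_flag ();
    tria.execute_coarsening_and_refinement ();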
diff --git a/include/deal.II/distributed/tria.h b/include/deal.II/distributed/tria.h
index 2edf49acffedf8bf861cdbd81edfb3ecd6a8860f..7b24492e16bccee0d5e9745ea0e3e905617f7c6c 100644 (file)
@@ -26,6 +26,8 @@
 #include <deal.II/base/std_cxx11/function.h>
 #include <deal.II/base/std_cxx11/tuple.h>
 
+#include <deal.II/distributed/tria_base.h>
+
 #include <set>
 #include <vector>
 #include <list>
@@ -324,7 +326,7 @@ namespace parallel
      * @ingroup distributed
      */
     template <int dim, int spacedim = dim>
-    class Triangulation : public dealii::Triangulation<dim,spacedim>
+    class Triangulation : public dealii::parallel::Triangulation<dim,spacedim>
     {
     public:
       /**
@@ -618,49 +620,6 @@ namespace parallel
       void
       communicate_locally_moved_vertices (const std::vector<bool> &vertex_locally_moved);
 
-      /**
-       * Return the subdomain id of those cells that are owned by the current
-       * processor. All cells in the triangulation that do not have this
-       * subdomain id are either owned by another processor or have children
-       * that only exist on other processors.
-       */
-      types::subdomain_id locally_owned_subdomain () const;
-
-      /**
-       * Return the number of active cells in the triangulation that are
-       * locally owned, i.e. that have a subdomain_id equal to
-       * locally_owned_subdomain(). Note that there may be more active cells
-       * in the triangulation stored on the present processor, such as for
-       * example ghost cells, or cells further away from the locally owned
-       * block of cells but that are needed to ensure that the triangulation
-       * that stores this processor's set of active cells still remains
-       * balanced with respect to the 2:1 size ratio of adjacent cells.
-       *
-       * As a consequence of the remark above, the result of this function is
-       * always smaller or equal to the result of the function with the same
-       * name in the ::Triangulation base class, which includes the active
-       * ghost and artificial cells (see also
-       * @ref GlossArtificialCell
-       * and
-       * @ref GlossGhostCell).
-       */
-      unsigned int n_locally_owned_active_cells () const;
-
-      /**
-       * Return the sum over all processors of the number of active cells
-       * owned by each processor. This equals the overall number of active
-       * cells in the distributed triangulation.
-       */
-      virtual types::global_dof_index n_global_active_cells () const;
-
-      /**
-       * Returns the global maximum level. This may be bigger than the number
-       * dealii::Triangulation::n_levels() (a function in this class's base
-       * class) returns if the current processor only stores cells in parts of
-       * the domain that are not very refined, but if other processors store
-       * cells in more deeply refined parts of the domain.
-       */
-      virtual unsigned int n_global_levels () const;
 
       /**
        * Returns true if the triangulation has hanging nodes.
@@ -680,20 +639,6 @@ namespace parallel
       virtual
       bool has_hanging_nodes() const;
 
-      /**
-       * Return the number of active cells owned by each of the MPI processes
-       * that contribute to this triangulation. The element of this vector
-       * indexed by locally_owned_subdomain() equals the result of
-       * n_locally_owned_active_cells().
-       */
-      const std::vector<unsigned int> &
-      n_locally_owned_active_cells_per_processor () const;
-
-      /**
-       * Return the MPI communicator used by this triangulation.
-       */
-      MPI_Comm get_communicator () const;
-
       /**
        * Return the local memory consumption in bytes.
        */
@@ -876,43 +821,17 @@ namespace parallel
 
 
     private:
-      /**
-       * MPI communicator to be used for the triangulation. We create a unique
-       * communicator for this class, which is a duplicate of the one passed
-       * to the constructor.
-       */
-      MPI_Comm mpi_communicator;
 
       /**
        * store the Settings.
        */
       Settings settings;
 
-      /**
-       * The subdomain id to be used for the current processor.
-       */
-      types::subdomain_id my_subdomain;
-
       /**
        * A flag that indicates whether the triangulation has actual content.
        */
       bool triangulation_has_content;
 
-      /**
-       * A structure that contains some numbers about the distributed
-       * triangulation.
-       */
-      struct NumberCache
-      {
-        std::vector<unsigned int> n_locally_owned_active_cells;
-        types::global_dof_index   n_global_active_cells;
-        unsigned int              n_global_levels;
-
-        NumberCache();
-      };
-
-      NumberCache number_cache;
-
       /**
        * A data structure that holds the connectivity between trees. Since
        * each tree is rooted in a coarse grid cell, this data structure holds
@@ -1019,12 +938,6 @@ namespace parallel
        */
       void copy_local_forest_to_triangulation ();
 
-
-      /**
-       * Update the number_cache variable after mesh creation or refinement.
-       */
-      void update_number_cache ();
-
       /**
        * Internal function notifying all registered classes to attach their
        * data before repartitioning occurs. Called from
@@ -1052,7 +965,7 @@ namespace parallel
      * all this class does is throw an exception.
      */
     template <int spacedim>
-    class Triangulation<1,spacedim> : public dealii::Triangulation<1,spacedim>
+    class Triangulation<1,spacedim> : public dealii::parallel::Triangulation<1,spacedim>
     {
     public:
       /**
@@ -1066,19 +979,6 @@ namespace parallel
        */
       virtual ~Triangulation ();
 
-      /**
-       * Return the MPI communicator used by this triangulation.
-       */
-      MPI_Comm get_communicator () const;
-
-      /**
-       * Return the sum over all processors of the number of active cells
-       * owned by each processor. This equals the overall number of active
-       * cells in the distributed triangulation.
-       */
-      types::global_dof_index n_global_active_cells () const;
-      virtual unsigned int n_global_levels () const;
-
       /**
        * Returns a permutation vector for the order the coarse cells are
        * handed of to p4est. For example the first element i in this vector
@@ -1124,14 +1024,6 @@ namespace parallel
       void
       communicate_locally_moved_vertices (const std::vector<bool> &vertex_locally_moved);
 
-      /**
-       * Return the subdomain id of those cells that are owned by the current
-       * processor. All cells in the triangulation that do not have this
-       * subdomain id are either owned by another processor or have children
-       * that only exist on other processors.
-       */
-      types::subdomain_id locally_owned_subdomain () const;
-
       /**
        * Dummy arrays. This class isn't usable but the compiler wants to see
        * these variables at a couple places anyway.
@@ -1210,12 +1102,6 @@ namespace parallel
        */
       types::subdomain_id locally_owned_subdomain () const;
 
-      /**
-       * Return the MPI communicator used by this triangulation.
-       */
-#ifdef DEAL_II_WITH_MPI
-      MPI_Comm get_communicator () const;
-#endif
     };
   }
 }
diff --git a/include/deal.II/distributed/tria_base.h b/include/deal.II/distributed/tria_base.h
new file mode 100644 (file)
index 0000000..b5f1ade
--- /dev/null
@@ -0,0 +1,192 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2008 - 2013 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#ifndef __deal2__distributed__tria_base_h
+#define __deal2__distributed__tria_base_h
+
+
+#include <deal.II/base/config.h>
+#include <deal.II/base/subscriptor.h>
+#include <deal.II/base/smartpointer.h>
+#include <deal.II/base/template_constraints.h>
+#include <deal.II/grid/tria.h>
+
+#include <deal.II/base/std_cxx1x/function.h>
+#include <deal.II/base/std_cxx1x/tuple.h>
+
+#include <set>
+#include <vector>
+#include <list>
+#include <utility>
+
+#ifdef DEAL_II_WITH_MPI
+#  include <mpi.h>
+#endif
+
+
+DEAL_II_NAMESPACE_OPEN
+
+template <int, int> class Triangulation;
+
+
+namespace parallel
+{
+  /**
+   * This class describes the interface for all triangulation classes that
+   * work in parallel, namely parallel::distributed::Triangulation
+   * and parallel::shared::Triangulation.
+   */
+  template <int dim, int spacedim = dim>
+  class Triangulation : public dealii::Triangulation<dim,spacedim>
+  {
+  public:
+
+    /**
+     * Constructor.
+     */
+#ifdef DEAL_II_WITH_MPI
+    Triangulation (MPI_Comm mpi_communicator,
+                   const typename dealii::Triangulation<dim,spacedim>::MeshSmoothing smooth_grid = (dealii::Triangulation<dim,spacedim>::none),
+                   const bool check_for_distorted_cells = false);
+#else
+    Triangulation ();
+#endif
+
+    /**
+     * Destructor.
+     */
+    virtual ~Triangulation ();
+
+#ifdef DEAL_II_WITH_MPI
+    /**
+     * Return the MPI communicator used by this triangulation.
+     */
+    virtual MPI_Comm get_communicator () const;
+#endif
+
+    /**
+     * Implementation of the same function as in the base class.
+     */
+    virtual void copy_triangulation (const dealii::Triangulation<dim, spacedim> &old_tria);
+
+    /**
+     * Return the number of active cells owned by each of the MPI processes
+     * that contribute to this triangulation. The element of this vector
+     * indexed by locally_owned_subdomain() equals the result of
+     * n_locally_owned_active_cells().
+     */
+    const std::vector<unsigned int> &
+    n_locally_owned_active_cells_per_processor () const;
+
+
+    /**
+     * Return the number of active cells in the triangulation that are
+     * locally owned, i.e. that have a subdomain_id equal to
+     * locally_owned_subdomain(). Note that there may be more active cells
+     * in the triangulation stored on the present processor, such as for
+     * example ghost cells, or cells further away from the locally owned
+     * block of cells but that are needed to ensure that the triangulation
+     * that stores this processor's set of active cells still remains
+     * balanced with respect to the 2:1 size ratio of adjacent cells.
+     *
+     * As a consequence of the remark above, the result of this function is
+     * always smaller or equal to the result of the function with the same
+     * name in the ::Triangulation base class, which includes the active
+     * ghost and artificial cells (see also
+     * @ref GlossArtificialCell
+     * and
+     * @ref GlossGhostCell).
+     */
+    unsigned int n_locally_owned_active_cells () const;
+
+    /**
+     * Return the sum over all processors of the number of active cells
+     * owned by each processor. This equals the overall number of active
+     * cells in the triangulation.
+     */
+    virtual types::global_dof_index n_global_active_cells () const;
+
+    /**
+     * Return the local memory consumption in bytes.
+     */
+    virtual std::size_t memory_consumption () const;
+
+
+    /**
+     * Returns the global maximum level. This may be bigger than the number
+     * dealii::Triangulation::n_levels() (a function in this class's base
+     * class) returns if the current processor only stores cells in parts of
+     * the domain that are not very refined, but if other processors store
+     * cells in more deeply refined parts of the domain.
+     */
+    virtual unsigned int n_global_levels () const;
+
+    /**
+     * Return the subdomain id of those cells that are owned by the current
+     * processor. All cells in the triangulation that do not have this
+     * subdomain id are either owned by another processor or have children
+     * that only exist on other processors.
+     */
+    types::subdomain_id locally_owned_subdomain () const;
+
+
+  protected:
+#ifdef DEAL_II_WITH_MPI
+    /**
+     * MPI communicator to be used for the triangulation. We create a unique
+     * communicator for this class, which is a duplicate of the one passed
+     * to the constructor.
+     */
+    MPI_Comm mpi_communicator;
+#endif
+
+    /**
+     * The subdomain id to be used for the current processor.
+     */
+    types::subdomain_id my_subdomain;
+
+    /**
+     * The total number of subdomains.
+     */
+    types::subdomain_id n_subdomains;
+
+    /**
+     * A structure that contains some numbers about the distributed
+     * triangulation.
+     */
+    struct NumberCache
+    {
+      std::vector<unsigned int> n_locally_owned_active_cells;
+      types::global_dof_index   n_global_active_cells;
+      unsigned int              n_global_levels;
+
+      NumberCache();
+    };
+
+    NumberCache number_cache;
+
+    /**
+     * Update the number_cache variable after mesh creation or refinement.
+     */
+    void update_number_cache ();
+
+
+  };
+
+} // namespace parallel
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
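
(Sketch: the point of the common base class is that generic code can query
parallel information without knowing which derived triangulation it was
handed, mirroring the dynamic_cast pattern used elsewhere in this commit.
The helper name below is hypothetical.)

    // Return the triangulation's communicator if it is any parallel
    // triangulation (shared or distributed), and MPI_COMM_SELF otherwise.
    template <int dim, int spacedim>
    MPI_Comm
    communicator_or_self (const dealii::Triangulation<dim,spacedim> &tria)
    {
      if (const dealii::parallel::Triangulation<dim,spacedim> *p =
            dynamic_cast<const dealii::parallel::Triangulation<dim,spacedim> *>(&tria))
        return p->get_communicator ();
      return MPI_COMM_SELF;
    }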
diff --git a/include/deal.II/dofs/dof_handler_policy.h b/include/deal.II/dofs/dof_handler_policy.h
index e44663844b801492485ac8c59a87cc6f9b227413..51a96465e1d44c0c20a0a911cb1f91f57737e236 100644 (file)
@@ -21,6 +21,8 @@
 #include <deal.II/base/config.h>
 #include <deal.II/base/exceptions.h>
 #include <deal.II/base/template_constraints.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/dofs/dof_renumbering.h>
 
 #include <vector>
 #include <map>
@@ -61,11 +63,19 @@ namespace internal
         virtual ~PolicyBase ();
 
         /**
-         * Distribute degrees of freedom on the object given as last argument.
+         * Distribute degrees of freedom on
+         * the object given as the first argument.
+         * A reference to the NumberCache of the
+         * DoFHandler object has to be passed as the
+         * second argument. It is modified so that
+         * DoFHandler related functions work properly
+         * when called from within the policy classes.
+         * The updated NumberCache is written to that argument.
          */
         virtual
-        NumberCache
-        distribute_dofs (dealii::DoFHandler<dim,spacedim> &dof_handler) const = 0;
+        void
+        distribute_dofs (dealii::DoFHandler<dim,spacedim> &dof_handler,
+                         NumberCache &number_cache) const = 0;
 
         /**
          * Distribute the multigrid dofs on each level
@@ -76,12 +86,20 @@ namespace internal
                             std::vector<NumberCache> &number_caches) const = 0;
 
         /**
-         * Renumber degrees of freedom as specified by the first argument.
+         * Renumber degrees of freedom as
+         * specified by the first argument.
+         * A reference to the NumberCache of the
+         * DoFHandler object has to be passed as the
+         * second argument. It is modified so that
+         * DoFHandler related functions work properly
+         * when called from within the policy classes.
+         * The updated NumberCache is written to that argument.
          */
         virtual
-        NumberCache
+        void
         renumber_dofs (const std::vector<types::global_dof_index> &new_numbers,
-                       dealii::DoFHandler<dim,spacedim> &dof_handler) const = 0;
+                       dealii::DoFHandler<dim,spacedim> &dof_handler,
+                       NumberCache &number_cache) const = 0;
       };
 
 
@@ -97,8 +115,9 @@ namespace internal
          * Distribute degrees of freedom on the object given as last argument.
          */
         virtual
-        NumberCache
-        distribute_dofs (dealii::DoFHandler<dim,spacedim> &dof_handler) const;
+        void
+        distribute_dofs (dealii::DoFHandler<dim,spacedim> &dof_handler,
+                         NumberCache &number_cache) const;
 
         /**
          * Distribute multigrid DoFs.
@@ -112,9 +131,62 @@ namespace internal
          * Renumber degrees of freedom as specified by the first argument.
          */
         virtual
-        NumberCache
+        void
         renumber_dofs (const std::vector<types::global_dof_index>  &new_numbers,
-                       dealii::DoFHandler<dim,spacedim> &dof_handler) const;
+                       dealii::DoFHandler<dim,spacedim> &dof_handler,
+                       NumberCache &number_cache) const;
+      };
+
+      /**
+       * This class implements the
+       * policy for operations when
+       * we use a
+       * parallel::shared::Triangulation
+       * object.
+       */
+      template <int dim, int spacedim>
+      class ParallelShared : public Sequential<dim,spacedim>
+      {
+      public:
+
+        /**
+         * Distribute degrees of freedom on
+         * the object given as the first argument.
+         *
+         * On distribution, DoFs are renumbered subdomain-wise and
+         * number_cache.n_locally_owned_dofs_per_processor[i] and
+         * number_cache.locally_owned_dofs are updated consistently.
+         */
+        virtual
+        void
+        distribute_dofs (dealii::DoFHandler<dim,spacedim> &dof_handler,
+                         NumberCache &number_cache) const;
+
+        /**
+         * This function is not yet implemented.
+         */
+        virtual
+        void
+        distribute_mg_dofs (dealii::DoFHandler<dim,spacedim> &dof_handler,
+                            std::vector<NumberCache> &number_caches) const;
+
+        /**
+         * Renumber degrees of freedom as
+         * specified by the first argument.
+         *
+         * The input argument @p new_numbers may either have as many entries
+         * as there are global degrees of freedom (i.e. dof_handler.n_dofs())
+         * or dof_handler.locally_owned_dofs().n_elements().
+         * Therefore it can be used with renumbering functions
+         * implemented for the parallel::distributed case.
+         */
+        virtual
+        void
+        renumber_dofs (const std::vector<types::global_dof_index>  &new_numbers,
+                       dealii::DoFHandler<dim,spacedim> &dof_handler,
+                       NumberCache &number_cache) const;
+      private:
+
       };
 
 
@@ -130,8 +202,9 @@ namespace internal
          * Distribute degrees of freedom on the object given as last argument.
          */
         virtual
-        NumberCache
-        distribute_dofs (dealii::DoFHandler<dim,spacedim> &dof_handler) const;
+        void
+        distribute_dofs (dealii::DoFHandler<dim,spacedim> &dof_handler,
+                         NumberCache &number_cache) const;
 
         /**
          * Distribute multigrid DoFs.
@@ -145,9 +218,10 @@ namespace internal
          * Renumber degrees of freedom as specified by the first argument.
          */
         virtual
-        NumberCache
+        void
         renumber_dofs (const std::vector<types::global_dof_index>  &new_numbers,
-                       dealii::DoFHandler<dim,spacedim> &dof_handler) const;
+                       dealii::DoFHandler<dim,spacedim> &dof_handler,
+                       NumberCache &number_cache) const;
       };
     }
   }
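
(Sketch of the new calling convention for the policy classes, an internal
API; number_cache plays the role of the cache stored inside the DoFHandler,
and the fully qualified NumberCache name below is an assumption.)

    // Before this patch:  NumberCache nc = policy->distribute_dofs (dof_handler);
    // After this patch, the policy writes into the cache in place, so
    // DoFHandler related queries already work while the policy is running.
    dealii::internal::DoFHandler::NumberCache number_cache;
    policy->distribute_dofs (dof_handler, number_cache);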
diff --git a/include/deal.II/dofs/dof_tools.h b/include/deal.II/dofs/dof_tools.h
index 38ca2fab94c9abcff30fe0f1ee1a12db8836bb8d..7e6c33c19c9929cd19b521aeb0b27feab19d122b 100644 (file)
@@ -1437,6 +1437,22 @@ namespace DoFTools
   extract_locally_relevant_dofs (const DH &dof_handler,
                                  IndexSet &dof_set);
 
+  /**
+   * For each processor, determine the set of locally owned degrees of
+   * freedom as an IndexSet. This function then returns a vector of index
+   * sets, where the vector has size equal to the number of MPI processes
+   * that participate in the DoF handler object.
+   *
+   * The function can be used for objects of type dealii::Triangulation or
+   * parallel::shared::Triangulation. It will not work for objects of type
+   * parallel::distributed::Triangulation since, for such triangulations,
+   * not all cells are available locally, so nothing definitive can be said
+   * about the degrees of freedom active on other processors' owned cells.
+   */
+  template <class DH>
+  std::vector<IndexSet>
+  locally_owned_dofs_per_subdomain (const DH   &dof_handler);
+
   /**
    * For each DoF, return in the output array to which subdomain (as given by
    * the <tt>cell->subdomain_id()</tt> function) it belongs. The output array
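
(Usage sketch for the new function, with names as declared above;
dof_handler is assumed to be built on a parallel::shared::Triangulation
named triangulation.)

    // One IndexSet per participating MPI process; entry i holds the
    // degrees of freedom owned by subdomain i.
    const std::vector<dealii::IndexSet> owned =
      dealii::DoFTools::locally_owned_dofs_per_subdomain (dof_handler);
    const dealii::IndexSet &my_dofs =
      owned[triangulation.locally_owned_subdomain ()];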
diff --git a/include/deal.II/grid/tria_accessor.templates.h b/include/deal.II/grid/tria_accessor.templates.h
index 0ebb75b84073798dc8401b12dc606b04f358b691..41b226da8db2986ca3ea03106ab1a7eb36439cec 100644 (file)
@@ -27,6 +27,7 @@
 #include <deal.II/grid/tria_accessor.h>
 #include <deal.II/grid/tria_iterator.templates.h>
 #include <deal.II/distributed/tria.h>
+#include <deal.II/distributed/shared_tria.h>
 
 #include <cmath>
 
@@ -38,6 +39,11 @@ namespace parallel
   {
     template <int, int> class Triangulation;
   }
+
+  namespace shared
+  {
+    template <int, int> class Triangulation;
+  }
 }
 
 
@@ -3030,19 +3036,20 @@ CellAccessor<dim,spacedim>::is_locally_owned () const
 {
   Assert (this->active(),
           ExcMessage("is_locally_owned() can only be called on active cells!"));
-#ifndef DEAL_II_WITH_P4EST
+#ifndef DEAL_II_WITH_MPI
   return true;
 #else
   if (is_artificial())
     return false;
 
-  const parallel::distributed::Triangulation<dim,spacedim> *pdt
-    = dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim> *>(this->tria);
+  const parallel::Triangulation<dim,spacedim> *pt
+    = dynamic_cast<const parallel::Triangulation<dim,spacedim> *>(this->tria);
 
-  if (pdt == 0)
+  if (pt == 0)
     return true;
   else
-    return (this->subdomain_id() == pdt->locally_owned_subdomain());
+    return (this->subdomain_id() == pt->locally_owned_subdomain());
+
 #endif
 }
 
@@ -3052,16 +3059,19 @@ inline
 bool
 CellAccessor<dim,spacedim>::is_locally_owned_on_level () const
 {
-#ifndef DEAL_II_WITH_P4EST
+
+#ifndef DEAL_II_WITH_MPI
   return true;
 #else
-  const parallel::distributed::Triangulation<dim,spacedim> *pdt
-    = dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim> *>(this->tria);
 
-  if (pdt == 0)
+  const parallel::Triangulation<dim,spacedim> *pt
+    = dynamic_cast<const parallel::Triangulation<dim,spacedim> *>(this->tria);
+
+  if (pt == 0)
     return true;
   else
-    return (this->level_subdomain_id() == pdt->locally_owned_subdomain());
+    return (this->level_subdomain_id() == pt->locally_owned_subdomain());
+
 #endif
 }
 
@@ -3073,19 +3083,21 @@ CellAccessor<dim,spacedim>::is_ghost () const
 {
   Assert (this->active(),
           ExcMessage("is_ghost() can only be called on active cells!"));
-#ifndef DEAL_II_WITH_P4EST
-  return false;
-#else
   if (is_artificial() || this->has_children())
     return false;
 
-  const parallel::distributed::Triangulation<dim,spacedim> *pdt
-    = dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim> *>(this->tria);
+#ifndef DEAL_II_WITH_MPI
+  return false;
+#else
+
+  const parallel::Triangulation<dim,spacedim> *pt
+    = dynamic_cast<const parallel::Triangulation<dim,spacedim> *>(this->tria);
 
-  if (pdt == 0)
+  if (pt == 0)
     return false;
   else
-    return (this->subdomain_id() != pdt->locally_owned_subdomain());
+    return (this->subdomain_id() != pt->locally_owned_subdomain());
+
 #endif
 }
 
@@ -3098,10 +3110,18 @@ CellAccessor<dim,spacedim>::is_artificial () const
 {
   Assert (this->active(),
           ExcMessage("is_artificial() can only be called on active cells!"));
-#ifndef DEAL_II_WITH_P4EST
+#ifndef DEAL_II_WITH_MPI
   return false;
 #else
-  return this->subdomain_id() == numbers::artificial_subdomain_id;
+
+  const parallel::Triangulation<dim,spacedim> *pt
+    = dynamic_cast<const parallel::Triangulation<dim,spacedim> *>(this->tria);
+
+  if (pt == 0)
+    return false;
+  else
+    return this->subdomain_id() == numbers::artificial_subdomain_id;
+
 #endif
 }
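
(With the casts above generalized from parallel::distributed::Triangulation
to the parallel::Triangulation base class, the ownership predicates now
behave consistently for shared triangulations as well. A sketch, where tria
may be a serial, shared, or distributed triangulation:)

    // For a serial mesh every active cell reports as locally owned;
    // otherwise ownership follows the subdomain ids.
    unsigned int n_owned = 0;
    for (dealii::Triangulation<2>::active_cell_iterator
           cell = tria.begin_active(); cell != tria.end(); ++cell)
      if (cell->is_locally_owned ())
        ++n_owned;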
 
diff --git a/source/distributed/CMakeLists.txt b/source/distributed/CMakeLists.txt
index d7cb0558252f174d1500073f320d75e90da559f5..f12666ca2961098446fe364b45ac3d817233d0d3 100644 (file)
@@ -19,12 +19,16 @@ SET(_src
   grid_refinement.cc
   solution_transfer.cc
   tria.cc
+  tria_base.cc
+  shared_tria.cc
   )
 
 SET(_inst
   grid_refinement.inst.in
   solution_transfer.inst.in
   tria.inst.in
+  shared_tria.inst.in
+  tria_base.inst.in
   )
 
 FILE(GLOB _header
diff --git a/source/distributed/shared_tria.cc b/source/distributed/shared_tria.cc
new file mode 100644 (file)
index 0000000..6503518
--- /dev/null
@@ -0,0 +1,128 @@
+// ---------------------------------------------------------------------
+// $Id: tria.cc 32807 2014-04-22 15:01:57Z heister $
+//
+// Copyright (C) 2008 - 2013 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/memory_consumption.h>
+#include <deal.II/base/logstream.h>
+#include <deal.II/lac/sparsity_tools.h>
+#include <deal.II/lac/sparsity_pattern.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/grid_tools.h>
+#include <deal.II/distributed/tria.h>
+
+
+#include <algorithm>
+#include <numeric>
+#include <iostream>
+#include <fstream>
+
+
+DEAL_II_NAMESPACE_OPEN
+
+#ifdef DEAL_II_WITH_MPI
+namespace parallel
+{
+  namespace shared
+  {
+
+    template <int dim, int spacedim>
+    Triangulation<dim,spacedim>::Triangulation (MPI_Comm mpi_communicator,
+                                                const typename dealii::Triangulation<dim,spacedim>::MeshSmoothing smooth_grid):
+      dealii::parallel::Triangulation<dim,spacedim>(mpi_communicator,smooth_grid,false)
+    {
+    }
+
+
+    template <int dim, int spacedim>
+    Triangulation<dim,spacedim>::~Triangulation ()
+    {
+
+    }
+
+    template <int dim, int spacedim>
+    void
+    Triangulation<dim,spacedim>::execute_coarsening_and_refinement ()
+    {
+      dealii::Triangulation<dim,spacedim>::execute_coarsening_and_refinement ();
+      dealii::GridTools::partition_triangulation (this->n_subdomains, *this);
+      this->update_number_cache ();
+    }
+
+    template <int dim, int spacedim>
+    void
+    Triangulation<dim,spacedim>::create_triangulation (const std::vector< Point< spacedim > > &vertices,
+                                                       const std::vector< CellData< dim > > &cells,
+                                                       const SubCellData &subcelldata)
+    {
+      try
+        {
+          dealii::Triangulation<dim,spacedim>::
+          create_triangulation (vertices, cells, subcelldata);
+        }
+      catch (const typename dealii::Triangulation<dim,spacedim>::DistortedCellList &)
+        {
+          // the underlying triangulation should not be checking for distorted
+          // cells
+          AssertThrow (false, ExcInternalError());
+        }
+      dealii::GridTools::partition_triangulation (this->n_subdomains, *this);
+      this->update_number_cache ();
+    }
+
+  }
+}
+
+#else
+
+namespace parallel
+{
+  namespace shared
+  {
+    template <int dim, int spacedim>
+    Triangulation<dim,spacedim>::Triangulation ()
+    {
+      Assert (false, ExcNotImplemented());
+    }
+
+
+    template <int dim, int spacedim>
+    Triangulation<dim,spacedim>::~Triangulation ()
+    {
+      Assert (false, ExcNotImplemented());
+    }
+
+    template <int dim, int spacedim>
+    types::subdomain_id
+    Triangulation<dim,spacedim>::locally_owned_subdomain () const
+    {
+      Assert (false, ExcNotImplemented());
+      return 0;
+    }
+
+  }
+}
+
+
+#endif
+
+
+/*-------------- Explicit Instantiations -------------------------------*/
+#include "shared_tria.inst"
+
+DEAL_II_NAMESPACE_CLOSE
diff --git a/source/distributed/shared_tria.inst.in b/source/distributed/shared_tria.inst.in
new file mode 100644 (file)
index 0000000..f6a863a
--- /dev/null
@@ -0,0 +1,36 @@
+// ---------------------------------------------------------------------
+// $Id: tria.inst.in 32674 2014-03-20 16:57:24Z denis.davydov $
+//
+// Copyright (C) 2010 - 2013 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+for (deal_II_dimension : DIMENSIONS)
+  {
+    namespace parallel
+    \{
+      namespace shared
+      \{
+        template class Triangulation<deal_II_dimension>;
+#       if deal_II_dimension < 3
+        template class Triangulation<deal_II_dimension, deal_II_dimension+1>;
+#       endif
+#       if deal_II_dimension < 2
+        template class Triangulation<deal_II_dimension, deal_II_dimension+2>;
+#       endif
+      \}
+    \}
+
+  }
+
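(For reference, a sketch of what the stanza above expands to for
deal_II_dimension == 2; the actual code is generated by deal.II's
instantiation machinery, so this is illustrative only.)

    namespace parallel
    {
      namespace shared
      {
        template class Triangulation<2>;
        template class Triangulation<2,3>;
      }
    }
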
diff --git a/source/distributed/tria.cc b/source/distributed/tria.cc
index 327926340d579318477ea4073b1a2761d7872eb8..c25994341066e6070ae7ce6598ed735590a1352e 100644 (file)
@@ -2162,16 +2162,6 @@ namespace parallel
     /* ---------------------- class Triangulation<dim,spacedim> ------------------------------ */
 
 
-
-    template <int dim, int spacedim>
-    Triangulation<dim,spacedim>::NumberCache::NumberCache()
-      :
-      n_global_active_cells(0),
-      n_global_levels(0)
-    {}
-
-
-
     template <int dim, int spacedim>
     Triangulation<dim,spacedim>::
     Triangulation (MPI_Comm mpi_communicator,
@@ -2179,13 +2169,11 @@ namespace parallel
                    const Settings settings_)
       :
       // do not check for distorted cells
-      dealii::Triangulation<dim,spacedim>
-      (smooth_grid,
+      dealii::parallel::Triangulation<dim,spacedim>
+      (mpi_communicator,
+       smooth_grid,
        false),
-      mpi_communicator (Utilities::MPI::
-                        duplicate_communicator(mpi_communicator)),
       settings(settings_),
-      my_subdomain (Utilities::MPI::this_mpi_process (this->mpi_communicator)),
       triangulation_has_content (false),
       connectivity (0),
       parallel_forest (0),
@@ -2200,9 +2188,6 @@ namespace parallel
       dealii::internal::p4est::InitFinalize::do_initialize ();
 
       parallel_ghost = 0;
-
-      number_cache.n_locally_owned_active_cells
-      .resize (Utilities::MPI::n_mpi_processes (mpi_communicator));
     }
 
 
@@ -2217,9 +2202,6 @@ namespace parallel
       Assert (connectivity == 0,    ExcInternalError());
       Assert (parallel_forest == 0, ExcInternalError());
       Assert (refinement_in_progress == false, ExcInternalError());
-
-      // get rid of the unique communicator used here again
-      MPI_Comm_free (&mpi_communicator);
     }
 
 
@@ -2264,7 +2246,7 @@ namespace parallel
           AssertThrow (false, ExcInternalError());
         }
 
-      update_number_cache ();
+      this->update_number_cache ();
     }
 
 
@@ -2605,14 +2587,14 @@ namespace parallel
 
       dealii::Triangulation<dim,spacedim>::clear ();
 
-      update_number_cache ();
+      this->update_number_cache ();
     }
 
     template <int dim, int spacedim>
     bool
     Triangulation<dim,spacedim>::has_hanging_nodes () const
     {
-      if (n_global_levels()<=1)
+      if (this->n_global_levels()<=1)
         return false; // can not have hanging nodes without refined cells
 
       // if there are any active cells with level less than n_global_levels()-1, then
@@ -2622,8 +2604,8 @@ namespace parallel
       // The problem is that we cannot just ask for the first active cell, but
       // instead need to filter over locally owned cells.
       bool have_coarser_cell = false;
-      for (typename Triangulation<dim, spacedim>::active_cell_iterator cell = this->begin_active(n_global_levels()-2);
-           cell != this->end(n_global_levels()-2);
+      for (typename Triangulation<dim, spacedim>::active_cell_iterator cell = this->begin_active(this->n_global_levels()-2);
+           cell != this->end(this->n_global_levels()-2);
            ++cell)
         if (cell->is_locally_owned())
           {
@@ -2632,7 +2614,7 @@ namespace parallel
           }
 
       // return true if at least one process has a coarser cell
-      return 0<Utilities::MPI::max(have_coarser_cell?1:0, mpi_communicator);
+      return 0<Utilities::MPI::max(have_coarser_cell?1:0, this->mpi_communicator);
     }
 
 
@@ -2678,13 +2660,13 @@ namespace parallel
 
       Assert(this->n_cells()>0, ExcMessage("Can not save() an empty Triangulation."));
 
-      if (my_subdomain==0)
+      if (this->my_subdomain==0)
         {
           std::string fname=std::string(filename)+".info";
           std::ofstream f(fname.c_str());
           f << "version nproc attached_bytes n_attached_objs n_coarse_cells" << std::endl
             << 2 << " "
-            << Utilities::MPI::n_mpi_processes (mpi_communicator) << " "
+            << Utilities::MPI::n_mpi_processes (this->mpi_communicator) << " "
             << real_data_size << " "
             << attached_data_pack_callbacks.size() << " "
             << this->n_cells(0)
@@ -2746,7 +2728,7 @@ namespace parallel
       Assert(this->n_cells(0) == n_coarse_cells, ExcMessage("Number of coarse cells differ!"));
 #if DEAL_II_P4EST_VERSION_GTE(0,3,4,3)
 #else
-      AssertThrow(numcpus <= Utilities::MPI::n_mpi_processes (mpi_communicator),
+      AssertThrow(numcpus <= Utilities::MPI::n_mpi_processes (this->mpi_communicator),
                   ExcMessage("parallel::distributed::Triangulation::load() only supports loading "
                              "saved data with a greater or equal number of processes than were used to "
                              "save() when using p4est 0.3.4.2."));
@@ -2758,7 +2740,7 @@ namespace parallel
 
 #if DEAL_II_P4EST_VERSION_GTE(0,3,4,3)
       parallel_forest = dealii::internal::p4est::functions<dim>::load_ext (
-                          filename, mpi_communicator,
+                          filename, this->mpi_communicator,
                           attached_size, attached_size>0,
                           autopartition, 0,
                           this,
@@ -2766,12 +2748,12 @@ namespace parallel
 #else
       (void)autopartition;
       parallel_forest = dealii::internal::p4est::functions<dim>::load (
-                          filename, mpi_communicator,
+                          filename, this->mpi_communicator,
                           attached_size, attached_size>0,
                           this,
                           &connectivity);
 #endif
-      if (numcpus != Utilities::MPI::n_mpi_processes (mpi_communicator))
+      if (numcpus != Utilities::MPI::n_mpi_processes (this->mpi_communicator))
         // We are changing the number of CPUs so we need to repartition.
         // Note that p4est actually distributes the cells between the changed
         // number of CPUs and so everything works without this call, but
@@ -2795,7 +2777,7 @@ namespace parallel
           AssertThrow (false, ExcInternalError());
         }
 
-      update_number_cache ();
+      this->update_number_cache ();
     }
 
 
@@ -2884,7 +2866,7 @@ namespace parallel
       // now create a forest out of the connectivity data structure
       parallel_forest
         = dealii::internal::p4est::functions<2>::
-          new_forest (mpi_communicator,
+          new_forest (this->mpi_communicator,
                       connectivity,
                       /* minimum initial number of quadrants per tree */ 0,
                       /* minimum level of upfront refinement */ 0,
@@ -2955,7 +2937,7 @@ namespace parallel
       // now create a forest out of the connectivity data structure
       parallel_forest
         = dealii::internal::p4est::functions<2>::
-          new_forest (mpi_communicator,
+          new_forest (this->mpi_communicator,
                       connectivity,
                       /* minimum initial number of quadrants per tree */ 0,
                       /* minimum level of upfront refinement */ 0,
@@ -3107,7 +3089,7 @@ namespace parallel
       // now create a forest out of the connectivity data structure
       parallel_forest
         = dealii::internal::p4est::functions<3>::
-          new_forest (mpi_communicator,
+          new_forest (this->mpi_communicator,
                       connectivity,
                       /* minimum initial number of quadrants per tree */ 0,
                       /* minimum level of upfront refinement */ 0,
@@ -3239,7 +3221,7 @@ namespace parallel
                   match_tree_recursively<dim,spacedim> (*tree, cell,
                                                         p4est_coarse_cell,
                                                         *parallel_forest,
-                                                        my_subdomain);
+                                                        this->my_subdomain);
                 }
             }
 
@@ -3310,7 +3292,7 @@ namespace parallel
            cell != this->end();
            ++cell)
         {
-          if (cell->subdomain_id() != my_subdomain
+          if (cell->subdomain_id() != this->my_subdomain
               &&
               cell->subdomain_id() != numbers::artificial_subdomain_id)
             ++num_ghosts;
@@ -3377,7 +3359,7 @@ namespace parallel
               determine_level_subdomain_id_recursively<dim,spacedim> (*tree, tree_index, cell,
                                                                       p4est_coarse_cell,
                                                                       *parallel_forest,
-                                                                      my_subdomain,
+                                                                      this->my_subdomain,
                                                                       marked_vertices);
             }
 
@@ -3446,7 +3428,7 @@ namespace parallel
         const unsigned int total_local_cells = this->n_active_cells();
         (void)total_local_cells;
 
-        if (Utilities::MPI::n_mpi_processes (mpi_communicator) == 1)
+        if (Utilities::MPI::n_mpi_processes (this->mpi_communicator) == 1)
           Assert (static_cast<unsigned int>(parallel_forest->local_num_quadrants) ==
                   total_local_cells,
                   ExcInternalError())
@@ -3461,7 +3443,7 @@ namespace parallel
              cell = this->begin_active();
              cell != this->end(); ++cell)
           {
-            if (cell->subdomain_id() == my_subdomain)
+            if (cell->subdomain_id() == this->my_subdomain)
               ++n_owned;
           }
 
@@ -3531,7 +3513,7 @@ namespace parallel
       RefineAndCoarsenList<dim,spacedim>
       refine_and_coarsen_list (*this,
                                p4est_tree_to_coarse_cell_permutation,
-                               my_subdomain);
+                               this->my_subdomain);
 
       // copy refine and coarsen flags into p4est and execute the refinement
       // and coarsening. this uses the refine_and_coarsen_list just built,
@@ -3616,7 +3598,7 @@ namespace parallel
 
       refinement_in_progress = false;
 
-      update_number_cache ();
+      this->update_number_cache ();
     }
 
     template <int dim, int spacedim>
@@ -3665,7 +3647,7 @@ namespace parallel
           PartitionWeights<dim,spacedim> partition_weights (*this,
                                                             cell_weights,
                                                             p4est_tree_to_coarse_cell_permutation,
-                                                            my_subdomain);
+                                                            this->my_subdomain);
           parallel_forest->user_pointer = &partition_weights;
 
           dealii::internal::p4est::functions<dim>::
@@ -3691,65 +3673,11 @@ namespace parallel
       refinement_in_progress = false;
 
       // update how many cells, edges, etc, we store locally
-      update_number_cache ();
-    }
-
-
-    template <int dim, int spacedim>
-    void
-    Triangulation<dim,spacedim>::update_number_cache ()
-    {
-      Assert (number_cache.n_locally_owned_active_cells.size()
-              ==
-              Utilities::MPI::n_mpi_processes (mpi_communicator),
-              ExcInternalError());
-
-      std::fill (number_cache.n_locally_owned_active_cells.begin(),
-                 number_cache.n_locally_owned_active_cells.end(),
-                 0);
-
-      if (this->n_levels() == 0)
-        {
-          // Skip communication done below if we do not have any cells
-          // (meaning the Triangulation is empty on all processors). This will
-          // happen when called from the destructor of Triangulation, which
-          // can get called during exception handling causing a hang in this
-          // function.
-          number_cache.n_global_active_cells = 0;
-          number_cache.n_global_levels = 0;
-          return;
-        }
-
-      if (this->n_levels() > 0)
-        for (typename Triangulation<dim,spacedim>::active_cell_iterator
-             cell = this->begin_active();
-             cell != this->end(); ++cell)
-          if (cell->subdomain_id() == my_subdomain)
-            ++number_cache.n_locally_owned_active_cells[my_subdomain];
-
-      unsigned int send_value
-        = number_cache.n_locally_owned_active_cells[my_subdomain];
-      MPI_Allgather (&send_value,
-                     1,
-                     MPI_UNSIGNED,
-                     &number_cache.n_locally_owned_active_cells[0],
-                     1,
-                     MPI_UNSIGNED,
-                     mpi_communicator);
-
-      number_cache.n_global_active_cells
-        = std::accumulate (number_cache.n_locally_owned_active_cells.begin(),
-                           number_cache.n_locally_owned_active_cells.end(),
-                           /* ensure sum is computed with correct data type:*/
-                           static_cast<types::global_dof_index>(0));
-      number_cache.n_global_levels = Utilities::MPI::max(this->n_levels(), mpi_communicator);
+      this->update_number_cache ();
     }
 
 
 
-
-
-
     template <int dim, int spacedim>
     void
     Triangulation<dim,spacedim>::
@@ -3900,54 +3828,6 @@ namespace parallel
              ExcInternalError());
     }
 
-
-
-    template <int dim, int spacedim>
-    types::subdomain_id
-    Triangulation<dim,spacedim>::locally_owned_subdomain () const
-    {
-      Assert (dim > 1, ExcNotImplemented());
-      return my_subdomain;
-    }
-
-
-
-    template <int dim, int spacedim>
-    unsigned int
-    Triangulation<dim,spacedim>::n_locally_owned_active_cells () const
-    {
-      return number_cache.n_locally_owned_active_cells[my_subdomain];
-    }
-
-
-
-    template <int dim, int spacedim>
-    types::global_dof_index
-    Triangulation<dim,spacedim>::n_global_active_cells () const
-    {
-      return number_cache.n_global_active_cells;
-    }
-
-
-
-    template <int dim, int spacedim>
-    unsigned int
-    Triangulation<dim,spacedim>::n_global_levels () const
-    {
-      return number_cache.n_global_levels;
-    }
-
-
-
-    template <int dim, int spacedim>
-    const std::vector<unsigned int> &
-    Triangulation<dim,spacedim>::n_locally_owned_active_cells_per_processor () const
-    {
-      return number_cache.n_locally_owned_active_cells;
-    }
-
-
-
     template <int dim, int spacedim>
     unsigned int
     Triangulation<dim,spacedim>::
@@ -4335,14 +4215,6 @@ namespace parallel
     }
 
 
-    template <int dim, int spacedim>
-    MPI_Comm
-    Triangulation<dim,spacedim>::get_communicator () const
-    {
-      return mpi_communicator;
-    }
-
-
     template<int dim, int spacedim>
     void
     Triangulation<dim,spacedim>::add_periodicity
@@ -4459,7 +4331,7 @@ namespace parallel
       dealii::internal::p4est::functions<dim>::destroy (parallel_forest);
       parallel_forest
         = dealii::internal::p4est::functions<dim>::
-          new_forest (mpi_communicator,
+          new_forest (this->mpi_communicator,
                       connectivity,
                       /* minimum initial number of quadrants per tree */ 0,
                       /* minimum level of upfront refinement */ 0,
@@ -4492,13 +4364,8 @@ namespace parallel
     Triangulation<dim,spacedim>::memory_consumption () const
     {
       std::size_t mem=
-        this->dealii::Triangulation<dim,spacedim>::memory_consumption()
-        + MemoryConsumption::memory_consumption(mpi_communicator)
-        + MemoryConsumption::memory_consumption(my_subdomain)
+        this->dealii::parallel::Triangulation<dim,spacedim>::memory_consumption()
         + MemoryConsumption::memory_consumption(triangulation_has_content)
-        + MemoryConsumption::memory_consumption(number_cache.n_locally_owned_active_cells)
-        + MemoryConsumption::memory_consumption(number_cache.n_global_active_cells)
-        + MemoryConsumption::memory_consumption(number_cache.n_global_levels)
         + MemoryConsumption::memory_consumption(connectivity)
         + MemoryConsumption::memory_consumption(parallel_forest)
         + MemoryConsumption::memory_consumption(refinement_in_progress)
@@ -4557,7 +4424,8 @@ namespace parallel
                   ExcMessage ("Parallel distributed triangulations can only "
                               "be copied, if no refinement is in progress!"));
 
-          mpi_communicator = Utilities::MPI::duplicate_communicator (old_tria_x->get_communicator ());
+          // duplicate MPI communicator, stored in the base class
+          dealii::parallel::Triangulation<dim,spacedim>::copy_triangulation (old_tria);
 
           coarse_cell_to_p4est_tree_permutation = old_tria_x->coarse_cell_to_p4est_tree_permutation;
           p4est_tree_to_coarse_cell_permutation = old_tria_x->p4est_tree_to_coarse_cell_permutation;
@@ -4584,7 +4452,7 @@ namespace parallel
           AssertThrow (false, ExcInternalError());
         }
 
-      update_number_cache ();
+      this->update_number_cache ();
     }
 
 
@@ -4667,6 +4535,10 @@ namespace parallel
 
     template <int spacedim>
     Triangulation<1,spacedim>::Triangulation (MPI_Comm)
+      :
+      dealii::parallel::Triangulation<1,spacedim>(MPI_COMM_WORLD,
+                                                  typename dealii::Triangulation<1,spacedim>::MeshSmoothing(),
+                                                  false)
     {
       Assert (false, ExcNotImplemented());
     }
@@ -4689,40 +4561,6 @@ namespace parallel
     }
 
 
-    template <int spacedim>
-    types::subdomain_id
-    Triangulation<1,spacedim>::locally_owned_subdomain () const
-    {
-      Assert (false, ExcNotImplemented());
-      return 0;
-    }
-
-
-    template <int spacedim>
-    types::global_dof_index
-    Triangulation<1,spacedim>::n_global_active_cells () const
-    {
-      Assert (false, ExcNotImplemented());
-      return 0;
-    }
-
-
-    template <int spacedim>
-    unsigned int
-    Triangulation<1,spacedim>::n_global_levels () const
-    {
-      Assert (false, ExcNotImplemented());
-      return 0;
-    }
-
-
-    template <int spacedim>
-    MPI_Comm
-    Triangulation<1,spacedim>::get_communicator () const
-    {
-      return MPI_COMM_WORLD;
-    }
-
     template <int spacedim>
     const std::vector<types::global_dof_index> &
     Triangulation<1,spacedim>::get_p4est_tree_to_coarse_cell_permutation() const
diff --git a/source/distributed/tria_base.cc b/source/distributed/tria_base.cc
new file mode 100644 (file)
index 0000000..39fd50d
--- /dev/null
@@ -0,0 +1,222 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2008 - 2013 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/memory_consumption.h>
+#include <deal.II/base/logstream.h>
+#include <deal.II/lac/sparsity_tools.h>
+#include <deal.II/lac/sparsity_pattern.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/grid_tools.h>
+#include <deal.II/distributed/tria_base.h>
+
+
+#include <algorithm>
+#include <numeric>
+#include <iostream>
+#include <fstream>
+
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace parallel
+{
+
+#ifdef DEAL_II_WITH_MPI
+  template <int dim, int spacedim>
+  Triangulation<dim,spacedim>::Triangulation (MPI_Comm mpi_communicator,
+                                              const typename dealii::Triangulation<dim,spacedim>::MeshSmoothing smooth_grid,
+                                              const bool check_for_distorted_cells)
+    :
+    dealii::Triangulation<dim,spacedim>(smooth_grid,check_for_distorted_cells),
+    mpi_communicator (Utilities::MPI::
+                      duplicate_communicator(mpi_communicator)),
+    my_subdomain (Utilities::MPI::this_mpi_process (this->mpi_communicator)),
+    n_subdomains(Utilities::MPI::n_mpi_processes(mpi_communicator))
+  {
+    number_cache.n_locally_owned_active_cells.resize (n_subdomains);
+  }
+
+  template <int dim, int spacedim>
+  void
+  Triangulation<dim,spacedim>::copy_triangulation (const dealii::Triangulation<dim, spacedim> &old_tria)
+  {
+    if (const dealii::parallel::Triangulation<dim,spacedim> *
+        old_tria_x = dynamic_cast<const dealii::parallel::Triangulation<dim,spacedim> *>(&old_tria))
+      {
+        mpi_communicator = Utilities::MPI::duplicate_communicator (old_tria_x->get_communicator ());
+      }
+  }
+
+#else
+  template <int dim, int spacedim>
+  Triangulation<dim,spacedim>::Triangulation()
+  {
+    Assert (false, ExcNotImplemented());
+  }
+
+  template <int dim, int spacedim>
+  void
+  Triangulation<dim,spacedim>::copy_triangulation (const dealii::Triangulation<dim, spacedim> &old_tria)
+  {
+    Assert (false, ExcNotImplemented());
+  }
+
+
+#endif
+
+  template <int dim, int spacedim>
+  std::size_t
+  Triangulation<dim,spacedim>::memory_consumption() const
+  {
+    std::size_t mem =
+      this->dealii::Triangulation<dim,spacedim>::memory_consumption()
+      + MemoryConsumption::memory_consumption(mpi_communicator)
+      + MemoryConsumption::memory_consumption(my_subdomain)
+      + MemoryConsumption::memory_consumption(number_cache.n_locally_owned_active_cells)
+      + MemoryConsumption::memory_consumption(number_cache.n_global_active_cells)
+      + MemoryConsumption::memory_consumption(number_cache.n_global_levels);
+    return mem;
+  }
+
+  template <int dim, int spacedim>
+  Triangulation<dim,spacedim>::~Triangulation ()
+  {
+    // free the duplicate communicator created in the constructor
+    MPI_Comm_free (&this->mpi_communicator);
+  }
+
+  template <int dim, int spacedim>
+  Triangulation<dim,spacedim>::NumberCache::NumberCache()
+    :
+    n_global_active_cells(0),
+    n_global_levels(0)
+  {}
+
+  template <int dim, int spacedim>
+  unsigned int
+  Triangulation<dim,spacedim>::n_locally_owned_active_cells () const
+  {
+    return number_cache.n_locally_owned_active_cells[my_subdomain];
+  }
+
+  template <int dim, int spacedim>
+  unsigned int
+  Triangulation<dim,spacedim>::n_global_levels () const
+  {
+    return number_cache.n_global_levels;
+  }
+
+  template <int dim, int spacedim>
+  types::global_dof_index
+  Triangulation<dim,spacedim>::n_global_active_cells () const
+  {
+    return number_cache.n_global_active_cells;
+  }
+
+  template <int dim, int spacedim>
+  const std::vector<unsigned int> &
+  Triangulation<dim,spacedim>::n_locally_owned_active_cells_per_processor () const
+  {
+    return number_cache.n_locally_owned_active_cells;
+  }
+
+#ifdef DEAL_II_WITH_MPI
+  template <int dim, int spacedim>
+  MPI_Comm
+  Triangulation<dim,spacedim>::get_communicator () const
+  {
+    return mpi_communicator;
+  }
+
+  template <int dim, int spacedim>
+  void
+  Triangulation<dim,spacedim>::update_number_cache ()
+  {
+    Assert (number_cache.n_locally_owned_active_cells.size()
+            ==
+            Utilities::MPI::n_mpi_processes (this->mpi_communicator),
+            ExcInternalError());
+
+    std::fill (number_cache.n_locally_owned_active_cells.begin(),
+               number_cache.n_locally_owned_active_cells.end(),
+               0);
+
+    if (this->n_levels() == 0)
+      {
+        // Skip communication done below if we do not have any cells
+        // (meaning the Triangulation is empty on all processors). This will
+        // happen when called from the destructor of Triangulation, which
+        // can get called during exception handling causing a hang in this
+        // function.
+        number_cache.n_global_active_cells = 0;
+        number_cache.n_global_levels = 0;
+        return;
+      }
+
+    // the early return above guarantees that there is at least one level
+    for (typename Triangulation<dim,spacedim>::active_cell_iterator
+         cell = this->begin_active();
+         cell != this->end(); ++cell)
+      if (cell->subdomain_id() == my_subdomain)
+        ++number_cache.n_locally_owned_active_cells[my_subdomain];
+
+    unsigned int send_value
+      = number_cache.n_locally_owned_active_cells[my_subdomain];
+    MPI_Allgather (&send_value,
+                   1,
+                   MPI_UNSIGNED,
+                   &number_cache.n_locally_owned_active_cells[0],
+                   1,
+                   MPI_UNSIGNED,
+                   this->mpi_communicator);
+
+    number_cache.n_global_active_cells
+      = std::accumulate (number_cache.n_locally_owned_active_cells.begin(),
+                         number_cache.n_locally_owned_active_cells.end(),
+                         /* ensure the sum is computed with the correct data type: */
+                         static_cast<types::global_dof_index>(0));
+    number_cache.n_global_levels = Utilities::MPI::max(this->n_levels(), this->mpi_communicator);
+  }
+#else
+  template <int dim, int spacedim>
+  void
+  Triangulation<dim,spacedim>::update_number_cache ()
+  {
+    Assert (false, ExcNotImplemented());
+  }
+
+#endif
+
+  template <int dim, int spacedim>
+  types::subdomain_id
+  Triangulation<dim,spacedim>::locally_owned_subdomain () const
+  {
+    Assert (dim > 1, ExcNotImplemented());
+    return my_subdomain;
+  }
+
+
+}
+
+
+/*-------------- Explicit Instantiations -------------------------------*/
+#include "tria_base.inst"
+
+DEAL_II_NAMESPACE_CLOSE
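
For reference, the cell-counting logic in update_number_cache() boils down to a single MPI_Allgather of each rank's locally owned cell count, followed by an accumulation into a wider integer type. The following is a minimal standalone sketch of that pattern in plain MPI; the variable names and the dummy count are illustrative and not part of the deal.II API:

  #include <mpi.h>

  #include <cstdint>
  #include <numeric>
  #include <vector>

  int main (int argc, char **argv)
  {
    MPI_Init (&argc, &argv);
    int n_ranks, my_rank;
    MPI_Comm_size (MPI_COMM_WORLD, &n_ranks);
    MPI_Comm_rank (MPI_COMM_WORLD, &my_rank);

    // stand-in for the number of cells this rank owns
    unsigned int n_locally_owned = 100 + my_rank;

    // one slot per rank, filled collectively as in the patch
    std::vector<unsigned int> owned_per_rank (n_ranks);
    MPI_Allgather (&n_locally_owned, 1, MPI_UNSIGNED,
                   owned_per_rank.data (), 1, MPI_UNSIGNED,
                   MPI_COMM_WORLD);

    // accumulate with a wide type, mirroring the
    // static_cast<types::global_dof_index>(0) in the patch
    const std::uint64_t n_global =
      std::accumulate (owned_per_rank.begin (), owned_per_rank.end (),
                       static_cast<std::uint64_t> (0));
    (void) n_global;

    MPI_Finalize ();
    return 0;
  }
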
diff --git a/source/distributed/tria_base.inst.in b/source/distributed/tria_base.inst.in
new file mode 100644 (file)
index 0000000..93ac41e
--- /dev/null
@@ -0,0 +1,33 @@
+// ---------------------------------------------------------------------
+// $Id: tria.inst.in 32674 2014-03-20 16:57:24Z denis.davydov $
+//
+// Copyright (C) 2010 - 2013 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+for (deal_II_dimension : DIMENSIONS)
+  {
+    namespace parallel
+    \{
+        template class Triangulation<deal_II_dimension>;
+#       if deal_II_dimension < 3
+        template class Triangulation<deal_II_dimension, deal_II_dimension+1>;
+#       endif
+#       if deal_II_dimension < 2
+        template class Triangulation<deal_II_dimension, deal_II_dimension+2>;
+#       endif
+    \}
+
+  }
+
index 26059240e7bdd20ff3dbe19bf259eceeae711a69..342b7b524b3d0b1d9bfd13d5c233869bf727e0d6 100644 (file)
@@ -72,6 +72,8 @@ namespace internal
       policy_name = "Policy::Sequential<";
     else if (dynamic_cast<const typename dealii::internal::DoFHandler::Policy::ParallelDistributed<dim,spacedim>*>(&policy))
       policy_name = "Policy::ParallelDistributed<";
+    else if (dynamic_cast<const typename dealii::internal::DoFHandler::Policy::ParallelShared<dim,spacedim>*>(&policy))
+      policy_name = "Policy::ParallelShared<";
     else
       AssertThrow(false, ExcNotImplemented());
     policy_name += Utilities::int_to_string(dim)+
@@ -762,9 +764,13 @@ DoFHandler<dim,spacedim>::DoFHandler (const Triangulation<dim,spacedim> &tria)
   // decide whether we need a
   // sequential or a parallel
   // distributed policy
-  if (dynamic_cast<const parallel::distributed::Triangulation< dim, spacedim >*>
+  if (dynamic_cast<const parallel::shared::Triangulation< dim, spacedim>*>
       (&tria)
-      == 0)
+      != 0)
+    policy.reset (new internal::DoFHandler::Policy::ParallelShared<dim,spacedim>());
+  else if (dynamic_cast<const parallel::distributed::Triangulation< dim, spacedim >*>
+           (&tria)
+           == 0)
     policy.reset (new internal::DoFHandler::Policy::Sequential<dim,spacedim>());
   else
     policy.reset (new internal::DoFHandler::Policy::ParallelDistributed<dim,spacedim>());
@@ -802,9 +808,13 @@ DoFHandler<dim,spacedim>::initialize(
   // decide whether we need a
   // sequential or a parallel
   // distributed policy
-  if (dynamic_cast<const parallel::distributed::Triangulation< dim, spacedim >*>
+  if (dynamic_cast<const parallel::shared::Triangulation< dim, spacedim>*>
       (&t)
-      == 0)
+      != 0)
+    policy.reset (new internal::DoFHandler::Policy::ParallelShared<dim,spacedim>());
+  else if (dynamic_cast<const parallel::distributed::Triangulation< dim, spacedim >*>
+           (&t)
+           == 0)
     policy.reset (new internal::DoFHandler::Policy::Sequential<dim,spacedim>());
   else
     policy.reset (new internal::DoFHandler::Policy::ParallelDistributed<dim,spacedim>());
@@ -1225,7 +1235,7 @@ void DoFHandler<dim,spacedim>::distribute_dofs (const FiniteElement<dim,spacedim
   internal::DoFHandler::Implementation::reserve_space (*this);
 
   // hand things off to the policy
-  number_cache = policy->distribute_dofs (*this);
+  policy->distribute_dofs (*this,number_cache);
 
   // initialize the block info object
   // only if this is a sequential
@@ -1344,7 +1354,7 @@ DoFHandler<dim,spacedim>::renumber_dofs (const std::vector<types::global_dof_ind
               ExcMessage ("New DoF index is not less than the total number of dofs."));
 #endif
 
-  number_cache = policy->renumber_dofs (new_numbers, *this);
+  policy->renumber_dofs (new_numbers, *this,number_cache);
 }
 
 
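From the user's perspective the policy selection above is fully automatic: it is driven by the dynamic type of the triangulation handed to the DoFHandler. A short usage sketch, consistent with the tests added below (it assumes MPI has already been initialized, e.g. via Utilities::MPI::MPI_InitFinalize):

  #include <deal.II/distributed/shared_tria.h>
  #include <deal.II/dofs/dof_handler.h>
  #include <deal.II/fe/fe_q.h>
  #include <deal.II/grid/grid_generator.h>

  using namespace dealii;

  template <int dim>
  void setup ()
  {
    // a shared triangulation: every rank stores the whole mesh, but
    // cells carry subdomain ids assigned by the automatic partitioner
    parallel::shared::Triangulation<dim> triangulation (MPI_COMM_WORLD);
    GridGenerator::hyper_cube (triangulation);
    triangulation.refine_global (2);

    FE_Q<dim> fe (1);

    // the constructor recognizes the shared triangulation and installs
    // the ParallelShared policy selected in the dynamic_cast chain above
    DoFHandler<dim> dof_handler (triangulation);
    dof_handler.distribute_dofs (fe);
  }
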
index 1ec1ebb66875887fbc24b46737df36e973d4e70d..d573f83d47b45816a854b936afdaee03c15d8759 100644 (file)
@@ -869,9 +869,10 @@ namespace internal
 
 
       template <int dim, int spacedim>
-      NumberCache
+      void
       Sequential<dim,spacedim>::
-      distribute_dofs (DoFHandler<dim,spacedim> &dof_handler) const
+      distribute_dofs (DoFHandler<dim,spacedim> &dof_handler,
+                       NumberCache &number_cache_current ) const
       {
         const types::global_dof_index n_dofs =
           Implementation::distribute_dofs (0,
@@ -897,7 +898,7 @@ namespace internal
         number_cache.locally_owned_dofs_per_processor
           = std::vector<IndexSet> (1,
                                    number_cache.locally_owned_dofs);
-        return number_cache;
+        number_cache_current = number_cache;
       }
 
 
@@ -928,10 +929,11 @@ namespace internal
       }
 
       template <int dim, int spacedim>
-      NumberCache
+      void
       Sequential<dim,spacedim>::
       renumber_dofs (const std::vector<types::global_dof_index> &new_numbers,
-                     dealii::DoFHandler<dim,spacedim> &dof_handler) const
+                     dealii::DoFHandler<dim,spacedim> &dof_handler,
+                     NumberCache &number_cache_current) const
       {
         Implementation::renumber_dofs (new_numbers, IndexSet(0),
                                        dof_handler, true);
@@ -959,10 +961,158 @@ namespace internal
         number_cache.locally_owned_dofs_per_processor
           = std::vector<IndexSet> (1,
                                    number_cache.locally_owned_dofs);
-        return number_cache;
+        number_cache_current = number_cache;
+      }
+
+      /* --------------------- class ParallelShared ---------------- */
+
+      template <int dim, int spacedim>
+      void
+      ParallelShared<dim,spacedim>::
+      distribute_dofs (DoFHandler<dim,spacedim> &dof_handler,
+                       NumberCache &number_cache) const
+      {
+        Sequential<dim,spacedim>::distribute_dofs (dof_handler,number_cache);
+        DoFRenumbering::subdomain_wise (dof_handler);
+        number_cache.locally_owned_dofs_per_processor = DoFTools::locally_owned_dofs_per_subdomain (dof_handler);
+        number_cache.locally_owned_dofs = number_cache.locally_owned_dofs_per_processor[dof_handler.get_tria().locally_owned_subdomain()];
+        number_cache.n_locally_owned_dofs_per_processor.resize (number_cache.locally_owned_dofs_per_processor.size());
+        for (unsigned int i = 0; i < number_cache.n_locally_owned_dofs_per_processor.size(); i++)
+          number_cache.n_locally_owned_dofs_per_processor[i] = number_cache.locally_owned_dofs_per_processor[i].n_elements();
+        number_cache.n_locally_owned_dofs = number_cache.n_locally_owned_dofs_per_processor[dof_handler.get_tria().locally_owned_subdomain()];
       }
 
+      template <int dim, int spacedim>
+      void
+      ParallelShared<dim,spacedim>::
+      distribute_mg_dofs (DoFHandler<dim,spacedim> &dof_handler,
+                          std::vector<NumberCache> &number_caches) const
+      {
+        // first, call the sequential function to distribute dofs
+        Sequential<dim,spacedim>::distribute_mg_dofs (dof_handler, number_caches);
+        // now we need to update the number cache.
+        // This part is not yet implemented.
+        AssertThrow(false,ExcNotImplemented());
+      }
+
+      template <int dim, int spacedim>
+      void
+      ParallelShared<dim,spacedim>::
+      renumber_dofs (const std::vector<types::global_dof_index> &new_numbers,
+                     dealii::DoFHandler<dim,spacedim> &dof_handler,
+                     NumberCache &number_cache) const
+      {
+
+#ifndef DEAL_II_WITH_MPI
+        (void)dof_handler;
+        Assert (false, ExcNotImplemented());
+
+#else
+        std::vector<types::global_dof_index> global_gathered_numbers (dof_handler.n_dofs (), 0);
+        // since distribute_dofs() calls DoFRenumbering::subdomain_wise(),
+        // we have to accept sequential-like input (one entry per global DoF);
+        // distributed-like input (one entry per locally owned DoF), as produced
+        // for example by component_wise renumbering, is supported as well.
+        if (new_numbers.size () == dof_handler.n_dofs ())
+          {
+            global_gathered_numbers = new_numbers;
+          }
+        else
+          {
+            Assert(new_numbers.size() == dof_handler.locally_owned_dofs().n_elements(),
+                   ExcInternalError());
+            const parallel::shared::Triangulation<dim, spacedim> *tr =
+              (dynamic_cast<const parallel::shared::Triangulation<dim, spacedim>*> (&dof_handler.get_tria ()));
+            Assert(tr != 0, ExcInternalError());
+            const unsigned int n_cpu = Utilities::MPI::n_mpi_processes (tr->get_communicator ());
+            const unsigned int this_process =
+              Utilities::MPI::this_mpi_process (tr->get_communicator ());
+            std::vector<types::global_dof_index> gathered_new_numbers (dof_handler.n_dofs (), 0);
+            Assert(this_process == dof_handler.get_tria ().locally_owned_subdomain (),
+                   ExcInternalError());
+
+            // gather the new numbers from all processors into one vector
+            {
+              std::vector<types::global_dof_index> new_numbers_copy (new_numbers);
+              // displs: entry i specifies the displacement (relative to the
+              // receive buffer) at which to place the incoming data from
+              // process i.
+              // rcounts: entry i is the number of elements to be received
+              // from process i.
+              std::vector<int> displs(n_cpu),
+                  rcounts(n_cpu);
+              types::global_dof_index shift = 0;
+              // set rcounts based on the local number of new indices:
+              int cur_count = new_numbers_copy.size ();
+              MPI_Allgather (&cur_count,  1, MPI_INT,
+                             &rcounts[0], 1, MPI_INT,
+                             tr->get_communicator ());
+
+              for (unsigned int i = 0; i < n_cpu; i++)
+                {
+                  displs[i]  = shift;
+                  shift     += rcounts[i];
+                }
+              Assert(((int)new_numbers_copy.size()) == rcounts[this_process],
+                     ExcInternalError());
+              MPI_Allgatherv (&new_numbers_copy[0],     new_numbers_copy.size (),
+                              DEAL_II_DOF_INDEX_MPI_TYPE,
+                              &gathered_new_numbers[0], &rcounts[0],
+                              &displs[0],
+                              DEAL_II_DOF_INDEX_MPI_TYPE,
+                              tr->get_communicator ());
+            }
 
+            // scatter the gathered numbers into their global positions,
+            // following the current locally_owned_dofs_per_processor IndexSets
+            types::global_dof_index shift = 0;
+            // flag_1 and flag_2 are used to verify that the map between
+            // old and new DoF indices is one-to-one.
+            std::vector<unsigned int> flag_1 (dof_handler.n_dofs (), 0),
+                flag_2 (dof_handler.n_dofs (), 0);
+            for (unsigned int i = 0; i < n_cpu; i++)
+              {
+                const IndexSet &iset =
+                  number_cache.locally_owned_dofs_per_processor[i];
+                for (types::global_dof_index ind = 0;
+                     ind < iset.n_elements (); ind++)
+                  {
+                    const types::global_dof_index target = iset.nth_index_in_set (ind);
+                    const types::global_dof_index value  = gathered_new_numbers[shift + ind];
+                    Assert(target < dof_handler.n_dofs(), ExcInternalError());
+                    Assert(value  < dof_handler.n_dofs(), ExcInternalError());
+                    global_gathered_numbers[target] = value;
+                    flag_1[target]++;
+                    flag_2[value]++;
+                  }
+                shift += iset.n_elements ();
+              }
+
+            Assert(*std::max_element(flag_1.begin(), flag_1.end()) == 1,
+                   ExcInternalError());
+            Assert(*std::min_element(flag_1.begin(), flag_1.end()) == 1,
+                   ExcInternalError());
+            Assert((*std::max_element(flag_2.begin(), flag_2.end())) == 1,
+                   ExcInternalError());
+            Assert((*std::min_element(flag_2.begin(), flag_2.end())) == 1,
+                   ExcInternalError());
+          }
+        Sequential<dim, spacedim>::renumber_dofs (global_gathered_numbers, dof_handler, number_cache);
+        // correct number_cache:
+        number_cache.locally_owned_dofs_per_processor =
+          DoFTools::locally_owned_dofs_per_subdomain (dof_handler);
+        number_cache.locally_owned_dofs =
+          number_cache.locally_owned_dofs_per_processor[dof_handler.get_tria ().locally_owned_subdomain ()];
+        // the sequential renumbering above leaves a vector of size 1 here;
+        // correct this:
+        number_cache.n_locally_owned_dofs_per_processor.resize(number_cache.locally_owned_dofs_per_processor.size());
+        for (unsigned int i = 0;
+             i < number_cache.n_locally_owned_dofs_per_processor.size (); i++)
+          number_cache.n_locally_owned_dofs_per_processor[i] = number_cache.locally_owned_dofs_per_processor[i].n_elements ();
+
+        number_cache.n_locally_owned_dofs =
+          number_cache.n_locally_owned_dofs_per_processor[dof_handler.get_tria ().locally_owned_subdomain ()];
+#endif
+      }
 
       /* --------------------- class ParallelDistributed ---------------- */
 
@@ -1929,9 +2079,10 @@ namespace internal
 
 
       template <int dim, int spacedim>
-      NumberCache
+      void
       ParallelDistributed<dim, spacedim>::
-      distribute_dofs (DoFHandler<dim,spacedim> &dof_handler) const
+      distribute_dofs (DoFHandler<dim,spacedim> &dof_handler,
+                       NumberCache &number_cache_current) const
       {
         NumberCache number_cache;
 
@@ -2145,7 +2296,7 @@ namespace internal
 #endif // DEBUG
 #endif // DEAL_II_WITH_P4EST
 
-        return number_cache;
+        number_cache_current = number_cache;
       }
 
 
@@ -2396,10 +2547,11 @@ namespace internal
 
 
       template <int dim, int spacedim>
-      NumberCache
+      void
       ParallelDistributed<dim, spacedim>::
       renumber_dofs (const std::vector<dealii::types::global_dof_index> &new_numbers,
-                     dealii::DoFHandler<dim,spacedim> &dof_handler) const
+                     dealii::DoFHandler<dim,spacedim> &dof_handler,
+                     NumberCache &number_cache_current) const
       {
         (void)new_numbers;
         (void)dof_handler;
@@ -2628,7 +2780,7 @@ namespace internal
         }
 #endif
 
-        return number_cache;
+        number_cache_current = number_cache;
       }
     }
   }
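
The gather step at the heart of ParallelShared::renumber_dofs() is a textbook variable-count MPI_Allgatherv: first an MPI_Allgather of the per-rank counts, then the displacement computation, then the gather itself. A standalone sketch of just that step in plain MPI, with illustrative names:

  #include <mpi.h>

  #include <vector>

  std::vector<unsigned long long>
  gather_new_numbers (const std::vector<unsigned long long> &local_numbers,
                      MPI_Comm comm)
  {
    int n_ranks;
    MPI_Comm_size (comm, &n_ranks);

    // rcounts[i]: how many entries rank i contributes;
    // displs[i]: offset in the receive buffer where they land
    std::vector<int> rcounts (n_ranks), displs (n_ranks);
    int cur_count = static_cast<int> (local_numbers.size ());
    MPI_Allgather (&cur_count, 1, MPI_INT,
                   rcounts.data (), 1, MPI_INT, comm);

    int shift = 0;
    for (int i = 0; i < n_ranks; ++i)
      {
        displs[i] = shift;
        shift += rcounts[i];
      }

    // (pre-MPI-3 implementations may require a const_cast on the send buffer)
    std::vector<unsigned long long> gathered (shift);
    MPI_Allgatherv (local_numbers.data (), cur_count,
                    MPI_UNSIGNED_LONG_LONG,
                    gathered.data (), rcounts.data (), displs.data (),
                    MPI_UNSIGNED_LONG_LONG, comm);
    return gathered;
  }
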
index 09f4d7f91d0562bf110a91ddccfaf414cf1260b2..68db7aaa3506aec25a1d52ac024e5f402678f66a 100644 (file)
@@ -24,17 +24,20 @@ namespace internal
     \{
       template class PolicyBase<deal_II_dimension,deal_II_dimension>;
       template class Sequential<deal_II_dimension,deal_II_dimension>;
+      template class ParallelShared<deal_II_dimension,deal_II_dimension>;
       template class ParallelDistributed<deal_II_dimension,deal_II_dimension>;
 
 #if deal_II_dimension==1 || deal_II_dimension==2
       template class PolicyBase<deal_II_dimension,deal_II_dimension+1>;
       template class Sequential<deal_II_dimension,deal_II_dimension+1>;
+      template class ParallelShared<deal_II_dimension,deal_II_dimension+1>;
       template class ParallelDistributed<deal_II_dimension,deal_II_dimension+1>;
 #endif
 
 #if deal_II_dimension==3
       template class PolicyBase<1,3>;
       template class Sequential<1,3>;
+      template class ParallelShared<1,3>;
       template class ParallelDistributed<1,3>;
 #endif
     \}
index 3e81ab2e655489ccae827916ff991b5ad40f705b..cdd4d2d4d29466bee7b03c5152a819ed0aeb8826 100644 (file)
@@ -747,11 +747,11 @@ namespace DoFRenumbering
     const unsigned int n_buckets = fe_collection.n_components();
     std::vector<types::global_dof_index> shifts(n_buckets);
 
-    if (const parallel::distributed::Triangulation<dim,spacedim> *tria
-        = (dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim>*>
+    if (const parallel::Triangulation<dim,spacedim> *tria
+        = (dynamic_cast<const parallel::Triangulation<dim,spacedim>*>
            (&start->get_dof_handler().get_tria())))
       {
-#ifdef DEAL_II_WITH_P4EST
+#ifdef DEAL_II_WITH_MPI
         std::vector<types::global_dof_index> local_dof_count(n_buckets);
 
         for (unsigned int c=0; c<n_buckets; ++c)
@@ -1034,11 +1034,11 @@ namespace DoFRenumbering
     const unsigned int n_buckets = fe_collection.n_blocks();
     std::vector<types::global_dof_index> shifts(n_buckets);
 
-    if (const parallel::distributed::Triangulation<dim,spacedim> *tria
-        = (dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim>*>
+    if (const parallel::Triangulation<dim,spacedim> *tria
+        = (dynamic_cast<const parallel::Triangulation<dim,spacedim>*>
            (&start->get_dof_handler().get_tria())))
       {
-#ifdef DEAL_II_WITH_P4EST
+#ifdef DEAL_II_WITH_MPI
         std::vector<types::global_dof_index> local_dof_count(n_buckets);
 
         for (unsigned int c=0; c<n_buckets; ++c)
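
The change above relies on the new common base class: both parallel::shared::Triangulation and parallel::distributed::Triangulation now derive from parallel::Triangulation, so a single dynamic_cast covers both. A minimal sketch of the idiom (the free function is hypothetical; the patch applies the same test inline inside DoFRenumbering::compute_component_wise and compute_block_wise):

  #include <deal.II/distributed/tria_base.h>
  #include <deal.II/grid/tria.h>

  using namespace dealii;

  // returns true for both shared and distributed triangulations,
  // false for a plain sequential Triangulation
  template <int dim, int spacedim>
  bool is_parallel_triangulation (const Triangulation<dim,spacedim> &tria)
  {
    return dynamic_cast<const parallel::Triangulation<dim,spacedim>*>
           (&tria) != 0;
  }
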
index fd120cd65d973a6ae3a99f6c9c395e994cace98f..6e859e4ec1e7dd7ee082f991ab5ded20ba3cc958 100644 (file)
@@ -1073,7 +1073,54 @@ namespace DoFTools
       active_fe_indices[cell->active_cell_index()] = cell->active_fe_index();
   }
 
+  template <class DH>
+  std::vector<IndexSet>
+  locally_owned_dofs_per_subdomain (const DH  &dof_handler)
+  {
+    // get_subdomain_association() may break ties between subdomains
+    // non-deterministically (essentially a coin flip), so call it only
+    // once and reuse the result.
+    std::vector< dealii::types::subdomain_id > subdomain_association (dof_handler.n_dofs ());
+    dealii::DoFTools::get_subdomain_association (dof_handler, subdomain_association);
+
+    const unsigned int n_subdomains = 1 + (*std::max_element (subdomain_association.begin (),
+                                                              subdomain_association.end ()   ));
+
+    std::vector<dealii::IndexSet> index_sets (n_subdomains,dealii::IndexSet(dof_handler.n_dofs()));
+
+    // loop over subdomain_association and populate IndexSet when a
+    // change in subdomain ID is found
+    dealii::types::global_dof_index i_min          = 0;
+    dealii::types::subdomain_id     this_subdomain = subdomain_association[0];
+
+    for (dealii::types::global_dof_index index = 1;
+         index < subdomain_association.size (); ++index)
+      {
+        // found a DoF associated with a different subdomain
+        if (subdomain_association[index] != this_subdomain)
+          {
+            index_sets[this_subdomain].add_range (i_min, index);
+            i_min = index;
+            this_subdomain = subdomain_association[index];
+          }
+      }
 
+    // the last range consists of the final element only
+    if (i_min == subdomain_association.size () - 1)
+      {
+        index_sets[this_subdomain].add_index (i_min);
+      }
+
+    // otherwise the last range contains more than one element
+    else
+      {
+        index_sets[this_subdomain].add_range (
+          i_min, subdomain_association.size ());
+      }
+
+    for (unsigned int i = 0; i < n_subdomains; i++)
+      index_sets[i].compress ();
+
+    return index_sets;
+  }
 
   template <class DH>
   void
@@ -1093,6 +1140,9 @@ namespace DoFTools
            ExcDimensionMismatch(subdomain_association.size(),
                                 dof_handler.n_dofs()));
 
+    Assert(dof_handler.n_dofs() > 0,
+           ExcMessage("Number of DoF is not positive. "
+                      "This could happen when the function is called before NumberCache is written."));
     // preset all values by an invalid value
     std::fill_n (subdomain_association.begin(), dof_handler.n_dofs(),
                  numbers::invalid_subdomain_id);
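
The IndexSets returned by the new locally_owned_dofs_per_subdomain() function partition [0, n_dofs()) without overlap; the dof_01 test below checks exactly this invariant. A condensed sketch of that check, assuming a DoFHandler whose DoFs have already been distributed:

  #include <deal.II/base/exceptions.h>
  #include <deal.II/base/index_set.h>
  #include <deal.II/dofs/dof_handler.h>
  #include <deal.II/dofs/dof_tools.h>

  using namespace dealii;

  template <int dim>
  void check_partition (const DoFHandler<dim> &dof_handler)
  {
    const std::vector<IndexSet> owned =
      DoFTools::locally_owned_dofs_per_subdomain (dof_handler);

    IndexSet all (dof_handler.n_dofs ());
    for (unsigned int i = 0; i < owned.size (); ++i)
      {
        // no DoF may be claimed by two subdomains
        Assert ((all & owned[i]).n_elements () == 0, ExcInternalError ());
        all.add_indices (owned[i]);
      }

    // together, the subdomains own every DoF
    Assert (all == complete_index_set (dof_handler.n_dofs ()),
            ExcInternalError ());
  }
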
index 53824eba45d3703c0ed816056dc9566cfb70ced1..0ac9e83efbf49f3ba0e115539f0de1a3934444b8 100644 (file)
@@ -269,7 +269,15 @@ void
 DoFTools::get_subdomain_association<hp::DoFHandler<deal_II_dimension> >
 (const hp::DoFHandler<deal_II_dimension> &dof_handler,
  std::vector<types::subdomain_id>           &subdomain_association);
-
+template
+std::vector<IndexSet>
+DoFTools::locally_owned_dofs_per_subdomain<DoFHandler<deal_II_dimension> >
+(const DoFHandler<deal_II_dimension> &dof_handler);
+template
+std::vector<IndexSet>
+DoFTools::locally_owned_dofs_per_subdomain<hp::DoFHandler<deal_II_dimension> >
+(const hp::DoFHandler<deal_II_dimension> &dof_handler);
 
 template
 unsigned int
@@ -362,6 +370,15 @@ void
 DoFTools::get_subdomain_association<hp::DoFHandler<deal_II_dimension,deal_II_dimension+1> >
 (const hp::DoFHandler<deal_II_dimension,deal_II_dimension+1> &dof_handler,
  std::vector<types::subdomain_id>           &subdomain_association);
+template
+std::vector<IndexSet>
+DoFTools::locally_owned_dofs_per_subdomain<DoFHandler<deal_II_dimension,deal_II_dimension+1> >
+(const DoFHandler<deal_II_dimension,deal_II_dimension+1> &dof_handler);
+template
+std::vector<IndexSet>
+DoFTools::locally_owned_dofs_per_subdomain<hp::DoFHandler<deal_II_dimension,deal_II_dimension+1> >
+(const hp::DoFHandler<deal_II_dimension,deal_II_dimension+1> &dof_handler);
 
 template
 void
@@ -398,6 +415,15 @@ void
 DoFTools::get_subdomain_association<hp::DoFHandler<1,3> >
 (const hp::DoFHandler<1,3> &dof_handler,
  std::vector<types::subdomain_id>           &subdomain_association);
+template
+std::vector<IndexSet>
+DoFTools::locally_owned_dofs_per_subdomain<DoFHandler<1,3> >
+(const DoFHandler<1,3>     &dof_handler);
+template
+std::vector<IndexSet>
+DoFTools::locally_owned_dofs_per_subdomain<hp::DoFHandler<1,3> >
+(const hp::DoFHandler<1,3> &dof_handler);
 
 template
 unsigned int
index fde77d8414871d59c927df657c0de97f7872c983..62c53cb706fb2848cf504b8fac85a5ea8ee1172a 100644 (file)
@@ -2743,15 +2743,26 @@ namespace hp
     number_cache.n_global_dofs        = next_free_dof;
     number_cache.n_locally_owned_dofs = number_cache.n_global_dofs;
 
-    number_cache.locally_owned_dofs
-      = IndexSet (number_cache.n_global_dofs);
-    number_cache.locally_owned_dofs.add_range (0,
-                                               number_cache.n_global_dofs);
-    Assert (number_cache.n_global_dofs < std::numeric_limits<unsigned int>::max (),
-            ExcMessage ("Global number of degrees of freedom is too large."));
-    number_cache.n_locally_owned_dofs_per_processor
-      = std::vector<types::global_dof_index> (1,
-                                              (types::global_dof_index) number_cache.n_global_dofs);
+    if (dynamic_cast<const parallel::shared::Triangulation< dim, spacedim >*>
+        (&this->get_tria())
+        == 0)
+      {
+        number_cache.locally_owned_dofs
+          = IndexSet (number_cache.n_global_dofs);
+        number_cache.locally_owned_dofs.add_range (0,
+                                                   number_cache.n_global_dofs);
+        Assert (number_cache.n_global_dofs < std::numeric_limits<unsigned int>::max (),
+                ExcMessage ("Global number of degrees of freedom is too large."));
+        number_cache.n_locally_owned_dofs_per_processor
+          = std::vector<types::global_dof_index> (1,
+                                                  (types::global_dof_index) number_cache.n_global_dofs);
+      }
+    else
+      {
+        AssertThrow(false, ExcNotImplemented() );
+        //number_cache.locally_owned_dofs = dealii::DoFTools::locally_owned_dofs_with_subdomain(this,tria->locally_owned_subdomain() );
+        //TODO: update n_locally_owned_dofs_per_processor as well
+      }
 
     number_cache.locally_owned_dofs_per_processor
       = std::vector<IndexSet> (1,
index 1d3804e6c712831f88af79a0e3d40edf8c18ad09..d9459fdc2af66ba955df5e563d59b5bf899f3562 100644 (file)
@@ -86,7 +86,7 @@ SET(_categories
   a-framework algorithms all-headers aniso arpack base bits build_tests
   codim_one deal.II distributed_grids fe gla grid hp integrators lac lapack
   manifold matrix_free metis mpi multigrid opencascade petsc serialization
-  slepc trilinos umfpack
+  slepc trilinos umfpack sharedtria
   )
 IF(DEFINED DEAL_II_HAVE_TESTS_DIRECTORY)
   # Only set up mesh_converter tests if the testsuite is set up as a
diff --git a/tests/sharedtria/CMakeLists.txt b/tests/sharedtria/CMakeLists.txt
new file mode 100644 (file)
index 0000000..0b20ef4
--- /dev/null
@@ -0,0 +1,5 @@
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8.9)
+INCLUDE(${DEAL_II_SOURCE_DIR}/tests/setup_testsubproject.cmake)
+PROJECT(testsuite CXX)
+INCLUDE(${DEAL_II_TARGET_CONFIG})
+DEAL_II_PICKUP_TESTS()
diff --git a/tests/sharedtria/dof_01.cc b/tests/sharedtria/dof_01.cc
new file mode 100644 (file)
index 0000000..4fa58f2
--- /dev/null
@@ -0,0 +1,148 @@
+// ---------------------------------------------------------------------
+// $Id: dof_handler_number_cache.cc 31761 2013-11-22 14:42:37Z heister $
+//
+// Copyright (C) 2008 - 2013 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// check number cache for shared_tria
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/distributed/shared_tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/intergrid_map.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/fe/fe_system.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_dgq.h>
+
+#include <fstream>
+#include <cstdlib>
+#include <numeric>
+
+
+template<int dim>
+void test()
+{
+  parallel::shared::Triangulation<dim>
+  triangulation (MPI_COMM_WORLD);
+
+  FESystem<dim> fe (FE_Q<dim>(3),2,
+                    FE_DGQ<dim>(1),1);
+
+  DoFHandler<dim> dof_handler (triangulation);
+
+  GridGenerator::hyper_cube(triangulation);
+  triangulation.refine_global (2);
+
+  const unsigned int n_refinements[] = { 0, 4, 3, 2 };
+  for (unsigned int i=0; i<n_refinements[dim]; ++i)
+    {
+      // refine one-fifth of cells randomly
+      std::vector<bool> flags (triangulation.n_active_cells(), false);
+      for (unsigned int k=0; k<flags.size()/5 + 1; ++k)
+        flags[Testing::rand() % flags.size()] = true;
+      // make sure there's at least one that
+      // will be refined
+      flags[0] = true;
+
+      // refine triangulation
+      unsigned int index=0;
+      for (typename Triangulation<dim>::active_cell_iterator
+           cell = triangulation.begin_active();
+           cell != triangulation.end(); ++cell)
+          {
+            if (flags[index])
+              cell->set_refine_flag();
+            ++index;
+          }
+
+      Assert (index <= triangulation.n_active_cells(), ExcInternalError());
+
+      // flag all other cells for coarsening
+      // (this should ensure that at least
+      // some of them will actually be
+      // coarsened)
+      index=0;
+      for (typename Triangulation<dim>::active_cell_iterator
+           cell = triangulation.begin_active();
+           cell != triangulation.end(); ++cell)
+          {
+            if (!flags[index])
+              cell->set_coarsen_flag();
+            ++index;
+          }
+
+      triangulation.execute_coarsening_and_refinement ();
+      dof_handler.distribute_dofs (fe);
+
+      deallog
+      << "n_dofs: " << dof_handler.n_dofs() << std::endl
+      << "n_locally_owned_dofs: " << dof_handler.n_locally_owned_dofs() << std::endl;
+
+      deallog << "n_locally_owned_dofs_per_processor: ";
+      std::vector<unsigned int> v = dof_handler.n_locally_owned_dofs_per_processor();
+      unsigned int sum = 0;
+      for (unsigned int i=0;i<v.size();++i)
+        {
+          deallog << v[i] << " ";
+          sum += v[i];
+        }
+      deallog << " sum: " << sum << std::endl;
+
+      Assert(dof_handler.n_locally_owned_dofs() == dof_handler.n_locally_owned_dofs_per_processor()[triangulation.locally_owned_subdomain()], ExcInternalError());
+      Assert( dof_handler.n_locally_owned_dofs() == dof_handler.locally_owned_dofs().n_elements(), ExcInternalError());
+
+      const unsigned int N = dof_handler.n_dofs();
+
+      Assert (dof_handler.n_locally_owned_dofs() <= N,
+              ExcInternalError());
+      Assert (std::accumulate (dof_handler.n_locally_owned_dofs_per_processor().begin(),
+                               dof_handler.n_locally_owned_dofs_per_processor().end(),
+                               0U) == N,
+              ExcInternalError());
+
+      IndexSet all (N);
+      for (unsigned int i=0;
+          i<dof_handler.locally_owned_dofs_per_processor().size(); ++i)
+        {
+          IndexSet intersect = all & dof_handler.locally_owned_dofs_per_processor()[i];
+          Assert(intersect.n_elements()==0, ExcInternalError());
+          all.add_indices(dof_handler.locally_owned_dofs_per_processor()[i]);
+        }
+
+      Assert(all == complete_index_set(N), ExcInternalError());
+    }
+}
+
+
+int main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+  MPILogInitAll all;
+
+  deallog.push("2d");
+  test<2>();
+  deallog.pop();
+
+  deallog.push("3d");
+  test<3>();
+  deallog.pop();
+}
diff --git a/tests/sharedtria/dof_01.with_metis=true.mpirun=3.output b/tests/sharedtria/dof_01.with_metis=true.mpirun=3.output
new file mode 100644 (file)
index 0000000..40e7378
--- /dev/null
@@ -0,0 +1,50 @@
+
+DEAL:0:2d::n_dofs: 818
+DEAL:0:2d::n_locally_owned_dofs: 289
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 289 297 232  sum: 818
+DEAL:0:2d::n_dofs: 1754
+DEAL:0:2d::n_locally_owned_dofs: 578
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 578 588 588  sum: 1754
+DEAL:0:2d::n_dofs: 3056
+DEAL:0:2d::n_locally_owned_dofs: 1023
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 1023 1013 1020  sum: 3056
+DEAL:0:3d::n_dofs: 13282
+DEAL:0:3d::n_locally_owned_dofs: 4446
+DEAL:0:3d::n_locally_owned_dofs_per_processor: 4446 4386 4450  sum: 13282
+DEAL:0:3d::n_dofs: 41826
+DEAL:0:3d::n_locally_owned_dofs: 13862
+DEAL:0:3d::n_locally_owned_dofs_per_processor: 13862 14131 13833  sum: 41826
+
+DEAL:1:2d::n_dofs: 818
+DEAL:1:2d::n_locally_owned_dofs: 297
+DEAL:1:2d::n_locally_owned_dofs_per_processor: 289 297 232  sum: 818
+DEAL:1:2d::n_dofs: 1754
+DEAL:1:2d::n_locally_owned_dofs: 588
+DEAL:1:2d::n_locally_owned_dofs_per_processor: 578 588 588  sum: 1754
+DEAL:1:2d::n_dofs: 3056
+DEAL:1:2d::n_locally_owned_dofs: 1013
+DEAL:1:2d::n_locally_owned_dofs_per_processor: 1023 1013 1020  sum: 3056
+DEAL:1:3d::n_dofs: 13282
+DEAL:1:3d::n_locally_owned_dofs: 4386
+DEAL:1:3d::n_locally_owned_dofs_per_processor: 4446 4386 4450  sum: 13282
+DEAL:1:3d::n_dofs: 41826
+DEAL:1:3d::n_locally_owned_dofs: 14131
+DEAL:1:3d::n_locally_owned_dofs_per_processor: 13862 14131 13833  sum: 41826
+
+
+DEAL:2:2d::n_dofs: 818
+DEAL:2:2d::n_locally_owned_dofs: 232
+DEAL:2:2d::n_locally_owned_dofs_per_processor: 289 297 232  sum: 818
+DEAL:2:2d::n_dofs: 1754
+DEAL:2:2d::n_locally_owned_dofs: 588
+DEAL:2:2d::n_locally_owned_dofs_per_processor: 578 588 588  sum: 1754
+DEAL:2:2d::n_dofs: 3056
+DEAL:2:2d::n_locally_owned_dofs: 1020
+DEAL:2:2d::n_locally_owned_dofs_per_processor: 1023 1013 1020  sum: 3056
+DEAL:2:3d::n_dofs: 13282
+DEAL:2:3d::n_locally_owned_dofs: 4450
+DEAL:2:3d::n_locally_owned_dofs_per_processor: 4446 4386 4450  sum: 13282
+DEAL:2:3d::n_dofs: 41826
+DEAL:2:3d::n_locally_owned_dofs: 13833
+DEAL:2:3d::n_locally_owned_dofs_per_processor: 13862 14131 13833  sum: 41826
+
diff --git a/tests/sharedtria/dof_01.with_metis=true.output b/tests/sharedtria/dof_01.with_metis=true.output
new file mode 100644 (file)
index 0000000..d074284
--- /dev/null
@@ -0,0 +1,16 @@
+
+DEAL:0:2d::n_dofs: 818
+DEAL:0:2d::n_locally_owned_dofs: 818
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 818  sum: 818
+DEAL:0:2d::n_dofs: 1754
+DEAL:0:2d::n_locally_owned_dofs: 1754
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 1754  sum: 1754
+DEAL:0:2d::n_dofs: 3056
+DEAL:0:2d::n_locally_owned_dofs: 3056
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 3056  sum: 3056
+DEAL:0:3d::n_dofs: 13282
+DEAL:0:3d::n_locally_owned_dofs: 13282
+DEAL:0:3d::n_locally_owned_dofs_per_processor: 13282  sum: 13282
+DEAL:0:3d::n_dofs: 41826
+DEAL:0:3d::n_locally_owned_dofs: 41826
+DEAL:0:3d::n_locally_owned_dofs_per_processor: 41826  sum: 41826
diff --git a/tests/sharedtria/dof_02.cc b/tests/sharedtria/dof_02.cc
new file mode 100644 (file)
index 0000000..7b2eb7e
--- /dev/null
@@ -0,0 +1,149 @@
+// ---------------------------------------------------------------------
+// $Id: dof_handler_number_cache.cc 31761 2013-11-22 14:42:37Z heister $
+//
+// Copyright (C) 2008 - 2013 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// check number cache for shared_tria with renumbering
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/dofs/dof_renumbering.h>
+#include <deal.II/distributed/shared_tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/intergrid_map.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/fe/fe_system.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_dgq.h>
+
+#include <fstream>
+#include <cstdlib>
+#include <numeric>
+
+
+template<int dim>
+void test()
+{
+  parallel::shared::Triangulation<dim>
+  triangulation (MPI_COMM_WORLD);
+
+  FESystem<dim> fe (FE_Q<dim>(3),2,
+                    FE_DGQ<dim>(1),1);
+
+  DoFHandler<dim> dof_handler (triangulation);
+
+  GridGenerator::hyper_cube(triangulation);
+  triangulation.refine_global (2);
+
+  const unsigned int n_refinements[] = { 0, 4, 3, 2 };
+  for (unsigned int i=0; i<n_refinements[dim]; ++i)
+    {
+      // refine one-fifth of cells randomly
+      std::vector<bool> flags (triangulation.n_active_cells(), false);
+      for (unsigned int k=0; k<flags.size()/5 + 1; ++k)
+        flags[Testing::rand() % flags.size()] = true;
+      // make sure there's at least one that
+      // will be refined
+      flags[0] = true;
+
+      // refine triangulation
+      unsigned int index=0;
+      for (typename Triangulation<dim>::active_cell_iterator
+           cell = triangulation.begin_active();
+           cell != triangulation.end(); ++cell)
+          {
+            if (flags[index])
+              cell->set_refine_flag();
+            ++index;
+          }
+
+      Assert (index <= triangulation.n_active_cells(), ExcInternalError());
+
+      // flag all other cells for coarsening
+      // (this should ensure that at least
+      // some of them will actually be
+      // coarsened)
+      index=0;
+      for (typename Triangulation<dim>::active_cell_iterator
+           cell = triangulation.begin_active();
+           cell != triangulation.end(); ++cell)
+          {
+            if (!flags[index])
+              cell->set_coarsen_flag();
+            ++index;
+          }
+
+      triangulation.execute_coarsening_and_refinement ();
+      dof_handler.distribute_dofs (fe);
+      DoFRenumbering::component_wise(dof_handler);
+
+      deallog
+        << "n_dofs: " << dof_handler.n_dofs() << std::endl
+        << "n_locally_owned_dofs: " << dof_handler.n_locally_owned_dofs() << std::endl;
+
+      deallog << "n_locally_owned_dofs_per_processor: ";
+      std::vector<unsigned int> v = dof_handler.n_locally_owned_dofs_per_processor();
+      unsigned int sum = 0;
+      for (unsigned int i=0;i<v.size();++i)
+        {
+          deallog << v[i] << " ";
+          sum += v[i];
+        }
+      deallog << " sum: " << sum << std::endl;
+
+      Assert(dof_handler.n_locally_owned_dofs() == dof_handler.n_locally_owned_dofs_per_processor()[triangulation.locally_owned_subdomain()], ExcInternalError());
+      Assert( dof_handler.n_locally_owned_dofs() == dof_handler.locally_owned_dofs().n_elements(), ExcInternalError());
+
+      const unsigned int N = dof_handler.n_dofs();
+
+      Assert (dof_handler.n_locally_owned_dofs() <= N,
+              ExcInternalError());
+      Assert (std::accumulate (dof_handler.n_locally_owned_dofs_per_processor().begin(),
+                               dof_handler.n_locally_owned_dofs_per_processor().end(),
+                               0U) == N,
+              ExcInternalError());
+
+      IndexSet all (N);
+      for (unsigned int i=0;
+          i<dof_handler.locally_owned_dofs_per_processor().size(); ++i)
+        {
+          IndexSet intersect = all & dof_handler.locally_owned_dofs_per_processor()[i];
+          Assert(intersect.n_elements()==0, ExcInternalError());
+          all.add_indices(dof_handler.locally_owned_dofs_per_processor()[i]);
+        }
+      Assert(all == complete_index_set(N), ExcInternalError());
+    }
+}
+
+
+int main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+  MPILogInitAll all;
+
+  deallog.push("2d");
+  test<2>();
+  deallog.pop();
+
+  deallog.push("3d");
+  test<3>();
+  deallog.pop();
+}
diff --git a/tests/sharedtria/dof_02.with_metis=true.mpirun=3.output b/tests/sharedtria/dof_02.with_metis=true.mpirun=3.output
new file mode 100644 (file)
index 0000000..40e7378
--- /dev/null
@@ -0,0 +1,50 @@
+
+DEAL:0:2d::n_dofs: 818
+DEAL:0:2d::n_locally_owned_dofs: 289
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 289 297 232  sum: 818
+DEAL:0:2d::n_dofs: 1754
+DEAL:0:2d::n_locally_owned_dofs: 578
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 578 588 588  sum: 1754
+DEAL:0:2d::n_dofs: 3056
+DEAL:0:2d::n_locally_owned_dofs: 1023
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 1023 1013 1020  sum: 3056
+DEAL:0:3d::n_dofs: 13282
+DEAL:0:3d::n_locally_owned_dofs: 4446
+DEAL:0:3d::n_locally_owned_dofs_per_processor: 4446 4386 4450  sum: 13282
+DEAL:0:3d::n_dofs: 41826
+DEAL:0:3d::n_locally_owned_dofs: 13862
+DEAL:0:3d::n_locally_owned_dofs_per_processor: 13862 14131 13833  sum: 41826
+
+DEAL:1:2d::n_dofs: 818
+DEAL:1:2d::n_locally_owned_dofs: 297
+DEAL:1:2d::n_locally_owned_dofs_per_processor: 289 297 232  sum: 818
+DEAL:1:2d::n_dofs: 1754
+DEAL:1:2d::n_locally_owned_dofs: 588
+DEAL:1:2d::n_locally_owned_dofs_per_processor: 578 588 588  sum: 1754
+DEAL:1:2d::n_dofs: 3056
+DEAL:1:2d::n_locally_owned_dofs: 1013
+DEAL:1:2d::n_locally_owned_dofs_per_processor: 1023 1013 1020  sum: 3056
+DEAL:1:3d::n_dofs: 13282
+DEAL:1:3d::n_locally_owned_dofs: 4386
+DEAL:1:3d::n_locally_owned_dofs_per_processor: 4446 4386 4450  sum: 13282
+DEAL:1:3d::n_dofs: 41826
+DEAL:1:3d::n_locally_owned_dofs: 14131
+DEAL:1:3d::n_locally_owned_dofs_per_processor: 13862 14131 13833  sum: 41826
+
+
+DEAL:2:2d::n_dofs: 818
+DEAL:2:2d::n_locally_owned_dofs: 232
+DEAL:2:2d::n_locally_owned_dofs_per_processor: 289 297 232  sum: 818
+DEAL:2:2d::n_dofs: 1754
+DEAL:2:2d::n_locally_owned_dofs: 588
+DEAL:2:2d::n_locally_owned_dofs_per_processor: 578 588 588  sum: 1754
+DEAL:2:2d::n_dofs: 3056
+DEAL:2:2d::n_locally_owned_dofs: 1020
+DEAL:2:2d::n_locally_owned_dofs_per_processor: 1023 1013 1020  sum: 3056
+DEAL:2:3d::n_dofs: 13282
+DEAL:2:3d::n_locally_owned_dofs: 4450
+DEAL:2:3d::n_locally_owned_dofs_per_processor: 4446 4386 4450  sum: 13282
+DEAL:2:3d::n_dofs: 41826
+DEAL:2:3d::n_locally_owned_dofs: 13833
+DEAL:2:3d::n_locally_owned_dofs_per_processor: 13862 14131 13833  sum: 41826
+
diff --git a/tests/sharedtria/dof_02.with_metis=true.output b/tests/sharedtria/dof_02.with_metis=true.output
new file mode 100644 (file)
index 0000000..d074284
--- /dev/null
@@ -0,0 +1,16 @@
+
+DEAL:0:2d::n_dofs: 818
+DEAL:0:2d::n_locally_owned_dofs: 818
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 818  sum: 818
+DEAL:0:2d::n_dofs: 1754
+DEAL:0:2d::n_locally_owned_dofs: 1754
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 1754  sum: 1754
+DEAL:0:2d::n_dofs: 3056
+DEAL:0:2d::n_locally_owned_dofs: 3056
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 3056  sum: 3056
+DEAL:0:3d::n_dofs: 13282
+DEAL:0:3d::n_locally_owned_dofs: 13282
+DEAL:0:3d::n_locally_owned_dofs_per_processor: 13282  sum: 13282
+DEAL:0:3d::n_dofs: 41826
+DEAL:0:3d::n_locally_owned_dofs: 41826
+DEAL:0:3d::n_locally_owned_dofs_per_processor: 41826  sum: 41826
diff --git a/tests/sharedtria/tria_01.cc b/tests/sharedtria/tria_01.cc
new file mode 100644 (file)
index 0000000..b85a32d
--- /dev/null
@@ -0,0 +1,107 @@
+// ---------------------------------------------------------------------
+// $Id: 3d_refinement_01.cc 31349 2013-10-20 19:07:06Z maier $
+//
+// Copyright (C) 2008 - 2013 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+// create a shared tria mesh and refine it
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/distributed/shared_tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/numerics/data_out.h>
+
+#include <fstream>
+
+template <int dim, int spacedim>
+void write_mesh (const parallel::shared::Triangulation<dim,spacedim> &tria,
+                const char                                *filename_)
+{
+    DataOut<dim> data_out;
+    data_out.attach_triangulation (tria);
+    Vector<float> subdomain (tria.n_active_cells());
+    for (unsigned int i=0; i<subdomain.size(); ++i)
+      subdomain(i) = tria.locally_owned_subdomain();
+    data_out.add_data_vector (subdomain, "subdomain");
+
+    data_out.build_patches ();
+    const std::string filename = (filename_ +
+                                  Utilities::int_to_string
+                                  (tria.locally_owned_subdomain(), 4));
+    {
+      std::ofstream output ((filename + ".vtu").c_str());
+      data_out.write_vtu (output);
+    }
+}
+
+
+
+template<int dim>
+void test()
+{
+  parallel::shared::Triangulation<dim> tr(MPI_COMM_WORLD);
+
+
+  GridGenerator::hyper_cube(tr);
+  tr.begin_active()->set_refine_flag();
+  tr.execute_coarsening_and_refinement ();
+  tr.begin_active()->set_refine_flag();
+  tr.execute_coarsening_and_refinement ();
+
+  deallog
+    << " locally_owned_subdomain(): " << tr.locally_owned_subdomain() << "\n"
+    << " n_active_cells: " << tr.n_active_cells() << "\n"
+    << " n_levels: " << tr.n_levels() << "\n"
+    << " n_global_levels: " << tr.n_global_levels()  << "\n"
+    //<< " n_locally_owned_active_cells: " << tr.n_locally_owned_active_cells() << "\n"
+    //<< " n_global_active_cells: " << tr.n_global_active_cells() << "\n"
+    << std::endl;
+
+  /*deallog << "n_locally_owned_active_cells_per_processor: ";
+  std::vector<unsigned int> v = tr.n_locally_owned_active_cells_per_processor();
+  for (unsigned int i=0;i<v.size();++i)
+    deallog << v[i] << " ";
+    deallog << std::endl;*/
+
+  deallog << "subdomains: ";
+  typename parallel::shared::Triangulation<dim>::active_cell_iterator it = tr.begin_active();
+  for (; it!=tr.end(); ++it)
+    {
+      deallog << it->subdomain_id() << " ";
+    }
+  deallog << std::endl;
+
+  //write_mesh(tr, "mesh");
+}
+
+
+int main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+  MPILogInitAll all;
+
+  deallog.push("2d");
+  test<2>();
+  deallog.pop();
+  deallog.push("3d");
+  test<3>();
+  deallog.pop();
+}
diff --git a/tests/sharedtria/tria_01.with_metis=true.mpirun=3.output b/tests/sharedtria/tria_01.with_metis=true.mpirun=3.output
new file mode 100644 (file)
index 0000000..1f30076
--- /dev/null
@@ -0,0 +1,41 @@
+
+DEAL:0:2d:: locally_owned_subdomain(): 0
+ n_active_cells: 7
+ n_levels: 3
+ n_global_levels: 3
+
+DEAL:0:2d::subdomains: 0 1 0 1 2 1 2 
+DEAL:0:3d:: locally_owned_subdomain(): 0
+ n_active_cells: 15
+ n_levels: 3
+ n_global_levels: 3
+
+DEAL:0:3d::subdomains: 0 2 0 1 0 0 0 2 2 2 2 1 1 1 1 
+
+DEAL:1:2d:: locally_owned_subdomain(): 1
+ n_active_cells: 7
+ n_levels: 3
+ n_global_levels: 3
+
+DEAL:1:2d::subdomains: 0 1 0 1 2 1 2 
+DEAL:1:3d:: locally_owned_subdomain(): 1
+ n_active_cells: 15
+ n_levels: 3
+ n_global_levels: 3
+
+DEAL:1:3d::subdomains: 0 2 0 1 0 0 0 2 2 2 2 1 1 1 1 
+
+
+DEAL:2:2d:: locally_owned_subdomain(): 2
+ n_active_cells: 7
+ n_levels: 3
+ n_global_levels: 3
+
+DEAL:2:2d::subdomains: 0 1 0 1 2 1 2 
+DEAL:2:3d:: locally_owned_subdomain(): 2
+ n_active_cells: 15
+ n_levels: 3
+ n_global_levels: 3
+
+DEAL:2:3d::subdomains: 0 2 0 1 0 0 0 2 2 2 2 1 1 1 1 
+
diff --git a/tests/sharedtria/tria_01.with_metis=true.output b/tests/sharedtria/tria_01.with_metis=true.output
new file mode 100644 (file)
index 0000000..d1b965d
--- /dev/null
@@ -0,0 +1,13 @@
+
+DEAL:0:2d:: locally_owned_subdomain(): 0
+ n_active_cells: 7
+ n_levels: 3
+ n_global_levels: 3
+
+DEAL:0:2d::subdomains: 0 0 0 0 0 0 0 
+DEAL:0:3d:: locally_owned_subdomain(): 0
+ n_active_cells: 15
+ n_levels: 3
+ n_global_levels: 3
+
+DEAL:0:3d::subdomains: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
