Implement parallel MGTransferPrebuilt for parallel::distributed::Vector
author    Martin Kronbichler <kronbichler@lnm.mw.tum.de>
          Wed, 23 Dec 2015 22:08:59 +0000 (23:08 +0100)
committer Martin Kronbichler <kronbichler@lnm.mw.tum.de>
          Wed, 6 Jan 2016 17:58:36 +0000 (18:58 +0100)
This commit includes a refactoring of MGTransferPrebuilt into a base class
MGLevelGlobalTransfer that contains the copy operations between the multigrid
hierarchy and the global degrees of freedom, including a specialization for
parallel::distributed::Vector that takes care of the particular behavior of
this vector type. The specialization also enables optimized access through
local MPI indices rather than global indices.
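
A minimal usage sketch of the refactored interface (illustration only, not
part of the commit; the DoFHandler, ConstraintMatrix, and MGConstrainedDoFs
objects are assumed to be set up elsewhere):

  #include <deal.II/lac/parallel_vector.h>
  #include <deal.II/multigrid/mg_transfer.h>

  template <int dim>
  void setup_transfer (const DoFHandler<dim>   &mg_dof,
                       const ConstraintMatrix  &constraints,
                       const MGConstrainedDoFs &mg_constrained_dofs)
  {
    // MGTransferPrebuilt now derives from MGLevelGlobalTransfer, so the
    // copy_to_mg()/copy_from_mg() machinery comes from the base class,
    // specialized for parallel::distributed::Vector.
    MGTransferPrebuilt<parallel::distributed::Vector<double> >
      transfer (constraints, mg_constrained_dofs);
    transfer.build_matrices (mg_dof);  // prebuild level prolongation matrices
  }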

include/deal.II/multigrid/mg_transfer.h
include/deal.II/multigrid/mg_transfer.templates.h
source/multigrid/CMakeLists.txt
source/multigrid/mg_level_global_transfer.cc [new file with mode: 0644]
source/multigrid/mg_level_global_transfer.inst.in [new file with mode: 0644]
source/multigrid/mg_transfer_prebuilt.cc
source/multigrid/mg_transfer_prebuilt.inst.in

diff --git a/include/deal.II/multigrid/mg_transfer.h b/include/deal.II/multigrid/mg_transfer.h
index aef8748d5549992de3d46729889f8d193bdaee5f..89e3f57360cb2f7dec72ff087c218a436a00a0c8 100644
@@ -39,8 +39,6 @@
 DEAL_II_NAMESPACE_OPEN
 
 
-template <int dim, int spacedim> class DoFHandler;
-
 namespace internal
 {
   template <typename VectorType>
@@ -111,64 +109,25 @@ namespace internal
 /*!@addtogroup mg */
 /*@{*/
 
+
+
 /**
- * Implementation of the MGTransferBase interface for which the transfer
- * operations are prebuilt upon construction of the object of this class as
- * matrices. This is the fast way, since it only needs to build the operation
- * once by looping over all cells and storing the result in a matrix for each
- * level, but requires additional memory.
+ * Implementation of transfer between the global vectors and the multigrid
+ * levels for use in the derived class MGTransferPrebuilt and other classes.
  *
- * See MGTransferBase to find out which of the transfer classes is best for
- * your needs.
- *
- * @author Wolfgang Bangerth, Guido Kanschat
- * @date 1999, 2000, 2001, 2002, 2003, 2004, 2012
+ * @author Wolfgang Bangerth, Guido Kanschat, Timo Heister, Martin Kronbichler
+ * @date 1999, 2000, 2001, 2002, 2003, 2004, 2012, 2015
  */
 template <typename VectorType>
-class MGTransferPrebuilt : public MGTransferBase<VectorType>
+class MGLevelGlobalTransfer : public MGTransferBase<VectorType>
 {
 public:
-  /**
-   * Constructor without constraint matrices. Use this constructor only with
-   * discontinuous finite elements or with no local refinement.
-   */
-  MGTransferPrebuilt ();
-  /**
-   * Constructor with constraints. Equivalent to the default constructor
-   * followed by initialize_constraints().
-   */
-  MGTransferPrebuilt (const ConstraintMatrix &constraints,
-                      const MGConstrainedDoFs &mg_constrained_dofs);
-  /**
-   * Destructor.
-   */
-  virtual ~MGTransferPrebuilt ();
-
-  /**
-   * Initialize the constraints to be used in build_matrices().
-   */
-  void initialize_constraints (const ConstraintMatrix &constraints,
-                               const MGConstrainedDoFs &mg_constrained_dofs);
 
   /**
    * Reset the object to the state it had right after the default constructor.
    */
   void clear ();
 
-  /**
-   * Actually build the prolongation matrices for each level.
-   */
-  template <int dim, int spacedim>
-  void build_matrices (const DoFHandler<dim,spacedim> &mg_dof);
-
-  virtual void prolongate (const unsigned int to_level,
-                           VectorType         &dst,
-                           const VectorType   &src) const;
-
-  virtual void restrict_and_add (const unsigned int from_level,
-                                 VectorType         &dst,
-                                 const VectorType   &src) const;
-
   /**
    * Transfer from a vector on the global grid to vectors defined on each of
   * the levels separately, i.e. an @p MGVector.
@@ -222,63 +181,176 @@ public:
   set_component_to_block_map (const std::vector<unsigned int> &map);
 
   /**
-   * Finite element does not provide prolongation matrices.
+   * Memory used by this object.
    */
-  DeclException0(ExcNoProlongation);
+  std::size_t memory_consumption () const;
 
   /**
-   * You have to call build_matrices() before using this object.
+   * Print the copy index fields for debugging purposes.
    */
-  DeclException0(ExcMatricesNotBuilt);
+  void print_indices(std::ostream &os) const;
+
+protected:
 
   /**
-   * Memory used by this object.
+   * Internal function to fill @p copy_indices*. Called by derived classes.
    */
-  std::size_t memory_consumption () const;
+  template <int dim, int spacedim>
+  void fill_and_communicate_copy_indices(const DoFHandler<dim,spacedim> &mg_dof);
 
   /**
-   * Print all the matrices for debugging purposes.
+   * Sizes of the multi-level vectors.
    */
-  void print_matrices(std::ostream &os) const;
+  std::vector<types::global_dof_index> sizes;
 
   /**
-   * Print the copy index fields for debugging purposes.
+   * Mapping for the copy_to_mg() and copy_from_mg() functions. Here only
+   * locally owned index pairs are stored.
+   *
+   * The data is organized as follows: one vector per level. Each element of
+   * these vectors contains first the global index, then the level index.
    */
-  void print_indices(std::ostream &os) const;
+  std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > >
+  copy_indices;
 
-private:
+  /**
+   * Additional degrees of freedom for the copy_to_mg() function. These are
+   * the ones where the global degree of freedom is locally owned and the
+   * level degree of freedom is not.
+   *
+   * Organization of the data is like for @p copy_indices.
+   */
+  std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > >
+  copy_indices_global_mine;
 
   /**
-   * Internal function to @p fill copy_indices*. Called by build_matrices().
+   * Additional degrees of freedom for the copy_from_mg() function. These are
+   * the ones where the level degree of freedom is locally owned and the
+   * global degree of freedom is not.
+   *
+   * Organization of the data is like for @p copy_indices.
    */
-  template <int dim, int spacedim>
-  void fill_and_communicate_copy_indices(const DoFHandler<dim,spacedim> &mg_dof);
+  std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > >
+  copy_indices_level_mine;
 
   /**
-   * Sizes of the multi-level vectors.
+   * The vector that stores what has been given to the
+   * set_component_to_block_map() function.
    */
-  std::vector<types::global_dof_index> sizes;
+  std::vector<unsigned int> component_to_block_map;
 
   /**
-   * Sparsity patterns for transfer matrices.
+   * The mg_constrained_dofs of the level systems.
    */
-  std::vector<std_cxx11::shared_ptr<typename internal::MatrixSelector<VectorType>::Sparsity> > prolongation_sparsities;
+  SmartPointer<const MGConstrainedDoFs, MGLevelGlobalTransfer<VectorType> > mg_constrained_dofs;
+};
+
+
+
+/**
+ * Implementation of transfer between the global vectors and the multigrid
+ * levels for use in the derived class MGTransferPrebuilt and other
+ * classes. This class is a specialization for the case of
+ * parallel::distributed::Vector that requires a few different calling
+ * routines as compared to the %parallel vectors in the PETScWrappers and
+ * TrilinosWrappers namespaces.
+ *
+ * @author Martin Kronbichler
+ * @date 2016
+ */
+template <typename Number>
+class MGLevelGlobalTransfer<parallel::distributed::Vector<Number> > : public MGTransferBase<parallel::distributed::Vector<Number> >
+{
+public:
 
   /**
-   * The actual prolongation matrix.  column indices belong to the dof indices
-   * of the mother cell, i.e. the coarse level.  while row indices belong to
-   * the child cell, i.e. the fine level.
+   * Reset the object to the state it had right after the default constructor.
    */
-  std::vector<std_cxx11::shared_ptr<typename internal::MatrixSelector<VectorType>::Matrix> > prolongation_matrices;
+  void clear ();
+
+  /**
+   * Transfer from a vector on the global grid to vectors defined on each of
+   * the levels separately, i.e. an @p MGVector.
+   */
+  template <int dim, typename Number2, int spacedim>
+  void
+  copy_to_mg (const DoFHandler<dim,spacedim>                        &mg_dof,
+              MGLevelObject<parallel::distributed::Vector<Number> > &dst,
+              const parallel::distributed::Vector<Number2>          &src) const;
+
+  /**
+   * Transfer from multi-level vector to normal vector.
+   *
+   * Copies data from active portions of an MGVector into the respective
+   * positions of a <tt>Vector<number></tt>. In order to keep the result
+   * consistent, constrained degrees of freedom are set to zero.
+   */
+  template <int dim, typename Number2, int spacedim>
+  void
+  copy_from_mg (const DoFHandler<dim,spacedim>                              &mg_dof,
+                parallel::distributed::Vector<Number2>                      &dst,
+                const MGLevelObject<parallel::distributed::Vector<Number> > &src) const;
+
+  /**
+   * Add a multi-level vector to a normal vector.
+   *
+   * Works like the previous function, but probably does not produce
+   * correct results for continuous elements, whose degrees of freedom
+   * appear on several levels.
+   */
+  template <int dim, typename Number2, int spacedim>
+  void
+  copy_from_mg_add (const DoFHandler<dim,spacedim>                              &mg_dof,
+                    parallel::distributed::Vector<Number2>                      &dst,
+                    const MGLevelObject<parallel::distributed::Vector<Number> > &src) const;
+
+  /**
+   * If this object operates on BlockVector objects, we need to describe how
+   * the individual vector components are mapped to the blocks of a vector.
+   * For example, for a Stokes system, we have dim+1 vector components for
+   * velocity and pressure, but we may want to use block vectors with only two
+   * blocks for all velocities in one block, and the pressure variables in the
+   * other.
+   *
+   * By default, if this function is not called, block vectors have as many
+   * blocks as the finite element has vector components. However, this can be
+   * changed by calling this function with an array that describes how vector
+   * components are to be grouped into blocks. The meaning of the argument is
+   * the same as the one given to the DoFTools::count_dofs_per_component
+   * function.
+   */
+  void
+  set_component_to_block_map (const std::vector<unsigned int> &map);
+
+  /**
+   * Memory used by this object.
+   */
+  std::size_t memory_consumption () const;
+
+  /**
+   * Print the copy index fields for debugging purposes.
+   */
+  void print_indices(std::ostream &os) const;
+
+protected:
+
+  /**
+   * Internal function to fill @p copy_indices*. Called by derived classes.
+   */
+  template <int dim, int spacedim>
+  void fill_and_communicate_copy_indices(const DoFHandler<dim,spacedim> &mg_dof);
+
+  /**
+   * Sizes of the multi-level vectors.
+   */
+  std::vector<types::global_dof_index> sizes;
 
   /**
    * Mapping for the copy_to_mg() and copy_from_mg() functions. Here only
-   * index pairs locally owned
+   * locally owned index pairs are stored.
    *
    * The data is organized as follows: one vector per level. Each element of
    * these vectors contains first the global index, then the level index.
    */
-  std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > >
+  std::vector<std::vector<std::pair<unsigned int, unsigned int> > >
   copy_indices;
 
   /**
@@ -288,7 +360,7 @@ private:
    *
   * Organization of the data is like for @p copy_indices.
    */
-  std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > >
+  std::vector<std::vector<std::pair<unsigned int, unsigned int> > >
   copy_indices_global_mine;
 
   /**
@@ -298,9 +370,16 @@ private:
    *
   * Organization of the data is like for @p copy_indices.
    */
-  std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > >
+  std::vector<std::vector<std::pair<unsigned int, unsigned int> > >
   copy_indices_level_mine;
 
+  /**
+   * Stores whether the copy operation from the global to the level vector is
+   * actually a plain copy to the finest level. This means that the grid has
+   * no adaptive refinement and the numbering on the finest multigrid level is
+   * the same as in the global case.
+   */
+  bool perform_plain_copy;
 
   /**
    * The vector that stores what has been given to the
@@ -308,20 +387,131 @@ private:
    */
   std::vector<unsigned int> component_to_block_map;
 
+  /**
+   * The mg_constrained_dofs of the level systems.
+   */
+  SmartPointer<const MGConstrainedDoFs, MGLevelGlobalTransfer<parallel::distributed::Vector<Number> > > mg_constrained_dofs;
+
+  /**
+   * In the function copy_to_mg, we need to access ghosted entries of the
+   * global vector for inserting into the level vectors. This vector is
+   * populated with those entries.
+   */
+  mutable parallel::distributed::Vector<Number> ghosted_global_vector;
+
+  /**
+   * In the function copy_from_mg, we access all level vectors with certain
+   * ghost entries for inserting the result into a global vector.
+   */
+  mutable MGLevelObject<parallel::distributed::Vector<Number> > ghosted_level_vector;
+};
+
+
+
+/**
+ * Implementation of the MGTransferBase interface for which the transfer
+ * operations are prebuilt upon construction of the object of this class as
+ * matrices. This is the fast way, since it only needs to build the operation
+ * once by looping over all cells and storing the result in a matrix for each
+ * level, but requires additional memory.
+ *
+ * See MGTransferBase to find out which of the transfer classes is best for
+ * your needs.
+ *
+ * @author Wolfgang Bangerth, Guido Kanschat, Timo Heister, Martin Kronbichler
+ * @date 1999, 2000, 2001, 2002, 2003, 2004, 2012, 2015
+ */
+template <typename VectorType>
+class MGTransferPrebuilt : public MGLevelGlobalTransfer<VectorType>
+{
+public:
+  /**
+   * Constructor without constraint matrices. Use this constructor only with
+   * discontinuous finite elements or with no local refinement.
+   */
+  MGTransferPrebuilt ();
+
+  /**
+   * Constructor with constraints. Equivalent to the default constructor
+   * followed by initialize_constraints().
+   */
+  MGTransferPrebuilt (const ConstraintMatrix &constraints,
+                      const MGConstrainedDoFs &mg_constrained_dofs);
+
+  /**
+   * Destructor.
+   */
+  virtual ~MGTransferPrebuilt ();
+
+  /**
+   * Initialize the constraints to be used in build_matrices().
+   */
+  void initialize_constraints (const ConstraintMatrix &constraints,
+                               const MGConstrainedDoFs &mg_constrained_dofs);
+
+  /**
+   * Reset the object to the state it had right after the default constructor.
+   */
+  void clear ();
+
+  /**
+   * Actually build the prolongation matrices for each level.
+   */
+  template <int dim, int spacedim>
+  void build_matrices (const DoFHandler<dim,spacedim> &mg_dof);
+
+  virtual void prolongate (const unsigned int to_level,
+                           VectorType         &dst,
+                           const VectorType   &src) const;
+
+  virtual void restrict_and_add (const unsigned int from_level,
+                                 VectorType         &dst,
+                                 const VectorType   &src) const;
+
+  /**
+   * Finite element does not provide prolongation matrices.
+   */
+  DeclException0(ExcNoProlongation);
+
+  /**
+   * You have to call build_matrices() before using this object.
+   */
+  DeclException0(ExcMatricesNotBuilt);
+
+  /**
+   * Memory used by this object.
+   */
+  std::size_t memory_consumption () const;
+
+  /**
+   * Print all the matrices for debugging purposes.
+   */
+  void print_matrices(std::ostream &os) const;
+
+private:
+
+  /**
+   * Sparsity patterns for transfer matrices.
+   */
+  std::vector<std_cxx11::shared_ptr<typename internal::MatrixSelector<VectorType>::Sparsity> > prolongation_sparsities;
+
+  /**
+   * The actual prolongation matrices. Column indices belong to the dof
+   * indices of the mother cell, i.e. the coarse level, while row indices
+   * belong to the child cell, i.e. the fine level.
+   */
+  std::vector<std_cxx11::shared_ptr<typename internal::MatrixSelector<VectorType>::Matrix> > prolongation_matrices;
+
   /**
    * Degrees of freedom on the refinement edge excluding those on the
    * boundary.
    */
   std::vector<std::vector<bool> > interface_dofs;
+
   /**
    * The constraints of the global system.
    */
   SmartPointer<const ConstraintMatrix, MGTransferPrebuilt<VectorType> > constraints;
-  /**
-   * The mg_constrained_dofs of the level systems.
-   */
-
-  SmartPointer<const MGConstrainedDoFs, MGTransferPrebuilt<VectorType> > mg_constrained_dofs;
 };
 
 
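As an aside, the set_component_to_block_map() documentation above can be
illustrated with a short sketch (hypothetical values for a 2d Stokes system;
"transfer" stands for an already constructed transfer object):

  // Group the dim = 2 velocity components into block 0 and the pressure
  // into block 1, out of dim+1 = 3 vector components in total.
  std::vector<unsigned int> component_to_block (3);
  component_to_block[0] = 0;  // velocity x -> block 0
  component_to_block[1] = 0;  // velocity y -> block 0
  component_to_block[2] = 1;  // pressure   -> block 1
  transfer.set_component_to_block_map (component_to_block);
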
diff --git a/include/deal.II/multigrid/mg_transfer.templates.h b/include/deal.II/multigrid/mg_transfer.templates.h
index 41630fe3bc9f2b4d70e42ec5d8754f82e9c8e6be..a3bd668bdf4aaa2d6514f1bd64ace334d0412cfc 100644
@@ -114,12 +114,10 @@ namespace
       (dynamic_cast<const parallel::Triangulation<dim,spacedim>*>
        (&mg_dof.get_triangulation()));
 
-    for (unsigned int level=v.min_level();
-         level<=v.max_level(); ++level)
+    for (unsigned int level=v.min_level(); level<=v.max_level(); ++level)
       {
-        const IndexSet vector_index_set = v[level].locally_owned_elements();
-        if (vector_index_set.size() != mg_dof.locally_owned_mg_dofs(level).size() ||
-            mg_dof.locally_owned_mg_dofs(level) != vector_index_set)
+        if (v[level].size() != mg_dof.locally_owned_mg_dofs(level).size() ||
+            v[level].local_size() != mg_dof.locally_owned_mg_dofs(level).n_elements())
           v[level].reinit(mg_dof.locally_owned_mg_dofs(level), tria != 0 ?
                           tria->get_communicator() : MPI_COMM_SELF);
         else
@@ -160,18 +158,16 @@ namespace
 
 
 
-/* --------------------- MGTransferPrebuilt -------------- */
-
-
+/* ------------------ MGLevelGlobalTransfer<VectorType> ----------------- */
 
 
 template <typename VectorType>
 template <int dim, class InVector, int spacedim>
 void
-MGTransferPrebuilt<VectorType>::copy_to_mg
+MGLevelGlobalTransfer<VectorType>::copy_to_mg
 (const DoFHandler<dim,spacedim> &mg_dof_handler,
- MGLevelObject<VectorType>     &dst,
- const InVector                &src) const
+ MGLevelObject<VectorType>      &dst,
+ const InVector                 &src) const
 {
   reinit_vector(mg_dof_handler, component_to_block_map, dst);
   bool first = true;
@@ -182,22 +178,21 @@ MGTransferPrebuilt<VectorType>::copy_to_mg
   for (unsigned int level=mg_dof_handler.get_triangulation().n_global_levels(); level != 0;)
     {
       --level;
-      VectorType &dst_level = dst[level];
-
 #ifdef DEBUG_OUTPUT
       MPI_Barrier(MPI_COMM_WORLD);
 #endif
 
       typedef std::vector<std::pair<types::global_dof_index, types::global_dof_index> >::const_iterator dof_pair_iterator;
+      VectorType &dst_level = dst[level];
 
       // first copy local unknowns
-      for (dof_pair_iterator i= copy_indices[level].begin();
+      for (dof_pair_iterator i = copy_indices[level].begin();
            i != copy_indices[level].end(); ++i)
         dst_level(i->second) = src(i->first);
 
-      // Do the same for the indices where the global index is local,
-      // but the local index is not
-      for (dof_pair_iterator i= copy_indices_global_mine[level].begin();
+      // Do the same for the indices where the global index is local, but the
+      // level index is not
+      for (dof_pair_iterator i = copy_indices_global_mine[level].begin();
            i != copy_indices_global_mine[level].end(); ++i)
         dst_level(i->second) = src(i->first);
 
@@ -210,7 +205,7 @@ MGTransferPrebuilt<VectorType>::copy_to_mg
 
       if (!first)
         {
-          restrict_and_add (level+1, dst[level], dst[level+1]);
+          this->restrict_and_add (level+1, dst[level], dst[level+1]);
 #ifdef DEBUG_OUTPUT
           std::cout << "copy_to_mg restr&add " << level << " " << dst_level.l2_norm() << std::endl;
 #endif
@@ -225,18 +220,14 @@ MGTransferPrebuilt<VectorType>::copy_to_mg
 template <typename VectorType>
 template <int dim, class OutVector, int spacedim>
 void
-MGTransferPrebuilt<VectorType>::copy_from_mg
+MGLevelGlobalTransfer<VectorType>::copy_from_mg
 (const DoFHandler<dim,spacedim>  &mg_dof_handler,
  OutVector                       &dst,
  const MGLevelObject<VectorType> &src) const
 {
-  // For non-DG: degrees of
-  // freedom in the refinement
-  // face may need special
-  // attention, since they belong
-  // to the coarse level, but
-  // have fine level basis
-  // functions
+  // For non-DG: degrees of freedom in the refinement face may need special
+  // attention, since they belong to the coarse level, but have fine level
+  // basis functions
   dst = 0;
   for (unsigned int level=0; level<mg_dof_handler.get_triangulation().n_global_levels(); ++level)
     {
@@ -247,17 +238,18 @@ MGTransferPrebuilt<VectorType>::copy_from_mg
 #endif
 
       typedef std::vector<std::pair<types::global_dof_index, types::global_dof_index> >::const_iterator dof_pair_iterator;
+      const VectorType &src_level = src[level];
 
       // First copy all indices local to this process
-      for (dof_pair_iterator i= copy_indices[level].begin();
+      for (dof_pair_iterator i = copy_indices[level].begin();
            i != copy_indices[level].end(); ++i)
-        dst(i->first) = src[level](i->second);
+        dst(i->first) = src_level(i->second);
 
-      // Do the same for the indices where the level index is local,
-      // but the global index is not
-      for (dof_pair_iterator i= copy_indices_level_mine[level].begin();
+      // Do the same for the indices where the level index is local, but the
+      // global index is not
+      for (dof_pair_iterator i = copy_indices_level_mine[level].begin();
            i != copy_indices_level_mine[level].end(); ++i)
-        dst(i->first) = src[level](i->second);
+        dst(i->first) = src_level(i->second);
 
 #ifdef DEBUG_OUTPUT
       {
@@ -279,32 +271,29 @@ MGTransferPrebuilt<VectorType>::copy_from_mg
 template <typename VectorType>
 template <int dim, class OutVector, int spacedim>
 void
-MGTransferPrebuilt<VectorType>::copy_from_mg_add
+MGLevelGlobalTransfer<VectorType>::copy_from_mg_add
 (const DoFHandler<dim,spacedim>  &mg_dof_handler,
  OutVector                       &dst,
  const MGLevelObject<VectorType> &src) const
 {
-  // For non-DG: degrees of
-  // freedom in the refinement
-  // face may need special
-  // attention, since they belong
-  // to the coarse level, but
-  // have fine level basis
-  // functions
+  // For non-DG: degrees of freedom in the refinement face may need special
+  // attention, since they belong to the coarse level, but have fine level
+  // basis functions
   for (unsigned int level=0; level<mg_dof_handler.get_triangulation().n_global_levels(); ++level)
     {
       typedef std::vector<std::pair<types::global_dof_index, types::global_dof_index> >::const_iterator dof_pair_iterator;
+      const VectorType &src_level = src[level];
 
       // First add all indices local to this process
-      for (dof_pair_iterator i= copy_indices[level].begin();
+      for (dof_pair_iterator i = copy_indices[level].begin();
            i != copy_indices[level].end(); ++i)
-        dst(i->first) += src[level](i->second);
+        dst(i->first) += src_level(i->second);
 
-      // Do the same for the indices where the level index is local,
-      // but the global index is not
-      for (dof_pair_iterator i= copy_indices_level_mine[level].begin();
+      // Do the same for the indices where the level index is local, but the
+      // global index is not
+      for (dof_pair_iterator i = copy_indices_level_mine[level].begin();
            i != copy_indices_level_mine[level].end(); ++i)
-        dst(i->first) += src[level](i->second);
+        dst(i->first) += src_level(i->second);
     }
   dst.compress(VectorOperation::add);
 }
@@ -313,24 +302,192 @@ MGTransferPrebuilt<VectorType>::copy_from_mg_add
 
 template <typename VectorType>
 void
-MGTransferPrebuilt<VectorType>::
+MGLevelGlobalTransfer<VectorType>::
 set_component_to_block_map (const std::vector<unsigned int> &map)
 {
   component_to_block_map = map;
 }
 
-template <typename VectorType>
-std::size_t
-MGTransferPrebuilt<VectorType>::memory_consumption () const
+
+
+/* --------- MGLevelGlobalTransfer<parallel::distributed::Vector> ------- */
+
+template <typename Number>
+template <int dim, typename Number2, int spacedim>
+void
+MGLevelGlobalTransfer<parallel::distributed::Vector<Number> >::copy_to_mg
+(const DoFHandler<dim,spacedim>                        &mg_dof_handler,
+ MGLevelObject<parallel::distributed::Vector<Number> > &dst,
+ const parallel::distributed::Vector<Number2>          &src) const
 {
-  std::size_t result = sizeof(*this);
-  result += sizeof(unsigned int) * sizes.size();
+  reinit_vector(mg_dof_handler, component_to_block_map, dst);
+  bool first = true;
+
+  if (perform_plain_copy)
+    {
+      // In this case, we can simply copy the local range (in parallel by
+      // VectorView)
+      AssertDimension(dst[dst.max_level()].local_size(), src.local_size());
+      VectorView<Number>  dst_view (src.local_size(), dst[dst.max_level()].begin());
+      VectorView<Number2> src_view (src.local_size(), src.begin());
+      static_cast<Vector<Number> &>(dst_view) = static_cast<Vector<Number2> &>(src_view);
+      for (unsigned int level=mg_dof_handler.get_triangulation().n_global_levels()-1; level != 0; )
+        {
+          --level;
+          this->restrict_and_add (level+1, dst[level], dst[level+1]);
+        }
+      return;
+    }
+
+  // the ghosted vector should already have the correct local size (but
+  // different parallel layout)
+  AssertDimension(ghosted_global_vector.local_size(), src.local_size());
+
+  // copy the source vector to the temporary vector that we hold for the
+  // purpose of data exchange
+  ghosted_global_vector = src;
+  ghosted_global_vector.update_ghost_values();
+
+  for (unsigned int level=mg_dof_handler.get_triangulation().n_global_levels(); level != 0;)
+    {
+      --level;
+
+      typedef std::vector<std::pair<unsigned int, unsigned int> >::const_iterator dof_pair_iterator;
+      parallel::distributed::Vector<Number> &dst_level = dst[level];
+
+      // first copy local unknowns
+      for (dof_pair_iterator i = copy_indices[level].begin();
+           i != copy_indices[level].end(); ++i)
+        dst_level.local_element(i->second) = ghosted_global_vector.local_element(i->first);
+
+      // Do the same for the indices where the level index is local, but the
+      // global index is not
+      for (dof_pair_iterator i = copy_indices_level_mine[level].begin();
+           i != copy_indices_level_mine[level].end(); ++i)
+        dst_level.local_element(i->second) = ghosted_global_vector.local_element(i->first);
+
+      dst_level.compress(VectorOperation::insert);
+
+      if (!first)
+        {
+          this->restrict_and_add (level+1, dst_level, dst[level+1]);
+        }
+
+      first = false;
+    }
+}
+
 
-  for (unsigned int i=0; i<prolongation_matrices.size(); ++i)
-    result += prolongation_matrices[i]->memory_consumption()
-              + prolongation_sparsities[i]->memory_consumption();
 
-  return result;
+template <typename Number>
+template <int dim, typename Number2, int spacedim>
+void
+MGLevelGlobalTransfer<parallel::distributed::Vector<Number> >::copy_from_mg
+(const DoFHandler<dim,spacedim>                              &mg_dof_handler,
+ parallel::distributed::Vector<Number2>                      &dst,
+ const MGLevelObject<parallel::distributed::Vector<Number> > &src) const
+{
+  // For non-DG: degrees of freedom in the refinement face may need special
+  // attention, since they belong to the coarse level, but have fine level
+  // basis functions
+
+  if (perform_plain_copy)
+    {
+      // In this case, we can simply copy the local range (in parallel by
+      // VectorView). To avoid having stray data in ghost entries of the
+      // destination, make sure to clear them here.
+      dst.zero_out_ghosts();
+      AssertDimension(dst.local_size(), src[src.max_level()].local_size());
+      VectorView<Number2> dst_view (dst.local_size(), dst.begin());
+      VectorView<Number>  src_view (dst.local_size(), src[src.max_level()].begin());
+      static_cast<Vector<Number2> &>(dst_view) = static_cast<Vector<Number> &>(src_view);
+      return;
+    }
+
+  dst = 0;
+  for (unsigned int level=0; level<mg_dof_handler.get_triangulation().n_global_levels(); ++level)
+    {
+      typedef std::vector<std::pair<unsigned int, unsigned int> >::const_iterator dof_pair_iterator;
+
+      // the ghosted vector should already have the correct local size (but
+      // different parallel layout)
+      AssertDimension(ghosted_level_vector[level].local_size(),
+                      src[level].local_size());
+
+      // copy the source vector to the temporary vector that we hold for
+      // the purpose of data exchange
+      parallel::distributed::Vector<Number> &ghosted_vector =
+        ghosted_level_vector[level];
+      ghosted_vector = src[level];
+      ghosted_vector.update_ghost_values();
+
+      // first copy local unknowns
+      for (dof_pair_iterator i = copy_indices[level].begin();
+           i != copy_indices[level].end(); ++i)
+        dst.local_element(i->first) = ghosted_vector.local_element(i->second);
+
+      // Do the same for the indices where the level index is local, but the
+      // global index is not
+      for (dof_pair_iterator i = copy_indices_global_mine[level].begin();
+           i != copy_indices_global_mine[level].end(); ++i)
+        dst.local_element(i->first) = ghosted_vector.local_element(i->second);
+    }
+  dst.compress(VectorOperation::insert);
+}
+
+
+
+template <typename Number>
+template <int dim, typename Number2, int spacedim>
+void
+MGLevelGlobalTransfer<parallel::distributed::Vector<Number> >::copy_from_mg_add
+(const DoFHandler<dim,spacedim>                              &mg_dof_handler,
+ parallel::distributed::Vector<Number2>                      &dst,
+ const MGLevelObject<parallel::distributed::Vector<Number> > &src) const
+{
+  // For non-DG: degrees of freedom in the refinement face may need special
+  // attention, since they belong to the coarse level, but have fine level
+  // basis functions
+
+  dst.zero_out_ghosts();
+  for (unsigned int level=0; level<mg_dof_handler.get_triangulation().n_global_levels(); ++level)
+    {
+      typedef std::vector<std::pair<unsigned int, unsigned int> >::const_iterator dof_pair_iterator;
+
+      // the ghosted vector should already have the correct local size (but
+      // different parallel layout)
+      AssertDimension(ghosted_level_vector[level].local_size(),
+                      src[level].local_size());
+
+      // copy the source vector to the temporary vector that we hold for
+      // the purpose of data exchange
+      parallel::distributed::Vector<Number> &ghosted_vector =
+        ghosted_level_vector[level];
+      ghosted_vector = src[level];
+      ghosted_vector.update_ghost_values();
+
+      // first add local unknowns
+      for (dof_pair_iterator i = copy_indices[level].begin();
+           i != copy_indices[level].end(); ++i)
+        dst.local_element(i->first) += ghosted_vector.local_element(i->second);
+
+      // Do the same for the indices where the level index is local, but the
+      // global index is not
+      for (dof_pair_iterator i = copy_indices_global_mine[level].begin();
+           i != copy_indices_global_mine[level].end(); ++i)
+        dst.local_element(i->first) += ghosted_vector.local_element(i->second);
+    }
+  dst.compress(VectorOperation::add);
+}
+
+
+
+template <typename Number>
+void
+MGLevelGlobalTransfer<parallel::distributed::Vector<Number> >::
+set_component_to_block_map (const std::vector<unsigned int> &map)
+{
+  component_to_block_map = map;
 }
 
 
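The parallel::distributed::Vector code paths above all rely on the same
ghost-exchange pattern; a standalone sketch of that pattern (the index sets,
communicator, source vector, and local index below are assumptions for the
illustration, not taken from the commit):

  // Copy into a vector whose ghost entries are exactly the remote indices
  // we need to read, communicate them, then access everything by local
  // index instead of performing global index lookups.
  parallel::distributed::Vector<double> ghosted (locally_owned_set,
                                                 ghost_set,
                                                 mpi_communicator);
  ghosted = src;                  // copies the locally owned range
  ghosted.update_ghost_values();  // fetches the entries listed in ghost_set
  const double value = ghosted.local_element (some_local_index);
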
diff --git a/source/multigrid/CMakeLists.txt b/source/multigrid/CMakeLists.txt
index b86de17f673a649730c4c8cf5e7984d128f3a75b..ac23a6af19566f6c6a5593cc28e49c4045b366ff 100644
@@ -17,6 +17,7 @@ INCLUDE_DIRECTORIES(BEFORE ${CMAKE_CURRENT_BINARY_DIR})
 
 SET(_src
   mg_base.cc
+  mg_level_global_transfer.cc
   mg_tools.cc
   mg_transfer_block.cc
   mg_transfer_component.cc
@@ -26,6 +27,7 @@ SET(_src
 
 SET(_inst
   mg_base.inst.in
+  mg_level_global_transfer.inst.in
   mg_tools.inst.in
   mg_transfer_block.inst.in
   mg_transfer_component.inst.in
diff --git a/source/multigrid/mg_level_global_transfer.cc b/source/multigrid/mg_level_global_transfer.cc
new file mode 100644
index 0000000..8e5b6b5
--- /dev/null
+++ b/source/multigrid/mg_level_global_transfer.cc
@@ -0,0 +1,539 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2003 - 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/function.h>
+
+#include <deal.II/lac/vector.h>
+#include <deal.II/lac/block_vector.h>
+#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/trilinos_vector.h>
+#include <deal.II/lac/trilinos_block_vector.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/fe/fe.h>
+#include <deal.II/dofs/dof_accessor.h>
+#include <deal.II/multigrid/mg_tools.h>
+#include <deal.II/multigrid/mg_transfer.h>
+#include <deal.II/multigrid/mg_transfer.templates.h>
+
+#include <algorithm>
+
+DEAL_II_NAMESPACE_OPEN
+
+
+namespace
+{
+  /**
+   * Internal data structure used in the MPI communication in
+   * fill_and_communicate_copy_indices(). It represents an entry in the
+   * copy_indices* map that associates a level dof index with a global dof
+   * index.
+   */
+  struct DoFPair
+  {
+    unsigned int level;
+    types::global_dof_index global_dof_index;
+    types::global_dof_index level_dof_index;
+
+    DoFPair(const unsigned int level,
+            const types::global_dof_index global_dof_index,
+            const types::global_dof_index level_dof_index)
+      :
+      level(level), global_dof_index(global_dof_index), level_dof_index(level_dof_index)
+    {}
+
+    DoFPair()
+    {}
+  };
+
+
+
+  /**
+   * Internal function for filling the copy indices from global to level indices
+   */
+  template <int dim, int spacedim>
+  void fill_copy_indices(const DoFHandler<dim,spacedim> &mg_dof,
+                         const MGConstrainedDoFs        *mg_constrained_dofs,
+                         std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > > &copy_indices,
+                         std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > > &copy_indices_global_mine,
+                         std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > > &copy_indices_level_mine)
+  {
+    // Now we fill the variables copy_indices*, which are essentially maps
+    // from global to level dof indices for each level, stored as a
+    // std::vector of pairs. We need to split this map on each level
+    // depending on the ownership of the global and level dof, so that we
+    // later do not access non-local elements in copy_to/from_mg. We keep
+    // track in the bitfield dof_touched which global dofs have been
+    // processed already (on the current level), just as for multigrid
+    // running in serial.
+
+    // list of dof pairs whose global dof is locally owned but whose level
+    // dof is owned by another processor; they are later sorted into a map
+    // cpu_index -> vector of data and sent to the level dof owners, where
+    // they end up in copy_indices_level_mine
+    std::vector<DoFPair> send_data_temp;
+
+    const unsigned int n_levels = mg_dof.get_triangulation().n_global_levels();
+    copy_indices.resize(n_levels);
+    copy_indices_global_mine.resize(n_levels);
+    copy_indices_level_mine.resize(n_levels);
+    IndexSet globally_relevant;
+    DoFTools::extract_locally_relevant_dofs(mg_dof, globally_relevant);
+
+    const unsigned int dofs_per_cell = mg_dof.get_fe().dofs_per_cell;
+    std::vector<types::global_dof_index> global_dof_indices (dofs_per_cell);
+    std::vector<types::global_dof_index> level_dof_indices  (dofs_per_cell);
+
+    for (unsigned int level=0; level<n_levels; ++level)
+      {
+        std::vector<bool> dof_touched(globally_relevant.n_elements(), false);
+        copy_indices[level].clear();
+        copy_indices_level_mine[level].clear();
+        copy_indices_global_mine[level].clear();
+
+        typename DoFHandler<dim,spacedim>::active_cell_iterator
+        level_cell = mg_dof.begin_active(level);
+        const typename DoFHandler<dim,spacedim>::active_cell_iterator
+        level_end  = mg_dof.end_active(level);
+
+        for (; level_cell!=level_end; ++level_cell)
+          {
+            if (mg_dof.get_triangulation().locally_owned_subdomain()!=numbers::invalid_subdomain_id
+                &&  (level_cell->level_subdomain_id()==numbers::artificial_subdomain_id
+                     ||  level_cell->subdomain_id()==numbers::artificial_subdomain_id)
+               )
+              continue;
+
+            // get the dof numbers of this cell for the global and the level-wise
+            // numbering
+            level_cell->get_dof_indices (global_dof_indices);
+            level_cell->get_mg_dof_indices (level_dof_indices);
+
+            for (unsigned int i=0; i<dofs_per_cell; ++i)
+              {
+                // we need to ignore if the DoF is on a refinement edge (hanging node)
+                if (mg_constrained_dofs != 0
+                    && mg_constrained_dofs->at_refinement_edge(level, level_dof_indices[i]))
+                  continue;
+                types::global_dof_index global_idx = globally_relevant.index_within_set(global_dof_indices[i]);
+                //skip if we did this global dof already (on this or a coarser level)
+                if (dof_touched[global_idx])
+                  continue;
+                bool global_mine = mg_dof.locally_owned_dofs().is_element(global_dof_indices[i]);
+                bool level_mine = mg_dof.locally_owned_mg_dofs(level).is_element(level_dof_indices[i]);
+
+
+                if (global_mine && level_mine)
+                  {
+                    copy_indices[level].push_back(
+                      std::make_pair (global_dof_indices[i], level_dof_indices[i]));
+                  }
+                else if (global_mine)
+                  {
+                    copy_indices_global_mine[level].push_back(
+                      std::make_pair (global_dof_indices[i], level_dof_indices[i]));
+
+                    //send this to the owner of the level_dof:
+                    send_data_temp.push_back(DoFPair(level, global_dof_indices[i], level_dof_indices[i]));
+                  }
+                else
+                  {
+                    // somebody will send those to me
+                  }
+
+                dof_touched[global_idx] = true;
+              }
+          }
+      }
+
+    const dealii::parallel::distributed::Triangulation<dim,spacedim> *tria =
+      (dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim>*>
+       (&mg_dof.get_triangulation()));
+    AssertThrow(send_data_temp.size()==0 || tria!=NULL, ExcMessage("parallel Multigrid only works with a distributed Triangulation!"));
+
+#ifdef DEAL_II_WITH_MPI
+    if (tria)
+      {
+        // TODO: Searching the owner for every single DoF becomes quite
+        // inefficient. Please fix this, Timo.
+        std::set<unsigned int> neighbors = tria->level_ghost_owners();
+        std::map<int, std::vector<DoFPair> > send_data;
+
+        // * find owners of the level dofs and insert into send_data accordingly
+        for (typename std::vector<DoFPair>::iterator dofpair=send_data_temp.begin(); dofpair != send_data_temp.end(); ++dofpair)
+          {
+            for (std::set<unsigned int>::iterator it = neighbors.begin(); it != neighbors.end(); ++it)
+              {
+                if (mg_dof.locally_owned_mg_dofs_per_processor(dofpair->level)[*it].is_element(dofpair->level_dof_index))
+                  {
+                    send_data[*it].push_back(*dofpair);
+                    break;
+                  }
+              }
+          }
+
+        // * send
+        std::vector<MPI_Request> requests;
+        {
+          for (std::set<unsigned int>::iterator it = neighbors.begin(); it != neighbors.end(); ++it)
+            {
+              requests.push_back(MPI_Request());
+              unsigned int dest = *it;
+              std::vector<DoFPair> &data = send_data[dest];
+              if (data.size())
+                MPI_Isend(&data[0], data.size()*sizeof(data[0]), MPI_BYTE, dest, 71, tria->get_communicator(), &*requests.rbegin());
+              else
+                MPI_Isend(NULL, 0, MPI_BYTE, dest, 71, tria->get_communicator(), &*requests.rbegin());
+            }
+        }
+
+        // * receive
+        {
+          std::vector<DoFPair> receive_buffer;
+          for (unsigned int counter=0; counter<neighbors.size(); ++counter)
+            {
+              MPI_Status status;
+              int len;
+              MPI_Probe(MPI_ANY_SOURCE, 71, tria->get_communicator(), &status);
+              MPI_Get_count(&status, MPI_BYTE, &len);
+
+              if (len==0)
+                {
+                  int err = MPI_Recv(NULL, 0, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
+                                     tria->get_communicator(), &status);
+                  AssertThrow(err==MPI_SUCCESS, ExcInternalError());
+                  continue;
+                }
+
+              int count = len / sizeof(DoFPair);
+              Assert(static_cast<int>(count * sizeof(DoFPair)) == len, ExcInternalError());
+              receive_buffer.resize(count);
+
+              void *ptr = &receive_buffer[0];
+              int err = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
+                                 tria->get_communicator(), &status);
+              AssertThrow(err==MPI_SUCCESS, ExcInternalError());
+
+              for (unsigned int i=0; i<receive_buffer.size(); ++i)
+                {
+                  copy_indices_level_mine[receive_buffer[i].level].push_back(
+                    std::make_pair (receive_buffer[i].global_dof_index, receive_buffer[i].level_dof_index)
+                  );
+                }
+            }
+        }
+
+        // * wait for all MPI_Isend to complete
+        if (requests.size() > 0)
+          {
+            MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+            requests.clear();
+          }
+      }
+#endif
+
+    // Sort the indices. This will produce more reliable debug output for
+    // regression tests and likely won't hurt performance even in release
+    // mode.
+    std::less<std::pair<types::global_dof_index, types::global_dof_index> > compare;
+    for (unsigned int level=0; level<copy_indices.size(); ++level)
+      std::sort(copy_indices[level].begin(), copy_indices[level].end(), compare);
+    for (unsigned int level=0; level<copy_indices_level_mine.size(); ++level)
+      std::sort(copy_indices_level_mine[level].begin(), copy_indices_level_mine[level].end(), compare);
+    for (unsigned int level=0; level<copy_indices_global_mine.size(); ++level)
+      std::sort(copy_indices_global_mine[level].begin(), copy_indices_global_mine[level].end(), compare);
+  }
+}
+
+
+
+/* ------------------ MGLevelGlobalTransfer<VectorType> ----------------- */
+
+
+template <typename VectorType>
+template <int dim, int spacedim>
+void
+MGLevelGlobalTransfer<VectorType>::fill_and_communicate_copy_indices
+(const DoFHandler<dim,spacedim> &mg_dof)
+{
+  fill_copy_indices(mg_dof, mg_constrained_dofs, copy_indices,
+                    copy_indices_global_mine, copy_indices_level_mine);
+}
+
+
+
+template <typename VectorType>
+void
+MGLevelGlobalTransfer<VectorType>::clear()
+{
+  sizes.resize(0);
+  std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > >
+  empty0, empty1, empty2;
+  copy_indices.swap(empty0);
+  copy_indices_global_mine.swap(empty1);
+  copy_indices_level_mine.swap(empty2);
+  component_to_block_map.resize(0);
+  mg_constrained_dofs = 0;
+}
+
+
+
+template <typename VectorType>
+void
+MGLevelGlobalTransfer<VectorType>::print_indices (std::ostream &os) const
+{
+  for (unsigned int level = 0; level<copy_indices.size(); ++level)
+    {
+      for (unsigned int i=0; i<copy_indices[level].size(); ++i)
+        os << "copy_indices[" << level
+           << "]\t" << copy_indices[level][i].first << '\t' << copy_indices[level][i].second << std::endl;
+    }
+
+  for (unsigned int level = 0; level<copy_indices_level_mine.size(); ++level)
+    {
+      for (unsigned int i=0; i<copy_indices_level_mine[level].size(); ++i)
+        os << "copy_ifrom  [" << level
+           << "]\t" << copy_indices_level_mine[level][i].first << '\t' << copy_indices_level_mine[level][i].second << std::endl;
+    }
+  for (unsigned int level = 0; level<copy_indices_global_mine.size(); ++level)
+    {
+      for (unsigned int i=0; i<copy_indices_global_mine[level].size(); ++i)
+        os << "copy_ito    [" << level
+           << "]\t" << copy_indices_global_mine[level][i].first << '\t' << copy_indices_global_mine[level][i].second << std::endl;
+    }
+}
+
+
+
+template <typename VectorType>
+std::size_t
+MGLevelGlobalTransfer<VectorType>::memory_consumption () const
+{
+  std::size_t result = sizeof(*this);
+  result += MemoryConsumption::memory_consumption(sizes);
+  result += MemoryConsumption::memory_consumption(copy_indices);
+  result += MemoryConsumption::memory_consumption(copy_indices_global_mine);
+  result += MemoryConsumption::memory_consumption(copy_indices_level_mine);
+
+  return result;
+}
+
+
+
+/* --------- MGLevelGlobalTransfer<parallel::distributed::Vector> ------- */
+
+
+template <typename Number>
+template <int dim, int spacedim>
+void
+MGLevelGlobalTransfer<parallel::distributed::Vector<Number> >::fill_and_communicate_copy_indices
+(const DoFHandler<dim,spacedim> &mg_dof)
+{
+  // first go to the usual routine...
+  std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > >
+  copy_indices;
+  std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > >
+  copy_indices_global_mine;
+  std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > >
+  copy_indices_level_mine;
+
+  fill_copy_indices(mg_dof, mg_constrained_dofs, copy_indices,
+                    copy_indices_global_mine, copy_indices_level_mine);
+
+  // get all degrees of freedom that we need read access to in copy_to_mg
+  // and copy_from_mg, respectively. We fill an IndexSet once on each level
+  // (for the global_mine indices accessing remote level indices) and once
+  // globally (for the level_mine indices accessing remote global indices).
+
+  // the variables index_set and level_index_set are going to define the
+  // ghost indices of the respective vectors (due to construction, these are
+  // precisely the indices that we need)
+  const parallel::Triangulation<dim, spacedim> *ptria =
+    dynamic_cast<const parallel::Triangulation<dim, spacedim> *>
+    (&mg_dof.get_triangulation());
+  const MPI_Comm mpi_communicator = ptria != 0 ? ptria->get_communicator() :
+                                    MPI_COMM_SELF;
+
+  IndexSet index_set(mg_dof.locally_owned_dofs().size());
+  std::vector<types::global_dof_index> accessed_indices;
+  ghosted_level_vector.resize(0, mg_dof.get_triangulation().n_global_levels()-1);
+  std::vector<IndexSet> level_index_set(mg_dof.get_triangulation().n_global_levels());
+  for (unsigned int l=0; l<mg_dof.get_triangulation().n_global_levels(); ++l)
+    {
+      for (unsigned int i=0; i<copy_indices_level_mine[l].size(); ++i)
+        accessed_indices.push_back(copy_indices_level_mine[l][i].first);
+      std::vector<types::global_dof_index> accessed_level_indices;
+      for (unsigned int i=0; i<copy_indices_global_mine[l].size(); ++i)
+        accessed_level_indices.push_back(copy_indices_global_mine[l][i].second);
+      std::sort(accessed_level_indices.begin(), accessed_level_indices.end());
+      level_index_set[l].set_size(mg_dof.locally_owned_mg_dofs(l).size());
+      level_index_set[l].add_indices(accessed_level_indices.begin(),
+                                     accessed_level_indices.end());
+      level_index_set[l].compress();
+      ghosted_level_vector[l].reinit(mg_dof.locally_owned_mg_dofs(l),
+                                     level_index_set[l],
+                                     mpi_communicator);
+    }
+  std::sort(accessed_indices.begin(), accessed_indices.end());
+  index_set.add_indices(accessed_indices.begin(), accessed_indices.end());
+  index_set.compress();
+  ghosted_global_vector.reinit(mg_dof.locally_owned_dofs(),
+                               index_set,
+                               mpi_communicator);
+
+  // localize the copy indices for faster access. Since all access will be
+  // through the ghosted vectors, we can use this (much faster) option
+  this->copy_indices.resize(mg_dof.get_triangulation().n_global_levels());
+  this->copy_indices_level_mine.resize(mg_dof.get_triangulation().n_global_levels());
+  this->copy_indices_global_mine.resize(mg_dof.get_triangulation().n_global_levels());
+  for (unsigned int level=0; level<mg_dof.get_triangulation().n_global_levels(); ++level)
+    {
+      const Utilities::MPI::Partitioner &global_partitioner =
+        *ghosted_global_vector.get_partitioner();
+      const Utilities::MPI::Partitioner &level_partitioner =
+        *ghosted_level_vector[level].get_partitioner();
+      // owned-owned case: the locally owned indices are going to control
+      // the local index
+      this->copy_indices[level].resize(copy_indices[level].size());
+      for (unsigned int i=0; i<copy_indices[level].size(); ++i)
+        this->copy_indices[level][i] =
+          std::pair<unsigned int,unsigned int>
+          (global_partitioner.global_to_local(copy_indices[level][i].first),
+           level_partitioner.global_to_local(copy_indices[level][i].second));
+
+      // remote-owned case: the locally owned indices for the level and the
+      // ghost dofs for the global indices set the local index
+      this->copy_indices_level_mine[level].
+      resize(copy_indices_level_mine[level].size());
+      for (unsigned int i=0; i<copy_indices_level_mine[level].size(); ++i)
+        this->copy_indices_level_mine[level][i] =
+          std::pair<unsigned int,unsigned int>
+          (global_partitioner.global_to_local(copy_indices_level_mine[level][i].first),
+           level_partitioner.global_to_local(copy_indices_level_mine[level][i].second));
+
+      // owned-remote case: the locally owned indices for the global dofs
+      // and the ghost dofs for the level indices set the local index
+      this->copy_indices_global_mine[level].
+      resize(copy_indices_global_mine[level].size());
+      for (unsigned int i=0; i<copy_indices_global_mine[level].size(); ++i)
+        this->copy_indices_global_mine[level][i] =
+          std::pair<unsigned int,unsigned int>
+          (global_partitioner.global_to_local(copy_indices_global_mine[level][i].first),
+           level_partitioner.global_to_local(copy_indices_global_mine[level][i].second));
+    }
+
+  perform_plain_copy = this->copy_indices.back().size()
+                       == mg_dof.locally_owned_dofs().n_elements();
+  if (perform_plain_copy)
+    {
+      AssertDimension(this->copy_indices_global_mine.back().size(), 0);
+      AssertDimension(this->copy_indices_level_mine.back().size(), 0);
+
+      // check whether there is a renumbering of degrees of freedom on
+      // either the finest level or the global dofs, which means that we
+      // cannot apply a plain copy
+      for (unsigned int i=0; i<this->copy_indices.back().size(); ++i)
+        if (this->copy_indices.back()[i].first !=
+            this->copy_indices.back()[i].second)
+          {
+            perform_plain_copy = false;
+            break;
+          }
+    }
+  perform_plain_copy =
+    Utilities::MPI::min(static_cast<int>(perform_plain_copy),
+                        mpi_communicator);
+
+  // if we do a plain copy, no need to hold additional ghosted vectors
+  if (perform_plain_copy)
+    {
+      ghosted_global_vector.reinit(0);
+      ghosted_level_vector.resize(0, 0);
+    }
+}
+
+
+
+template <typename Number>
+void
+MGLevelGlobalTransfer<parallel::distributed::Vector<Number> >::clear()
+{
+  sizes.resize(0);
+  std::vector<std::vector<std::pair<unsigned int, unsigned int> > >
+  empty0, empty1, empty2;
+  copy_indices.swap(empty0);
+  copy_indices_global_mine.swap(empty1);
+  copy_indices_level_mine.swap(empty2);
+  component_to_block_map.resize(0);
+  mg_constrained_dofs = 0;
+  ghosted_global_vector.reinit(0);
+  ghosted_level_vector.resize(0, 0);
+}
+
+
+
+template <typename Number>
+void
+MGLevelGlobalTransfer<parallel::distributed::Vector<Number> >::print_indices (std::ostream &os) const
+{
+  for (unsigned int level = 0; level<copy_indices.size(); ++level)
+    {
+      for (unsigned int i=0; i<copy_indices[level].size(); ++i)
+        os << "copy_indices[" << level
+           << "]\t" << copy_indices[level][i].first << '\t' << copy_indices[level][i].second << std::endl;
+    }
+
+  for (unsigned int level = 0; level<copy_indices_level_mine.size(); ++level)
+    {
+      for (unsigned int i=0; i<copy_indices_level_mine[level].size(); ++i)
+        os << "copy_ifrom  [" << level
+           << "]\t" << copy_indices_level_mine[level][i].first << '\t' << copy_indices_level_mine[level][i].second << std::endl;
+    }
+  for (unsigned int level = 0; level<copy_indices_global_mine.size(); ++level)
+    {
+      for (unsigned int i=0; i<copy_indices_global_mine[level].size(); ++i)
+        os << "copy_ito    [" << level
+           << "]\t" << copy_indices_global_mine[level][i].first << '\t' << copy_indices_global_mine[level][i].second << std::endl;
+    }
+}
+
+
+
+template <typename Number>
+std::size_t
+MGLevelGlobalTransfer<parallel::distributed::Vector<Number> >::memory_consumption () const
+{
+  std::size_t result = sizeof(*this);
+  result += MemoryConsumption::memory_consumption(sizes);
+  result += MemoryConsumption::memory_consumption(copy_indices);
+  result += MemoryConsumption::memory_consumption(copy_indices_global_mine);
+  result += MemoryConsumption::memory_consumption(copy_indices_level_mine);
+  result += ghosted_global_vector.memory_consumption();
+  for (unsigned int i=ghosted_level_vector.min_level();
+       i<=ghosted_level_vector.max_level(); ++i)
+    result += ghosted_level_vector[i].memory_consumption();
+
+  return result;
+}
+
+
+
+// explicit instantiation
+#include "mg_level_global_transfer.inst"
+
+
+DEAL_II_NAMESPACE_CLOSE
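
The perform_plain_copy decision in fill_and_communicate_copy_indices() above
is only valid if all ranks agree; the collective part boils down to the
following sketch (the rank-local check is an assumed placeholder):

  // A rank-local observation (the finest-level copy indices form an
  // identity map) enables the optimization only if it holds on every rank,
  // hence the reduction with Utilities::MPI::min over the communicator.
  bool plain_copy_locally = copy_indices_form_identity_map();  // assumed helper
  const bool plain_copy_everywhere =
    Utilities::MPI::min (static_cast<int>(plain_copy_locally),
                         mpi_communicator) == 1;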
diff --git a/source/multigrid/mg_level_global_transfer.inst.in b/source/multigrid/mg_level_global_transfer.inst.in
new file mode 100644
index 0000000..f2f57a2
--- /dev/null
+++ b/source/multigrid/mg_level_global_transfer.inst.in
@@ -0,0 +1,70 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 1998 - 2014 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+for (V1 : VECTORS_WITH_MATRIX)
+  {
+    template class MGLevelGlobalTransfer< V1 >;
+  }
+
+for (deal_II_dimension : DIMENSIONS; V1 : VECTORS_WITH_MATRIX)
+  {
+    template
+      void MGLevelGlobalTransfer< V1 >::fill_and_communicate_copy_indices<deal_II_dimension,deal_II_dimension>(
+        const DoFHandler<deal_II_dimension,deal_II_dimension> &mg_dof);
+  }
+
+for (deal_II_dimension : DIMENSIONS; V1,V2 : DEAL_II_VEC_TEMPLATES; S1, S2 : REAL_SCALARS)
+  {
+    template void
+      MGLevelGlobalTransfer<V1<S1> >::copy_to_mg (
+        const DoFHandler<deal_II_dimension>&, MGLevelObject<V1<S1> >&, const V2<S2>&) const;
+    template void
+      MGLevelGlobalTransfer<V1<S1> >::copy_from_mg (const DoFHandler<deal_II_dimension>&, V2<S2>&,
+                                                    const MGLevelObject<V1<S1> >&) const;
+    template void
+      MGLevelGlobalTransfer<V1<S1> >::copy_from_mg_add (const DoFHandler<deal_II_dimension>&, V2<S2>&,
+                                                        const MGLevelObject<V1<S1> >&) const;
+  }
+
+for (deal_II_dimension : DIMENSIONS; S2 : REAL_SCALARS)
+  {
+    template void
+      MGLevelGlobalTransfer<parallel::distributed::Vector<double> >::copy_to_mg (
+        const DoFHandler<deal_II_dimension>&, MGLevelObject<parallel::distributed::Vector<double> >&, const parallel::distributed::Vector<S2>&) const;
+    template void
+      MGLevelGlobalTransfer<parallel::distributed::Vector<double> >::copy_from_mg (const DoFHandler<deal_II_dimension>&, parallel::distributed::Vector<S2>&,
+        const MGLevelObject<parallel::distributed::Vector<double> >&) const;
+    template void
+      MGLevelGlobalTransfer<parallel::distributed::Vector<double> >::copy_from_mg_add (const DoFHandler<deal_II_dimension>&, parallel::distributed::Vector<S2>&,
+        const MGLevelObject<parallel::distributed::Vector<double> >&) const;
+  }
+
+for (deal_II_dimension : DIMENSIONS)
+  {
+#ifdef DEAL_II_WITH_TRILINOS
+
+    template void
+      MGLevelGlobalTransfer<TrilinosWrappers::MPI::Vector>::copy_to_mg (
+        const DoFHandler<deal_II_dimension>&, MGLevelObject<TrilinosWrappers::MPI::Vector>&, const TrilinosWrappers::MPI::Vector&) const;
+    template void
+      MGLevelGlobalTransfer<TrilinosWrappers::MPI::Vector>::copy_from_mg (const DoFHandler<deal_II_dimension>&, TrilinosWrappers::MPI::Vector&,
+        const MGLevelObject<TrilinosWrappers::MPI::Vector>&) const;
+    template void
+      MGLevelGlobalTransfer<TrilinosWrappers::MPI::Vector>::copy_from_mg_add (const DoFHandler<deal_II_dimension>&, TrilinosWrappers::MPI::Vector&,
+        const MGLevelObject<TrilinosWrappers::MPI::Vector>&) const;
+#endif
+  }
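
These loops are not C++ but input to deal.II's expand_instantiations tool, which at build time substitutes every element of the lists (DIMENSIONS, VECTORS_WITH_MATRIX, ...) for the loop variables and writes explicit instantiations into mg_level_global_transfer.inst, included at the bottom of the .cc file above. Illustratively, assuming Vector<double> is among VECTORS_WITH_MATRIX and 2 among DIMENSIONS, the first two loops would expand to something like:

    template class MGLevelGlobalTransfer< Vector<double> >;
    template
      void MGLevelGlobalTransfer< Vector<double> >::fill_and_communicate_copy_indices<2,2>(
        const DoFHandler<2,2> &mg_dof);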
index f7f7c08d008a5939bae06627e29fc87ec88dede0..0d2a5aa1dbc62b885612971ca7854d1325a54a95 100644 (file)
@@ -35,7 +35,6 @@
 #include <deal.II/dofs/dof_accessor.h>
 #include <deal.II/multigrid/mg_tools.h>
 #include <deal.II/multigrid/mg_transfer.h>
-#include <deal.II/multigrid/mg_transfer.templates.h>
 
 #include <algorithm>
 
@@ -47,12 +46,15 @@ MGTransferPrebuilt<VectorType>::MGTransferPrebuilt ()
 {}
 
 
+
 template<typename VectorType>
 MGTransferPrebuilt<VectorType>::MGTransferPrebuilt (const ConstraintMatrix &c, const MGConstrainedDoFs &mg_c)
   :
-  constraints(&c),
-  mg_constrained_dofs(&mg_c)
-{}
+  constraints(&c)
+{
+  this->mg_constrained_dofs = &mg_c;
+}
+
 
 
 template <typename VectorType>
@@ -60,31 +62,29 @@ MGTransferPrebuilt<VectorType>::~MGTransferPrebuilt ()
 {}
 
 
+
 template <typename VectorType>
 void MGTransferPrebuilt<VectorType>::initialize_constraints
 (const ConstraintMatrix &c, const MGConstrainedDoFs &mg_c)
 {
   constraints = &c;
-  mg_constrained_dofs = &mg_c;
+  this->mg_constrained_dofs = &mg_c;
 }
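
The pattern behind these hunks: mg_constrained_dofs moved into the dependent base class MGLevelGlobalTransfer<VectorType>, and two-phase name lookup in C++ templates does not search dependent bases for unqualified names, so every access now needs the this-> qualification. A minimal illustration with hypothetical types:

    template <typename T>
    struct Base
    {
      int member;
    };

    template <typename T>
    struct Derived : Base<T>
    {
      void set ()
      {
        // member = 0;     // error: name not found at template definition time
        this->member = 0;  // OK: lookup is deferred to instantiation
      }
    };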
 
 
+
 template <typename VectorType>
 void MGTransferPrebuilt<VectorType>::clear ()
 {
-  sizes.resize(0);
+  MGLevelGlobalTransfer<VectorType>::clear();
   prolongation_matrices.resize(0);
   prolongation_sparsities.resize(0);
-  copy_indices.resize(0);
-  copy_indices_global_mine.resize(0);
-  copy_indices_level_mine.resize(0);
-  component_to_block_map.resize(0);
   interface_dofs.resize(0);
   constraints = 0;
-  mg_constrained_dofs = 0;
 }
 
 
+
 template <typename VectorType>
 void MGTransferPrebuilt<VectorType>::prolongate (const unsigned int to_level,
                                                  VectorType        &dst,
@@ -97,6 +97,7 @@ void MGTransferPrebuilt<VectorType>::prolongate (const unsigned int to_level,
 }
 
 
+
 template <typename VectorType>
 void MGTransferPrebuilt<VectorType>::restrict_and_add (const unsigned int from_level,
                                                        VectorType        &dst,
@@ -110,6 +111,7 @@ void MGTransferPrebuilt<VectorType>::restrict_and_add (const unsigned int from_l
 }
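
For orientation (the bodies are unchanged by this commit, so this is a sketch from context, not part of the diff): both operations amount to a single product with the prebuilt matrix of the coarser level,

    prolongation_matrices[to_level-1]->vmult (dst, src);          // prolongate
    prolongation_matrices[from_level-1]->Tvmult_add (dst, src);   // restrict_and_add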
 
 
+
 template <typename VectorType>
 template <int dim, int spacedim>
 void MGTransferPrebuilt<VectorType>::build_matrices
@@ -118,9 +120,9 @@ void MGTransferPrebuilt<VectorType>::build_matrices
   const unsigned int n_levels      = mg_dof.get_triangulation().n_global_levels();
   const unsigned int dofs_per_cell = mg_dof.get_fe().dofs_per_cell;
 
-  sizes.resize(n_levels);
+  this->sizes.resize(n_levels);
   for (unsigned int l=0; l<n_levels; ++l)
-    sizes[l] = mg_dof.n_dofs(l);
+    this->sizes[l] = mg_dof.n_dofs(l);
 
   // reset the size of the array of
   // matrices. call resize(0) first,
@@ -167,8 +169,8 @@ void MGTransferPrebuilt<VectorType>::build_matrices
       IndexSet level_p1_relevant_dofs;
       DoFTools::extract_locally_relevant_level_dofs(mg_dof, level+1,
                                                     level_p1_relevant_dofs);
-      DynamicSparsityPattern dsp (sizes[level+1],
-                                  sizes[level],
+      DynamicSparsityPattern dsp (this->sizes[level+1],
+                                  this->sizes[level],
                                   level_p1_relevant_dofs);
       for (typename DoFHandler<dim,spacedim>::cell_iterator cell=mg_dof.begin(level);
            cell != mg_dof.end(level); ++cell)
@@ -235,9 +237,10 @@ void MGTransferPrebuilt<VectorType>::build_matrices
                   = mg_dof.get_fe().get_prolongation_matrix (child,
                                                              cell->refinement_case());
 
-                if (mg_constrained_dofs != 0 && mg_constrained_dofs->have_boundary_indices())
+                if (this->mg_constrained_dofs != 0 &&
+                    this->mg_constrained_dofs->have_boundary_indices())
                   for (unsigned int j=0; j<dofs_per_cell; ++j)
-                    if (mg_constrained_dofs->is_boundary_index(level, dof_indices_parent[j]))
+                    if (this->mg_constrained_dofs->is_boundary_index(level, dof_indices_parent[j]))
                       for (unsigned int i=0; i<dofs_per_cell; ++i)
                         prolongation(i,j) = 0.;
 
@@ -255,221 +258,10 @@ void MGTransferPrebuilt<VectorType>::build_matrices
       prolongation_matrices[level]->compress(VectorOperation::insert);
     }
 
-  fill_and_communicate_copy_indices(mg_dof);
-}
-
-namespace
-{
-  /**
-   * Internal data structure used in the MPI communication in fill_and_communicate_copy_indices().
-   * It represents one entry of the copy_indices* maps, associating a level dof index with a global dof index.
-   */
-  struct DoFPair
-  {
-    unsigned int level;
-    types::global_dof_index global_dof_index;
-    types::global_dof_index level_dof_index;
-
-    DoFPair(const unsigned int level,
-            const types::global_dof_index global_dof_index,
-            const types::global_dof_index level_dof_index)
-      :
-      level(level), global_dof_index(global_dof_index), level_dof_index(level_dof_index)
-    {}
-
-    DoFPair()
-    {}
-  };
+  this->fill_and_communicate_copy_indices(mg_dof);
 }
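
The DoFPair struct deleted above (it moved into mg_level_global_transfer.cc together with the function below) is deliberately a plain aggregate of integers: the communication ships std::vector<DoFPair> buffers as MPI_BYTE, which is only safe for trivially copyable types. Schematically, with comm, dest and request as placeholders:

    // sending a vector of trivially copyable structs as raw bytes;
    // assumes data is non-empty (the code below sends a zero-length
    // message otherwise)
    std::vector<DoFPair> data;  // filled with the pairs destined for rank 'dest'
    MPI_Request request;
    MPI_Isend (&data[0], data.size()*sizeof(DoFPair), MPI_BYTE,
               dest, 71, comm, &request);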
 
-template <typename VectorType>
-template <int dim, int spacedim>
-void
-MGTransferPrebuilt<VectorType>::fill_and_communicate_copy_indices
-(const DoFHandler<dim,spacedim> &mg_dof)
-{
-  // Now fill the variables copy_indices*, which are essentially maps from
-  // global to level dof indices for each level, stored as a std::vector of
-  // pairs. We need to split this map on each level depending on the
-  // ownership of the global and level dof, so that we never access
-  // non-local elements in copy_to/from_mg.
-  // The bitfield dof_touched keeps track of which global dofs have already
-  // been processed (on the current level), just as in the serial
-  // multigrid case.
-
-  // list of dof pairs that have to be sent to other processors; on the
-  // receiving side they end up in copy_indices_level_mine
-  std::vector<DoFPair> send_data_temp;
-
-  const unsigned int n_levels = mg_dof.get_triangulation().n_global_levels();
-  copy_indices.resize(n_levels);
-  copy_indices_global_mine.resize(n_levels);
-  copy_indices_level_mine.resize(n_levels);
-  IndexSet globally_relevant;
-  DoFTools::extract_locally_relevant_dofs(mg_dof, globally_relevant);
-
-  const unsigned int dofs_per_cell = mg_dof.get_fe().dofs_per_cell;
-  std::vector<types::global_dof_index> global_dof_indices (dofs_per_cell);
-  std::vector<types::global_dof_index> level_dof_indices  (dofs_per_cell);
-
-  for (unsigned int level=0; level<n_levels; ++level)
-    {
-      std::vector<bool> dof_touched(globally_relevant.n_elements(), false);
-      copy_indices[level].clear();
-      copy_indices_level_mine[level].clear();
-      copy_indices_global_mine[level].clear();
-
-      typename DoFHandler<dim,spacedim>::active_cell_iterator
-      level_cell = mg_dof.begin_active(level);
-      const typename DoFHandler<dim,spacedim>::active_cell_iterator
-      level_end  = mg_dof.end_active(level);
-
-      for (; level_cell!=level_end; ++level_cell)
-        {
-          if (mg_dof.get_triangulation().locally_owned_subdomain()!=numbers::invalid_subdomain_id
-              &&  (level_cell->level_subdomain_id()==numbers::artificial_subdomain_id
-                   ||  level_cell->subdomain_id()==numbers::artificial_subdomain_id)
-             )
-            continue;
-
-          // get the dof numbers of this cell for the global and the level-wise
-          // numbering
-          level_cell->get_dof_indices (global_dof_indices);
-          level_cell->get_mg_dof_indices (level_dof_indices);
-
-          for (unsigned int i=0; i<dofs_per_cell; ++i)
-            {
-              // we need to ignore if the DoF is on a refinement edge (hanging node)
-              if (mg_constrained_dofs != 0
-                  && mg_constrained_dofs->at_refinement_edge(level, level_dof_indices[i]))
-                continue;
-              types::global_dof_index global_idx = globally_relevant.index_within_set(global_dof_indices[i]);
-              // skip if we did this global dof already (on this or a coarser level)
-              if (dof_touched[global_idx])
-                continue;
-              bool global_mine = mg_dof.locally_owned_dofs().is_element(global_dof_indices[i]);
-              bool level_mine = mg_dof.locally_owned_mg_dofs(level).is_element(level_dof_indices[i]);
-
-
-              if (global_mine && level_mine)
-                {
-                  copy_indices[level].push_back(
-                    std::make_pair (global_dof_indices[i], level_dof_indices[i]));
-                }
-              else if (global_mine)
-                {
-                  copy_indices_global_mine[level].push_back(
-                    std::make_pair (global_dof_indices[i], level_dof_indices[i]));
-
-                  // send this to the owner of the level_dof:
-                  send_data_temp.push_back(DoFPair(level, global_dof_indices[i], level_dof_indices[i]));
-                }
-              else
-                {
-                  // somebody will send those to me
-                }
-
-              dof_touched[global_idx] = true;
-            }
-        }
-    }
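
The classification above sorts every dof into one of three buckets, depending on who owns its global index and its level index:

    global_mine && level_mine    -> copy_indices             (purely local copy)
    global_mine && !level_mine   -> copy_indices_global_mine (also sent to the level owner)
    !global_mine && level_mine   -> copy_indices_level_mine  (filled from received data below)
    neither                      -> handled by some other processor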
-
-  const dealii::parallel::distributed::Triangulation<dim,spacedim> *tria =
-    (dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim>*>
-     (&mg_dof.get_triangulation()));
-  AssertThrow(send_data_temp.size()==0 || tria!=NULL, ExcMessage("parallel Multigrid only works with a distributed Triangulation!"));
-
-#ifdef DEAL_II_WITH_MPI
-  if (tria)
-    {
-      // TODO: Searching the owner for every single DoF becomes quite
-      // inefficient. Please fix this, Timo.
-      std::set<unsigned int> neighbors = tria->level_ghost_owners();
-      std::map<int, std::vector<DoFPair> > send_data;
-
-      // * find owners of the level dofs and insert into send_data accordingly
-      for (typename std::vector<DoFPair>::iterator dofpair=send_data_temp.begin(); dofpair != send_data_temp.end(); ++dofpair)
-        {
-          for (std::set<unsigned int>::iterator it = neighbors.begin(); it != neighbors.end(); ++it)
-            {
-              if (mg_dof.locally_owned_mg_dofs_per_processor(dofpair->level)[*it].is_element(dofpair->level_dof_index))
-                {
-                  send_data[*it].push_back(*dofpair);
-                  break;
-                }
-            }
-        }
-
-      // * send
-      std::vector<MPI_Request> requests;
-      {
-        for (std::set<unsigned int>::iterator it = neighbors.begin(); it != neighbors.end(); ++it)
-          {
-            requests.push_back(MPI_Request());
-            unsigned int dest = *it;
-            std::vector<DoFPair> &data = send_data[dest];
-            if (data.size())
-              MPI_Isend(&data[0], data.size()*sizeof(data[0]), MPI_BYTE, dest, 71, tria->get_communicator(), &*requests.rbegin());
-            else
-              MPI_Isend(NULL, 0, MPI_BYTE, dest, 71, tria->get_communicator(), &*requests.rbegin());
-          }
-      }
-
-      // * receive
-      {
-        std::vector<DoFPair> receive_buffer;
-        for (unsigned int counter=0; counter<neighbors.size(); ++counter)
-          {
-            MPI_Status status;
-            int len;
-            MPI_Probe(MPI_ANY_SOURCE, 71, tria->get_communicator(), &status);
-            MPI_Get_count(&status, MPI_BYTE, &len);
-
-            if (len==0)
-              {
-                int err = MPI_Recv(NULL, 0, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
-                                   tria->get_communicator(), &status);
-                AssertThrow(err==MPI_SUCCESS, ExcInternalError());
-                continue;
-              }
-
-            int count = len / sizeof(DoFPair);
-            Assert(static_cast<int>(count * sizeof(DoFPair)) == len, ExcInternalError());
-            receive_buffer.resize(count);
 
-            void *ptr = &receive_buffer[0];
-            int err = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
-                               tria->get_communicator(), &status);
-            AssertThrow(err==MPI_SUCCESS, ExcInternalError());
-
-            for (unsigned int i=0; i<receive_buffer.size(); ++i)
-              {
-                copy_indices_level_mine[receive_buffer[i].level].push_back(
-                  std::make_pair (receive_buffer[i].global_dof_index, receive_buffer[i].level_dof_index)
-                );
-              }
-          }
-      }
-
-      // * wait for all MPI_Isend to complete
-      if (requests.size() > 0)
-        {
-          MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
-          requests.clear();
-        }
-    }
-#endif
-
-  // Sort the indices. This will produce more reliable debug output for regression tests
-  // and likely won't hurt performance even in release mode.
-  std::less<std::pair<types::global_dof_index, types::global_dof_index> > compare;
-  for (unsigned int level=0; level<copy_indices.size(); ++level)
-    std::sort(copy_indices[level].begin(), copy_indices[level].end(), compare);
-  for (unsigned int level=0; level<copy_indices_level_mine.size(); ++level)
-    std::sort(copy_indices_level_mine[level].begin(), copy_indices_level_mine[level].end(), compare);
-  for (unsigned int level=0; level<copy_indices_global_mine.size(); ++level)
-    std::sort(copy_indices_global_mine[level].begin(), copy_indices_global_mine[level].end(), compare);
-}
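
The receive loop above uses the classic probe-then-receive pattern, so the buffer can be sized before MPI_Recv is posted. Distilled to its core (error checks and the zero-length case elided; comm stands for tria->get_communicator()):

    MPI_Status status;
    int len;
    MPI_Probe (MPI_ANY_SOURCE, 71, comm, &status);  // block until a tag-71 message arrives
    MPI_Get_count (&status, MPI_BYTE, &len);        // its size in bytes
    std::vector<DoFPair> buffer (len/sizeof(DoFPair));
    MPI_Recv (&buffer[0], len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
              comm, &status);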
 
 template <typename VectorType>
 void
@@ -483,29 +275,18 @@ MGTransferPrebuilt<VectorType>::print_matrices (std::ostream &os) const
     }
 }
 
+
+
 template <typename VectorType>
-void
-MGTransferPrebuilt<VectorType>::print_indices (std::ostream &os) const
+std::size_t
+MGTransferPrebuilt<VectorType>::memory_consumption () const
 {
-  for (unsigned int level = 0; level<copy_indices.size(); ++level)
-    {
-      for (unsigned int i=0; i<copy_indices[level].size(); ++i)
-        os << "copy_indices[" << level
-           << "]\t" << copy_indices[level][i].first << '\t' << copy_indices[level][i].second << std::endl;
-    }
+  std::size_t result = MGLevelGlobalTransfer<VectorType>::memory_consumption();
+  for (unsigned int i=0; i<prolongation_matrices.size(); ++i)
+    result += prolongation_matrices[i]->memory_consumption()
+              + prolongation_sparsities[i]->memory_consumption();
 
-  for (unsigned int level = 0; level<copy_indices_level_mine.size(); ++level)
-    {
-      for (unsigned int i=0; i<copy_indices_level_mine[level].size(); ++i)
-        os << "copy_ifrom  [" << level
-           << "]\t" << copy_indices_level_mine[level][i].first << '\t' << copy_indices_level_mine[level][i].second << std::endl;
-    }
-  for (unsigned int level = 0; level<copy_indices_global_mine.size(); ++level)
-    {
-      for (unsigned int i=0; i<copy_indices_global_mine[level].size(); ++i)
-        os << "copy_ito    [" << level
-           << "]\t" << copy_indices_global_mine[level][i].first << '\t' << copy_indices_global_mine[level][i].second << std::endl;
-    }
+  return result;
 }
 
 
index 555809d41068c6bbd224974af19c718eb6362482..ad59adc12e429a1d78c709f18547422c4213c64e 100644 (file)
@@ -24,47 +24,5 @@ for (deal_II_dimension : DIMENSIONS; V1 : VECTORS_WITH_MATRIX)
   {
     template
       void MGTransferPrebuilt< V1 >::build_matrices<deal_II_dimension>(
-       const DoFHandler<deal_II_dimension> &mg_dof);
-  }
-
-for (deal_II_dimension : DIMENSIONS; V1,V2 : DEAL_II_VEC_TEMPLATES; S1, S2 : REAL_SCALARS)
-  {
-    template void
-      MGTransferPrebuilt<V1<S1> >::copy_to_mg (
-       const DoFHandler<deal_II_dimension>&, MGLevelObject<V1<S1> >&, const V2<S2>&) const;
-    template void
-      MGTransferPrebuilt<V1<S1> >::copy_from_mg (const DoFHandler<deal_II_dimension>&, V2<S2>&,
-                                            const MGLevelObject<V1<S1> >&) const;
-    template void
-      MGTransferPrebuilt<V1<S1> >::copy_from_mg_add (const DoFHandler<deal_II_dimension>&, V2<S2>&,
-                                                const MGLevelObject<V1<S1> >&) const;
-  }
-
-for (deal_II_dimension : DIMENSIONS; S2 : REAL_SCALARS)
-  {
-    template void
-      MGTransferPrebuilt<parallel::distributed::Vector<double> >::copy_to_mg (
-       const DoFHandler<deal_II_dimension>&, MGLevelObject<parallel::distributed::Vector<double> >&, const parallel::distributed::Vector<S2>&) const;
-    template void
-      MGTransferPrebuilt<parallel::distributed::Vector<double> >::copy_from_mg (const DoFHandler<deal_II_dimension>&, parallel::distributed::Vector<S2>&,
-                                            const MGLevelObject<parallel::distributed::Vector<double> >&) const;
-    template void
-      MGTransferPrebuilt<parallel::distributed::Vector<double> >::copy_from_mg_add (const DoFHandler<deal_II_dimension>&, parallel::distributed::Vector<S2>&,
-                                                const MGLevelObject<parallel::distributed::Vector<double> >&) const;
-  }
-
-for(deal_II_dimension : DIMENSIONS)
-  {
-#ifdef DEAL_II_WITH_TRILINOS
-
-    template void
-      MGTransferPrebuilt<TrilinosWrappers::MPI::Vector>::copy_to_mg (
-       const DoFHandler<deal_II_dimension>&, MGLevelObject<TrilinosWrappers::MPI::Vector>&, const TrilinosWrappers::MPI::Vector&) const;
-    template void
-      MGTransferPrebuilt<TrilinosWrappers::MPI::Vector>::copy_from_mg (const DoFHandler<deal_II_dimension>&, TrilinosWrappers::MPI::Vector&,
-                                            const MGLevelObject<TrilinosWrappers::MPI::Vector>&) const;
-    template void
-      MGTransferPrebuilt<TrilinosWrappers::MPI::Vector>::copy_from_mg_add (const DoFHandler<deal_II_dimension>&, TrilinosWrappers::MPI::Vector&,
-                                                const MGLevelObject<TrilinosWrappers::MPI::Vector>&) const;
-#endif
+        const DoFHandler<deal_II_dimension> &mg_dof);
   }
