From fa878b2a671b47753c97e0ad3ca50d18d076b80f Mon Sep 17 00:00:00 2001 From: Martin Kronbichler Date: Wed, 23 Dec 2015 23:08:59 +0100 Subject: [PATCH] Implement parallel MGTransferPrebuilt for parallel::distributed::Vector This commit includes a refactoring of MGTransferPrebuilt into a base class MGLevelGlobalTransfer that contains the copy operation between the multigrid hierarchy and the global degrees of freedom, including a specialization for parallel::distributed::Vector that takes care of the particular behavior in this vector type. It also allows us to use optimized access in terms of local MPI indices. --- include/deal.II/multigrid/mg_transfer.h | 348 ++++++++--- .../deal.II/multigrid/mg_transfer.templates.h | 269 +++++++-- source/multigrid/CMakeLists.txt | 2 + source/multigrid/mg_level_global_transfer.cc | 539 ++++++++++++++++++ .../mg_level_global_transfer.inst.in | 70 +++ source/multigrid/mg_transfer_prebuilt.cc | 279 +-------- source/multigrid/mg_transfer_prebuilt.inst.in | 44 +- 7 files changed, 1124 insertions(+), 427 deletions(-) create mode 100644 source/multigrid/mg_level_global_transfer.cc create mode 100644 source/multigrid/mg_level_global_transfer.inst.in diff --git a/include/deal.II/multigrid/mg_transfer.h b/include/deal.II/multigrid/mg_transfer.h index aef8748d55..89e3f57360 100644 --- a/include/deal.II/multigrid/mg_transfer.h +++ b/include/deal.II/multigrid/mg_transfer.h @@ -39,8 +39,6 @@ DEAL_II_NAMESPACE_OPEN -template class DoFHandler; - namespace internal { template @@ -111,64 +109,25 @@ namespace internal /*!@addtogroup mg */ /*@{*/ + + /** - * Implementation of the MGTransferBase interface for which the transfer - * operations are prebuilt upon construction of the object of this class as - * matrices. This is the fast way, since it only needs to build the operation - * once by looping over all cells and storing the result in a matrix for each - * level, but requires additional memory. + * Implementation of transfer between the global vectors and the multigrid + * levels for use in the derived class MGTransferPrebuilt and other classes. * - * See MGTransferBase to find out which of the transfer classes is best for - * your needs. - * - * @author Wolfgang Bangerth, Guido Kanschat - * @date 1999, 2000, 2001, 2002, 2003, 2004, 2012 + * @author Wolfgang Bangerth, Guido Kanschat, Timo Heister, Martin Kronbichler + * @date 1999, 2000, 2001, 2002, 2003, 2004, 2012, 2015 */ template -class MGTransferPrebuilt : public MGTransferBase +class MGLevelGlobalTransfer : public MGTransferBase { public: - /** - * Constructor without constraint matrices. Use this constructor only with - * discontinuous finite elements or with no local refinement. - */ - MGTransferPrebuilt (); - /** - * Constructor with constraints. Equivalent to the default constructor - * followed by initialize_constraints(). - */ - MGTransferPrebuilt (const ConstraintMatrix &constraints, - const MGConstrainedDoFs &mg_constrained_dofs); - /** - * Destructor. - */ - virtual ~MGTransferPrebuilt (); - - /** - * Initialize the constraints to be used in build_matrices(). - */ - void initialize_constraints (const ConstraintMatrix &constraints, - const MGConstrainedDoFs &mg_constrained_dofs); /** * Reset the object to the state it had right after the default constructor. */ void clear (); - /** - * Actually build the prolongation matrices for each level. 
- */ - template - void build_matrices (const DoFHandler &mg_dof); - - virtual void prolongate (const unsigned int to_level, - VectorType &dst, - const VectorType &src) const; - - virtual void restrict_and_add (const unsigned int from_level, - VectorType &dst, - const VectorType &src) const; - /** * Transfer from a vector on the global grid to vectors defined on each of * the levels separately, i.a. an @p MGVector. @@ -222,63 +181,176 @@ public: set_component_to_block_map (const std::vector &map); /** - * Finite element does not provide prolongation matrices. + * Memory used by this object. */ - DeclException0(ExcNoProlongation); + std::size_t memory_consumption () const; /** - * You have to call build_matrices() before using this object. + * Print the copy index fields for debugging purposes. */ - DeclException0(ExcMatricesNotBuilt); + void print_indices(std::ostream &os) const; + +protected: /** - * Memory used by this object. + * Internal function to @p fill copy_indices*. Called by derived classes. */ - std::size_t memory_consumption () const; + template + void fill_and_communicate_copy_indices(const DoFHandler &mg_dof); /** - * Print all the matrices for debugging purposes. + * Sizes of the multi-level vectors. */ - void print_matrices(std::ostream &os) const; + std::vector sizes; /** - * Print the copy index fields for debugging purposes. + * Mapping for the copy_to_mg() and copy_from_mg() functions. Here only + * index pairs locally owned + * + * The data is organized as follows: one vector per level. Each element of + * these vectors contains first the global index, then the level index. */ - void print_indices(std::ostream &os) const; + std::vector > > + copy_indices; -private: + /** + * Additional degrees of freedom for the copy_to_mg() function. These are + * the ones where the global degree of freedom is locally owned and the + * level degree of freedom is not. + * + * Organization of the data is like for @p copy_indices_mine. + */ + std::vector > > + copy_indices_global_mine; /** - * Internal function to @p fill copy_indices*. Called by build_matrices(). + * Additional degrees of freedom for the copy_from_mg() function. These are + * the ones where the level degree of freedom is locally owned and the + * global degree of freedom is not. + * + * Organization of the data is like for @p copy_indices_mine. */ - template - void fill_and_communicate_copy_indices(const DoFHandler &mg_dof); + std::vector > > + copy_indices_level_mine; /** - * Sizes of the multi-level vectors. + * The vector that stores what has been given to the + * set_component_to_block_map() function. */ - std::vector sizes; + std::vector component_to_block_map; /** - * Sparsity patterns for transfer matrices. + * The mg_constrained_dofs of the level systems. */ - std::vector::Sparsity> > prolongation_sparsities; + SmartPointer > mg_constrained_dofs; +}; + + + +/** + * Implementation of transfer between the global vectors and the multigrid + * levels for use in the derived class MGTransferPrebuilt and other + * classes. This class is a specialization for the case of + * parallel::distributed::Vector that requires a few different calling + * routines as compared to the %parallel vectors in the PETScWrappers and + * TrilinosWrappers namespaces. + * + * @author Martin Kronbichler + * @date 2016 + */ +template +class MGLevelGlobalTransfer > : public MGTransferBase > +{ +public: /** - * The actual prolongation matrix. column indices belong to the dof indices - * of the mother cell, i.e. the coarse level. 
while row indices belong to - * the child cell, i.e. the fine level. + * Reset the object to the state it had right after the default constructor. */ - std::vector::Matrix> > prolongation_matrices; + void clear (); + + /** + * Transfer from a vector on the global grid to vectors defined on each of + * the levels separately, i.a. an @p MGVector. + */ + template + void + copy_to_mg (const DoFHandler &mg_dof, + MGLevelObject > &dst, + const parallel::distributed::Vector &src) const; + + /** + * Transfer from multi-level vector to normal vector. + * + * Copies data from active portions of an MGVector into the respective + * positions of a Vector. In order to keep the result + * consistent, constrained degrees of freedom are set to zero. + */ + template + void + copy_from_mg (const DoFHandler &mg_dof, + parallel::distributed::Vector &dst, + const MGLevelObject > &src) const; + + /** + * Add a multi-level vector to a normal vector. + * + * Works as the previous function, but probably not for continuous elements. + */ + template + void + copy_from_mg_add (const DoFHandler &mg_dof, + parallel::distributed::Vector &dst, + const MGLevelObject > &src) const; + + /** + * If this object operates on BlockVector objects, we need to describe how + * the individual vector components are mapped to the blocks of a vector. + * For example, for a Stokes system, we have dim+1 vector components for + * velocity and pressure, but we may want to use block vectors with only two + * blocks for all velocities in one block, and the pressure variables in the + * other. + * + * By default, if this function is not called, block vectors have as many + * blocks as the finite element has vector components. However, this can be + * changed by calling this function with an array that describes how vector + * components are to be grouped into blocks. The meaning of the argument is + * the same as the one given to the DoFTools::count_dofs_per_component + * function. + */ + void + set_component_to_block_map (const std::vector &map); + + /** + * Memory used by this object. + */ + std::size_t memory_consumption () const; + + /** + * Print the copy index fields for debugging purposes. + */ + void print_indices(std::ostream &os) const; + +protected: + + /** + * Internal function to @p fill copy_indices*. Called by derived classes. + */ + template + void fill_and_communicate_copy_indices(const DoFHandler &mg_dof); + + /** + * Sizes of the multi-level vectors. + */ + std::vector sizes; /** * Mapping for the copy_to_mg() and copy_from_mg() functions. Here only - * index pairs locally owned + * index pairs locally owned is stored. * * The data is organized as follows: one vector per level. Each element of * these vectors contains first the global index, then the level index. */ - std::vector > > + std::vector > > copy_indices; /** @@ -288,7 +360,7 @@ private: * * Organization of the data is like for @p copy_indices_mine. */ - std::vector > > + std::vector > > copy_indices_global_mine; /** @@ -298,9 +370,16 @@ private: * * Organization of the data is like for @p copy_indices_mine. */ - std::vector > > + std::vector > > copy_indices_level_mine; + /** + * Stores whether the copy operation from the global to the level vector is + * actually a plain copy to the finest level. This means that the grid has + * no adaptive refinement and the numbering on the finest multigrid level is + * the same as in the global case. 
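+   *
+   * As an illustrative sketch only (the loop below is not part of the
+   * interface): when this flag is set, copy_to_mg() reduces to an
+   * element-wise copy of the locally owned range into the finest level
+   * vector,
+   * @code
+   *   for (unsigned int i = 0; i < src.local_size(); ++i)
+   *     dst[dst.max_level()].local_element(i) = src.local_element(i);
+   * @endcode
+   * followed by the usual restriction to the coarser levels.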
+ */ + bool perform_plain_copy; /** * The vector that stores what has been given to the @@ -308,20 +387,131 @@ private: */ std::vector component_to_block_map; + /** + * The mg_constrained_dofs of the level systems. + */ + SmartPointer > > mg_constrained_dofs; + + /** + * In the function copy_to_mg, we need to access ghosted entries of the + * global vector for inserting into the level vectors. This vector is + * populated with those entries. + */ + mutable parallel::distributed::Vector ghosted_global_vector; + + /** + * In the function copy_from_mg, we access all level vectors with certain + * ghost entries for inserting the result into a global vector. + */ + mutable MGLevelObject > ghosted_level_vector; +}; + + + +/** + * Implementation of the MGTransferBase interface for which the transfer + * operations are prebuilt upon construction of the object of this class as + * matrices. This is the fast way, since it only needs to build the operation + * once by looping over all cells and storing the result in a matrix for each + * level, but requires additional memory. + * + * See MGTransferBase to find out which of the transfer classes is best for + * your needs. + * + * @author Wolfgang Bangerth, Guido Kanschat, Timo Heister, Martin Kronbichler + * @date 1999, 2000, 2001, 2002, 2003, 2004, 2012, 2015 + */ +template +class MGTransferPrebuilt : public MGLevelGlobalTransfer +{ +public: + /** + * Constructor without constraint matrices. Use this constructor only with + * discontinuous finite elements or with no local refinement. + */ + MGTransferPrebuilt (); + + /** + * Constructor with constraints. Equivalent to the default constructor + * followed by initialize_constraints(). + */ + MGTransferPrebuilt (const ConstraintMatrix &constraints, + const MGConstrainedDoFs &mg_constrained_dofs); + + /** + * Destructor. + */ + virtual ~MGTransferPrebuilt (); + + /** + * Initialize the constraints to be used in build_matrices(). + */ + void initialize_constraints (const ConstraintMatrix &constraints, + const MGConstrainedDoFs &mg_constrained_dofs); + + /** + * Reset the object to the state it had right after the default constructor. + */ + void clear (); + + /** + * Actually build the prolongation matrices for each level. + */ + template + void build_matrices (const DoFHandler &mg_dof); + + virtual void prolongate (const unsigned int to_level, + VectorType &dst, + const VectorType &src) const; + + virtual void restrict_and_add (const unsigned int from_level, + VectorType &dst, + const VectorType &src) const; + + /** + * Finite element does not provide prolongation matrices. + */ + DeclException0(ExcNoProlongation); + + /** + * You have to call build_matrices() before using this object. + */ + DeclException0(ExcMatricesNotBuilt); + + /** + * Memory used by this object. + */ + std::size_t memory_consumption () const; + + /** + * Print all the matrices for debugging purposes. + */ + void print_matrices(std::ostream &os) const; + +private: + + /** + * Sparsity patterns for transfer matrices. + */ + std::vector::Sparsity> > prolongation_sparsities; + + /** + * The actual prolongation matrix. column indices belong to the dof indices + * of the mother cell, i.e. the coarse level. while row indices belong to + * the child cell, i.e. the fine level. + */ + std::vector::Matrix> > prolongation_matrices; + /** * Degrees of freedom on the refinement edge excluding those on the * boundary. */ std::vector > interface_dofs; + /** * The constraints of the global system. 
*/ SmartPointer > constraints; - /** - * The mg_constrained_dofs of the level systems. - */ - - SmartPointer > mg_constrained_dofs; }; diff --git a/include/deal.II/multigrid/mg_transfer.templates.h b/include/deal.II/multigrid/mg_transfer.templates.h index 41630fe3bc..a3bd668bdf 100644 --- a/include/deal.II/multigrid/mg_transfer.templates.h +++ b/include/deal.II/multigrid/mg_transfer.templates.h @@ -114,12 +114,10 @@ namespace (dynamic_cast*> (&mg_dof.get_triangulation())); - for (unsigned int level=v.min_level(); - level<=v.max_level(); ++level) + for (unsigned int level=v.min_level(); level<=v.max_level(); ++level) { - const IndexSet vector_index_set = v[level].locally_owned_elements(); - if (vector_index_set.size() != mg_dof.locally_owned_mg_dofs(level).size() || - mg_dof.locally_owned_mg_dofs(level) != vector_index_set) + if (v[level].size() != mg_dof.locally_owned_mg_dofs(level).size() || + v[level].local_size() != mg_dof.locally_owned_mg_dofs(level).n_elements()) v[level].reinit(mg_dof.locally_owned_mg_dofs(level), tria != 0 ? tria->get_communicator() : MPI_COMM_SELF); else @@ -160,18 +158,16 @@ namespace -/* --------------------- MGTransferPrebuilt -------------- */ - - +/* ------------------ MGLevelGlobalTransfer ----------------- */ template template void -MGTransferPrebuilt::copy_to_mg +MGLevelGlobalTransfer::copy_to_mg (const DoFHandler &mg_dof_handler, - MGLevelObject &dst, - const InVector &src) const + MGLevelObject &dst, + const InVector &src) const { reinit_vector(mg_dof_handler, component_to_block_map, dst); bool first = true; @@ -182,22 +178,21 @@ MGTransferPrebuilt::copy_to_mg for (unsigned int level=mg_dof_handler.get_triangulation().n_global_levels(); level != 0;) { --level; - VectorType &dst_level = dst[level]; - #ifdef DEBUG_OUTPUT MPI_Barrier(MPI_COMM_WORLD); #endif typedef std::vector >::const_iterator dof_pair_iterator; + VectorType &dst_level = dst[level]; // first copy local unknowns - for (dof_pair_iterator i= copy_indices[level].begin(); + for (dof_pair_iterator i = copy_indices[level].begin(); i != copy_indices[level].end(); ++i) dst_level(i->second) = src(i->first); - // Do the same for the indices where the global index is local, - // but the local index is not - for (dof_pair_iterator i= copy_indices_global_mine[level].begin(); + // Do the same for the indices where the global index is local, but the + // local index is not + for (dof_pair_iterator i = copy_indices_global_mine[level].begin(); i != copy_indices_global_mine[level].end(); ++i) dst_level(i->second) = src(i->first); @@ -210,7 +205,7 @@ MGTransferPrebuilt::copy_to_mg if (!first) { - restrict_and_add (level+1, dst[level], dst[level+1]); + this->restrict_and_add (level+1, dst[level], dst[level+1]); #ifdef DEBUG_OUTPUT std::cout << "copy_to_mg restr&add " << level << " " << dst_level.l2_norm() << std::endl; #endif @@ -225,18 +220,14 @@ MGTransferPrebuilt::copy_to_mg template template void -MGTransferPrebuilt::copy_from_mg +MGLevelGlobalTransfer::copy_from_mg (const DoFHandler &mg_dof_handler, OutVector &dst, const MGLevelObject &src) const { - // For non-DG: degrees of - // freedom in the refinement - // face may need special - // attention, since they belong - // to the coarse level, but - // have fine level basis - // functions + // For non-DG: degrees of freedom in the refinement face may need special + // attention, since they belong to the coarse level, but have fine level + // basis functions dst = 0; for (unsigned int level=0; level::copy_from_mg #endif typedef std::vector >::const_iterator 
dof_pair_iterator; + const VectorType &src_level = src[level]; // First copy all indices local to this process - for (dof_pair_iterator i= copy_indices[level].begin(); + for (dof_pair_iterator i = copy_indices[level].begin(); i != copy_indices[level].end(); ++i) - dst(i->first) = src[level](i->second); + dst(i->first) = src_level(i->second); - // Do the same for the indices where the level index is local, - // but the global index is not - for (dof_pair_iterator i= copy_indices_level_mine[level].begin(); + // Do the same for the indices where the level index is local, but the + // global index is not + for (dof_pair_iterator i = copy_indices_level_mine[level].begin(); i != copy_indices_level_mine[level].end(); ++i) - dst(i->first) = src[level](i->second); + dst(i->first) = src_level(i->second); #ifdef DEBUG_OUTPUT { @@ -279,32 +271,29 @@ MGTransferPrebuilt::copy_from_mg template template void -MGTransferPrebuilt::copy_from_mg_add +MGLevelGlobalTransfer::copy_from_mg_add (const DoFHandler &mg_dof_handler, OutVector &dst, const MGLevelObject &src) const { - // For non-DG: degrees of - // freedom in the refinement - // face may need special - // attention, since they belong - // to the coarse level, but - // have fine level basis - // functions + // For non-DG: degrees of freedom in the refinement face may need special + // attention, since they belong to the coarse level, but have fine level + // basis functions for (unsigned int level=0; level >::const_iterator dof_pair_iterator; + const VectorType &src_level = src[level]; // First add all indices local to this process - for (dof_pair_iterator i= copy_indices[level].begin(); + for (dof_pair_iterator i = copy_indices[level].begin(); i != copy_indices[level].end(); ++i) - dst(i->first) += src[level](i->second); + dst(i->first) += src_level(i->second); - // Do the same for the indices where the level index is local, - // but the global index is not - for (dof_pair_iterator i= copy_indices_level_mine[level].begin(); + // Do the same for the indices where the level index is local, but the + // global index is not + for (dof_pair_iterator i = copy_indices_level_mine[level].begin(); i != copy_indices_level_mine[level].end(); ++i) - dst(i->first) += src[level](i->second); + dst(i->first) += src_level(i->second); } dst.compress(VectorOperation::add); } @@ -313,24 +302,192 @@ MGTransferPrebuilt::copy_from_mg_add template void -MGTransferPrebuilt:: +MGLevelGlobalTransfer:: set_component_to_block_map (const std::vector &map) { component_to_block_map = map; } -template -std::size_t -MGTransferPrebuilt::memory_consumption () const + + +/* --------- MGLevelGlobalTransfer ------- */ + +template +template +void +MGLevelGlobalTransfer >::copy_to_mg +(const DoFHandler &mg_dof_handler, + MGLevelObject > &dst, + const parallel::distributed::Vector &src) const { - std::size_t result = sizeof(*this); - result += sizeof(unsigned int) * sizes.size(); + reinit_vector(mg_dof_handler, component_to_block_map, dst); + bool first = true; + + if (perform_plain_copy) + { + // In this case, we can simply copy the local range (in parallel by + // VectorView) + AssertDimension(dst[dst.max_level()].local_size(), src.local_size()); + VectorView dst_view (src.local_size(), dst[dst.max_level()].begin()); + VectorView src_view (src.local_size(), src.begin()); + static_cast &>(dst_view) = static_cast &>(src_view); + for (unsigned int level=mg_dof_handler.get_triangulation().n_global_levels()-1; level != 0; ) + { + --level; + this->restrict_and_add (level+1, dst[level], 
dst[level+1]); + } + return; + } + + // the ghosted vector should already have the correct local size (but + // different parallel layout) + AssertDimension(ghosted_global_vector.local_size(), src.local_size()); + + // copy the source vector to the temporary vector that we hold for the + // purpose of data exchange + ghosted_global_vector = src; + ghosted_global_vector.update_ghost_values(); + + for (unsigned int level=mg_dof_handler.get_triangulation().n_global_levels(); level != 0;) + { + --level; + + typedef std::vector >::const_iterator dof_pair_iterator; + parallel::distributed::Vector &dst_level = dst[level]; + + // first copy local unknowns + for (dof_pair_iterator i = copy_indices[level].begin(); + i != copy_indices[level].end(); ++i) + dst_level.local_element(i->second) = ghosted_global_vector.local_element(i->first); + + // Do the same for the indices where the level index is local, but the + // global index is not + for (dof_pair_iterator i = copy_indices_level_mine[level].begin(); + i != copy_indices_level_mine[level].end(); ++i) + dst_level.local_element(i->second) = ghosted_global_vector.local_element(i->first); + + dst_level.compress(VectorOperation::insert); + + if (!first) + { + this->restrict_and_add (level+1, dst_level, dst[level+1]); + } + + first = false; + } +} + - for (unsigned int i=0; imemory_consumption() - + prolongation_sparsities[i]->memory_consumption(); - return result; +template +template +void +MGLevelGlobalTransfer >::copy_from_mg +(const DoFHandler &mg_dof_handler, + parallel::distributed::Vector &dst, + const MGLevelObject > &src) const +{ + // For non-DG: degrees of freedom in the refinement face may need special + // attention, since they belong to the coarse level, but have fine level + // basis functions + + if (perform_plain_copy) + { + // In this case, we can simply copy the local range (in parallel by + // VectorView). To avoid having stray data in ghost entries of the + // destination, make sure to clear them here. 
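+      // The two VectorView objects created below wrap the locally owned
+      // arrays of dst and of the finest level vector without copying any
+      // data; the assignment through the Vector base class then copies
+      // this local range element by element (converting between the two
+      // number types if they differ).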
+ dst.zero_out_ghosts(); + AssertDimension(dst.local_size(), src[src.max_level()].local_size()); + VectorView dst_view (dst.local_size(), dst.begin()); + VectorView src_view (dst.local_size(), src[src.max_level()].begin()); + static_cast &>(dst_view) = static_cast &>(src_view); + return; + } + + dst = 0; + for (unsigned int level=0; level >::const_iterator dof_pair_iterator; + + // the ghosted vector should already have the correct local size (but + // different parallel layout) + AssertDimension(ghosted_level_vector[level].local_size(), + src[level].local_size()); + + // the first time around, we copy the source vector to the temporary + // vector that we hold for the purpose of data exchange + parallel::distributed::Vector &ghosted_vector = + ghosted_level_vector[level]; + ghosted_vector = src[level]; + ghosted_vector.update_ghost_values(); + + // first copy local unknowns + for (dof_pair_iterator i = copy_indices[level].begin(); + i != copy_indices[level].end(); ++i) + dst.local_element(i->first) = ghosted_vector.local_element(i->second); + + // Do the same for the indices where the level index is local, but the + // global index is not + for (dof_pair_iterator i = copy_indices_global_mine[level].begin(); + i != copy_indices_global_mine[level].end(); ++i) + dst.local_element(i->first) = ghosted_vector.local_element(i->second); + } + dst.compress(VectorOperation::insert); +} + + + +template +template +void +MGLevelGlobalTransfer >::copy_from_mg_add +(const DoFHandler &mg_dof_handler, + parallel::distributed::Vector &dst, + const MGLevelObject > &src) const +{ + // For non-DG: degrees of freedom in the refinement face may need special + // attention, since they belong to the coarse level, but have fine level + // basis functions + + dst.zero_out_ghosts(); + for (unsigned int level=0; level >::const_iterator dof_pair_iterator; + + // the ghosted vector should already have the correct local size (but + // different parallel layout) + AssertDimension(ghosted_level_vector[level].local_size(), + src[level].local_size()); + + // the first time around, we copy the source vector to the temporary + // vector that we hold for the purpose of data exchange + parallel::distributed::Vector &ghosted_vector = + ghosted_level_vector[level]; + ghosted_vector = src[level]; + ghosted_vector.update_ghost_values(); + + // first add local unknowns + for (dof_pair_iterator i= copy_indices[level].begin(); + i != copy_indices[level].end(); ++i) + dst.local_element(i->first) += ghosted_vector.local_element(i->second); + + // Do the same for the indices where the level index is local, but the + // global index is not + for (dof_pair_iterator i= copy_indices_global_mine[level].begin(); + i != copy_indices_global_mine[level].end(); ++i) + dst.local_element(i->first) += ghosted_vector.local_element(i->second); + } + dst.compress(VectorOperation::add); +} + + + +template +void +MGLevelGlobalTransfer >:: +set_component_to_block_map (const std::vector &map) +{ + component_to_block_map = map; } diff --git a/source/multigrid/CMakeLists.txt b/source/multigrid/CMakeLists.txt index b86de17f67..ac23a6af19 100644 --- a/source/multigrid/CMakeLists.txt +++ b/source/multigrid/CMakeLists.txt @@ -17,6 +17,7 @@ INCLUDE_DIRECTORIES(BEFORE ${CMAKE_CURRENT_BINARY_DIR}) SET(_src mg_base.cc + mg_level_global_transfer.cc mg_tools.cc mg_transfer_block.cc mg_transfer_component.cc @@ -26,6 +27,7 @@ SET(_src SET(_inst mg_base.inst.in + mg_level_global_transfer.inst.in mg_tools.inst.in mg_transfer_block.inst.in mg_transfer_component.inst.in diff 
--git a/source/multigrid/mg_level_global_transfer.cc b/source/multigrid/mg_level_global_transfer.cc new file mode 100644 index 0000000000..8e5b6b55b5 --- /dev/null +++ b/source/multigrid/mg_level_global_transfer.cc @@ -0,0 +1,539 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2003 - 2015 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. +// +// --------------------------------------------------------------------- + + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +DEAL_II_NAMESPACE_OPEN + + +namespace +{ + /** + * Internal data structure that is used in the MPI communication in fill_and_communicate_copy_indices(). + * It represents an entry in the copy_indices* map, that associates a level dof index with a global dof index. + */ + struct DoFPair + { + unsigned int level; + types::global_dof_index global_dof_index; + types::global_dof_index level_dof_index; + + DoFPair(const unsigned int level, + const types::global_dof_index global_dof_index, + const types::global_dof_index level_dof_index) + : + level(level), global_dof_index(global_dof_index), level_dof_index(level_dof_index) + {} + + DoFPair() + {} + }; + + + + /** + * Internal function for filling the copy indices from global to level indices + */ + template + void fill_copy_indices(const DoFHandler &mg_dof, + const MGConstrainedDoFs *mg_constrained_dofs, + std::vector > > ©_indices, + std::vector > > ©_indices_global_mine, + std::vector > > ©_indices_level_mine) + { + // Now we are filling the variables copy_indices*, which are essentially + // maps from global to mgdof for each level stored as a std::vector of + // pairs. We need to split this map on each level depending on the ownership + // of the global and mgdof, so that we later not access non-local elements + // in copy_to/from_mg. + // We keep track in the bitfield dof_touched which global dof has + // been processed already (on the current level). This is the same as + // the multigrid running in serial. 
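+    // In other words, every (global_dof, level_dof) pair ends up in one of
+    // three places: copy_indices if both indices are locally owned,
+    // copy_indices_global_mine (plus a message to the owner of the level
+    // dof) if only the global index is owned here, and
+    // copy_indices_level_mine, which is filled from the messages received
+    // below, if only the level index is owned here.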
+ + // map cpu_index -> vector of data + // that will be copied into copy_indices_level_mine + std::vector send_data_temp; + + const unsigned int n_levels = mg_dof.get_triangulation().n_global_levels(); + copy_indices.resize(n_levels); + copy_indices_global_mine.resize(n_levels); + copy_indices_level_mine.resize(n_levels); + IndexSet globally_relevant; + DoFTools::extract_locally_relevant_dofs(mg_dof, globally_relevant); + + const unsigned int dofs_per_cell = mg_dof.get_fe().dofs_per_cell; + std::vector global_dof_indices (dofs_per_cell); + std::vector level_dof_indices (dofs_per_cell); + + for (unsigned int level=0; level dof_touched(globally_relevant.n_elements(), false); + copy_indices[level].clear(); + copy_indices_level_mine[level].clear(); + copy_indices_global_mine[level].clear(); + + typename DoFHandler::active_cell_iterator + level_cell = mg_dof.begin_active(level); + const typename DoFHandler::active_cell_iterator + level_end = mg_dof.end_active(level); + + for (; level_cell!=level_end; ++level_cell) + { + if (mg_dof.get_triangulation().locally_owned_subdomain()!=numbers::invalid_subdomain_id + && (level_cell->level_subdomain_id()==numbers::artificial_subdomain_id + || level_cell->subdomain_id()==numbers::artificial_subdomain_id) + ) + continue; + + // get the dof numbers of this cell for the global and the level-wise + // numbering + level_cell->get_dof_indices (global_dof_indices); + level_cell->get_mg_dof_indices (level_dof_indices); + + for (unsigned int i=0; iat_refinement_edge(level, level_dof_indices[i])) + continue; + types::global_dof_index global_idx = globally_relevant.index_within_set(global_dof_indices[i]); + //skip if we did this global dof already (on this or a coarser level) + if (dof_touched[global_idx]) + continue; + bool global_mine = mg_dof.locally_owned_dofs().is_element(global_dof_indices[i]); + bool level_mine = mg_dof.locally_owned_mg_dofs(level).is_element(level_dof_indices[i]); + + + if (global_mine && level_mine) + { + copy_indices[level].push_back( + std::make_pair (global_dof_indices[i], level_dof_indices[i])); + } + else if (global_mine) + { + copy_indices_global_mine[level].push_back( + std::make_pair (global_dof_indices[i], level_dof_indices[i])); + + //send this to the owner of the level_dof: + send_data_temp.push_back(DoFPair(level, global_dof_indices[i], level_dof_indices[i])); + } + else + { + // somebody will send those to me + } + + dof_touched[global_idx] = true; + } + } + } + + const dealii::parallel::distributed::Triangulation *tria = + (dynamic_cast*> + (&mg_dof.get_triangulation())); + AssertThrow(send_data_temp.size()==0 || tria!=NULL, ExcMessage("parallel Multigrid only works with a distributed Triangulation!")); + +#ifdef DEAL_II_WITH_MPI + if (tria) + { + // TODO: Searching the owner for every single DoF becomes quite + // inefficient. Please fix this, Timo. 
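+        // For now the owner of each level dof is found by testing the
+        // locally owned level index sets of all level ghost owners one by
+        // one, i.e., the search below is linear in the number of neighbors
+        // for every single DoFPair.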
+ std::set neighbors = tria->level_ghost_owners(); + std::map > send_data; + + // * find owners of the level dofs and insert into send_data accordingly + for (typename std::vector::iterator dofpair=send_data_temp.begin(); dofpair != send_data_temp.end(); ++dofpair) + { + for (std::set::iterator it = neighbors.begin(); it != neighbors.end(); ++it) + { + if (mg_dof.locally_owned_mg_dofs_per_processor(dofpair->level)[*it].is_element(dofpair->level_dof_index)) + { + send_data[*it].push_back(*dofpair); + break; + } + } + } + + // * send + std::vector requests; + { + for (std::set::iterator it = neighbors.begin(); it != neighbors.end(); ++it) + { + requests.push_back(MPI_Request()); + unsigned int dest = *it; + std::vector &data = send_data[dest]; + if (data.size()) + MPI_Isend(&data[0], data.size()*sizeof(data[0]), MPI_BYTE, dest, 71, tria->get_communicator(), &*requests.rbegin()); + else + MPI_Isend(NULL, 0, MPI_BYTE, dest, 71, tria->get_communicator(), &*requests.rbegin()); + } + } + + // * receive + { + std::vector receive_buffer; + for (unsigned int counter=0; counterget_communicator(), &status); + MPI_Get_count(&status, MPI_BYTE, &len); + + if (len==0) + { + int err = MPI_Recv(NULL, 0, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG, + tria->get_communicator(), &status); + AssertThrow(err==MPI_SUCCESS, ExcInternalError()); + continue; + } + + int count = len / sizeof(DoFPair); + Assert(static_cast(count * sizeof(DoFPair)) == len, ExcInternalError()); + receive_buffer.resize(count); + + void *ptr = &receive_buffer[0]; + int err = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG, + tria->get_communicator(), &status); + AssertThrow(err==MPI_SUCCESS, ExcInternalError()); + + for (unsigned int i=0; i 0) + { + MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE); + requests.clear(); + } + } +#endif + + // Sort the indices. This will produce more reliable debug output for regression texts + // and likely won't hurt performance even in release mode. + std::less > compare; + for (unsigned int level=0; level ----------------- */ + + +template +template +void +MGLevelGlobalTransfer::fill_and_communicate_copy_indices +(const DoFHandler &mg_dof) +{ + fill_copy_indices(mg_dof, mg_constrained_dofs, copy_indices, + copy_indices_global_mine, copy_indices_level_mine); +} + + + +template +void +MGLevelGlobalTransfer::clear() +{ + sizes.resize(0); + std::vector > > + empty0, empty1, empty2; + copy_indices.swap(empty0); + copy_indices_global_mine.swap(empty1); + copy_indices_level_mine.swap(empty2); + component_to_block_map.resize(0); + mg_constrained_dofs = 0; +} + + + +template +void +MGLevelGlobalTransfer::print_indices (std::ostream &os) const +{ + for (unsigned int level = 0; level +std::size_t +MGLevelGlobalTransfer::memory_consumption () const +{ + std::size_t result = sizeof(*this); + result += MemoryConsumption::memory_consumption(sizes); + result += MemoryConsumption::memory_consumption(copy_indices); + result += MemoryConsumption::memory_consumption(copy_indices_global_mine); + result += MemoryConsumption::memory_consumption(copy_indices_level_mine); + + return result; +} + + + +/* ------------------ MGLevelGlobalTransfer ----------------- */ + + +template +template +void +MGLevelGlobalTransfer >::fill_and_communicate_copy_indices +(const DoFHandler &mg_dof) +{ + // first go to the usual routine... 
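+  // ...which fills the three copy_indices arrays in terms of global dof
+  // numbers; further down, those numbers are translated into indices local
+  // to this MPI rank through the partitioners of the ghosted vectors, so
+  // that copy_to_mg()/copy_from_mg() can use local_element() without any
+  // global-to-local lookup at run time.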
+ std::vector > > + copy_indices; + std::vector > > + copy_indices_global_mine; + std::vector > > + copy_indices_level_mine; + + fill_copy_indices(mg_dof, mg_constrained_dofs, copy_indices, + copy_indices_global_mine, copy_indices_level_mine); + + // get all degrees of freedom that we need read access to in copy_to_mg + // and copy_from_mg, respectively. We fill an IndexSet once on each level + // (for the global_mine indices accessing remote level indices) and once + // globally (for the level_mine indices accessing remote global indices). + + // the variables index_set and level_index_set are going to define the + // ghost indices of the respective vectors (due to construction, these are + // precisely the indices that we need) + const parallel::Triangulation *ptria = + dynamic_cast *> + (&mg_dof.get_tria()); + const MPI_Comm mpi_communicator = ptria != 0 ? ptria->get_communicator() : + MPI_COMM_SELF; + + IndexSet index_set(mg_dof.locally_owned_dofs().size()); + std::vector accessed_indices; + ghosted_level_vector.resize(0, mg_dof.get_triangulation().n_global_levels()-1); + std::vector level_index_set(mg_dof.get_triangulation().n_global_levels()); + for (unsigned int l=0; l accessed_level_indices; + for (unsigned int i=0; icopy_indices.resize(mg_dof.get_triangulation().n_global_levels()); + this->copy_indices_level_mine.resize(mg_dof.get_triangulation().n_global_levels()); + this->copy_indices_global_mine.resize(mg_dof.get_triangulation().n_global_levels()); + for (unsigned int level=0; levelcopy_indices[level].resize(copy_indices[level].size()); + for (unsigned int i=0; icopy_indices[level][i] = + std::pair + (global_partitioner.global_to_local(copy_indices[level][i].first), + level_partitioner.global_to_local(copy_indices[level][i].second)); + + // remote-owned case: the locally owned indices for the level and the + // ghost dofs for the global indices set the local index + this->copy_indices_level_mine[level]. + resize(copy_indices_level_mine[level].size()); + for (unsigned int i=0; icopy_indices_level_mine[level][i] = + std::pair + (global_partitioner.global_to_local(copy_indices_level_mine[level][i].first), + level_partitioner.global_to_local(copy_indices_level_mine[level][i].second)); + + // owned-remote case: the locally owned indices for the global dofs + // and the ghost dofs for the level indices set the local index + this->copy_indices_global_mine[level]. 
+ resize(copy_indices_global_mine[level].size()); + for (unsigned int i=0; icopy_indices_global_mine[level][i] = + std::pair + (global_partitioner.global_to_local(copy_indices_global_mine[level][i].first), + level_partitioner.global_to_local(copy_indices_global_mine[level][i].second)); + } + + perform_plain_copy = this->copy_indices.back().size() + == mg_dof.locally_owned_dofs().n_elements(); + if (perform_plain_copy) + { + AssertDimension(this->copy_indices_global_mine.back().size(), 0); + AssertDimension(this->copy_indices_level_mine.back().size(), 0); + + // check whether there is a renumbering of degrees of freedom on + // either the finest level or the global dofs, which means that we + // cannot apply a plain copy + for (unsigned int i=0; icopy_indices.back().size(); ++i) + if (this->copy_indices.back()[i].first != + this->copy_indices.back()[i].second) + { + perform_plain_copy = false; + break; + } + } + perform_plain_copy = + Utilities::MPI::min(static_cast(perform_plain_copy), + mpi_communicator); + + // if we do a plain copy, no need to hold additional ghosted vectors + if (perform_plain_copy) + { + ghosted_global_vector.reinit(0); + ghosted_level_vector.resize(0, 0); + } +} + + + +template +void +MGLevelGlobalTransfer >::clear() +{ + sizes.resize(0); + std::vector > > + empty0, empty1, empty2; + copy_indices.swap(empty0); + copy_indices_global_mine.swap(empty1); + copy_indices_level_mine.swap(empty2); + component_to_block_map.resize(0); + mg_constrained_dofs = 0; + ghosted_global_vector.reinit(0); + ghosted_level_vector.resize(0, 0); +} + + + +template +void +MGLevelGlobalTransfer >::print_indices (std::ostream &os) const +{ + for (unsigned int level = 0; level +std::size_t +MGLevelGlobalTransfer >::memory_consumption () const +{ + std::size_t result = sizeof(*this); + result += MemoryConsumption::memory_consumption(sizes); + result += MemoryConsumption::memory_consumption(copy_indices); + result += MemoryConsumption::memory_consumption(copy_indices_global_mine); + result += MemoryConsumption::memory_consumption(copy_indices_level_mine); + result += ghosted_global_vector.memory_consumption(); + for (unsigned int i=ghosted_level_vector.min_level(); + i<=ghosted_level_vector.max_level(); ++i) + result += ghosted_level_vector[i].memory_consumption(); + + return result; +} + + + +// explicit instantiation +#include "mg_level_global_transfer.inst" + + +DEAL_II_NAMESPACE_CLOSE diff --git a/source/multigrid/mg_level_global_transfer.inst.in b/source/multigrid/mg_level_global_transfer.inst.in new file mode 100644 index 0000000000..f2f57a2804 --- /dev/null +++ b/source/multigrid/mg_level_global_transfer.inst.in @@ -0,0 +1,70 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 1998 - 2014 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. 
+// +// --------------------------------------------------------------------- + + + +for (V1 : VECTORS_WITH_MATRIX) + { + template class MGLevelGlobalTransfer< V1 >; + } + +for (deal_II_dimension : DIMENSIONS; V1 : VECTORS_WITH_MATRIX) + { + template + void MGTransferPrebuilt< V1 >::fill_and_communicate_copy_indices( + const DoFHandler &mg_dof); + } + +for (deal_II_dimension : DIMENSIONS; V1,V2 : DEAL_II_VEC_TEMPLATES; S1, S2 : REAL_SCALARS) + { + template void + MGLevelGlobalTransfer >::copy_to_mg ( + const DoFHandler&, MGLevelObject >&, const V2&) const; + template void + MGLevelGlobalTransfer >::copy_from_mg (const DoFHandler&, V2&, + const MGLevelObject >&) const; + template void + MGLevelGlobalTransfer >::copy_from_mg_add (const DoFHandler&, V2&, + const MGLevelObject >&) const; + } + +for (deal_II_dimension : DIMENSIONS; S2 : REAL_SCALARS) + { + template void + MGLevelGlobalTransfer >::copy_to_mg ( + const DoFHandler&, MGLevelObject >&, const parallel::distributed::Vector&) const; + template void + MGLevelGlobalTransfer >::copy_from_mg (const DoFHandler&, parallel::distributed::Vector&, + const MGLevelObject >&) const; + template void + MGLevelGlobalTransfer >::copy_from_mg_add (const DoFHandler&, parallel::distributed::Vector&, + const MGLevelObject >&) const; + } + +for(deal_II_dimension : DIMENSIONS) + { +#ifdef DEAL_II_WITH_TRILINOS + + template void + MGLevelGlobalTransfer::copy_to_mg ( + const DoFHandler&, MGLevelObject&, const TrilinosWrappers::MPI::Vector&) const; + template void + MGLevelGlobalTransfer::copy_from_mg (const DoFHandler&, TrilinosWrappers::MPI::Vector&, + const MGLevelObject&) const; + template void + MGLevelGlobalTransfer::copy_from_mg_add (const DoFHandler&, TrilinosWrappers::MPI::Vector&, + const MGLevelObject&) const; +#endif + } diff --git a/source/multigrid/mg_transfer_prebuilt.cc b/source/multigrid/mg_transfer_prebuilt.cc index f7f7c08d00..0d2a5aa1db 100644 --- a/source/multigrid/mg_transfer_prebuilt.cc +++ b/source/multigrid/mg_transfer_prebuilt.cc @@ -35,7 +35,6 @@ #include #include #include -#include #include @@ -47,12 +46,15 @@ MGTransferPrebuilt::MGTransferPrebuilt () {} + template MGTransferPrebuilt::MGTransferPrebuilt (const ConstraintMatrix &c, const MGConstrainedDoFs &mg_c) : - constraints(&c), - mg_constrained_dofs(&mg_c) -{} + constraints(&c) +{ + this->mg_constrained_dofs = &mg_c; +} + template @@ -60,31 +62,29 @@ MGTransferPrebuilt::~MGTransferPrebuilt () {} + template void MGTransferPrebuilt::initialize_constraints (const ConstraintMatrix &c, const MGConstrainedDoFs &mg_c) { constraints = &c; - mg_constrained_dofs = &mg_c; + this->mg_constrained_dofs = &mg_c; } + template void MGTransferPrebuilt::clear () { - sizes.resize(0); + MGLevelGlobalTransfer::clear(); prolongation_matrices.resize(0); prolongation_sparsities.resize(0); - copy_indices.resize(0); - copy_indices_global_mine.resize(0); - copy_indices_level_mine.resize(0); - component_to_block_map.resize(0); interface_dofs.resize(0); constraints = 0; - mg_constrained_dofs = 0; } + template void MGTransferPrebuilt::prolongate (const unsigned int to_level, VectorType &dst, @@ -97,6 +97,7 @@ void MGTransferPrebuilt::prolongate (const unsigned int to_level, } + template void MGTransferPrebuilt::restrict_and_add (const unsigned int from_level, VectorType &dst, @@ -110,6 +111,7 @@ void MGTransferPrebuilt::restrict_and_add (const unsigned int from_l } + template template void MGTransferPrebuilt::build_matrices @@ -118,9 +120,9 @@ void MGTransferPrebuilt::build_matrices const unsigned int 
n_levels = mg_dof.get_triangulation().n_global_levels(); const unsigned int dofs_per_cell = mg_dof.get_fe().dofs_per_cell; - sizes.resize(n_levels); + this->sizes.resize(n_levels); for (unsigned int l=0; lsizes[l] = mg_dof.n_dofs(l); // reset the size of the array of // matrices. call resize(0) first, @@ -167,8 +169,8 @@ void MGTransferPrebuilt::build_matrices IndexSet level_p1_relevant_dofs; DoFTools::extract_locally_relevant_level_dofs(mg_dof, level+1, level_p1_relevant_dofs); - DynamicSparsityPattern dsp (sizes[level+1], - sizes[level], + DynamicSparsityPattern dsp (this->sizes[level+1], + this->sizes[level], level_p1_relevant_dofs); for (typename DoFHandler::cell_iterator cell=mg_dof.begin(level); cell != mg_dof.end(level); ++cell) @@ -235,9 +237,10 @@ void MGTransferPrebuilt::build_matrices = mg_dof.get_fe().get_prolongation_matrix (child, cell->refinement_case()); - if (mg_constrained_dofs != 0 && mg_constrained_dofs->have_boundary_indices()) + if (this->mg_constrained_dofs != 0 && + this->mg_constrained_dofs->have_boundary_indices()) for (unsigned int j=0; jis_boundary_index(level, dof_indices_parent[j])) + if (this->mg_constrained_dofs->is_boundary_index(level, dof_indices_parent[j])) for (unsigned int i=0; i::build_matrices prolongation_matrices[level]->compress(VectorOperation::insert); } - fill_and_communicate_copy_indices(mg_dof); -} - -namespace -{ - /** - * Internal data structure that is used in the MPI communication in fill_and_communicate_copy_indices(). - * It represents an entry in the copy_indices* map, that associates a level dof index with a global dof index. - */ - struct DoFPair - { - unsigned int level; - types::global_dof_index global_dof_index; - types::global_dof_index level_dof_index; - - DoFPair(const unsigned int level, - const types::global_dof_index global_dof_index, - const types::global_dof_index level_dof_index) - : - level(level), global_dof_index(global_dof_index), level_dof_index(level_dof_index) - {} - - DoFPair() - {} - }; + this->fill_and_communicate_copy_indices(mg_dof); } -template -template -void -MGTransferPrebuilt::fill_and_communicate_copy_indices -(const DoFHandler &mg_dof) -{ - // Now we are filling the variables copy_indices*, which are essentially - // maps from global to mgdof for each level stored as a std::vector of - // pairs. We need to split this map on each level depending on the ownership - // of the global and mgdof, so that we later not access non-local elements - // in copy_to/from_mg. - // We keep track in the bitfield dof_touched which global dof has - // been processed already (on the current level). This is the same as - // the multigrid running in serial. 
- - // map cpu_index -> vector of data - // that will be copied into copy_indices_level_mine - std::vector send_data_temp; - - const unsigned int n_levels = mg_dof.get_triangulation().n_global_levels(); - copy_indices.resize(n_levels); - copy_indices_global_mine.resize(n_levels); - copy_indices_level_mine.resize(n_levels); - IndexSet globally_relevant; - DoFTools::extract_locally_relevant_dofs(mg_dof, globally_relevant); - - const unsigned int dofs_per_cell = mg_dof.get_fe().dofs_per_cell; - std::vector global_dof_indices (dofs_per_cell); - std::vector level_dof_indices (dofs_per_cell); - - for (unsigned int level=0; level dof_touched(globally_relevant.n_elements(), false); - copy_indices[level].clear(); - copy_indices_level_mine[level].clear(); - copy_indices_global_mine[level].clear(); - - typename DoFHandler::active_cell_iterator - level_cell = mg_dof.begin_active(level); - const typename DoFHandler::active_cell_iterator - level_end = mg_dof.end_active(level); - - for (; level_cell!=level_end; ++level_cell) - { - if (mg_dof.get_triangulation().locally_owned_subdomain()!=numbers::invalid_subdomain_id - && (level_cell->level_subdomain_id()==numbers::artificial_subdomain_id - || level_cell->subdomain_id()==numbers::artificial_subdomain_id) - ) - continue; - - // get the dof numbers of this cell for the global and the level-wise - // numbering - level_cell->get_dof_indices (global_dof_indices); - level_cell->get_mg_dof_indices (level_dof_indices); - - for (unsigned int i=0; iat_refinement_edge(level, level_dof_indices[i])) - continue; - types::global_dof_index global_idx = globally_relevant.index_within_set(global_dof_indices[i]); - //skip if we did this global dof already (on this or a coarser level) - if (dof_touched[global_idx]) - continue; - bool global_mine = mg_dof.locally_owned_dofs().is_element(global_dof_indices[i]); - bool level_mine = mg_dof.locally_owned_mg_dofs(level).is_element(level_dof_indices[i]); - - - if (global_mine && level_mine) - { - copy_indices[level].push_back( - std::make_pair (global_dof_indices[i], level_dof_indices[i])); - } - else if (global_mine) - { - copy_indices_global_mine[level].push_back( - std::make_pair (global_dof_indices[i], level_dof_indices[i])); - - //send this to the owner of the level_dof: - send_data_temp.push_back(DoFPair(level, global_dof_indices[i], level_dof_indices[i])); - } - else - { - // somebody will send those to me - } - - dof_touched[global_idx] = true; - } - } - } - - const dealii::parallel::distributed::Triangulation *tria = - (dynamic_cast*> - (&mg_dof.get_triangulation())); - AssertThrow(send_data_temp.size()==0 || tria!=NULL, ExcMessage("parallel Multigrid only works with a distributed Triangulation!")); - -#ifdef DEAL_II_WITH_MPI - if (tria) - { - // TODO: Searching the owner for every single DoF becomes quite - // inefficient. Please fix this, Timo. 
- std::set neighbors = tria->level_ghost_owners(); - std::map > send_data; - - // * find owners of the level dofs and insert into send_data accordingly - for (typename std::vector::iterator dofpair=send_data_temp.begin(); dofpair != send_data_temp.end(); ++dofpair) - { - for (std::set::iterator it = neighbors.begin(); it != neighbors.end(); ++it) - { - if (mg_dof.locally_owned_mg_dofs_per_processor(dofpair->level)[*it].is_element(dofpair->level_dof_index)) - { - send_data[*it].push_back(*dofpair); - break; - } - } - } - - // * send - std::vector requests; - { - for (std::set::iterator it = neighbors.begin(); it != neighbors.end(); ++it) - { - requests.push_back(MPI_Request()); - unsigned int dest = *it; - std::vector &data = send_data[dest]; - if (data.size()) - MPI_Isend(&data[0], data.size()*sizeof(data[0]), MPI_BYTE, dest, 71, tria->get_communicator(), &*requests.rbegin()); - else - MPI_Isend(NULL, 0, MPI_BYTE, dest, 71, tria->get_communicator(), &*requests.rbegin()); - } - } - - // * receive - { - std::vector receive_buffer; - for (unsigned int counter=0; counterget_communicator(), &status); - MPI_Get_count(&status, MPI_BYTE, &len); - - if (len==0) - { - int err = MPI_Recv(NULL, 0, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG, - tria->get_communicator(), &status); - AssertThrow(err==MPI_SUCCESS, ExcInternalError()); - continue; - } - - int count = len / sizeof(DoFPair); - Assert(static_cast(count * sizeof(DoFPair)) == len, ExcInternalError()); - receive_buffer.resize(count); - void *ptr = &receive_buffer[0]; - int err = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG, - tria->get_communicator(), &status); - AssertThrow(err==MPI_SUCCESS, ExcInternalError()); - - for (unsigned int i=0; i 0) - { - MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE); - requests.clear(); - } - } -#endif - - // Sort the indices. This will produce more reliable debug output for regression texts - // and likely won't hurt performance even in release mode. 
- std::less > compare; - for (unsigned int level=0; level void @@ -483,29 +275,18 @@ MGTransferPrebuilt::print_matrices (std::ostream &os) const } } + + template -void -MGTransferPrebuilt::print_indices (std::ostream &os) const +std::size_t +MGTransferPrebuilt::memory_consumption () const { - for (unsigned int level = 0; level::memory_consumption(); + for (unsigned int i=0; imemory_consumption() + + prolongation_sparsities[i]->memory_consumption(); - for (unsigned int level = 0; level::build_matrices( - const DoFHandler &mg_dof); - } - -for (deal_II_dimension : DIMENSIONS; V1,V2 : DEAL_II_VEC_TEMPLATES; S1, S2 : REAL_SCALARS) - { - template void - MGTransferPrebuilt >::copy_to_mg ( - const DoFHandler&, MGLevelObject >&, const V2&) const; - template void - MGTransferPrebuilt >::copy_from_mg (const DoFHandler&, V2&, - const MGLevelObject >&) const; - template void - MGTransferPrebuilt >::copy_from_mg_add (const DoFHandler&, V2&, - const MGLevelObject >&) const; - } - -for (deal_II_dimension : DIMENSIONS; S2 : REAL_SCALARS) - { - template void - MGTransferPrebuilt >::copy_to_mg ( - const DoFHandler&, MGLevelObject >&, const parallel::distributed::Vector&) const; - template void - MGTransferPrebuilt >::copy_from_mg (const DoFHandler&, parallel::distributed::Vector&, - const MGLevelObject >&) const; - template void - MGTransferPrebuilt >::copy_from_mg_add (const DoFHandler&, parallel::distributed::Vector&, - const MGLevelObject >&) const; - } - -for(deal_II_dimension : DIMENSIONS) - { -#ifdef DEAL_II_WITH_TRILINOS - - template void - MGTransferPrebuilt::copy_to_mg ( - const DoFHandler&, MGLevelObject&, const TrilinosWrappers::MPI::Vector&) const; - template void - MGTransferPrebuilt::copy_from_mg (const DoFHandler&, TrilinosWrappers::MPI::Vector&, - const MGLevelObject&) const; - template void - MGTransferPrebuilt::copy_from_mg_add (const DoFHandler&, TrilinosWrappers::MPI::Vector&, - const MGLevelObject&) const; -#endif + const DoFHandler &mg_dof); } -- 2.39.5
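
For reference, a minimal sketch (not part of the patch) of how the transfer class is typically set up for parallel::distributed::Vector after this commit. The mesh, element, and refinement choices below are illustrative assumptions, and MPI is assumed to be initialized elsewhere (e.g. by Utilities::MPI::MPI_InitFinalize in main()); only the MGTransferPrebuilt calls correspond to the interface touched here.

#include <deal.II/distributed/tria.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/lac/parallel_vector.h>
#include <deal.II/multigrid/mg_transfer.h>

using namespace dealii;

void setup_transfer_sketch ()
{
  // A distributed mesh that also stores the level hierarchy, which the
  // level transfer requires.
  parallel::distributed::Triangulation<2> tria
    (MPI_COMM_WORLD,
     Triangulation<2>::limit_level_difference_at_vertices,
     parallel::distributed::Triangulation<2>::construct_multigrid_hierarchy);
  GridGenerator::hyper_cube (tria);
  tria.refine_global (3);

  FE_Q<2>       fe (1);
  DoFHandler<2> mg_dof (tria);
  mg_dof.distribute_dofs (fe);
  mg_dof.distribute_mg_dofs (fe);    // level degrees of freedom

  // Globally refined mesh, so the constructor without constraints is
  // sufficient; with local refinement one would pass a ConstraintMatrix
  // and an MGConstrainedDoFs object instead.
  MGTransferPrebuilt<parallel::distributed::Vector<double> > transfer;
  transfer.build_matrices (mg_dof);

  // The transfer can now be handed to Multigrid / PreconditionMG;
  // internally, copy_to_mg() and copy_from_mg() move data between a global
  // parallel::distributed::Vector<double> and the level vectors.
}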