From: Martin Kronbichler
Date: Tue, 17 Oct 2017 16:43:36 +0000 (+0200)
Subject: Add new MPI partitioner functionality.
X-Git-Tag: v9.0.0-rc1~933^2~7
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=41716a377baaa7bda2f4b861a731ffdee0757ab9;p=dealii.git

Add new MPI partitioner functionality. Support for embedding the ghost
indices into a larger ghost set. MPI import and export functions.
---

diff --git a/include/deal.II/base/partitioner.h b/include/deal.II/base/partitioner.h
index ba077be293..c38b32730e 100644
--- a/include/deal.II/base/partitioner.h
+++ b/include/deal.II/base/partitioner.h
@@ -17,11 +17,13 @@
 #define dealii_partitioner_h

 #include
-#include
 #include
 #include
 #include
 #include
+#include
+#include
+#include

 #include
 #include
@@ -113,8 +115,15 @@ namespace Utilities
     /**
      * Allows the ghost indices to be set after the constructor has been
      * called.
+     *
+     * The optional parameter @p larger_ghost_index_set allows for defining
+     * an indirect addressing into a larger set of ghost indices. This setup
+     * is useful if a distributed vector is based on that larger ghost index
+     * set but only a tighter subset should be communicated according to the
+     * given index set.
      */
-    void set_ghost_indices (const IndexSet &ghost_indices);
+    void set_ghost_indices (const IndexSet &ghost_indices,
+                            const IndexSet &larger_ghost_index_set = IndexSet());

     /**
      * Return the global size.
      */
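As a minimal usage sketch of the new second argument (the index sets, sizes,
and communicator below are illustrative assumptions, not part of this patch):

    #include <deal.II/base/index_set.h>
    #include <deal.II/base/partitioner.h>

    // Assumes an initialized MPI runtime. This rank owns [0,10) out of a
    // global size of 100; its tight ghost set {20,21} is embedded in the
    // larger ghost set [18,23).
    dealii::IndexSet owned(100);
    owned.add_range(0, 10);
    dealii::IndexSet tight_ghosts(100);
    tight_ghosts.add_index(20);
    tight_ghosts.add_index(21);
    dealii::IndexSet larger_ghosts(100);
    larger_ghosts.add_range(18, 23);

    dealii::Utilities::MPI::Partitioner partitioner(owned, MPI_COMM_WORLD);
    // communicate only the tight set, but address the result within a
    // ghost array sized for the larger set:
    partitioner.set_ghost_indices(tight_ghosts, larger_ghosts);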
@@ -189,6 +198,20 @@ namespace Utilities
      */
     unsigned int n_ghost_indices() const;

+    /**
+     * In case the partitioner was built to define ghost indices as a subset
+     * of indices in a larger set of ghosts, this function returns the
+     * numbering of the ghost indices in terms of ranges within that larger
+     * set. Similar structure as in an IndexSet, but tailored to be iterated
+     * over, and some indices may be duplicates.
+     *
+     * In case the partitioner did not take a second set of ghost indices
+     * into account, this subset is simply defined as the half-open interval
+     * [local_size(), local_size()+n_ghost_indices()).
+     */
+    const std::vector<std::pair<unsigned int, unsigned int> > &
+    ghost_indices_within_larger_ghost_set() const;
+
     /**
      * Return a list of processors (first entry) and the number of ghost
      * degrees of freedom owned by that processor (second entry). The sum
      * of the
@@ -227,6 +250,20 @@ namespace Utilities
     const std::vector<std::pair<unsigned int, unsigned int> > &
     import_targets() const;

+    /**
+     * Caches the number of chunks in the import indices per MPI rank. The
+     * length is import_targets_data.size()+1.
+     */
+    const std::vector<unsigned int> &
+    import_indices_chunks_by_rank() const;
+
+    /**
+     * Caches the number of chunks in the ghost indices subsets per MPI
+     * rank. The length is ghost_targets_data.size()+1.
+     */
+    const std::vector<unsigned int> &
+    ghost_indices_subset_chunks_by_rank() const;
+
     /**
      * Check whether the given partitioner is compatible with the
      * partitioner used for this vector. Two partitioners are compatible if
@@ -282,6 +319,65 @@ namespace Utilities
      */
     bool ghost_indices_initialized() const;

+#ifdef DEAL_II_WITH_MPI
+    /**
+     * Start the exports of the data in a locally owned array to the range
+     * described by the ghost indices of this class.
+     *
+     * This functionality is used in
+     * LinearAlgebra::distributed::Vector::update_ghost_values().
+     */
+    template <typename Number>
+    void
+    export_to_ghosted_array_start(const unsigned int             communication_channel,
+                                  const ArrayView<const Number> &locally_owned_array,
+                                  const ArrayView<Number>       &temporary_storage,
+                                  const ArrayView<Number>       &ghost_array,
+                                  std::vector<MPI_Request>      &requests) const;
+
+    /**
+     * Finish the exports of the data in a locally owned array to the range
+     * described by the ghost indices of this class.
+     *
+     * This functionality is used in
+     * LinearAlgebra::distributed::Vector::update_ghost_values().
+     */
+    template <typename Number>
+    void
+    export_to_ghosted_array_finish(const ArrayView<Number>  &ghost_array,
+                                   std::vector<MPI_Request> &requests) const;
+
+    /**
+     * Start importing the data on an array indexed by the ghost indices of
+     * this class that is later accumulated into a locally owned array with
+     * import_from_ghosted_array_finish().
+     *
+     * This functionality is used in
+     * LinearAlgebra::distributed::Vector::compress().
+     */
+    template <typename Number>
+    void
+    import_from_ghosted_array_start(const VectorOperation::values vector_operation,
+                                    const unsigned int            communication_channel,
+                                    const ArrayView<Number>      &ghost_array,
+                                    const ArrayView<Number>      &temporary_storage,
+                                    std::vector<MPI_Request>     &requests) const;
+
+    /**
+     * Finish importing the data on an array described by the ghost indices
+     * of this class into the locally owned array, combining the results
+     * according to the given input @p vector_operation.
+     *
+     * This functionality is used in
+     * LinearAlgebra::distributed::Vector::compress().
+     */
+    template <typename Number>
+    void
+    import_from_ghosted_array_finish(const VectorOperation::values  vector_operation,
+                                     const ArrayView<const Number> &temporary_array,
+                                     const ArrayView<Number>       &locally_owned_storage,
+                                     const ArrayView<Number>       &ghost_array,
+                                     std::vector<MPI_Request>      &requests) const;
+#endif

     /**
      * Compute the memory consumption of this structure.
      */
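The intended calling sequence for a ghost update, as a hedged sketch: it
assumes a fully set up Partitioner object named partitioner, an initialized
MPI runtime, and Number = double; buffer sizes follow the assertions in the
implementations further below.

    std::vector<double>      owned(partitioner.local_size(), 1.);
    std::vector<double>      ghosts(partitioner.n_ghost_indices());
    std::vector<double>      temp(partitioner.n_import_indices());
    std::vector<MPI_Request> requests;

    partitioner.export_to_ghosted_array_start(
      /*communication_channel=*/0,
      dealii::ArrayView<const double>(owned.data(), owned.size()),
      dealii::ArrayView<double>(temp.data(), temp.size()),
      dealii::ArrayView<double>(ghosts.data(), ghosts.size()),
      requests);
    // independent work can overlap with the communication here
    partitioner.export_to_ghosted_array_finish(
      dealii::ArrayView<double>(ghosts.data(), ghosts.size()), requests);
    // ghosts[] now holds the owners' values for all ghost indices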
@@ -351,6 +447,31 @@ namespace Utilities
      */
     std::vector<std::pair<unsigned int, unsigned int> > import_targets_data;

+    /**
+     * Caches the number of chunks in the import indices per MPI rank. The
+     * length is import_targets_data.size()+1.
+     */
+    std::vector<unsigned int> import_indices_chunks_by_rank_data;
+
+    /**
+     * Caches the number of ghost indices in a larger set of indices given by
+     * the optional argument to set_ghost_indices().
+     */
+    unsigned int n_ghost_indices_in_larger_set;
+
+    /**
+     * Caches the number of chunks in the ghost indices subsets per MPI
+     * rank. The length is ghost_targets_data.size()+1.
+     */
+    std::vector<unsigned int> ghost_indices_subset_chunks_by_rank_data;
+
+    /**
+     * The set of indices that appear for an IndexSet that is a subset of a
+     * larger set, stored as ranges. Similar structure as in an IndexSet,
+     * but tailored to be iterated over.
+     */
+    std::vector<std::pair<unsigned int, unsigned int> > ghost_indices_subset_data;
+
     /**
      * The ID of the current processor in the MPI network
      */
@@ -488,6 +609,15 @@ namespace Utilities



+    inline
+    const std::vector<std::pair<unsigned int, unsigned int> > &
+    Partitioner::ghost_indices_within_larger_ghost_set() const
+    {
+      return ghost_indices_subset_data;
+    }
+
+
+
     inline
     const std::vector<std::pair<unsigned int, unsigned int> > &
     Partitioner::ghost_targets() const
@@ -523,6 +653,24 @@ namespace Utilities



+    inline
+    const std::vector<unsigned int> &
+    Partitioner::import_indices_chunks_by_rank() const
+    {
+      return import_indices_chunks_by_rank_data;
+    }
+
+
+
+    inline
+    const std::vector<unsigned int> &
+    Partitioner::ghost_indices_subset_chunks_by_rank() const
+    {
+      return ghost_indices_subset_chunks_by_rank_data;
+    }
+
+
+
     inline
     unsigned int
     Partitioner::this_mpi_process() const
diff --git a/include/deal.II/base/partitioner.templates.h b/include/deal.II/base/partitioner.templates.h
new file mode 100644
index 0000000000..49cf66cbc4
--- /dev/null
+++ b/include/deal.II/base/partitioner.templates.h
@@ -0,0 +1,439 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii_partitioner_templates_h
+#define dealii_partitioner_templates_h
+
+#include <deal.II/base/config.h>
+#include <deal.II/base/partitioner.h>
+#include <deal.II/lac/la_parallel_vector.h>
+
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace Utilities
+{
+  namespace MPI
+  {
+
+#ifndef DOXYGEN
+
+#ifdef DEAL_II_WITH_MPI
+
+    template <typename Number>
+    void
+    Partitioner::export_to_ghosted_array_start(const unsigned int             communication_channel,
+                                               const ArrayView<const Number> &locally_owned_array,
+                                               const ArrayView<Number>       &temporary_storage,
+                                               const ArrayView<Number>       &ghost_array,
+                                               std::vector<MPI_Request>      &requests) const
+    {
+      AssertDimension(locally_owned_array.size(), local_size());
+      AssertDimension(temporary_storage.size(), n_import_indices());
+      Assert(ghost_array.size() == n_ghost_indices() ||
+             ghost_array.size() == n_ghost_indices_in_larger_set,
+             ExcMessage(std::string("The size of the ghost index array (") +
+                        std::to_string(ghost_array.size()) +
+                        ") must either equal the number of ghost indices in "
+                        "the partitioner (" +
+                        std::to_string(n_ghost_indices()) +
+                        ") or be equal in size to a more comprehensive index "
+                        "set which contains " +
+                        std::to_string(n_ghost_indices_in_larger_set) +
+                        " elements for this partitioner."));
+
+      const unsigned int n_import_targets = import_targets_data.size();
+      const unsigned int n_ghost_targets  = ghost_targets_data.size();
+
+      Assert(requests.size() == 0,
+             ExcMessage("Another operation seems to still be running. "
+                        "Call update_ghost_values_finish() first."));
+
+      // Need to send and receive the data. Use non-blocking communication,
+      // where it is usually less overhead to first initiate the receive and
+      // then actually send the data
+      requests.resize (n_import_targets+n_ghost_targets);
+
+      // as a ghost array pointer, put the data at the end of the given ghost
+      // array in case we want to fill only a subset of the ghosts so that we
+      // can move the data to the right position in a forward loop in the
+      // _finish function.
+      AssertIndexRange(n_ghost_indices(), n_ghost_indices_in_larger_set+1);
+      Number *ghost_array_ptr = ghost_array.begin()+
+                                n_ghost_indices_in_larger_set-
+                                n_ghost_indices();
+
+      // initiate the receive operations
+      for (unsigned int i=0; i<n_ghost_targets; i++)
+        {
+          const int ierr = MPI_Irecv (ghost_array_ptr,
+                                      ghost_targets_data[i].second*sizeof(Number),
+                                      MPI_BYTE,
+                                      ghost_targets_data[i].first,
+                                      ghost_targets_data[i].first + communication_channel,
+                                      communicator,
+                                      &requests[i]);
+          AssertThrowMPI (ierr);
+          ghost_array_ptr += ghost_targets_data[i].second;
+        }
+
+      // initiate the send operations
+      Number *temp_array_ptr = temporary_storage.begin();
+      for (unsigned int i=0; i<n_import_targets; i++)
+        {
+          // copy the data to be sent into the temporary buffer
+          std::vector<std::pair<unsigned int, unsigned int> >::const_iterator
+          my_imports = import_indices_data.begin()+import_indices_chunks_by_rank_data[i],
+          end_my_imports = import_indices_data.begin()+import_indices_chunks_by_rank_data[i+1];
+          unsigned int index = 0;
+          for ( ; my_imports != end_my_imports; ++my_imports)
+            for (unsigned int j=my_imports->first; j<my_imports->second; j++)
+              temp_array_ptr[index++] = locally_owned_array[j];
+          AssertDimension(index, import_targets_data[i].second);
+
+          // start the send operations
+          const int ierr = MPI_Isend (temp_array_ptr,
+                                      import_targets_data[i].second*sizeof(Number),
+                                      MPI_BYTE,
+                                      import_targets_data[i].first,
+                                      my_pid + communication_channel,
+                                      communicator,
+                                      &requests[n_ghost_targets+i]);
+          AssertThrowMPI (ierr);
+          temp_array_ptr += import_targets_data[i].second;
+        }
+    }
+
+
+    template <typename Number>
+    void
+    Partitioner::export_to_ghosted_array_finish(const ArrayView<Number>  &ghost_array,
+                                                std::vector<MPI_Request> &requests) const
+    {
+      Assert(ghost_array.size() == n_ghost_indices() ||
+             ghost_array.size() == n_ghost_indices_in_larger_set,
+             ExcMessage(std::string("The size of the ghost index array (") +
+                        std::to_string(ghost_array.size()) +
+                        ") must either equal the number of ghost indices in "
+                        "the partitioner (" +
+                        std::to_string(n_ghost_indices()) +
+                        ") or be equal in size to a more comprehensive index "
+                        "set which contains " +
+                        std::to_string(n_ghost_indices_in_larger_set) +
+                        " elements for this partitioner."));
+
+      // wait for both sends and receives to complete, even though only
+      // receives are really necessary. this gives (much) better performance
+      AssertDimension (ghost_targets().size() + import_targets().size(),
+                       requests.size());
+      if (requests.size() > 0)
+        {
+          const int ierr = MPI_Waitall (requests.size(),
+                                        requests.data(),
+                                        MPI_STATUSES_IGNORE);
+          AssertThrowMPI (ierr);
+        }
+      requests.resize(0);
+
+      // in case we only received a subset of indices, we now need to move
+      // the data to the correct positions and delete the old content
+      if (n_ghost_indices_in_larger_set > n_ghost_indices() &&
+          ghost_array.size() == n_ghost_indices_in_larger_set)
+        {
+          unsigned int offset = n_ghost_indices_in_larger_set - n_ghost_indices();
+          // must copy ghost data into extended ghost array
+          for (std::vector<std::pair<unsigned int, unsigned int> >::const_iterator
+               my_ghosts = ghost_indices_subset_data.begin();
+               my_ghosts != ghost_indices_subset_data.end();
+               ++my_ghosts)
+            if (offset > my_ghosts->first)
+              for (unsigned int j=my_ghosts->first; j<my_ghosts->second; ++j, ++offset)
+                {
+                  ghost_array[j] = ghost_array[offset];
+                  ghost_array[offset] = Number();
+                }
+            else
+              Assert(offset == my_ghosts->first, ExcInternalError());
+        }
+    }
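The tail-placement strategy used by the two functions above is easiest to see
in a small standalone illustration (plain C++, independent of the library;
the sizes and ranges are made up):

    #include <algorithm>
    #include <utility>
    #include <vector>

    int main()
    {
      // Larger ghost array of size 5; the tight ghost subset occupies the
      // ranges {[0,1), [3,5)}, i.e. 3 of the 5 slots are communicated.
      std::vector<double> g(5, 0.);
      const double recv[3] = {10., 11., 12.}; // stands in for MPI receives
      unsigned int offset = 5 - 3;            // data was received at the tail
      std::copy(std::begin(recv), std::end(recv), g.begin() + offset);
      const std::pair<unsigned int, unsigned int> subsets[] = {{0, 1}, {3, 5}};
      for (const auto &r : subsets)
        {
          if (offset > r.first)
            for (unsigned int j = r.first; j < r.second; ++j, ++offset)
              {
                g[j]      = g[offset]; // forward move into the final slot
                g[offset] = 0.;        // zero the vacated tail entry
              }
          else
            offset += r.second - r.first; // range already in place; skip it
        }
      // g is now {10, 0, 0, 11, 12}: the received values sit exactly on
      // the subset ranges of the larger ghost array
    }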
+
+
+    template <typename Number>
+    void
+    Partitioner::import_from_ghosted_array_start(const VectorOperation::values vector_operation,
+                                                 const unsigned int            communication_channel,
+                                                 const ArrayView<Number>      &ghost_array,
+                                                 const ArrayView<Number>      &temporary_storage,
+                                                 std::vector<MPI_Request>     &requests) const
+    {
+      AssertDimension(temporary_storage.size(), n_import_indices());
+      Assert(ghost_array.size() == n_ghost_indices() ||
+             ghost_array.size() == n_ghost_indices_in_larger_set,
+             ExcMessage(std::string("The size of the ghost index array (") +
+                        std::to_string(ghost_array.size()) +
+                        ") must either equal the number of ghost indices in "
+                        "the partitioner (" +
+                        std::to_string(n_ghost_indices()) +
+                        ") or be equal in size to a more comprehensive index "
+                        "set which contains " +
+                        std::to_string(n_ghost_indices_in_larger_set) +
+                        " elements for this partitioner."));
+
+      (void)vector_operation;
+
+      // nothing to do for insert (only need to zero ghost entries in
+      // compress_finish()). in debug mode we want to check consistency of
+      // the inserted data, therefore the communication is still initialized.
+      // Having different code in debug and optimized mode is somewhat
+      // dangerous, but it really saves communication, so do it anyway
+#ifndef DEBUG
+      if (vector_operation == VectorOperation::insert)
+        return;
+#endif
+
+      // nothing to do when we neither have import nor ghost indices.
+      if (n_ghost_indices()==0 && n_import_indices()==0)
+        return;
+
+      const unsigned int n_import_targets = import_targets_data.size();
+      const unsigned int n_ghost_targets  = ghost_targets_data.size();
+
+      Assert(requests.size() == 0,
+             ExcMessage("Another compress operation seems to still be running. "
+                        "Call compress_finish() first."));
+
+      // Need to send and receive the data. Use non-blocking communication,
+      // where it is generally less overhead to first initiate the receive
+      // and then actually send the data
+
+      // set channels in a different range from the update_ghost_values
+      // channels
+      const unsigned int channel = communication_channel + 401;
+      requests.resize (n_import_targets + n_ghost_targets);
+
+      // initiate the receive operations
+      Number *temp_array_ptr = temporary_storage.begin();
+      for (unsigned int i=0; i<n_import_targets; i++)
+        {
+          AssertThrow (static_cast<std::size_t>(import_targets_data[i].second)*
+                       sizeof(Number) <
+                       static_cast<std::size_t>(std::numeric_limits<int>::max()),
+                       ExcMessage("Index overflow: Maximum message size in MPI is 2GB. "
+                                  "The number of ghost entries times the size of 'Number' "
+                                  "exceeds this value. This is not supported."));
+          const int ierr = MPI_Irecv (temp_array_ptr,
+                                      import_targets_data[i].second*sizeof(Number),
+                                      MPI_BYTE,
+                                      import_targets_data[i].first,
+                                      import_targets_data[i].first + channel,
+                                      communicator,
+                                      &requests[i]);
+          AssertThrowMPI (ierr);
+          temp_array_ptr += import_targets_data[i].second;
+        }
+
+      // initiate the send operations
+
+      // in case we want to import only from a subset of the ghosts we want
+      // to move the data to be sent to the front of the array
+      AssertIndexRange(n_ghost_indices(), n_ghost_indices_in_larger_set+1);
+      Number *ghost_array_ptr = ghost_array.begin();
+      for (unsigned int i=0; i<n_ghost_targets; i++)
+        {
+          if (n_ghost_indices_in_larger_set > n_ghost_indices() &&
+              ghost_array.size() == n_ghost_indices_in_larger_set)
+            {
+              std::vector<std::pair<unsigned int, unsigned int> >::const_iterator
+              my_ghosts = ghost_indices_subset_data.begin()+ghost_indices_subset_chunks_by_rank_data[i],
+              end_my_ghosts = ghost_indices_subset_data.begin()+ghost_indices_subset_chunks_by_rank_data[i+1];
+              unsigned int offset = 0;
+              for ( ; my_ghosts != end_my_ghosts; ++my_ghosts)
+                if (ghost_array_ptr + offset != ghost_array.begin() + my_ghosts->first)
+                  for (unsigned int j=my_ghosts->first; j<my_ghosts->second; ++j, ++offset)
+                    {
+                      ghost_array_ptr[offset] = ghost_array[j];
+                      ghost_array[j] = Number();
+                    }
+                else
+                  offset += my_ghosts->second - my_ghosts->first;
+              AssertDimension(offset, ghost_targets_data[i].second);
+            }
+
+          AssertThrow (static_cast<std::size_t>(ghost_targets_data[i].second)*
+                       sizeof(Number) <
+                       static_cast<std::size_t>(std::numeric_limits<int>::max()),
+                       ExcMessage("Index overflow: Maximum message size in MPI is 2GB. "
+                                  "The number of ghost entries times the size of 'Number' "
+                                  "exceeds this value. This is not supported."));
+          const int ierr = MPI_Isend (ghost_array_ptr,
+                                      ghost_targets_data[i].second*sizeof(Number),
+                                      MPI_BYTE,
+                                      ghost_targets_data[i].first,
+                                      this_mpi_process() + channel,
+                                      communicator,
+                                      &requests[n_import_targets+i]);
+          AssertThrowMPI (ierr);
+
+          ghost_array_ptr += ghost_targets_data[i].second;
+        }
+    }
+
+
+
+    template <typename Number>
+    void
+    Partitioner::import_from_ghosted_array_finish(const VectorOperation::values  vector_operation,
+                                                  const ArrayView<const Number> &temporary_storage,
+                                                  const ArrayView<Number>       &locally_owned_array,
+                                                  const ArrayView<Number>       &ghost_array,
+                                                  std::vector<MPI_Request>      &requests) const
+    {
+      AssertDimension(locally_owned_array.size(), local_size());
+      AssertDimension(temporary_storage.size(), n_import_indices());
+      Assert(ghost_array.size() == n_ghost_indices() ||
+             ghost_array.size() == n_ghost_indices_in_larger_set,
+             ExcMessage(std::string("The size of the ghost index array (") +
+                        std::to_string(ghost_array.size()) +
+                        ") must either equal the number of ghost indices in "
+                        "the partitioner (" +
+                        std::to_string(n_ghost_indices()) +
+                        ") or be equal in size to a more comprehensive index "
+                        "set which contains " +
+                        std::to_string(n_ghost_indices_in_larger_set) +
+                        " elements for this partitioner."));
+
+      // in optimized mode, no communication was started, so leave the
+      // function directly (and only clear ghosts)
+#ifndef DEBUG
+      if (vector_operation == VectorOperation::insert)
+        {
+          Assert(requests.empty(),
+                 ExcInternalError("Did not expect a non-empty communication "
+                                  "request when inserting. Check that the same "
+                                  "vector_operation argument was passed to "
+                                  "import_from_ghosted_array_start as is passed "
+                                  "to import_from_ghosted_array_finish."));
+          std::memset(ghost_array.begin(), 0, sizeof(Number)*ghost_array.size());
+          return;
+        }
+#endif
+
+      // nothing to do when we neither have import nor ghost indices.
+      if (n_ghost_indices()==0 && n_import_indices()==0)
+        return;
+
+      const unsigned int n_import_targets = import_targets_data.size();
+      const unsigned int n_ghost_targets  = ghost_targets_data.size();
+
+      if (vector_operation != dealii::VectorOperation::insert)
+        AssertDimension (n_ghost_targets+n_import_targets,
+                         requests.size());
+
+      // first wait for the receives to complete
+      if (requests.size() > 0 && n_import_targets > 0)
+        {
+          const int ierr = MPI_Waitall (n_import_targets, requests.data(),
+                                        MPI_STATUSES_IGNORE);
+          AssertThrowMPI(ierr);
+
+          const Number *read_position = temporary_storage.begin();
+          std::vector<std::pair<unsigned int, unsigned int> >::const_iterator
+          my_imports = import_indices_data.begin();
+
+          // If the operation is not an insertion, add the imported data to
+          // the local values. For insert, nothing is done here (but in debug
+          // mode we assert that the specified value is either zero or
+          // matches the one already present).
+          if (vector_operation != dealii::VectorOperation::insert)
+            for ( ; my_imports!=import_indices_data.end(); ++my_imports)
+              for (unsigned int j=my_imports->first; j<my_imports->second; j++)
+                locally_owned_array[j] += *read_position++;
+          else
+            for ( ; my_imports!=import_indices_data.end(); ++my_imports)
+              for (unsigned int j=my_imports->first; j<my_imports->second;
+                   j++, read_position++)
+                // Below we use a relatively large tolerance in units in the
+                // last place (ULP) as this Assert can be easily triggered in
+                // p::d::SolutionTransfer. The rationale is that during
+                // interpolation on two elements sharing the face, values on
+                // this face obtained from each side might be different due
+                // to additions being done in different order.
+                Assert(*read_position == Number() ||
+                       std::abs(locally_owned_array[j] - *read_position) <=
+                       std::abs(locally_owned_array[j] + *read_position) *
+                       100000. *
+                       std::numeric_limits<typename numbers::NumberTraits<Number>::real_type>::epsilon(),
+                       typename LinearAlgebra::distributed::Vector<Number>::
+                       ExcNonMatchingElements(*read_position, locally_owned_array[j],
+                                              my_pid));
+          AssertDimension(read_position-temporary_storage.begin(), n_import_indices());
+        }
+
+      // wait for the send operations to complete
+      if (requests.size() > 0 && n_ghost_targets > 0)
+        {
+          const int ierr = MPI_Waitall (n_ghost_targets,
+                                        &requests[n_import_targets],
+                                        MPI_STATUSES_IGNORE);
+          AssertThrowMPI(ierr);
+        }
+      else
+        AssertDimension (n_ghost_indices(), 0);
+
+      // clear the ghost array in preparation for the next round
+      std::memset(ghost_array.begin(), 0, sizeof(Number)*n_ghost_indices());
+
+      // clear the compress requests
+      requests.resize(0);
+    }
+
+
+#endif // ifdef DEAL_II_WITH_MPI
+#endif // ifndef DOXYGEN
+
+  } // end of namespace MPI
+
+} // end of namespace Utilities
+
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
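And the compress() counterpart, under the same assumptions as the
ghost-update sketch further up (a set up partitioner, initialized MPI; here
every ghost entry contributes the value 1):

    std::vector<double>      owned(partitioner.local_size(), 0.);
    std::vector<double>      ghosts(partitioner.n_ghost_indices(), 1.);
    std::vector<double>      temp(partitioner.n_import_indices());
    std::vector<MPI_Request> requests;

    partitioner.import_from_ghosted_array_start(
      dealii::VectorOperation::add, /*communication_channel=*/0,
      dealii::ArrayView<double>(ghosts.data(), ghosts.size()),
      dealii::ArrayView<double>(temp.data(), temp.size()),
      requests);
    partitioner.import_from_ghosted_array_finish(
      dealii::VectorOperation::add,
      dealii::ArrayView<const double>(temp.data(), temp.size()),
      dealii::ArrayView<double>(owned.data(), owned.size()),
      dealii::ArrayView<double>(ghosts.data(), ghosts.size()),
      requests);
    // owned[j] now counts how many remote ranks hold j as a ghost index;
    // ghosts[] has been zeroed in preparation for the next round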
diff --git a/source/base/CMakeLists.txt b/source/base/CMakeLists.txt
index a525a152e6..39fef19053 100644
--- a/source/base/CMakeLists.txt
+++ b/source/base/CMakeLists.txt
@@ -100,6 +100,7 @@ SET(_inst
   function.inst.in
   function_time.inst.in
   mpi.inst.in
+  partitioner.inst.in
   polynomials_rannacher_turek.inst.in
   symmetric_tensor.inst.in
   tensor.inst.in
diff --git a/source/base/partitioner.cc b/source/base/partitioner.cc
index d4da3a5144..2e88679e56 100644
--- a/source/base/partitioner.cc
+++ b/source/base/partitioner.cc
@@ -14,6 +14,7 @@
 // ---------------------------------------------------------------------

 #include <deal.II/base/partitioner.h>
+#include <deal.II/base/partitioner.templates.h>

 DEAL_II_NAMESPACE_OPEN
@@ -139,7 +140,8 @@ namespace Utilities

   void
-  Partitioner::set_ghost_indices (const IndexSet &ghost_indices_in)
+  Partitioner::set_ghost_indices (const IndexSet &ghost_indices_in,
+                                  const IndexSet &larger_ghost_index_set)
   {
     // Set ghost indices from input. To be sure that no entries from the
     // locally owned range are present, subtract the locally owned indices
@@ -322,25 +324,32 @@ namespace Utilities
     // transform import indices to local index space and compress
     // contiguous indices in form of ranges
     {
-      types::global_dof_index last_index = numbers::invalid_dof_index-1;
+      import_indices_chunks_by_rank_data.resize(import_targets_data.size()+1);
+      import_indices_chunks_by_rank_data[0] = 0;
       std::vector<std::pair<unsigned int,unsigned int> > compressed_import_indices;
-      for (unsigned int i=0; i<expanded_import_indices.size(); i++)
-        {
-          Assert(expanded_import_indices[i] >= local_range_data.first &&
-                 expanded_import_indices[i] < local_range_data.second,
-                 ExcIndexRange(expanded_import_indices[i], local_range_data.first,
-                               local_range_data.second));
-          types::global_dof_index new_index = (expanded_import_indices[i] -
-                                               local_range_data.first);
-          Assert(new_index<std::numeric_limits<unsigned int>::max(),
-                 ExcNotImplemented());
-          if (new_index == last_index+1)
-            compressed_import_indices.back().second++;
-          else
-            compressed_import_indices.push_back(
-              std::pair<unsigned int,unsigned int>(new_index,new_index+1));
-          last_index = new_index;
-        }
+      unsigned int shift = 0;
+      for (unsigned int p=0; p<import_targets_data.size(); ++p)
+        {
+          types::global_dof_index last_index = numbers::invalid_dof_index-1;
+          for (unsigned int ii=0; ii<import_targets_data[p].second; ++ii)
+            {
+              const unsigned int i = shift + ii;
+              Assert(expanded_import_indices[i] >= local_range_data.first &&
+                     expanded_import_indices[i] < local_range_data.second,
+                     ExcIndexRange(expanded_import_indices[i], local_range_data.first,
+                                   local_range_data.second));
+              types::global_dof_index new_index = (expanded_import_indices[i] -
+                                                   local_range_data.first);
+              Assert(new_index<std::numeric_limits<unsigned int>::max(),
+                     ExcNotImplemented());
+              if (new_index == last_index+1)
+                compressed_import_indices.back().second++;
+              else
+                compressed_import_indices.push_back(
+                  std::pair<unsigned int,unsigned int>(new_index,new_index+1));
+              last_index = new_index;
+            }
+          shift += import_targets_data[p].second;
+          import_indices_chunks_by_rank_data[p+1] = compressed_import_indices.size();
+        }
       import_indices_data = compressed_import_indices;
     }
+
+    // by default, the tight ghost index set is its own larger set
+    n_ghost_indices_in_larger_set = n_ghost_indices();
+    if (larger_ghost_index_set.size() > 0)
+      {
+        n_ghost_indices_in_larger_set = larger_ghost_index_set.n_elements();
+
+        // translate the tight ghost indices into positions within the
+        // larger ghost index set
+        std::vector<unsigned int> expanded_numbering;
+        for (IndexSet::ElementIterator it=ghost_indices_data.begin();
+             it != ghost_indices_data.end(); ++it)
+          {
+            Assert(larger_ghost_index_set.is_element(*it),
+                   ExcMessage("The given larger ghost index set must contain "
+                              "all indices in the actual index set."));
+            expanded_numbering.push_back(larger_ghost_index_set.index_within_set(*it));
+          }
+
+        // compress the translated numbering into ranges and record the
+        // chunk boundaries per rank
+        std::vector<std::pair<unsigned int, unsigned int> > ghost_indices_subset;
+        ghost_indices_subset_chunks_by_rank_data.resize(ghost_targets_data.size()+1);
+        ghost_indices_subset_chunks_by_rank_data[0] = 0;
+        unsigned int shift = 0;
+        for (unsigned int p=0; p<ghost_targets_data.size(); ++p)
+          {
+            unsigned int last_index = numbers::invalid_unsigned_int-1;
+            for (unsigned int ii=0; ii<ghost_targets_data[p].second; ++ii)
+              {
+                const unsigned int i = shift + ii;
+                if (expanded_numbering[i] == last_index+1)
+                  ghost_indices_subset.back().second++;
+                else
+                  ghost_indices_subset.push_back(
+                    std::pair<unsigned int,unsigned int>(expanded_numbering[i],
+                                                         expanded_numbering[i]+1));
+                last_index = expanded_numbering[i];
+              }
+            shift += ghost_targets_data[p].second;
+            ghost_indices_subset_chunks_by_rank_data[p+1] = ghost_indices_subset.size();
+          }
+        ghost_indices_subset_data = ghost_indices_subset;
+      }
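The translation into the larger set above relies on
IndexSet::index_within_set(); a tiny illustration with made-up numbers:

    dealii::IndexSet larger(100);
    larger.add_range(18, 23); // elements 18..22 sit at positions 0..4
    // the tight ghost index 20 is addressed at position 2 of the larger
    // ghost array, which is exactly what expanded_numbering stores above
    const auto position = larger.index_within_set(20); // == 2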
diff --git a/source/base/partitioner.inst.in b/source/base/partitioner.inst.in
new file mode 100644
--- /dev/null
+++ b/source/base/partitioner.inst.in
+for (SCALAR : REAL_SCALARS)
+{
+    template void Utilities::MPI::Partitioner::export_to_ghosted_array_start<SCALAR>(
+        const unsigned int,
+        const ArrayView<const SCALAR> &,
+        const ArrayView<SCALAR> &,
+        const ArrayView<SCALAR> &,
+        std::vector<MPI_Request> &) const;
+    template void Utilities::MPI::Partitioner::export_to_ghosted_array_finish<SCALAR>(
+        const ArrayView<SCALAR> &,
+        std::vector<MPI_Request> &) const;
+    template void Utilities::MPI::Partitioner::import_from_ghosted_array_start<SCALAR>(
+        const VectorOperation::values,
+        const unsigned int,
+        const ArrayView<SCALAR> &,
+        const ArrayView<SCALAR> &,
+        std::vector<MPI_Request> &) const;
+    template void Utilities::MPI::Partitioner::import_from_ghosted_array_finish<SCALAR>(
+        const VectorOperation::values,
+        const ArrayView<const SCALAR> &,
+        const ArrayView<SCALAR> &,
+        const ArrayView<SCALAR> &,
+        std::vector<MPI_Request> &) const;
+}
+
+
+for (SCALAR : COMPLEX_SCALARS)
+{
+    template void Utilities::MPI::Partitioner::export_to_ghosted_array_start<SCALAR>(
+        const unsigned int,
+        const ArrayView<const SCALAR> &,
+        const ArrayView<SCALAR> &,
+        const ArrayView<SCALAR> &,
+        std::vector<MPI_Request> &) const;
+    template void Utilities::MPI::Partitioner::export_to_ghosted_array_finish<SCALAR>(
+        const ArrayView<SCALAR> &,
+        std::vector<MPI_Request> &) const;
+    template void Utilities::MPI::Partitioner::import_from_ghosted_array_start<SCALAR>(
+        const VectorOperation::values,
+        const unsigned int,
+        const ArrayView<SCALAR> &,
+        const ArrayView<SCALAR> &,
+        std::vector<MPI_Request> &) const;
+    template void Utilities::MPI::Partitioner::import_from_ghosted_array_finish<SCALAR>(
+        const VectorOperation::values,
+        const ArrayView<const SCALAR> &,
+        const ArrayView<SCALAR> &,
+        const ArrayView<SCALAR> &,
+        std::vector<MPI_Request> &) const;
+}
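A closing remark on the instantiation lists: they cover the real and complex
scalar types the library is configured with. For a number type outside these
lists, the usual approach for deal.II's *.templates.h headers should apply,
namely making the definitions visible in the user's translation unit so the
compiler can instantiate them implicitly. A sketch, with MyNumber standing in
for a hypothetical user-defined type:

    #include <deal.II/base/partitioner.templates.h>

    // The templates are now visible, so e.g.
    // Utilities::MPI::Partitioner::export_to_ghosted_array_start<MyNumber>
    // can be instantiated at its point of use in this translation unit.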