#define dealii_partitioner_h
#include <deal.II/base/config.h>
-#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <deal.II/base/types.h>
#include <deal.II/base/utilities.h>
#include <deal.II/base/memory_consumption.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/array_view.h>
+#include <deal.II/lac/vector.h>
#include <deal.II/lac/communication_pattern_base.h>
#include <limits>
/**
* Allows to set the ghost indices after the constructor has been
* called.
+ *
+ * The optional parameter @p larger_ghost_index_set allows for defining
+ * an indirect addressing into a larger set of ghost indices. This setup
+ * is useful if a distributed vector is based on that larger ghost index
+ * set but only a tighter subset should be communicated according to the
+ * given index set.
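+ *
+ * A minimal usage sketch (assuming an IndexSet @p locally_owned and a
+ * global size @p n; the concrete index sets below are illustrative only):
+ * @code
+ * IndexSet tight_ghosts(n), larger_ghosts(n);
+ * tight_ghosts.add_index(57);
+ * larger_ghosts.add_range(50, 60); // contains tight_ghosts, assumed
+ *                                  // disjoint from locally_owned
+ *
+ * Utilities::MPI::Partitioner partitioner(locally_owned, MPI_COMM_WORLD);
+ * partitioner.set_ghost_indices(tight_ghosts, larger_ghosts);
+ * @endcode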
*/
- void set_ghost_indices (const IndexSet &ghost_indices);
+ void set_ghost_indices (const IndexSet &ghost_indices,
+ const IndexSet &larger_ghost_index_set = IndexSet());
/**
* Return the number of ghost indices.
*/
unsigned int n_ghost_indices() const;
+ /**
+ * In case the partitioner was built to define ghost indices as a subset
+ * of indices in a larger set of ghosts, this function returns the
+ * numbering of the tight ghost indices in terms of ranges within that
+ * larger set. Similar structure as in an IndexSet, but tailored to be
+ * iterated over, and some indices may be duplicated.
+ *
+ * In case the partitioner did not take a second set of ghost indices
+ * into account, this subset is simply defined as the half-open interval
+ * <code>[local_size(), local_size()+n_ghost_indices())</code>.
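+ *
+ * A sketch of walking the returned ranges (the visit function is
+ * hypothetical):
+ * @code
+ * for (const auto &range : partitioner.ghost_indices_within_larger_ghost_set())
+ *   for (unsigned int j = range.first; j < range.second; ++j)
+ *     visit_position_within_larger_set(j);
+ * @endcode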
+ */
+ const std::vector<std::pair<unsigned int, unsigned int> > &
+ ghost_indices_within_larger_ghost_set() const;
+
/**
* Return a list of processors (first entry) and the number of ghost degrees
* of freedom owned by that processor (second entry). The sum of the
const std::vector<std::pair<unsigned int, unsigned int> > &
import_targets() const;
+ /**
+ * Caches the number of chunks in the import indices per MPI rank. The
+ * length is import_targets_data.size()+1.
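+ *
+ * Entries <code>p</code> and <code>p+1</code> of this array delimit the
+ * half-open range of chunks within the import indices that belong to the
+ * <code>p</code>th import target.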
+ */
+ const std::vector<unsigned int> &
+ import_indices_chunks_by_rank() const;
+
+ /**
+ * Caches the number of chunks in the ghost indices subsets per MPI
+ * rank. The length is ghost_targets_data.size()+1.
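+ *
+ * Entries <code>p</code> and <code>p+1</code> of this array delimit the
+ * half-open range of chunks within the ghost indices subset that belong
+ * to the <code>p</code>th ghost target.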
+ */
+ const std::vector<unsigned int> &
+ ghost_indices_subset_chunks_by_rank() const;
+
/**
* Check whether the given partitioner is compatible with the
* partitioner used for this vector. Two partitioners are compatible if
*/
bool ghost_indices_initialized() const;
+#ifdef DEAL_II_WITH_MPI
+ /**
+ * Starts the exports of the data in a locally owned array to the range
+ * described by the ghost indices of this class.
+ *
+ * This functionality is used in
+ * LinearAlgebra::distributed::Vector::update_ghost_values().
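+ *
+ * A hedged sketch of the intended call sequence; the vector
+ * @p owned_values of size local_size() and the buffer names are the
+ * caller's responsibility and purely illustrative:
+ * @code
+ * std::vector<double>      tmp(partitioner.n_import_indices());
+ * std::vector<double>      ghosts(partitioner.n_ghost_indices());
+ * std::vector<MPI_Request> requests;
+ * partitioner.export_to_ghosted_array_start<double>(
+ *   0, make_array_view(owned_values), make_array_view(tmp),
+ *   make_array_view(ghosts), requests);
+ * // ... overlap unrelated local work with the communication ...
+ * partitioner.export_to_ghosted_array_finish<double>(make_array_view(ghosts),
+ *                                                    requests);
+ * @endcode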
+ */
+ template <typename Number>
+ void
+ export_to_ghosted_array_start(const unsigned int communication_channel,
+ const ArrayView<const Number> &locally_owned_array,
+ const ArrayView<Number> &temporary_storage,
+ const ArrayView<Number> &ghost_array,
+ std::vector<MPI_Request> &requests) const;
+
+ /**
+ * Completes the exports of the data in a locally owned array to the range
+ * described by the ghost indices of this class.
+ *
+ * This functionality is used in
+ * LinearAlgebra::distributed::Vector::update_ghost_values().
+ */
+ template <typename Number>
+ void
+ export_to_ghosted_array_finish(const ArrayView<Number> &ghost_array,
+ std::vector<MPI_Request> &requests) const;
+
+ /**
+ * Starts the imports of the data on an array described by the ghost
+ * indices of this class into the locally owned array.
+ *
+ * This functionality is used in
+ * LinearAlgebra::distributed::Vector::compress().
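+ *
+ * A hedged sketch of the accumulate-from-ghosts sequence, reusing the
+ * buffers from the export example above (all names illustrative):
+ * @code
+ * partitioner.import_from_ghosted_array_start<double>(
+ *   VectorOperation::add, 0, make_array_view(ghosts),
+ *   make_array_view(tmp), requests);
+ * partitioner.import_from_ghosted_array_finish<double>(
+ *   VectorOperation::add, make_array_view(tmp),
+ *   make_array_view(owned_values), make_array_view(ghosts), requests);
+ * @endcode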
+ */
+ template <typename Number>
+ void
+ import_from_ghosted_array_start(const VectorOperation::values vector_operation,
+ const unsigned int communication_channel,
+ const ArrayView<Number> &ghost_array,
+ const ArrayView<Number> &temporary_storage,
+ std::vector<MPI_Request> &requests) const;
+
+ /**
+ * Completes the imports of the data on an array described by the ghost
+ * indices of this class into the locally owned array.
+ *
+ * This functionality is used in
+ * LinearAlgebra::distributed::Vector::compress().
+ */
+ template <typename Number>
+ void
+ import_from_ghosted_array_finish(const VectorOperation::values vector_operation,
+ const ArrayView<const Number> &temporary_storage,
+ const ArrayView<Number> &locally_owned_array,
+ const ArrayView<Number> &ghost_array,
+ std::vector<MPI_Request> &requests) const;
+#endif
+
/**
* Compute the memory consumption of this structure.
*/
*/
std::vector<std::pair<unsigned int, unsigned int> > import_targets_data;
+ /**
+ * Caches the number of chunks in the import indices per MPI rank. The
+ * length is import_targets_data.size()+1.
+ */
+ std::vector<unsigned int> import_indices_chunks_by_rank_data;
+
+ /**
+ * Caches the number of ghost indices in a larger set of indices given by
+ * the optional argument to set_ghost_indices().
+ */
+ unsigned int n_ghost_indices_in_larger_set;
+
+ /**
+ * Caches the number of chunks in the ghost indices subsets per MPI
+ * rank. The length is ghost_targets_data.size()+1.
+ */
+ std::vector<unsigned int> ghost_indices_subset_chunks_by_rank_data;
+
+ /**
+ * The positions, within the larger ghost set, of the indices of an
+ * IndexSet that is a subset of that larger set, stored as ranges.
+ * Similar structure as in an IndexSet, but tailored to be iterated over.
+ */
+ std::vector<std::pair<unsigned int, unsigned int> > ghost_indices_subset_data;
+
/**
* The ID of the current processor in the MPI network
*/
+ inline
+ const std::vector<std::pair<unsigned int, unsigned int> > &
+ Partitioner::ghost_indices_within_larger_ghost_set() const
+ {
+ return ghost_indices_subset_data;
+ }
+
+
+
inline
const std::vector<std::pair<unsigned int, unsigned int> > &
Partitioner::ghost_targets() const
+ inline
+ const std::vector<unsigned int> &
+ Partitioner::import_indices_chunks_by_rank() const
+ {
+ return import_indices_chunks_by_rank_data;
+ }
+
+
+
+ inline
+ const std::vector<unsigned int> &
+ Partitioner::ghost_indices_subset_chunks_by_rank() const
+ {
+ return ghost_indices_subset_chunks_by_rank_data;
+ }
+
+
+
inline
unsigned int
Partitioner::this_mpi_process() const
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii_partitioner_templates_h
+#define dealii_partitioner_templates_h
+
+#include <deal.II/base/config.h>
+#include <deal.II/base/partitioner.h>
+#include <deal.II/lac/la_parallel_vector.h>
+
+#include <cstring>
+
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace Utilities
+{
+ namespace MPI
+ {
+
+#ifndef DOXYGEN
+
+#ifdef DEAL_II_WITH_MPI
+
+ template <typename Number>
+ void
+ Partitioner::export_to_ghosted_array_start(const unsigned int communication_channel,
+ const ArrayView<const Number> &locally_owned_array,
+ const ArrayView<Number> &temporary_storage,
+ const ArrayView<Number> &ghost_array,
+ std::vector<MPI_Request> &requests) const
+ {
+ AssertDimension(locally_owned_array.size(), local_size());
+ AssertDimension(temporary_storage.size(), n_import_indices());
+ Assert(ghost_array.size() == n_ghost_indices() ||
+ ghost_array.size() == n_ghost_indices_in_larger_set,
+ ExcMessage(std::string("The size of the ghost index array (")
+ +
+ std::to_string(ghost_array.size())
+ +
+ ") must either equal the number of ghost in the "
+ "partitioner ("
+ +
+ std::to_string(n_ghost_indices())
+ +
+ ") or be equal in size to a more comprehensive index"
+ "set which contains "
+ +
+ std::to_string(n_ghost_indices_in_larger_set)
+ +
+ " elements for this partitioner."));
+
+ const unsigned int n_import_targets = import_targets_data.size();
+ const unsigned int n_ghost_targets = ghost_targets_data.size();
+
+ Assert(requests.size() == 0,
+ ExcMessage("Another operation seems to still be running. "
+ "Call update_ghost_values_finish() first."));
+
+ // Need to send and receive the data. Use non-blocking communication,
+ // where it is usually less overhead to first initiate the receive and
+ // then actually send the data
+ requests.resize (n_import_targets+n_ghost_targets);
+
+ // as a ghost array pointer, put the data at the end of the given ghost
+ // array in case we want to fill only a subset of the ghosts so that we
+ // can move the data to the right position in a forward loop in the
+ // _finish function.
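+ //
+ // (illustrative numbers: with n_ghost_indices_in_larger_set == 10 and
+ // n_ghost_indices() == 4, the received data is placed at positions
+ // [6,10) of the ghost array and moved forward in the _finish function)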
+ AssertIndexRange(n_ghost_indices(), n_ghost_indices_in_larger_set+1);
+ Number *ghost_array_ptr = ghost_array.begin()+
+ n_ghost_indices_in_larger_set-
+ n_ghost_indices();
+
+ for (unsigned int i=0; i<n_ghost_targets; i++)
+ {
+ // allow writing into ghost indices even though we are in a
+ // const function
+ const int ierr = MPI_Irecv (ghost_array_ptr,
+ ghost_targets_data[i].second*sizeof(Number),
+ MPI_BYTE,
+ ghost_targets_data[i].first,
+ ghost_targets_data[i].first + communication_channel,
+ communicator,
+ &requests[i]);
+ AssertThrowMPI (ierr);
+ ghost_array_ptr += ghost_targets()[i].second;
+ }
+
+ Number *temp_array_ptr = temporary_storage.begin();
+ for (unsigned int i=0; i<n_import_targets; i++)
+ {
+ // copy the data to be sent to the import_data field
+ std::vector<std::pair<unsigned int, unsigned int> >::const_iterator
+ my_imports = import_indices_data.begin()+import_indices_chunks_by_rank_data[i],
+ end_my_imports = import_indices_data.begin()+import_indices_chunks_by_rank_data[i+1];
+ unsigned int index = 0;
+ for ( ; my_imports!= end_my_imports; ++my_imports)
+ for (unsigned int j=my_imports->first; j<my_imports->second; j++)
+ temp_array_ptr[index++] = locally_owned_array[j];
+ AssertDimension(index, import_targets_data[i].second);
+
+ // start the send operations
+ const int ierr = MPI_Isend (temp_array_ptr,
+ import_targets_data[i].second*sizeof(Number),
+ MPI_BYTE,
+ import_targets_data[i].first,
+ my_pid + communication_channel,
+ communicator,
+ &requests[n_ghost_targets+i]);
+ AssertThrowMPI (ierr);
+ temp_array_ptr += import_targets_data[i].second;
+ }
+ }
+
+
+
+ template <typename Number>
+ void
+ Partitioner::export_to_ghosted_array_finish(const ArrayView<Number> &ghost_array,
+ std::vector<MPI_Request> &requests) const
+ {
+ Assert(ghost_array.size() == n_ghost_indices() ||
+ ghost_array.size() == n_ghost_indices_in_larger_set,
+ ExcMessage(std::string("The size of the ghost index array (")
+ +
+ std::to_string(ghost_array.size())
+ +
+ ") must either equal the number of ghost in the "
+ "partitioner ("
+ +
+ std::to_string(n_ghost_indices())
+ +
+ ") or be equal in size to a more comprehensive index"
+ "set which contains "
+ +
+ std::to_string(n_ghost_indices_in_larger_set)
+ +
+ " elements for this partitioner."));
+
+ // wait for both sends and receives to complete, even though only
+ // receives are really necessary. this gives (much) better performance
+ AssertDimension (ghost_targets().size() + import_targets().size(),
+ requests.size());
+ if (requests.size() > 0)
+ {
+ const int ierr = MPI_Waitall (requests.size(),
+ requests.data(),
+ MPI_STATUSES_IGNORE);
+ AssertThrowMPI (ierr);
+ }
+ requests.resize(0);
+
+ // in case we only sent a subset of indices, we now need to move the data
+ // to the correct positions and delete the old content
+ if (n_ghost_indices_in_larger_set > n_ghost_indices() &&
+ ghost_array.size() == n_ghost_indices_in_larger_set)
+ {
+ unsigned int offset = n_ghost_indices_in_larger_set - n_ghost_indices();
+ // must copy ghost data into extended ghost array
+ for (std::vector<std::pair<unsigned int, unsigned int> >::const_iterator
+ my_ghosts = ghost_indices_subset_data.begin();
+ my_ghosts != ghost_indices_subset_data.end();
+ ++my_ghosts)
+ if (offset > my_ghosts->first)
+ for (unsigned int j=my_ghosts->first; j<my_ghosts->second; ++j, ++offset)
+ {
+ ghost_array[j] = ghost_array[offset];
+ ghost_array[offset] = Number();
+ }
+ else
+ Assert(offset == my_ghosts->first, ExcInternalError());
+ }
+ }
+
+
+
+ template <typename Number>
+ void
+ Partitioner::import_from_ghosted_array_start(const VectorOperation::values vector_operation,
+ const unsigned int communication_channel,
+ const ArrayView<Number> &ghost_array,
+ const ArrayView<Number> &temporary_storage,
+ std::vector<MPI_Request> &requests) const
+ {
+ AssertDimension(temporary_storage.size(), n_import_indices());
+ Assert(ghost_array.size() == n_ghost_indices() ||
+ ghost_array.size() == n_ghost_indices_in_larger_set,
+ ExcMessage(std::string("The size of the ghost index array (")
+ +
+ std::to_string(ghost_array.size())
+ +
+ ") must either equal the number of ghost in the "
+ "partitioner ("
+ +
+ std::to_string(n_ghost_indices())
+ +
+ ") or be equal in size to a more comprehensive index"
+ "set which contains "
+ +
+ std::to_string(n_ghost_indices_in_larger_set)
+ +
+ " elements for this partitioner."));
+
+ (void)vector_operation;
+
+ // nothing to do for insert (only need to zero ghost entries in
+ // compress_finish()). in debug mode we want to check consistency of the
+ // inserted data, therefore the communication is still initialized.
+ // Having different code in debug and optimized mode is somewhat
+ // dangerous, but it really saves communication so do it anyway
+#ifndef DEBUG
+ if (vector_operation == VectorOperation::insert)
+ return;
+#endif
+
+ // nothing to do when we neither have import
+ // nor ghost indices.
+ if (n_ghost_indices()==0 && n_import_indices()==0)
+ return;
+
+ const unsigned int n_import_targets = import_targets_data.size();
+ const unsigned int n_ghost_targets = ghost_targets_data.size();
+
+ Assert(requests.size() == 0,
+ ExcMessage("Another compress operation seems to still be running. "
+ "Call compress_finish() first."));
+
+ // Need to send and receive the data. Use non-blocking communication,
+ // where it is generally less overhead to first initiate the receive and
+ // then actually send the data
+
+ // set channels in different range from update_ghost_values channels
+ const unsigned int channel = communication_channel + 401;
+ requests.resize (n_import_targets + n_ghost_targets);
+
+ // initiate the receive operations
+ Number *temp_array_ptr = temporary_storage.begin();
+ for (unsigned int i=0; i<n_import_targets; i++)
+ {
+ AssertThrow (static_cast<std::size_t>(import_targets_data[i].second)*
+ sizeof(Number) <
+ static_cast<std::size_t>(std::numeric_limits<int>::max()),
+ ExcMessage("Index overflow: Maximum message size in MPI is 2GB. "
+ "The number of ghost entries times the size of 'Number' "
+ "exceeds this value. This is not supported."));
+ const int ierr = MPI_Irecv (temp_array_ptr,
+ import_targets_data[i].second*sizeof(Number),
+ MPI_BYTE,
+ import_targets_data[i].first,
+ import_targets_data[i].first + channel,
+ communicator,
+ &requests[i]);
+ AssertThrowMPI (ierr);
+ temp_array_ptr += import_targets_data[i].second;
+ }
+
+ // initiate the send operations
+
+ // in case we want to import only from a subset of the ghosts we want to
+ // move the data to send to the front of the array
+ AssertIndexRange(n_ghost_indices(), n_ghost_indices_in_larger_set+1);
+ Number *ghost_array_ptr = ghost_array.begin();
+ for (unsigned int i=0; i<n_ghost_targets; i++)
+ {
+ // in case we are sending only a subset of indices, we first move the
+ // data to the front of the array and clear the old positions
+ if (n_ghost_indices_in_larger_set > n_ghost_indices() &&
+ ghost_array.size() == n_ghost_indices_in_larger_set)
+ {
+ std::vector<std::pair<unsigned int, unsigned int> >::const_iterator
+ my_ghosts = ghost_indices_subset_data.begin()+ghost_indices_subset_chunks_by_rank_data[i],
+ end_my_ghosts = ghost_indices_subset_data.begin()+ghost_indices_subset_chunks_by_rank_data[i+1];
+ unsigned int offset = 0;
+ for ( ; my_ghosts != end_my_ghosts; ++my_ghosts)
+ if (ghost_array_ptr + offset != ghost_array.begin() + my_ghosts->first)
+ for (unsigned int j=my_ghosts->first; j<my_ghosts->second; ++j, ++offset)
+ {
+ ghost_array_ptr[offset] = ghost_array[j];
+ ghost_array[j] = Number();
+ }
+ else
+ offset += my_ghosts->second - my_ghosts->first;
+ AssertDimension(offset, ghost_targets_data[i].second);
+ }
+
+ AssertThrow (static_cast<std::size_t>(ghost_targets_data[i].second)*
+ sizeof(Number) <
+ static_cast<std::size_t>(std::numeric_limits<int>::max()),
+ ExcMessage("Index overflow: Maximum message size in MPI is 2GB. "
+ "The number of ghost entries times the size of 'Number' "
+ "exceeds this value. This is not supported."));
+ const int ierr = MPI_Isend (ghost_array_ptr,
+ ghost_targets_data[i].second*sizeof(Number),
+ MPI_BYTE,
+ ghost_targets_data[i].first,
+ this_mpi_process() + channel,
+ communicator,
+ &requests[n_import_targets+i]);
+ AssertThrowMPI (ierr);
+
+ ghost_array_ptr += ghost_targets_data[i].second;
+ }
+ }
+
+
+
+ template <typename Number>
+ void
+ Partitioner::import_from_ghosted_array_finish(const VectorOperation::values vector_operation,
+ const ArrayView<const Number> &temporary_storage,
+ const ArrayView<Number> &locally_owned_array,
+ const ArrayView<Number> &ghost_array,
+ std::vector<MPI_Request> &requests) const
+ {
+ AssertDimension(locally_owned_array.size(), local_size());
+ AssertDimension(temporary_storage.size(), n_import_indices());
+ Assert(ghost_array.size() == n_ghost_indices() ||
+ ghost_array.size() == n_ghost_indices_in_larger_set,
+ ExcMessage(std::string("The size of the ghost index array (")
+ +
+ std::to_string(ghost_array.size())
+ +
+ ") must either equal the number of ghost in the "
+ "partitioner ("
+ +
+ std::to_string(n_ghost_indices())
+ +
+ ") or be equal in size to a more comprehensive index"
+ "set which contains "
+ +
+ std::to_string(n_ghost_indices_in_larger_set)
+ +
+ " elements for this partitioner."));
+
+ // in optimized mode, no communication was started, so leave the
+ // function directly (and only clear ghosts)
+#ifndef DEBUG
+ if (vector_operation == VectorOperation::insert)
+ {
+ Assert(requests.empty(),
+ ExcInternalError("Did not expect a non-empty communication "
+ "request when inserting. Check that the same "
+ "vector_operation argument was passed to "
+ "import_from_ghosted_array_start as is passed "
+ "to import_from_ghosted_array_finish."));
+ std::memset(ghost_array.begin(), 0, sizeof(Number)*ghost_array.size());
+ return;
+ }
+#endif
+
+ // nothing to do when we neither have import nor ghost indices.
+ if (n_ghost_indices()==0 && n_import_indices()==0)
+ return;
+
+ const unsigned int n_import_targets = import_targets_data.size();
+ const unsigned int n_ghost_targets = ghost_targets_data.size();
+
+ if (vector_operation != dealii::VectorOperation::insert)
+ AssertDimension (n_ghost_targets+n_import_targets,
+ requests.size());
+
+ // first wait for the receive to complete
+ if (requests.size() > 0 && n_import_targets > 0)
+ {
+ const int ierr = MPI_Waitall (n_import_targets, requests.data(),
+ MPI_STATUSES_IGNORE);
+ AssertThrowMPI(ierr);
+
+ const Number *read_position = temporary_storage.begin();
+ std::vector<std::pair<unsigned int, unsigned int> >::const_iterator
+ my_imports = import_indices_data.begin();
+
+ // If the operation is not an insertion, add the imported data to the
+ // local values. For insert, nothing is done here (but in debug mode
+ // we assert that the specified value is either zero or matches the
+ // one already present).
+ if (vector_operation != dealii::VectorOperation::insert)
+ for ( ; my_imports!=import_indices_data.end(); ++my_imports)
+ for (unsigned int j=my_imports->first; j<my_imports->second; j++)
+ locally_owned_array[j] += *read_position++;
+ else
+ for ( ; my_imports!=import_indices_data.end(); ++my_imports)
+ for (unsigned int j=my_imports->first; j<my_imports->second;
+ j++, read_position++)
+ // Below we use relatively large precision in units in the last place (ULP) as
+ // this Assert can be easily triggered in p::d::SolutionTransfer.
+ // The rationale is that during interpolation on two elements sharing
+ // the face, values on this face obtained from each side might
+ // be different due to additions being done in different order.
+ Assert(*read_position == Number() ||
+ std::abs(locally_owned_array[j] - *read_position) <=
+ std::abs(locally_owned_array[j] + *read_position) *
+ 100000. *
+ std::numeric_limits<typename numbers::NumberTraits<Number>::real_type>::epsilon(),
+ typename LinearAlgebra::distributed::Vector<Number>::
+ ExcNonMatchingElements(*read_position, locally_owned_array[j],
+ my_pid));
+ AssertDimension(read_position-temporary_storage.begin(), n_import_indices());
+ }
+
+ // wait for the send operations to complete
+ if (requests.size() > 0 && n_ghost_targets > 0)
+ {
+ const int ierr = MPI_Waitall (n_ghost_targets,
+ &requests[n_import_targets],
+ MPI_STATUSES_IGNORE);
+ AssertThrowMPI(ierr);
+ }
+ else
+ AssertDimension (n_ghost_indices(), 0);
+
+ std::memset(ghost_array.begin(), 0, sizeof(Number)*n_ghost_indices());
+
+ // clear the compress requests
+ requests.resize(0);
+ }
+
+
+#endif // ifdef DEAL_II_WITH_MPI
+#endif // ifndef DOXYGEN
+
+ } // end of namespace MPI
+
+} // end of namespace Utilities
+
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
// ---------------------------------------------------------------------
#include <deal.II/base/partitioner.h>
+#include <deal.II/base/partitioner.templates.h>
DEAL_II_NAMESPACE_OPEN
void
- Partitioner::set_ghost_indices (const IndexSet &ghost_indices_in)
+ Partitioner::set_ghost_indices (const IndexSet &ghost_indices_in,
+ const IndexSet &larger_ghost_index_set)
{
// Set ghost indices from input. To be sure that no entries from the
// locally owned range are present, subtract the locally owned indices
// transform import indices to local index space and compress
// contiguous indices in form of ranges
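+ // (for example, local indices {3,4,5,9} compress to the two ranges
+ // (3,6) and (9,10))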
{
- types::global_dof_index last_index = numbers::invalid_dof_index-1;
+ import_indices_chunks_by_rank_data.resize(import_targets_data.size()+1);
+ import_indices_chunks_by_rank_data[0] = 0;
std::vector<std::pair<unsigned int,unsigned int> > compressed_import_indices;
- for (unsigned int i=0; i<n_import_indices_data; i++)
+ unsigned int shift = 0;
+ for (unsigned int p=0; p<import_targets_data.size(); ++p)
{
- Assert (expanded_import_indices[i] >= local_range_data.first &&
- expanded_import_indices[i] < local_range_data.second,
- ExcIndexRange(expanded_import_indices[i], local_range_data.first,
- local_range_data.second));
- types::global_dof_index new_index = (expanded_import_indices[i] -
- local_range_data.first);
- Assert(new_index<numbers::invalid_unsigned_int,
- ExcNotImplemented());
- if (new_index == last_index+1)
- compressed_import_indices.back().second++;
- else
+ types::global_dof_index last_index = numbers::invalid_dof_index-1;
+ for (unsigned int ii=0; ii<import_targets_data[p].second; ++ii)
{
- compressed_import_indices.emplace_back (new_index,new_index+1);
+ const unsigned int i = shift + ii;
+ Assert (expanded_import_indices[i] >= local_range_data.first &&
+ expanded_import_indices[i] < local_range_data.second,
+ ExcIndexRange(expanded_import_indices[i], local_range_data.first,
+ local_range_data.second));
+ types::global_dof_index new_index = (expanded_import_indices[i] -
+ local_range_data.first);
+ Assert(new_index<numbers::invalid_unsigned_int,
+ ExcNotImplemented());
+ if (new_index == last_index+1)
+ compressed_import_indices.back().second++;
+ else
+ compressed_import_indices.emplace_back (new_index,new_index+1);
+ last_index = new_index;
}
- last_index = new_index;
+ shift += import_targets_data[p].second;
+ import_indices_chunks_by_rank_data[p+1] = compressed_import_indices.size();
}
import_indices_data = compressed_import_indices;
#endif
}
}
-#endif
+#endif // #ifdef DEAL_II_WITH_MPI
+
+ if (larger_ghost_index_set.size() == 0)
+ {
+ ghost_indices_subset_chunks_by_rank_data.clear();
+ ghost_indices_subset_data.push_back(std::make_pair(local_size(),
+ local_size()+n_ghost_indices()));
+ n_ghost_indices_in_larger_set = n_ghost_indices_data;
+ }
+ else
+ {
+ AssertDimension(larger_ghost_index_set.size(), ghost_indices_data.size());
+ Assert((larger_ghost_index_set & locally_owned_range_data).n_elements()==0,
+ ExcMessage("Ghost index set should not overlap with owned set."));
+ Assert((larger_ghost_index_set & ghost_indices_data) == ghost_indices_data,
+ ExcMessage("Larger ghost index set must contain the tight "
+ "ghost index set."));
+
+ n_ghost_indices_in_larger_set = larger_ghost_index_set.n_elements();
+
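+ // map each tight ghost index to its position within the larger ghost
+ // set (illustrative: larger set {12,13,20,21} and tight set {13,21}
+ // yield the expanded numbering {1,3})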
+ std::vector<unsigned int> expanded_numbering;
+ for (IndexSet::ElementIterator it=ghost_indices_data.begin();
+ it != ghost_indices_data.end(); ++it)
+ {
+ Assert(larger_ghost_index_set.is_element(*it),
+ ExcMessage("The given larger ghost index set must contain"
+ "all indices in the actual index set."));
+ expanded_numbering.push_back(larger_ghost_index_set.index_within_set(*it));
+ }
+
+ std::vector<std::pair<unsigned int,unsigned int> > ghost_indices_subset;
+ ghost_indices_subset_chunks_by_rank_data.resize(ghost_targets_data.size()+1);
+ ghost_indices_subset_chunks_by_rank_data[0] = 0;
+ unsigned int shift = 0;
+ for (unsigned int p=0; p<ghost_targets_data.size(); ++p)
+ {
+ unsigned int last_index = numbers::invalid_unsigned_int-1;
+ for (unsigned int ii=0; ii<ghost_targets_data[p].second; ii++)
+ {
+ const unsigned int i = shift + ii;
+ if (expanded_numbering[i] == last_index+1)
+ ghost_indices_subset.back().second++;
+ else
+ ghost_indices_subset.emplace_back(expanded_numbering[i],
+ expanded_numbering[i]+1);
+ last_index = expanded_numbering[i];
+ }
+ shift += ghost_targets_data[p].second;
+ ghost_indices_subset_chunks_by_rank_data[p+1] = ghost_indices_subset.size();
+ }
+ ghost_indices_subset_data = ghost_indices_subset;
+ }
}
memory += MemoryConsumption::memory_consumption(ghost_targets_data);
memory += MemoryConsumption::memory_consumption(import_targets_data);
memory += MemoryConsumption::memory_consumption(import_indices_data);
+ memory += MemoryConsumption::memory_consumption(import_indices_chunks_by_rank_data);
+ memory += MemoryConsumption::memory_consumption(ghost_indices_subset_chunks_by_rank_data);
+ memory += MemoryConsumption::memory_consumption(ghost_indices_subset_data);
memory += MemoryConsumption::memory_consumption(ghost_indices_data);
return memory;
}
} // end of namespace Utilities
+
+// explicit instantiations from .templates.h file
+#include "partitioner.inst"
+
DEAL_II_NAMESPACE_CLOSE