*
* @author Peter Munch, 2020
*/
- template <typename Number = double>
class NoncontiguousPartitioner
: public dealii::LinearAlgebra::CommunicationPatternBase
{
/**
* Constructor. Set up a point-to-point communication pattern based on the
- * IndexSets arguments @p indexset_has and @p indexset_want for the MPI
+ * IndexSets arguments @p indexset_locally_owned and @p indexset_ghost for the MPI
* communicator @p communicator.
*/
- NoncontiguousPartitioner(const IndexSet &indexset_has,
- const IndexSet &indexset_want,
+ NoncontiguousPartitioner(const IndexSet &indexset_locally_owned,
+ const IndexSet &indexset_ghost,
const MPI_Comm &communicator);
/**
- * Constructor. Same as above but for vectors of indices @p indices_has
- * and @p indices_want. This allows the indices to not be sorted and the
+ * Constructor. Same as above but for vectors of indices @p indices_locally_owned
+ * and @p indices_ghost. This allows the indices to not be sorted and the
* values are read and written automatically at the right position of
* the vector during export_to_ghosted_array(), export_to_ghosted_array_start(),
* and export_to_ghosted_array_finish(). It is allowed to include entries
* that do not take part in the index exchange but are present in the data
* vectors as padding.
*/
NoncontiguousPartitioner(
- const std::vector<types::global_dof_index> &indices_has,
- const std::vector<types::global_dof_index> &indices_want,
+ const std::vector<types::global_dof_index> &indices_locally_owned,
+ const std::vector<types::global_dof_index> &indices_ghost,
const MPI_Comm & communicator);
/**
- * Fill the vector @p dst according to the precomputed communication
- * pattern with values from @p src.
+ * Fill the vector @p ghost_array according to the precomputed communication
+ * pattern with values from @p locally_owned_array.
*
* @pre The vectors only have to provide a method begin(), which allows
* access to their raw data.
*
+ * @pre The size of both vectors must be at least as large as the number
+ * of entries in the index sets passed to the constructors or the
+ * reinit() functions.
+ *
* @note This function calls the methods export_to_ghosted_array_start() and
* export_to_ghosted_array_finish() in sequence. Users can call these two
* functions separately and thereby overlap communication and
* computation.
*/
- template <typename VectorType>
+ template <typename Number>
+ void
+ export_to_ghosted_array(
+ const ArrayView<const Number> &locally_owned_array,
+ const ArrayView<Number> & ghost_array) const;
+
+ /**
+ * Same as above but with an interface similar to
+ * Utilities::MPI::Partitioner::export_to_ghosted_array_start and
+ * Utilities::MPI::Partitioner::export_to_ghosted_array_finish. In this
+ * function, the user can provide the temporary data structures to be
+ * used.
+ *
+ * @pre The size of the @p temporary_storage vector has to be at least
+ * as large as the sum of the number of entries in the index sets
+ * passed to the constructor and the reinit() functions. The reason
+ * for this is that this vector is used as a buffer for both sending
+ * and receiving data.
+ */
+ template <typename Number>
void
- update_values(VectorType &dst, const VectorType &src) const;
+ export_to_ghosted_array(
+ const unsigned int communication_channel,
+ const ArrayView<const Number> &locally_owned_array,
+ const ArrayView<Number> & temporary_storage,
+ const ArrayView<Number> & ghost_array,
+ std::vector<MPI_Request> & requests) const;
/**
* Start update: Data is packed, non-blocking sends and receives
* are started.
+ *
+ * @note In contrast to the function
+ * Utilities::MPI::Partitioner::export_to_ghosted_array_start, the user
+ * does not pass a reference to the destination vector, since the data
+ * is received into a designated part of the buffer @p temporary_storage. This
+ * allows for padding and other post-processing of the received data.
+ *
+ * @pre The required sizes of the vectors are the same as in the functions
+ * above.
*/
- template <typename VectorType>
+ template <typename Number>
void
- update_values_start(const VectorType &src, const unsigned int tag) const;
+ export_to_ghosted_array_start(
+ const unsigned int communication_channel,
+ const ArrayView<const Number> &locally_owned_array,
+ const ArrayView<Number> & temporary_storage,
+ std::vector<MPI_Request> & requests) const;
/**
* Finish update. The method waits until all data has been sent and
* received. Once data from any process is received, it is processed and
* placed at the right position of the vector @p ghost_array.
+ *
+ * @note In contrast to the function
+ * Utilities::MPI::Partitioner::export_to_ghosted_array_finish, the user
+ * also has to pass a reference to the buffer @p temporary_storage,
+ * since the data has been received into the buffer and not into the
+ * destination vector.
+ *
+ * @pre The required sizes of the vectors are the same as in the functions
+ * above.
*/
- template <typename VectorType>
+ template <typename Number>
void
- update_values_finish(VectorType &dst, const unsigned int tag) const;
+ export_to_ghosted_array_finish(
+ const ArrayView<const Number> &temporary_storage,
+ const ArrayView<Number> & ghost_array,
+ std::vector<MPI_Request> & requests) const;
/**
* Returns the number of processes this process sends data to and the
* number of processes this process receives data from.
*/
std::pair<unsigned int, unsigned int>
n_targets();
/**
* Initialize the inner data structures.
*/
void
- reinit(const IndexSet &indexset_has,
- const IndexSet &indexset_want,
+ reinit(const IndexSet &indexset_locally_owned,
+ const IndexSet &indexset_ghost,
const MPI_Comm &communicator) override;
/**
* Initialize the inner data structures.
*/
void
- reinit(const std::vector<types::global_dof_index> &indices_has,
- const std::vector<types::global_dof_index> &indices_want,
+ reinit(const std::vector<types::global_dof_index> &indices_locally_owned,
+ const std::vector<types::global_dof_index> &indices_ghost,
const MPI_Comm & communicator);
private:
*/
std::vector<types::global_dof_index> send_indices;
- /**
- * Buffer containing the values sorted accoding to the ranks.
- */
- mutable std::vector<Number> send_buffers;
-
- /**
- * MPI requests for sending.
- */
- mutable std::vector<MPI_Request> send_requests;
-
/**
* The ranks this process receives data from.
*/
std::vector<types::global_dof_index> recv_indices;
/**
- * Buffer containing the values sorted by rank.
+ * Buffer containing the values sorted by rank for sending and receiving.
+ *
+ * @note Only allocated if not provided externally by the user.
+ *
+ * @note At this point the type of the data to be sent is not known, so
+ * the buffer uses an arbitrary type of size 1 byte, which is cast to
+ * the requested type in the relevant functions.
*/
- mutable std::vector<Number> recv_buffers;
+ mutable std::vector<uint8_t> buffers;
/**
- * MPI requests for receiving.
+ * MPI requests for sending and receiving.
+ *
+ * @note Only allocated if not provided externally by the user.
*/
- mutable std::vector<MPI_Request> recv_requests;
+ mutable std::vector<MPI_Request> requests;
};
} // namespace MPI
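For orientation, a minimal usage sketch of the reworked interface (an editorial illustration rather than part of the patch; the function and variable names are made up, and each rank simply sends its single owned value to the previous rank):

  void
  test_exchange(const MPI_Comm comm)
  {
    const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
    const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);

    // each rank owns global index my_rank and wants the index of its
    // right neighbor
    IndexSet index_set_has(n_ranks);
    index_set_has.add_index(my_rank);
    IndexSet index_set_want(n_ranks);
    index_set_want.add_index((my_rank + 1) % n_ranks);

    Utilities::MPI::NoncontiguousPartitioner partitioner(index_set_has,
                                                         index_set_want,
                                                         comm);
    // (alternatively, possibly unsorted vectors of
    // types::global_dof_index can be passed instead of the two IndexSets)

    std::vector<double> src(index_set_has.n_elements());
    std::vector<double> dst(index_set_want.n_elements());
    src[0] = my_rank;

    // one-shot exchange: pack, send, receive, and unpack
    partitioner.export_to_ghosted_array(
      ArrayView<const double>(src.data(), src.size()),
      ArrayView<double>(dst.data(), dst.size()));
    // dst[0] now holds the value owned by rank (my_rank + 1) % n_ranks
  }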
{
namespace MPI
{
- template <typename Number>
- NoncontiguousPartitioner<Number>::NoncontiguousPartitioner(
+ NoncontiguousPartitioner::NoncontiguousPartitioner(
const IndexSet &indexset_has,
const IndexSet &indexset_want,
const MPI_Comm &communicator)
- template <typename Number>
- NoncontiguousPartitioner<Number>::NoncontiguousPartitioner(
+ NoncontiguousPartitioner::NoncontiguousPartitioner(
const std::vector<types::global_dof_index> &indices_has,
const std::vector<types::global_dof_index> &indices_want,
const MPI_Comm & communicator)
- template <typename Number>
std::pair<unsigned int, unsigned int>
- NoncontiguousPartitioner<Number>::n_targets()
+ NoncontiguousPartitioner::n_targets()
{
return {send_ranks.size(), recv_ranks.size()};
}
- template <typename Number>
types::global_dof_index
- NoncontiguousPartitioner<Number>::memory_consumption()
+ NoncontiguousPartitioner::memory_consumption()
{
return MemoryConsumption::memory_consumption(send_ranks) +
MemoryConsumption::memory_consumption(send_ptr) +
MemoryConsumption::memory_consumption(send_indices) +
- MemoryConsumption::memory_consumption(send_buffers) +
- MemoryConsumption::memory_consumption(send_requests) +
MemoryConsumption::memory_consumption(recv_ranks) +
MemoryConsumption::memory_consumption(recv_ptr) +
MemoryConsumption::memory_consumption(recv_indices) +
- MemoryConsumption::memory_consumption(recv_buffers) +
- MemoryConsumption::memory_consumption(recv_requests);
+ MemoryConsumption::memory_consumption(buffers) +
+ MemoryConsumption::memory_consumption(requests);
}
- template <typename Number>
const MPI_Comm &
- NoncontiguousPartitioner<Number>::get_mpi_communicator() const
+ NoncontiguousPartitioner::get_mpi_communicator() const
{
return communicator;
}
- template <typename Number>
void
- NoncontiguousPartitioner<Number>::reinit(const IndexSet &indexset_has,
- const IndexSet &indexset_want,
- const MPI_Comm &communicator)
+ NoncontiguousPartitioner::reinit(const IndexSet &indexset_has,
+ const IndexSet &indexset_want,
+ const MPI_Comm &communicator)
{
this->communicator = communicator;
send_ranks.clear();
send_ptr.clear();
send_indices.clear();
- send_buffers.clear();
- send_requests.clear();
recv_ranks.clear();
recv_ptr.clear();
recv_indices.clear();
- recv_buffers.clear();
- recv_requests.clear();
+ buffers.clear();
+ requests.clear();
// setup communication pattern
std::vector<unsigned int> owning_ranks_of_ghosts(
recv_ptr.push_back(recv_indices.size());
}
-
- recv_buffers.resize(recv_indices.size());
- recv_requests.resize(recv_map.size());
}
{
const auto targets_with_indexset = process.get_requesters();
- send_ptr.push_back(send_indices.size() /*=0*/);
+ send_ptr.push_back(recv_ptr.back());
for (const auto &target_with_indexset : targets_with_indexset)
{
send_ranks.push_back(target_with_indexset.first);
for (const auto &cell_index : target_with_indexset.second)
send_indices.push_back(indexset_has.index_within_set(cell_index));
- send_ptr.push_back(send_indices.size());
+ send_ptr.push_back(send_indices.size() + recv_ptr.back());
}
-
- send_buffers.resize(send_indices.size());
- send_requests.resize(targets_with_indexset.size());
}
}
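For orientation (an added note, not part of the patch): the offsets by recv_ptr.back() in the hunk above reflect that sent and received values now share the single buffers member. Assuming recv_ptr starts at zero, which the elided beginning of the hunk suggests, the combined buffer is laid out as

  // layout of buffers in units of Number, total size == send_ptr.back():
  //   [ 0,               recv_ptr.back() )  receive section, addressed via recv_ptr
  //   [ recv_ptr.back(), send_ptr.back() )  send section,    addressed via send_ptr

which is why export_to_ghosted_array() below resizes the buffer to send_ptr.back() * sizeof(Number) bytes.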
- template <typename Number>
void
- NoncontiguousPartitioner<Number>::reinit(
+ NoncontiguousPartitioner::reinit(
const std::vector<types::global_dof_index> &indices_has,
const std::vector<types::global_dof_index> &indices_want,
const MPI_Comm & communicator)
template <typename Number>
- template <typename VectorType>
void
- NoncontiguousPartitioner<Number>::update_values(VectorType & dst,
- const VectorType &src) const
+ NoncontiguousPartitioner::export_to_ghosted_array(
+ const ArrayView<const Number> &src,
+ const ArrayView<Number> & dst) const
{
- const auto tag = internal::Tags::noncontiguous_partitioner_update_values;
+ // allocate internal memory if needed
+ if (requests.size() != send_ranks.size() + recv_ranks.size())
+ requests.resize(send_ranks.size() + recv_ranks.size());
+
+ if (this->buffers.size() != send_ptr.back() * sizeof(Number))
+ this->buffers.resize(send_ptr.back() * sizeof(Number), 0);
+
+ // perform actual exchange
+ this->template export_to_ghosted_array<Number>(
+ 0,
+ src,
+ ArrayView<Number>(reinterpret_cast<Number *>(this->buffers.data()),
+ send_ptr.back()),
+ dst,
+ this->requests);
+ }
+
- this->update_values_start(src, tag);
- this->update_values_finish(dst, tag);
+ template <typename Number>
+ void
+ NoncontiguousPartitioner::export_to_ghosted_array(
+ const unsigned int communication_channel,
+ const ArrayView<const Number> &locally_owned_array,
+ const ArrayView<Number> & temporary_storage,
+ const ArrayView<Number> & ghost_array,
+ std::vector<MPI_Request> & requests) const
+ {
+ this->template export_to_ghosted_array_start<Number>(
+ communication_channel,
+ locally_owned_array,
+ temporary_storage,
+ requests);
+ this->template export_to_ghosted_array_finish<Number>(temporary_storage,
+ ghost_array,
+ requests);
}
template <typename Number>
- template <typename VectorType>
void
- NoncontiguousPartitioner<Number>::update_values_start(
- const VectorType & src,
- const unsigned int tag) const
+ NoncontiguousPartitioner::export_to_ghosted_array_start(
+ const unsigned int communication_channel,
+ const ArrayView<const Number> &src,
+ const ArrayView<Number> & buffers,
+ std::vector<MPI_Request> & requests) const
{
#ifndef DEAL_II_WITH_MPI
+ (void)communication_channel;
(void)src;
- (void)tag;
+ (void)buffers;
+ (void)requests;
Assert(false, ExcNeedsMPI());
#else
+ AssertIndexRange(communication_channel, 10);
+
+ const auto tag =
+ communication_channel +
+ internal::Tags::noncontiguous_partitioner_update_ghost_values;
+
// post recv
for (types::global_dof_index i = 0; i < recv_ranks.size(); i++)
{
- const auto ierr = MPI_Irecv(recv_buffers.data() + recv_ptr[i],
- recv_ptr[i + 1] - recv_ptr[i],
- Utilities::MPI::internal::mpi_type_id(
- recv_buffers.data()),
- recv_ranks[i],
- tag,
- communicator,
- &recv_requests[i]);
+ const auto ierr =
+ MPI_Irecv(buffers.data() + recv_ptr[i],
+ recv_ptr[i + 1] - recv_ptr[i],
+ Utilities::MPI::internal::mpi_type_id(buffers.data()),
+ recv_ranks[i],
+ tag,
+ communicator,
+ &requests[i + send_ranks.size()]);
AssertThrowMPI(ierr);
}
auto src_iterator = src.begin();
// post send
- for (types::global_dof_index i = 0; i < send_ranks.size(); i++)
+ for (types::global_dof_index i = 0, k = 0; i < send_ranks.size(); i++)
{
// collect data to be sent
- for (types::global_dof_index j = send_ptr[i], c = 0;
- j < send_ptr[i + 1];
+ for (types::global_dof_index j = send_ptr[i]; j < send_ptr[i + 1];
j++)
- send_buffers[send_ptr[i] + c++] = src_iterator[send_indices[j]];
+ buffers[j] = src_iterator[send_indices[k++]];
// send data
- const auto ierr = MPI_Isend(send_buffers.data() + send_ptr[i],
- send_ptr[i + 1] - send_ptr[i],
- Utilities::MPI::internal::mpi_type_id(
- send_buffers.data()),
- send_ranks[i],
- tag,
- communicator,
- &send_requests[i]);
+ const auto ierr =
+ MPI_Isend(buffers.data() + send_ptr[i],
+ send_ptr[i + 1] - send_ptr[i],
+ Utilities::MPI::internal::mpi_type_id(buffers.data()),
+ send_ranks[i],
+ tag,
+ communicator,
+ &requests[i]);
AssertThrowMPI(ierr);
}
#endif
template <typename Number>
- template <typename VectorType>
void
- NoncontiguousPartitioner<Number>::update_values_finish(
- VectorType & dst,
- const unsigned int tag) const
+ NoncontiguousPartitioner::export_to_ghosted_array_finish(
+ const ArrayView<const Number> &buffers,
+ const ArrayView<Number> & dst,
+ std::vector<MPI_Request> & requests) const
{
- (void)tag;
-
#ifndef DEAL_II_WITH_MPI
+ (void)buffers;
(void)dst;
+ (void)requests;
Assert(false, ExcNeedsMPI());
#else
auto dst_iterator = dst.begin();
// receive all data packages and copy data from buffers
- for (types::global_dof_index proc = 0; proc < recv_requests.size();
- proc++)
+ for (types::global_dof_index proc = 0; proc < recv_ranks.size(); proc++)
{
int i;
MPI_Status status;
- const auto ierr = MPI_Waitany(recv_requests.size(),
- recv_requests.data(),
+ const auto ierr = MPI_Waitany(recv_ranks.size(),
+ requests.data() + send_ranks.size(),
&i,
&status);
AssertThrowMPI(ierr);
for (types::global_dof_index j = recv_ptr[i], c = 0;
j < recv_ptr[i + 1];
j++)
- dst_iterator[recv_indices[j]] = recv_buffers[recv_ptr[i] + c++];
+ dst_iterator[recv_indices[j]] = buffers[recv_ptr[i] + c++];
}
// wait until all data packages have been sent
- const auto ierr = MPI_Waitall(send_requests.size(),
- send_requests.data(),
- MPI_STATUSES_IGNORE);
+ const auto ierr =
+ MPI_Waitall(send_ranks.size(), requests.data(), MPI_STATUSES_IGNORE);
AssertThrowMPI(ierr);
#endif
}
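Building on the sketch after the class declaration above, the split interface allows overlapping communication with local work. The following continuation (again illustrative, not part of the patch) sizes temporary_storage according to the @pre stated in the header (sum of the entries of both index sets) and the request vector according to n_targets():

  const std::pair<unsigned int, unsigned int> targets = partitioner.n_targets();
  std::vector<MPI_Request> requests(targets.first + targets.second);
  std::vector<double>      temporary_storage(src.size() + dst.size());

  // pack the locally owned values and post the non-blocking sends/receives
  partitioner.export_to_ghosted_array_start(
    /*communication_channel=*/0,
    ArrayView<const double>(src.data(), src.size()),
    ArrayView<double>(temporary_storage.data(), temporary_storage.size()),
    requests);

  // ... purely local work that does not need the ghost values ...

  // wait for the communication to finish and unpack into dst
  partitioner.export_to_ghosted_array_finish(
    ArrayView<const double>(temporary_storage.data(), temporary_storage.size()),
    ArrayView<double>(dst.data(), dst.size()),
    requests);

When no overlap is needed, the five-argument export_to_ghosted_array() overload performs the same two steps back to back.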
partitioner_export_end = partitioner_export_start + 200,
/// NoncontiguousPartitioner::export_to_ghosted_array()
- noncontiguous_partitioner_update_values,
+ noncontiguous_partitioner_update_ghost_values,
// Utilities::MPI::compute_union
compute_union,
\{
namespace MPI
\{
- template class NoncontiguousPartitioner<S>;
-
- template void
- NoncontiguousPartitioner<S>::update_values(
- std::vector<S> & dst,
- const std::vector<S> &src) const;
-
- template void
- NoncontiguousPartitioner<S>::update_values(
- AlignedVector<S> & dst,
- const AlignedVector<S> &src) const;
-
- template void
- NoncontiguousPartitioner<S>::update_values(
- ArrayView<S> & dst,
- const ArrayView<S> &src) const;
-
- template void
- NoncontiguousPartitioner<S>::update_values(
- LinearAlgebra::Vector<S> & dst,
- const LinearAlgebra::Vector<S> &src) const;
-
template void
- NoncontiguousPartitioner<S>::update_values(
- LinearAlgebra::distributed::Vector<S> & dst,
- const LinearAlgebra::distributed::Vector<S> &src) const;
+ NoncontiguousPartitioner::export_to_ghosted_array(
+ const ArrayView<const S> &src,
+ const ArrayView<S> & dst) const;
\}
\}
}
index_set_want.add_index(2);
}
- Utilities::MPI::NoncontiguousPartitioner<double> vector(index_set_has,
- index_set_want,
- comm);
+ Utilities::MPI::NoncontiguousPartitioner vector(index_set_has,
+ index_set_want,
+ comm);
AlignedVector<double> src(index_set_has.n_elements());
AlignedVector<double> dst(index_set_want.n_elements());
src[0] = Utilities::MPI::this_mpi_process(comm) * 100 + 1;
- vector.update_values(dst, src);
+ vector.export_to_ghosted_array(ArrayView<const double>(src.data(),
+ src.size()),
+ ArrayView<double>(dst.data(), dst.size()));
for (size_t i = 0; i < src.size(); i++)
deallog << static_cast<int>(src[i]) << " ";
if (do_revert)
std::reverse(indices_want.begin(), indices_want.end());
- Utilities::MPI::NoncontiguousPartitioner<double> vector(indices_has,
- indices_want,
- comm);
+ Utilities::MPI::NoncontiguousPartitioner vector(indices_has,
+ indices_want,
+ comm);
AlignedVector<double> src(indices_has.size());
for (unsigned int i = 0; i < indices_has.size(); i++)
AlignedVector<double> dst(indices_want.size());
- vector.update_values(dst, src);
+ vector.export_to_ghosted_array(ArrayView<const double>(src.data(),
+ src.size()),
+ ArrayView<double>(dst.data(), dst.size()));
for (size_t i = 0; i < src.size(); i++)
deallog << static_cast<int>(src[i]) << " ";
std::vector<types::global_dof_index> index_set_has,
std::vector<types::global_dof_index> index_set_want)
{
- Utilities::MPI::NoncontiguousPartitioner<double> vector;
+ Utilities::MPI::NoncontiguousPartitioner vector;
vector.reinit(index_set_has, index_set_want, comm);
AlignedVector<double> src(index_set_has.size(), 0);
for (unsigned int i = 0; i < index_set_has.size(); i++)
src[i] = Utilities::MPI::this_mpi_process(comm) * 100 + i;
- vector.update_values(dst, src);
+ vector.export_to_ghosted_array(ArrayView<const double>(src.data(),
+ src.size()),
+ ArrayView<double>(dst.data(), dst.size()));
for (size_t i = 0; i < src.size(); i++)
deallog << static_cast<int>(src[i]) << " ";