* Fill the vector @p ghost_array according to the precomputed communication
* pattern with values from @p locally_owned_array.
*
+ * By default, a single value is communicated per entry
+ * (`n_components_templated == 1`). If multiple components per entry
+ * should be communicated and their number is known at compile time,
+ * set the template argument @p n_components_templated to that number.
+ * If the number of components is only known at run time, pass it via
+ * @p n_components and set @p n_components_templated to `0`. In other
+ * words, exactly one of @p n_components_templated and @p n_components
+ * must be nonzero.
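+ *
+ * As a minimal sketch (the vector and partitioner names are
+ * illustrative), two components per entry can be communicated in
+ * either of the two ways:
+ * @code
+ * // number of components known at compile time
+ * partitioner.export_to_ghosted_array<double, 2>(locally_owned, ghosts);
+ *
+ * // number of components only known at run time
+ * partitioner.export_to_ghosted_array<double, 0>(locally_owned, ghosts, 2);
+ * @endcode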
+ *
* @pre The vectors only have to provide a method begin(), which allows
* access to their raw data.
*
* functions separately and thereby overlap communication and
* computation.
*/
- template <typename Number>
+ template <typename Number, unsigned int n_components_templated = 1>
void
export_to_ghosted_array(
const ArrayView<const Number> &locally_owned_array,
- const ArrayView<Number> &ghost_array) const;
+ const ArrayView<Number> &ghost_array,
+ const unsigned int n_components = 0) const;
/**
* Same as above but with an interface similar to
* @note Any value less than 10 is a valid value of
* @p communication_channel.
*/
- template <typename Number>
+ template <typename Number, unsigned int n_components_templated = 1>
void
export_to_ghosted_array(
const unsigned int communication_channel,
const ArrayView<const Number> &locally_owned_array,
const ArrayView<Number> &temporary_storage,
const ArrayView<Number> &ghost_array,
- std::vector<MPI_Request> &requests) const;
+ std::vector<MPI_Request> &requests,
+ const unsigned int n_components = 0) const;
/**
* Start update: Data is packed, non-blocking send and receives
* @note Any value less than 10 is a valid value of
* @p communication_channel.
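+ *
+ * A minimal sketch of overlapping communication and computation with
+ * the start/finish pair (all variable names are illustrative):
+ * @code
+ * partitioner.export_to_ghosted_array_start<double, 2>(
+ *   0, locally_owned, temporary_storage, requests);
+ * // ... computation that does not need the ghost values ...
+ * partitioner.export_to_ghosted_array_finish<double, 2>(
+ *   temporary_storage, ghost_array, requests);
+ * @endcode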
*/
- template <typename Number>
+ template <typename Number, unsigned int n_components_templated = 1>
void
export_to_ghosted_array_start(
const unsigned int communication_channel,
const ArrayView<const Number> &locally_owned_array,
const ArrayView<Number> &temporary_storage,
- std::vector<MPI_Request> &requests) const;
+ std::vector<MPI_Request> &requests,
+ const unsigned int n_components = 0) const;
/**
* Finish update. The method waits until all data has been sent and
* @pre The required sizes of the vectors are the same as in the functions
* above.
*/
- template <typename Number>
+ template <typename Number, unsigned int n_components_templated = 1>
void
export_to_ghosted_array_finish(
const ArrayView<const Number> &temporary_storage,
const ArrayView<Number> &ghost_array,
- std::vector<MPI_Request> &requests) const;
+ std::vector<MPI_Request> &requests,
+ const unsigned int n_components = 0) const;
/**
* Similar to the above functions but for importing vector entries
{
namespace MPI
{
- template <typename Number>
+ template <typename Number, unsigned int n_components_templated>
void
NoncontiguousPartitioner::export_to_ghosted_array(
const ArrayView<const Number> &src,
- const ArrayView<Number> &dst) const
+ const ArrayView<Number> &dst,
+ const unsigned int n_components) const
{
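+ // exactly one of the two ways of specifying the number of components
+ // (compile time vs. run time) may be used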
+ Assert((n_components_templated != 0) != (n_components != 0),
+ ExcNotImplemented());
+
+ const unsigned int n_components_to_be_used =
+ (n_components_templated != 0) ? n_components_templated : n_components;
+
// allocate internal memory if needed
if (requests.size() != send_ranks.size() + recv_ranks.size())
requests.resize(send_ranks.size() + recv_ranks.size());
- if (this->buffers.size() != send_ptr.back() * sizeof(Number))
- this->buffers.resize(this->temporary_storage_size() * sizeof(Number));
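+ // the buffer stores raw bytes, so scale its size by the size of the
+ // value type and by the number of components per entry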
+ if (this->buffers.size() !=
+ send_ptr.back() * sizeof(Number) * n_components_to_be_used)
+ this->buffers.resize(this->temporary_storage_size() * sizeof(Number) *
+ n_components_to_be_used);
// perform actual exchange
- this->template export_to_ghosted_array<Number>(
+ this->template export_to_ghosted_array<Number, n_components_templated>(
0,
src,
ArrayView<Number>(reinterpret_cast<Number *>(this->buffers.data()),
- send_ptr.back()),
+ send_ptr.back() * n_components_to_be_used),
dst,
- this->requests);
+ this->requests,
+ n_components);
}
- template <typename Number>
+ template <typename Number, unsigned int n_components_templated>
void
NoncontiguousPartitioner::export_to_ghosted_array(
const unsigned int communication_channel,
const ArrayView<const Number> &locally_owned_array,
const ArrayView<Number> &temporary_storage,
const ArrayView<Number> &ghost_array,
- std::vector<MPI_Request> &requests) const
+ std::vector<MPI_Request> &requests,
+ const unsigned int n_components) const
{
- this->template export_to_ghosted_array_start<Number>(
+ this->template export_to_ghosted_array_start<Number,
+ n_components_templated>(
communication_channel,
locally_owned_array,
temporary_storage,
- requests);
- this->template export_to_ghosted_array_finish<Number>(temporary_storage,
- ghost_array,
- requests);
+ requests,
+ n_components);
+ this->template export_to_ghosted_array_finish<Number,
+ n_components_templated>(
+ temporary_storage, ghost_array, requests, n_components);
}
- template <typename Number>
+ template <typename Number, unsigned int n_components_templated>
void
NoncontiguousPartitioner::export_to_ghosted_array_start(
const unsigned int communication_channel,
const ArrayView<const Number> &src,
const ArrayView<Number> &buffers,
- std::vector<MPI_Request> &requests) const
+ std::vector<MPI_Request> &requests,
+ const unsigned int n_components) const
{
#ifndef DEAL_II_WITH_MPI
(void)communication_channel;
(void)src;
(void)buffers;
(void)requests;
+ (void)n_components;
Assert(false, ExcNeedsMPI());
#else
AssertDimension(requests.size(), recv_ranks.size() + send_ranks.size());
+ Assert((n_components_templated != 0) != (n_components != 0),
+ ExcNotImplemented());
+
+ const unsigned int n_components_to_be_used =
+ (n_components_templated != 0) ? n_components_templated : n_components;
+
const auto tag =
communication_channel +
internal::Tags::noncontiguous_partitioner_update_ghost_values_start;
for (types::global_dof_index i = 0; i < recv_ranks.size(); ++i)
{
const int ierr =
- MPI_Irecv(buffers.data() + recv_ptr[i],
- recv_ptr[i + 1] - recv_ptr[i],
+ MPI_Irecv(buffers.data() + recv_ptr[i] * n_components_to_be_used,
+ (recv_ptr[i + 1] - recv_ptr[i]) * n_components_to_be_used,
Utilities::MPI::mpi_type_id_for_type<Number>,
recv_ranks[i],
tag,
{
// collect data to be sent
for (types::global_dof_index j = send_ptr[i]; j < send_ptr[i + 1];
- j++)
+ ++j, ++k)
{
AssertIndexRange(k, send_indices.size());
- buffers[j] = src[send_indices[k]];
- ++k;
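+ // copy all components of the current entry; the components of an
+ // entry are stored contiguously in both the source vector and the
+ // send buffer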
+ for (unsigned int comp = 0; comp < n_components_to_be_used;
+ ++comp)
+ buffers[j * n_components_to_be_used + comp] =
+ src[send_indices[k] * n_components_to_be_used + comp];
}
// send data
- Assert((send_ptr[i] < buffers.size()) ||
- (send_ptr[i] == buffers.size() &&
- send_ptr[i + 1] == send_ptr[i]),
+ Assert((send_ptr[i] * n_components_to_be_used < buffers.size()) ||
+ (send_ptr[i] * n_components_to_be_used == buffers.size() &&
+ send_ptr[i + 1] * n_components_to_be_used ==
+ send_ptr[i] * n_components_to_be_used),
ExcMessage("The input buffer doesn't contain enough entries"));
const int ierr =
- MPI_Isend(buffers.data() + send_ptr[i],
- send_ptr[i + 1] - send_ptr[i],
+ MPI_Isend(buffers.data() + send_ptr[i] * n_components_to_be_used,
+ (send_ptr[i + 1] - send_ptr[i]) * n_components_to_be_used,
Utilities::MPI::mpi_type_id_for_type<Number>,
send_ranks[i],
tag,
- template <typename Number>
+ template <typename Number, unsigned int n_components_templated>
void
NoncontiguousPartitioner::export_to_ghosted_array_finish(
const ArrayView<const Number> &buffers,
const ArrayView<Number> &dst,
- std::vector<MPI_Request> &requests) const
+ std::vector<MPI_Request> &requests,
+ const unsigned int n_components) const
{
#ifndef DEAL_II_WITH_MPI
(void)buffers;
(void)dst;
(void)requests;
+ (void)n_components;
Assert(false, ExcNeedsMPI());
#else
+
+ Assert((n_components_templated != 0) != (n_components != 0),
+ ExcNotImplemented());
+
+ const unsigned int n_components_to_be_used =
+ (n_components_templated != 0) ? n_components_templated : n_components;
+
// receive all data packages and copy data from buffers
for (types::global_dof_index proc = 0; proc < recv_ranks.size(); ++proc)
{
AssertIndexRange(i + 1, recv_ptr.size());
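+ // copy the received values into the destination vector, all
+ // components of an entry at once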
for (types::global_dof_index j = recv_ptr[i], c = 0;
j < recv_ptr[i + 1];
- j++)
- dst[recv_indices[j]] = buffers[recv_ptr[i] + c++];
+ ++j, ++c)
+ for (unsigned int comp = 0; comp < n_components_to_be_used; ++comp)
+ dst[recv_indices[j] * n_components_to_be_used + comp] =
+ buffers[(recv_ptr[i] + c) * n_components_to_be_used + comp];
}
// wait until all data packages have been sent
--- /dev/null
+// ------------------------------------------------------------------------
+//
+// SPDX-License-Identifier: LGPL-2.1-or-later
+// Copyright (C) 2024 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// Part of the source code is dual licensed under Apache-2.0 WITH
+// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
+// governing the source code and code contributions can be found in
+// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
+//
+// ------------------------------------------------------------------------
+
+
+// Test Utilities::MPI::NoncontiguousPartitioner for a non-contiguous index
+// space with multiple components per entry, specified both at compile time
+// and at run time.
+
+#include <deal.II/base/mpi.h>
+#include <deal.II/base/mpi_noncontiguous_partitioner.h>
+#include <deal.II/base/mpi_noncontiguous_partitioner.templates.h>
+
+#include "../tests.h"
+
+
+void
+test(const MPI_Comm comm)
+{
+ const unsigned int n_components = 2;
+
+ IndexSet index_set_has(4);
+ IndexSet index_set_want(4);
+
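+ // process 0 owns index 1 and requests index 2; every other process
+ // owns index 2 and requests indices 1 and 2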
+ if (Utilities::MPI::this_mpi_process(comm) == 0)
+ {
+ index_set_has.add_index(1);
+ index_set_want.add_index(2);
+ }
+ else
+ {
+ index_set_has.add_index(2);
+ index_set_want.add_index(1);
+ index_set_want.add_index(2);
+ }
+
+ Utilities::MPI::NoncontiguousPartitioner partitioner(index_set_has,
+                                                      index_set_want,
+                                                      comm);
+
+ AlignedVector<double> src(n_components * index_set_has.n_elements());
+ AlignedVector<double> dst(n_components * index_set_want.n_elements());
+
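+ // encode the owning rank in the source values so that the origin of
+ // each received value is visible in the output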
+ src[0] = Utilities::MPI::this_mpi_process(comm) * 100 + 1;
+ src[1] = Utilities::MPI::this_mpi_process(comm) * 100 + 2;
+
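+ // exchange the values with the number of components fixed at compile time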
+ partitioner.export_to_ghosted_array<double, n_components>(
+ ArrayView<const double>(src.data(), src.size()),
+ ArrayView<double>(dst.data(), dst.size()));
+
+ for (size_t i = 0; i < src.size(); ++i)
+ deallog << static_cast<int>(src[i]) << ' ';
+ deallog << std::endl;
+ for (size_t i = 0; i < dst.size(); ++i)
+ deallog << static_cast<int>(dst[i]) << ' ';
+ deallog << std::endl;
+
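+ // repeat the exchange, now specifying the number of components at run time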
+ dst.fill(0.0);
+ partitioner.export_to_ghosted_array<double, 0>(
+ ArrayView<const double>(src.data(), src.size()),
+ ArrayView<double>(dst.data(), dst.size()),
+ n_components);
+
+ for (size_t i = 0; i < dst.size(); ++i)
+ deallog << static_cast<int>(dst[i]) << ' ';
+ deallog << std::endl;
+}
+
+int
+main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+ MPILogInitAll all;
+
+ const MPI_Comm comm = MPI_COMM_WORLD;
+
+ {
+ deallog.push("all");
+ test(comm);
+ deallog.pop();
+ }
+}