const ArrayView<Number> & ghost_array,
std::vector<MPI_Request> & requests) const;
+ /**
+ * Similar to the above functions but for importing vector entries
+ * from @p ghost_array to @p locally_owned_storage.
+ *
+ * @note In contrast to the functions in
+ * Utilities::MPI::Partitioner, this function expects
+ * @p locally_owned_storage to contain only zeros, since the
+ * imported values are accumulated into it.
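+ *
+ * A minimal usage sketch (the array names and their sizes are
+ * illustrative only) could look as follows:
+ * @code
+ *   AlignedVector<double> ghost_array(n_ghost_entries);
+ *   AlignedVector<double> locally_owned_storage(n_owned_entries);
+ *   // ... fill ghost_array with the contributions of this process ...
+ *   partitioner.import_from_ghosted_array(
+ *     VectorOperation::add,
+ *     ArrayView<double>(ghost_array.data(), ghost_array.size()),
+ *     ArrayView<double>(locally_owned_storage.data(),
+ *                       locally_owned_storage.size()));
+ * @endcode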
+ */
+ template <typename Number>
+ void
+ import_from_ghosted_array(
+ const VectorOperation::values vector_operation,
+ const ArrayView<Number> & ghost_array,
+ const ArrayView<Number> & locally_owned_storage) const;
+
+ /**
+ * Similar to the above function, with the difference that
+ * users can provide their own temporary arrays and a
+ * communication channel. This function calls
+ * import_from_ghosted_array_start() and
+ * import_from_ghosted_array_finish() in sequence.
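+ *
+ * @note The @p temporary_storage array has to provide
+ * temporary_storage_size() entries, and the @p requests vector one
+ * MPI_Request per process this process exchanges data with (counting
+ * incoming and outgoing messages separately).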
+ */
+ template <typename Number>
+ void
+ import_from_ghosted_array(const VectorOperation::values vector_operation,
+ const unsigned int communication_channel,
+ const ArrayView<Number> & ghost_array,
+ const ArrayView<Number> & temporary_storage,
+ const ArrayView<Number> & locally_owned_storage,
+ std::vector<MPI_Request> &requests) const;
+
+ /**
+ * Start the communication for importing values: the data is packed and
+ * non-blocking sends and receives are started.
+ */
+ template <typename Number>
+ void
+ import_from_ghosted_array_start(
+ const VectorOperation::values vector_operation,
+ const unsigned int communication_channel,
+ const ArrayView<Number> & ghost_array,
+ const ArrayView<Number> & temporary_storage,
+ std::vector<MPI_Request> & requests) const;
+
+ /**
+ * Finish the communication for importing values. The method waits until
+ * all data has been sent and received. Once the data from any process has
+ * been received, it is added to the right positions of the vector
+ * @p locally_owned_storage.
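+ *
+ * @note This function has to be called with the same @p temporary_storage
+ * and @p requests objects that were passed to
+ * import_from_ghosted_array_start().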
+ */
+ template <typename Number>
+ void
+ import_from_ghosted_array_finish(
+ const VectorOperation::values vector_operation,
+ const ArrayView<const Number> &temporary_storage,
+ const ArrayView<Number> & locally_owned_storage,
+ std::vector<MPI_Request> & requests) const;
+
/**
* Returns the number of processes this process sends data to and the
* number of processes this process receives data from.
#endif
}
+
+
+ template <typename Number>
+ void
+ NoncontiguousPartitioner::import_from_ghosted_array(
+ const VectorOperation::values vector_operation,
+ const ArrayView<Number> & src,
+ const ArrayView<Number> & dst) const
+ {
+ // allocate internal memory if needed (the requests and buffers are
+ // class members and are reused between calls)
+ if (requests.size() != send_ranks.size() + recv_ranks.size())
+ requests.resize(send_ranks.size() + recv_ranks.size());
+
+ if (this->buffers.size() != send_ptr.back() * sizeof(Number))
+ this->buffers.resize(this->temporary_storage_size() * sizeof(Number));
+
+ // perform actual exchange
+ this->template import_from_ghosted_array<Number>(
+ vector_operation,
+ 0,
+ src,
+ ArrayView<Number>(reinterpret_cast<Number *>(this->buffers.data()),
+ send_ptr.back()),
+ dst,
+ this->requests);
+ }
+
+
+
+ template <typename Number>
+ void
+ NoncontiguousPartitioner::import_from_ghosted_array(
+ const VectorOperation::values vector_operation,
+ const unsigned int communication_channel,
+ const ArrayView<Number> & ghost_array,
+ const ArrayView<Number> & temporary_storage,
+ const ArrayView<Number> & locally_owned_array,
+ std::vector<MPI_Request> & requests) const
+ {
+ this->template import_from_ghosted_array_start<Number>(
+ vector_operation,
+ communication_channel,
+ ghost_array,
+ temporary_storage,
+ requests);
+ this->template import_from_ghosted_array_finish<Number>(
+ vector_operation, temporary_storage, locally_owned_array, requests);
+ }
+
+
+
+ template <typename Number>
+ void
+ NoncontiguousPartitioner::import_from_ghosted_array_start(
+ const VectorOperation::values vector_operation,
+ const unsigned int communication_channel,
+ const ArrayView<Number> & src,
+ const ArrayView<Number> & buffers,
+ std::vector<MPI_Request> & requests) const
+ {
+#ifndef DEAL_II_WITH_MPI
+ (void)vector_operation;
+ (void)communication_channel;
+ (void)src;
+ (void)buffers;
+ (void)requests;
+ Assert(false, ExcNeedsMPI());
+#else
+ (void)vector_operation; // nothing to do here
+
+ AssertDimension(requests.size(), recv_ranks.size() + send_ranks.size());
+
+ const auto tag =
+ communication_channel +
+ internal::Tags::noncontiguous_partitioner_update_ghost_values_start;
+
+ AssertIndexRange(
+ tag,
+ internal::Tags::noncontiguous_partitioner_update_ghost_values_end + 1);
+
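+ // note: during import, the roles of export_to_ghosted_array() are
+ // reversed: data is received from the ranks in send_ranks (into the
+ // send_ptr portion of the buffer) and sent to the ranks in recv_ranks
+ // (gathered from the ghost array via recv_indices); the requests
+ // vector stores the send requests first, followed by the receive
+ // requests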
+ // post recv
+ AssertIndexRange(send_ranks.size(), send_ptr.size());
+ for (types::global_dof_index i = 0; i < send_ranks.size(); ++i)
+ {
+ const int ierr =
+ MPI_Irecv(buffers.data() + send_ptr[i],
+ send_ptr[i + 1] - send_ptr[i],
+ Utilities::MPI::mpi_type_id_for_type<Number>,
+ send_ranks[i],
+ tag,
+ communicator,
+ &requests[i + recv_ranks.size()]);
+ AssertThrowMPI(ierr);
+ }
+
+ // pack data and send away
+ for (types::global_dof_index i = 0; i < recv_ranks.size(); ++i)
+ {
+ AssertIndexRange(i + 1, recv_ptr.size());
+ for (types::global_dof_index j = recv_ptr[i], c = 0;
+ j < recv_ptr[i + 1];
+ j++)
+ buffers[recv_ptr[i] + c++] = src[recv_indices[j]];
+
+ // send data
+ Assert((recv_ptr[i] < buffers.size()) ||
+ (recv_ptr[i] == buffers.size() &&
+ recv_ptr[i + 1] == recv_ptr[i]),
+ ExcMessage("The input buffer doesn't contain enough entries"));
+ const int ierr =
+ MPI_Isend(buffers.data() + recv_ptr[i],
+ recv_ptr[i + 1] - recv_ptr[i],
+ Utilities::MPI::mpi_type_id_for_type<Number>,
+ recv_ranks[i],
+ tag,
+ communicator,
+ &requests[i]);
+ AssertThrowMPI(ierr);
+ }
+#endif
+ }
+
+
+
+ template <typename Number>
+ void
+ NoncontiguousPartitioner::import_from_ghosted_array_finish(
+ const VectorOperation::values vector_operation,
+ const ArrayView<const Number> &buffers,
+ const ArrayView<Number> & dst,
+ std::vector<MPI_Request> & requests) const
+ {
+#ifndef DEAL_II_WITH_MPI
+ (void)vector_operation;
+ (void)buffers;
+ (void)dst;
+ (void)requests;
+ Assert(false, ExcNeedsMPI());
+#else
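+ // only VectorOperation::add is implemented: several processes may
+ // contribute to the same locally owned entry, so the received values
+ // have to be accumulated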
+ Assert(vector_operation == VectorOperation::add, ExcNotImplemented());
+
+ AssertDimension(requests.size(), recv_ranks.size() + send_ranks.size());
+
+ Assert(std::accumulate(dst.begin(), dst.end(), Number(0)) == Number(0),
+ ExcMessage("The destination vector has to be empty."));
+
+ // wait until all data packages have been received
+ // note: this for-loop could be merged with the next for-loop;
+ // however, for that send_indices would need to be stored
+ // rank by rank
+ for (types::global_dof_index proc = 0; proc < send_ranks.size(); ++proc)
+ {
+ int i;
+ MPI_Status status;
+ const auto ierr = MPI_Waitany(send_ranks.size(),
+                               requests.data() + recv_ranks.size(),
+ &i,
+ &status);
+ AssertThrowMPI(ierr);
+ }
+
+ // write data into destination vector
+ for (types::global_dof_index i = 0, k = 0; i < send_ranks.size(); ++i)
+ {
+ // add the values received from rank send_ranks[i] to the owned entries
+ for (types::global_dof_index j = send_ptr[i]; j < send_ptr[i + 1];
+ j++)
+ {
+ AssertIndexRange(k, send_indices.size());
+ dst[send_indices[k]] += buffers[j];
+ ++k;
+ }
+ }
+
+ // wait until all data packages have been sent
+ const int ierr =
+   MPI_Waitall(recv_ranks.size(), requests.data(), MPI_STATUSES_IGNORE);
+ AssertThrowMPI(ierr);
+#endif
+ }
+
} // namespace MPI
} // namespace Utilities
NoncontiguousPartitioner::export_to_ghosted_array(
const ArrayView<const S> &src,
const ArrayView<S> & dst) const;
+
+ template void
+ NoncontiguousPartitioner::import_from_ghosted_array(
+ const VectorOperation::values vector_operation,
+ const ArrayView<S> & src,
+ const ArrayView<S> & dst) const;
\}
\}
}
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2023 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// Test Utilities::MPI::NoncontiguousPartitioner::import_from_ghosted_array().
+
+#include <deal.II/base/aligned_vector.h>
+#include <deal.II/base/mpi.h>
+#include <deal.II/base/mpi_noncontiguous_partitioner.h>
+
+#include "../tests.h"
+
+
+void
+test(const MPI_Comm comm)
+{
+ std::vector<types::global_dof_index> index_set_has;
+ std::vector<types::global_dof_index> index_set_want;
+
+ if (Utilities::MPI::this_mpi_process(comm) == 0)
+ {
+ index_set_has.push_back(0);
+ index_set_has.push_back(1);
+ index_set_has.push_back(2);
+
+ index_set_want.push_back(0);
+ index_set_want.push_back(1);
+ index_set_want.push_back(2);
+ index_set_want.push_back(3);
+ index_set_want.push_back(5);
+ }
+ else
+ {
+ index_set_has.push_back(3);
+ index_set_has.push_back(4);
+ index_set_has.push_back(5);
+
+ index_set_want.push_back(0);
+ index_set_want.push_back(2);
+ index_set_want.push_back(3);
+ index_set_want.push_back(4);
+ index_set_want.push_back(5);
+ }
+
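+  // With this setup, rank 0 owns the global indices {0,1,2} and rank 1
+  // owns {3,4,5}; the index_set_want lists define the ghost entries of
+  // each rank.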
+ Utilities::MPI::NoncontiguousPartitioner vector(index_set_has,
+ index_set_want,
+ comm);
+
+ AlignedVector<double> src(index_set_want.size());
+ AlignedVector<double> dst(index_set_has.size());
+
+ for (unsigned int i = 0; i < src.size(); ++i)
+ src[i] = i + Utilities::MPI::this_mpi_process(comm) * 100 + 1;
+
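+  // Import the ghost values back to their owners: every rank that lists a
+  // global index in index_set_want sends its value to the owning rank,
+  // where the contributions are added up.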
+ vector.import_from_ghosted_array(VectorOperation::add,
+ ArrayView<double>(src.data(), src.size()),
+ ArrayView<double>(dst.data(), dst.size()));
+
+ for (size_t i = 0; i < src.size(); ++i)
+ deallog << static_cast<int>(src[i]) << ' ';
+ deallog << std::endl;
+ for (size_t i = 0; i < dst.size(); ++i)
+ deallog << static_cast<int>(dst[i]) << ' ';
+ deallog << std::endl;
+}
+
+int
+main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+ MPILogInitAll all;
+
+ const MPI_Comm comm = MPI_COMM_WORLD;
+
+ {
+ deallog.push("all");
+ test(comm);
+ deallog.pop();
+ }
+}
--- /dev/null
+
+DEAL:0:all::1 2 3 4 5
+DEAL:0:all::102 2 105
+
+DEAL:1:all::101 102 103 104 105
+DEAL:1:all::107 104 110
+