--- /dev/null
+New: The function Utilities::MPI::compute_index_owner() determines the owning
+MPI process of each ghost degree of freedom relative to a set of locally owned ones.
+<br>
+(Peter Munch, 2019/06/03)
--- /dev/null
+Improved: The function Utilities::MPI::compute_index_owner() is used in
+Utilities::MPI::Partitioner to avoid global all-to-all communication,
+speeding up computations on many MPI ranks.
+<br>
+(Peter Munch, 2019/06/03)
#include <map>
#include <numeric>
+#include <set>
#include <vector>
#if !defined(DEAL_II_WITH_MPI) && !defined(DEAL_II_WITH_PETSC)
const T & object_to_send,
const unsigned int root_process = 0);
+ /**
+ * An interface to be implemented in order to use the ConsensusAlgorithm
+ * classes. The main functionality an implementation has to provide is a
+ * list of process ranks this process wants data from; in addition, it
+ * deals with the optional payload of the messages sent/received by the
+ * ConsensusAlgorithm classes.
+ *
+ * There are two kinds of messages:
+ * - send/request message: A message consisting of a data request
+ * which should be answered by another process. This message is
+ * considered a request message by the receiving rank.
+ * - recv message: The answer to a send/request message.
+ *
+ * @tparam T1 the type of the elements of the vector to be sent
+ * @tparam T2 the type of the elements of the vector to be received
+ *
+ * @note Since the payloads of the messages are optional, users have
+ * to deal with the buffers themselves. The ConsensusAlgorithm classes
+ * 1) deliver only references to empty vectors (of size 0) into which
+ * the data to be sent can be inserted or from which received data can
+ * be read, and 2) communicate these vectors blindly.
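+ *
+ * As an illustration, a minimal implementation that only announces which
+ * ranks it wants to send (payload-free) requests to could look like the
+ * following sketch, where the class name and the variables @c my_rank and
+ * @c n_procs (the rank of this process and the size of the communicator)
+ * are merely placeholders:
+ * @code
+ * class MyProcess : public ConsensusAlgorithmProcess<int, int>
+ * {
+ * public:
+ *   virtual std::vector<unsigned int>
+ *   compute_targets() override
+ *   {
+ *     // request an answer from the next rank in the communicator
+ *     return {(my_rank + 1) % n_procs};
+ *   }
+ * };
+ * @endcode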
+ *
+ * @author Peter Munch, 2019
+ */
+ template <typename T1, typename T2>
+ class ConsensusAlgorithmProcess
+ {
+ public:
+ /**
+ * @return A vector of ranks this process wants to send a request to.
+ *
+ * @note This is the only method which has to be implemented since the
+ * payloads of the messages are optional.
+ */
+ virtual std::vector<unsigned int>
+ compute_targets() = 0;
+
+ /**
+ * Add a payload to the request to the process with the specified rank.
+ *
+ * @param[in] other_rank rank of the process
+ * @param[out] send_buffer data to be sent as part of the request (optional)
+ *
+ * @note The buffer is empty. Before using it, you have to set its size.
+ */
+ virtual void
+ pack_recv_buffer(const int other_rank, std::vector<T1> &send_buffer);
+
+ /**
+ * Prepare the buffer in which the payload of the answer to the request to
+ * the process with the specified rank will be saved. The most obvious task
+ * is to resize the buffer, since it is empty when the function is called.
+ *
+ * @param[in] other_rank rank of the process
+ * @param[out] recv_buffer buffer where the answer payload will be received (optional)
+ */
+ virtual void
+ prepare_recv_buffer(const int other_rank, std::vector<T2> &recv_buffer);
+
+ /**
+ * Process the payload of a request sent by the process with the specified
+ * rank and prepare the payload of the answer.
+ *
+ * @param[in] other_rank rank of the process
+ * @param[in] buffer_recv received payload (optional)
+ * @param[out] request_buffer payload to be sent as part of the request
+ * (optional)
+ *
+ * @note The request_buffer is empty. Before using it, you have to set
+ * its size.
+ */
+ virtual void
+ process_request(const unsigned int other_rank,
+ const std::vector<T1> &buffer_recv,
+ std::vector<T2> & request_buffer);
+
+ /**
+ * Process the payload of the answer to the request sent to the process
+ * with the specified rank.
+ *
+ * @param[in] other_rank rank of the process
+ * @param[in] recv_buffer received payload of the answer (optional)
+ */
+ virtual void
+ unpack_recv_buffer(const int other_rank,
+ const std::vector<T2> &recv_buffer);
+ };
+
+ /**
+ * The task of the implementations of this class is to provide the
+ * communication patterns to retrieve data from other processes in a
+ * dynamic-sparse way.
+ *
+ * Dynamic-sparse means in this context:
+ * - by the time the consensus algorithm is called, the other processes do
+ * not know yet that they have to answer requests
+ * - each process only has to communicate with a small subset of
+ * processes of the MPI communicator
+ *
+ * Naturally, the user has to provide:
+ * - a communicator
+ * - for each rank a list of ranks of processes this process should
+ * communicate with
+ * - functionality to pack/unpack the data to be sent/received
+ *
+ * The latter two features should be implemented in a class derived from
+ * ConsensusAlgorithmProcess.
+ *
+ * @tparam T1 the type of the elements of the vector to be sent
+ * @tparam T2 the type of the elements of the vector to be received
+ *
+ * @author Peter Munch, 2019
+ */
+ template <typename T1, typename T2>
+ class ConsensusAlgorithm
+ {
+ public:
+ ConsensusAlgorithm(ConsensusAlgorithmProcess<T1, T2> &process,
+ const MPI_Comm & comm);
+
+ /**
+ * Destructor.
+ */
+ virtual ~ConsensusAlgorithm() = default;
+
+ /**
+ * Run the consensus algorithm.
+ */
+ virtual void
+ run() = 0;
+
+ protected:
+ /**
+ * Reference to the process provided by the user.
+ */
+ ConsensusAlgorithmProcess<T1, T2> &process;
+
+ /**
+ * MPI communicator.
+ */
+ const MPI_Comm &comm;
+
+ /**
+ * Rank of this process.
+ */
+ const unsigned int my_rank;
+
+ /**
+ * Number of processes in the communicator.
+ */
+ const unsigned int n_procs;
+ };
+
+ /**
+ * This class implements ConsensusAlgorithm, using only point-to-point
+ * communications and a single IBarrier.
+ *
+ * @note This class closely follows the paper Hoefler et al., "Scalable
+ * Communication Protocols for Dynamic Sparse Data Exchange".
+ * Since the algorithm shown there does not consider payloads, the
+ * algorithm has been modified here in such a way that synchronous
+ * sends (Issend) have been replaced by equivalent Isend/Irecv calls,
+ * where the Irecv receives the answer to a request (with payload).
+ *
+ * @tparam T1 the type of the elements of the vector to be sent
+ * @tparam T2 the type of the elements of the vector to be received
+ *
+ * @author Peter Munch, 2019
+ */
+ template <typename T1, typename T2>
+ class ConsensusAlgorithm_NBX : public ConsensusAlgorithm<T1, T2>
+ {
+ public:
+ // Unique tags to be used during Isend and Irecv
+ static const unsigned int tag_request = 12;
+ static const unsigned int tag_delivery = 13;
+
+ /**
+ * Constructor.
+ *
+ * @param process Process to be run during consensus algorithm.
+ * @param comm MPI Communicator
+ */
+ ConsensusAlgorithm_NBX(ConsensusAlgorithmProcess<T1, T2> &process,
+ const MPI_Comm & comm);
+
+ /**
+ * Destructor.
+ */
+ virtual ~ConsensusAlgorithm_NBX() = default;
+
+ /**
+ * Run consensus algorithm.
+ */
+ virtual void
+ run() override;
+
+ private:
+#ifdef DEAL_II_WITH_MPI
+ /**
+ * List of processes this process wants to send requests to.
+ */
+ std::vector<unsigned int> targets;
+
+ /**
+ * Buffers for sending requests.
+ */
+ std::vector<std::vector<T1>> send_buffers;
+
+ /**
+ * Requests for sending requests.
+ */
+ std::vector<MPI_Request> send_requests;
+
+ /**
+ * Buffers for receiving answers to requests.
+ */
+ std::vector<std::vector<T2>> recv_buffers;
+
+
+ /**
+ * Requests for receiving answers to requests.
+ */
+ std::vector<MPI_Request> recv_requests;
+
+ /**
+ * Buffers for sending answers to requests.
+ */
+ std::vector<std::vector<T2>> request_buffers;
+
+ /**
+ * Requests for sending answers to requests.
+ */
+ std::vector<std::shared_ptr<MPI_Request>> request_requests;
+
+ // request for barrier
+ MPI_Request barrier_request;
+#endif
+
+#ifdef DEBUG
+ /**
+ * List of processes who have made a request to this process.
+ */
+ std::set<unsigned int> requesting_processes;
+#endif
+
+ /**
+ * Check if all request answers have been received by this rank.
+ */
+ bool
+ check_own_state();
+
+ /**
+ * Signal to all other ranks that this rank has received all request
+ * answers via entering IBarrier.
+ */
+ void
+ signal_finish();
+
+ /**
+ * Check if all ranks have received all their request answers, i.e.
+ * all ranks have reached the IBarrier.
+ */
+ bool
+ check_global_state();
+
+ /**
+ * A request message from another rank has been received: process the
+ * request and send an answer.
+ */
+ void
+ process_requests();
+
+ /**
+ * Start to send all requests via ISend and post IRecvs for the incoming
+ * answer messages.
+ */
+ void
+ start_communication();
+
+ /**
+ * After all ranks have received all answers, the MPI data structures can be
+ * freed and the received answers can be processed.
+ */
+ void
+ clean_up_and_end_communication();
+ };
+
+ /**
+ * This class implements ConsensusAlgorithm, using a two-step approach. In
+ * the first step the source ranks are determined, and in the second step
+ * a static sparse data exchange is performed.
+ *
+ * @note In contrast to ConsensusAlgorithm_NBX, this class splits the same
+ * task into two distinct steps. In the first step, all processes that
+ * want to send a request to this process are identified. In the
+ * second step, the data is exchanged; since it is then known how many
+ * requests have to be answered, i.e. when this process can stop
+ * waiting for requests, no IBarrier is needed.
+ *
+ * @note The function compute_point_to_point_communication_pattern() is used
+ * to determine the source processes, which implements the
+ * PEX algorithm from Hoefler et al., "Scalable Communication
+ * Protocols for Dynamic Sparse Data Exchange".
+ *
+ * @tparam T1 the type of the elements of the vector to be sent
+ * @tparam T2 the type of the elements of the vector to be received
+ *
+ * @author Peter Munch, 2019
+ */
+ template <typename T1, typename T2>
+ class ConsensusAlgorithm_PEX : public ConsensusAlgorithm<T1, T2>
+ {
+ public:
+ // Unique tags to be used during Isend and Irecv
+ static const unsigned int tag_request = 14;
+ static const unsigned int tag_delivery = 15;
+
+ /**
+ * Constructor.
+ *
+ * @param process Process to be run during consensus algorithm.
+ * @param comm MPI Communicator
+ */
+ ConsensusAlgorithm_PEX(ConsensusAlgorithmProcess<T1, T2> &process,
+ const MPI_Comm & comm);
+
+ /**
+ * Destructor.
+ */
+ virtual ~ConsensusAlgorithm_PEX() = default;
+
+ /**
+ * Run consensus algorithm.
+ */
+ virtual void
+ run() override;
+
+ private:
+#ifdef DEAL_II_WITH_MPI
+ /**
+ * List of ranks of processes this process wants to send a request to.
+ */
+ std::vector<unsigned int> targets;
+
+ /**
+ * List of ranks of processes wanting to send a request to this process.
+ */
+ std::vector<unsigned int> sources;
+
+ // data structures to send and receive requests
+
+ /**
+ * Buffers for sending requests.
+ */
+ std::vector<std::vector<T1>> send_buffers;
+
+ /**
+ * Buffers for receiving answers to requests.
+ */
+ std::vector<std::vector<T2>> recv_buffers;
+
+ /**
+ * Requests for sending requests and receiving answers to requests.
+ */
+ std::vector<MPI_Request> send_and_recv_buffers;
+
+ /**
+ * Buffers for sending answers to requests.
+ */
+ std::vector<std::vector<T2>> requests_buffers;
+
+ /**
+ * Requests for sending answers to requests.
+ */
+ std::vector<MPI_Request> requests_answers;
+#endif
+
+ /**
+ * The ith request message from another rank has been received: process
+ * the request and send an answer.
+ */
+ void
+ process_requests(int index);
+
+ /**
+ * Start to send all requests via ISend and post IRecvs for the incoming
+ * answer messages.
+ */
+ unsigned int
+ start_communication();
+
+ /**
+ * After all answers have been exchanged, the MPI data structures can be
+ * freed and the received answers can be processed.
+ */
+ void
+ clean_up_and_end_communication();
+ };
+
+ /**
+ * A class which delegates its task to other ConsensusAlgorithm
+ * implementations depending on the number of processes in the
+ * MPI communicator. For a small number of processes it uses
+ * ConsensusAlgorithm_PEX and for a large number of processes
+ * ConsensusAlgorithm_NBX. The threshold depends on whether the program is
+ * compiled in debug or release mode.
+ *
+ * @tparam T1 the type of the elements of the vector to be sent
+ * @tparam T2 the type of the elements of the vector to be received
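+ *
+ * Assuming @c MyProcess is a user-defined implementation of
+ * ConsensusAlgorithmProcess<int, int> (such as the sketch shown in the
+ * documentation of that class) and @c comm is the MPI communicator to be
+ * used, a typical use of this class looks as follows:
+ * @code
+ * MyProcess process;
+ * ConsensusAlgorithmSelector<int, int> consensus_algorithm(process, comm);
+ * consensus_algorithm.run();
+ * @endcode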
+ *
+ * @author Peter Munch, 2019
+ */
+ template <typename T1, typename T2>
+ class ConsensusAlgorithmSelector : public ConsensusAlgorithm<T1, T2>
+ {
+ public:
+ /**
+ * Constructor.
+ *
+ * @param process Process to be run during consensus algorithm.
+ * @param comm MPI Communicator.
+ */
+ ConsensusAlgorithmSelector(ConsensusAlgorithmProcess<T1, T2> &process,
+ const MPI_Comm & comm);
+
+ /**
+ * Destructor.
+ */
+ virtual ~ConsensusAlgorithmSelector() = default;
+
+ /**
+ * Run consensus algorithm. The function call is delegated to another
+ * ConsensusAlgorithm implementation.
+ */
+ virtual void
+ run() override;
+
+ private:
+ // Pointer to the actual ConsensusAlgorithm implementation.
+ std::shared_ptr<ConsensusAlgorithm<T1, T2>> consensus_algo;
+ };
+
+ /**
+ * Given a partitioned index space, compute the owning MPI process rank
+ * of each element of a second index set according to that partition. A
+ * natural usage of this function is to compute for each ghosted
+ * degree of freedom the MPI rank of the process owning that index.
+ *
+ * One might think: "But we know which rank a ghost DoF belongs to based on
+ * the subdomain id of the cell it is on". But this heuristic fails for DoFs
+ * on interfaces between ghost cells with different subdomain_ids, or
+ * between a ghost cell and an artificial cell. Furthermore, this function
+ * enables a completely abstract exchange of information without the help of
+ * the mesh in terms of neighbors.
+ *
+ * The first argument passed to this function, @p owned_indices, must
+ * uniquely partition an index space between all processes.
+ * Otherwise, there are no limitations on this argument: in particular,
+ * there is no need to partition the index space into contiguous subsets.
+ * Furthermore, there are no limitations on the second index set
+ * @p indices_to_look_up as long as its size matches the size of the first
+ * one. It can be chosen arbitrarily and independently on each
+ * process. In the case that the second index set also contains locally
+ * owned indices, these indices will be treated correctly and the rank of
+ * this process is returned for those entries.
+ *
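+ * As a small illustrative example with two MPI processes, a global index
+ * space of size 8, rank 0 owning [0,4), and rank 1 owning [4,8) (here,
+ * @c my_rank and @c comm denote the rank of the current process and the
+ * MPI communicator, respectively):
+ * @code
+ * IndexSet owned_indices(8);
+ * owned_indices.add_range(4 * my_rank, 4 * (my_rank + 1));
+ *
+ * IndexSet indices_to_look_up(8);
+ * indices_to_look_up.add_index(my_rank == 0 ? 5 : 2);
+ *
+ * const std::vector<unsigned int> owners =
+ *   Utilities::MPI::compute_index_owner(owned_indices,
+ *                                       indices_to_look_up,
+ *                                       comm);
+ * // owners is {1} on rank 0 and {0} on rank 1
+ * @endcode
+ *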
+ * @note This is a collective operation: all processes within the given
+ * communicator have to call this function. Since this function does not
+ * use MPI_Alltoall or MPI_Allgather, but instead uses non-blocking
+ * point-to-point communication and only a single non-blocking
+ * barrier, it reduces the memory consumption significantly. This function
+ * is suited for large-scale simulations with >100k MPI ranks.
+ *
+ * @param[in] owned_indices Index set with indices locally owned by this
+ * process.
+ * @param[in] indices_to_look_up Index set containing indices for which the
+ * user wants to know the rank of the owning process.
+ * @param[in] comm MPI communicator.
+ *
+ * @return List containing the MPI process rank for each entry in the index
+ * set @p indices_to_look_up. The order coincides with the order in
+ * which the indices of @p indices_to_look_up are traversed by its
+ * ElementIterator.
+ *
+ * @author Peter Munch, 2019
+ */
+ std::vector<unsigned int>
+ compute_index_owner(const IndexSet &owned_indices,
+ const IndexSet &indices_to_look_up,
+ const MPI_Comm &comm);
+
#ifndef DOXYGEN
// declaration for an internal function that lives in mpi.templates.h
namespace internal
#include <iostream>
#include <numeric>
+#include <set>
+#include <vector>
#ifdef DEAL_II_WITH_TRILINOS
# ifdef DEAL_II_WITH_MPI
#endif
}
+ template <typename T1, typename T2>
+ void
+ ConsensusAlgorithmProcess<T1, T2>::process_request(const unsigned int,
+ const std::vector<T1> &,
+ std::vector<T2> &)
+ {
+ // nothing to do
+ }
+
+
+
+ template <typename T1, typename T2>
+ void
+ ConsensusAlgorithmProcess<T1, T2>::pack_recv_buffer(const int,
+ std::vector<T1> &)
+ {
+ // nothing to do
+ }
+
+
+
+ template <typename T1, typename T2>
+ void
+ ConsensusAlgorithmProcess<T1, T2>::prepare_recv_buffer(const int,
+ std::vector<T2> &)
+ {
+ // nothing to do
+ }
+
+
+
+ template <typename T1, typename T2>
+ void
+ ConsensusAlgorithmProcess<T1, T2>::unpack_recv_buffer(
+ const int,
+ const std::vector<T2> &)
+ {
+ // nothing to do
+ }
+
+
+
+ template <typename T1, typename T2>
+ ConsensusAlgorithm<T1, T2>::ConsensusAlgorithm(
+ ConsensusAlgorithmProcess<T1, T2> &process,
+ const MPI_Comm & comm)
+ : process(process)
+ , comm(comm)
+ , my_rank(this_mpi_process(comm))
+ , n_procs(n_mpi_processes(comm))
+ {}
+
+
+
+ template <typename T1, typename T2>
+ ConsensusAlgorithm_NBX<T1, T2>::ConsensusAlgorithm_NBX(
+ ConsensusAlgorithmProcess<T1, T2> &process,
+ const MPI_Comm & comm)
+ : ConsensusAlgorithm<T1, T2>(process, comm)
+ {}
+
+
+
+ template <typename T1, typename T2>
+ void
+ ConsensusAlgorithm_NBX<T1, T2>::run()
+ {
+ // 1) send requests and start receiving the answers
+ start_communication();
+
+ // 2) answer requests and check if all requests of this process have been
+ // answered
+ while (!check_own_state())
+ process_requests();
+
+ // 3) signal to all other processes that all requests of this process have
+ // been answered
+ signal_finish();
+
+ // 4) nevertheless, this process has to keep on answering (potential)
+ // incoming requests until all processes have received the
+ // answer to all requests
+ while (!check_global_state())
+ process_requests();
+
+ // 5) process the answer to all requests
+ clean_up_and_end_communication();
+ }
+
+
+
+ template <typename T1, typename T2>
+ bool
+ ConsensusAlgorithm_NBX<T1, T2>::check_own_state()
+ {
+#ifdef DEAL_II_WITH_MPI
+ int all_receive_requests_are_done;
+ const auto ierr = MPI_Testall(recv_requests.size(),
+ recv_requests.data(),
+ &all_receive_requests_are_done,
+ MPI_STATUSES_IGNORE);
+ AssertThrowMPI(ierr);
+
+ return all_receive_requests_are_done;
+#else
+ return true;
+#endif
+ }
+
+
+
+ template <typename T1, typename T2>
+ void
+ ConsensusAlgorithm_NBX<T1, T2>::signal_finish()
+ {
+#ifdef DEAL_II_WITH_MPI
+# if DEAL_II_MPI_VERSION_GTE(3, 0)
+ const auto ierr = MPI_Ibarrier(this->comm, &barrier_request);
+ AssertThrowMPI(ierr);
+# else
+ AssertThrow(
+ false,
+ ExcMessage(
+ "ConsensusAlgorithm_NBX uses MPI 3.0 features. You should compile with at least MPI 3.0."));
+# endif
+#endif
+ }
+
+
+
+ template <typename T1, typename T2>
+ bool
+ ConsensusAlgorithm_NBX<T1, T2>::check_global_state()
+ {
+#ifdef DEAL_II_WITH_MPI
+ int all_ranks_reached_barrier;
+ const auto ierr = MPI_Test(&barrier_request,
+ &all_ranks_reached_barrier,
+ MPI_STATUSES_IGNORE);
+ AssertThrowMPI(ierr);
+ return all_ranks_reached_barrier;
+#else
+ return true;
+#endif
+ }
+
+
+
+ template <typename T1, typename T2>
+ void
+ ConsensusAlgorithm_NBX<T1, T2>::process_requests()
+ {
+#ifdef DEAL_II_WITH_MPI
+ // check if there is a request pending
+ MPI_Status status;
+ int request_is_pending;
+ const auto ierr = MPI_Iprobe(
+ MPI_ANY_SOURCE, tag_request, this->comm, &request_is_pending, &status);
+ AssertThrowMPI(ierr);
+
+ if (request_is_pending) // request is pending
+ {
+ // get rank of requesting process
+ const auto other_rank = status.MPI_SOURCE;
+
+# ifdef DEBUG
+ Assert(requesting_processes.find(other_rank) ==
+ requesting_processes.end(),
+ ExcMessage("Process is requesting a second time!"));
+ requesting_processes.insert(other_rank);
+# endif
+
+ std::vector<T1> buffer_recv;
+ // get size of the incoming message
+ int number_amount;
+ auto ierr = MPI_Get_count(&status,
+ internal::mpi_type_id(buffer_recv.data()),
+ &number_amount);
+ AssertThrowMPI(ierr);
+
+ // allocate memory for incoming message
+ buffer_recv.resize(number_amount);
+ ierr = MPI_Recv(buffer_recv.data(),
+ number_amount,
+ internal::mpi_type_id(buffer_recv.data()),
+ other_rank,
+ tag_request,
+ this->comm,
+ &status);
+ AssertThrowMPI(ierr);
+
+ // allocate memory for answer message
+ request_buffers.emplace_back();
+ request_requests.emplace_back(new MPI_Request);
+
+ // process request
+ auto &request_buffer = request_buffers.back();
+ this->process.process_request(other_rank,
+ buffer_recv,
+ request_buffer);
+
+ // start to send answer back
+ ierr = MPI_Isend(request_buffer.data(),
+ request_buffer.size(),
+ internal::mpi_type_id(request_buffer.data()),
+ other_rank,
+ tag_delivery,
+ this->comm,
+ request_requests.back().get());
+ AssertThrowMPI(ierr);
+ }
+#endif
+ }
+
+
+
+ template <typename T1, typename T2>
+ void
+ ConsensusAlgorithm_NBX<T1, T2>::start_communication()
+ {
+#ifdef DEAL_II_WITH_MPI
+ // 1) determine with which processes this process wants to communicate
+ targets = this->process.compute_targets();
+ const auto n_targets = targets.size();
+
+ // 2) allocate memory
+ recv_buffers.resize(n_targets);
+ recv_requests.resize(n_targets);
+ send_requests.resize(n_targets);
+ send_buffers.resize(n_targets);
+
+ {
+ // 3) send requests and start receiving the answers
+ for (unsigned int i = 0; i < n_targets; i++)
+ {
+ const unsigned int rank = targets[i];
+ const unsigned int index = i;
+
+ // pack data which should be sent
+ auto &send_buffer = send_buffers[index];
+ this->process.pack_recv_buffer(rank, send_buffer);
+
+ // start to send data
+ auto ierr = MPI_Isend(send_buffer.data(),
+ send_buffer.size(),
+ internal::mpi_type_id(send_buffer.data()),
+ rank,
+ tag_request,
+ this->comm,
+ &send_requests[index]);
+ AssertThrowMPI(ierr);
+
+ // start to receive data
+ auto &recv_buffer = recv_buffers[index];
+ this->process.prepare_recv_buffer(rank, recv_buffer);
+ ierr = MPI_Irecv(recv_buffer.data(),
+ recv_buffer.size(),
+ internal::mpi_type_id(recv_buffer.data()),
+ rank,
+ tag_delivery,
+ this->comm,
+ &recv_requests[index]);
+ AssertThrowMPI(ierr);
+ }
+ }
+#endif
+ }
+
+
+
+ template <typename T1, typename T2>
+ void
+ ConsensusAlgorithm_NBX<T1, T2>::clean_up_and_end_communication()
+ {
+#ifdef DEAL_II_WITH_MPI
+ // clean up
+ {
+ auto ierr = MPI_Waitall(send_requests.size(),
+ send_requests.data(),
+ MPI_STATUSES_IGNORE);
+ AssertThrowMPI(ierr);
+
+ ierr = MPI_Waitall(recv_requests.size(),
+ recv_requests.data(),
+ MPI_STATUSES_IGNORE);
+ AssertThrowMPI(ierr);
+
+ ierr = MPI_Wait(&barrier_request, MPI_STATUS_IGNORE);
+ AssertThrowMPI(ierr);
+
+ for (auto &i : request_requests)
+ {
+ const auto ierr = MPI_Wait(i.get(), MPI_STATUS_IGNORE);
+ AssertThrowMPI(ierr);
+ }
+
+# ifdef DEBUG
+ // note: IBarrier seems to cause problems during testing; this
+ // additional Barrier seems to help
+ MPI_Barrier(this->comm);
+# endif
+ }
+
+ // unpack data
+ {
+ for (unsigned int i = 0; i < targets.size(); i++)
+ this->process.unpack_recv_buffer(targets[i], recv_buffers[i]);
+ }
+#endif
+ }
+
+
+
+ /**
+ * A re-implementation of compute_point_to_point_communication_pattern
+ * using the ConsensusAlgorithm.
+ */
+ class ConsensusAlgorithmProcessTargets
+ : public ConsensusAlgorithmProcess<int, int>
+ {
+ public:
+ ConsensusAlgorithmProcessTargets(std::vector<unsigned int> &target)
+ : target(target)
+ {}
+
+ using T1 = int;
+ using T2 = int;
+
+ virtual void
+ process_request(const unsigned int other_rank,
+ const std::vector<T1> &buffer_recv,
+ std::vector<T2> & request_buffer) override
+ {
+ (void)buffer_recv;
+ (void)request_buffer;
+ this->sources.push_back(other_rank);
+ }
+
+ /**
+ * Simply return the user-provided list.
+ *
+ * @return List of processes this process wants to send requests to.
+ */
+ virtual std::vector<unsigned int>
+ compute_targets() override
+ {
+ return target;
+ }
+
+ /**
+ * The result of the consensus algorithm.
+ * @return Sorted list of ranks of processes wanting to send a request to
+ * this process.
+ */
+ std::vector<unsigned int>
+ get_result()
+ {
+ std::sort(sources.begin(), sources.end());
+ return sources;
+ }
+
+ private:
+ /**
+ * List of processes this process wants to send requests to.
+ */
+ const std::vector<unsigned int> &target;
+
+ /**
+ * List of ranks of processes wanting to send a request to this process.
+ */
+ std::vector<unsigned int> sources;
+ };
+
+
+
+ template <typename T1, typename T2>
+ ConsensusAlgorithm_PEX<T1, T2>::ConsensusAlgorithm_PEX(
+ ConsensusAlgorithmProcess<T1, T2> &process,
+ const MPI_Comm & comm)
+ : ConsensusAlgorithm<T1, T2>(process, comm)
+ {}
+
+
+
+ template <typename T1, typename T2>
+ void
+ ConsensusAlgorithm_PEX<T1, T2>::run()
+ {
+ // 1) send requests and start receiving the answers;
+ // in particular, determine how many requests are expected
+ const unsigned int n_requests = start_communication();
+
+ // 2) answer requests
+ for (unsigned int request = 0; request < n_requests; request++)
+ process_requests(request);
+
+ // 3) process answers
+ clean_up_and_end_communication();
+ }
+
+
+
+ template <typename T1, typename T2>
+ void
+ ConsensusAlgorithm_PEX<T1, T2>::process_requests(int index)
+ {
+#ifdef DEAL_II_WITH_MPI
+ MPI_Status status;
+ MPI_Probe(MPI_ANY_SOURCE, tag_request, this->comm, &status);
+
+ // get rank of incoming message
+ const auto other_rank = status.MPI_SOURCE;
+
+ std::vector<T1> buffer_recv;
+
+ // get size of incoming message
+ int number_amount;
+ auto ierr = MPI_Get_count(&status,
+ internal::mpi_type_id(buffer_recv.data()),
+ &number_amount);
+ AssertThrowMPI(ierr);
+
+ // allocate memory for incoming message
+ buffer_recv.resize(number_amount);
+ ierr = MPI_Recv(buffer_recv.data(),
+ number_amount,
+ internal::mpi_type_id(buffer_recv.data()),
+ other_rank,
+ tag_request,
+ this->comm,
+ &status);
+ AssertThrowMPI(ierr);
+
+ // process request
+ auto &request_buffer = requests_buffers[index];
+ this->process.process_request(other_rank, buffer_recv, request_buffer);
+
+ // start to send answer back
+ ierr = MPI_Isend(request_buffer.data(),
+ request_buffer.size(),
+ internal::mpi_type_id(request_buffer.data()),
+ other_rank,
+ tag_delivery,
+ this->comm,
+ &requests_answers[index]);
+ AssertThrowMPI(ierr);
+#else
+ (void)index;
+#endif
+ }
+
+
+ template <typename T1, typename T2>
+ unsigned int
+ ConsensusAlgorithm_PEX<T1, T2>::start_communication()
+ {
+#ifdef DEAL_II_WITH_MPI
+ // 1) determine with which processes this process wants to communicate
+ targets = this->process.compute_targets();
+
+ // 2) determine who wants to communicate with this process
+ const bool use_nbx = false;
+ if (!use_nbx)
+ {
+ sources =
+ compute_point_to_point_communication_pattern(this->comm, targets);
+ }
+ else
+ {
+ ConsensusAlgorithmProcessTargets process(targets);
+ ConsensusAlgorithm_NBX<ConsensusAlgorithmProcessTargets::T1,
+ ConsensusAlgorithmProcessTargets::T2>
+ consensus_algorithm(process, this->comm);
+ consensus_algorithm.run();
+ sources = process.get_result();
+ }
+
+ const auto n_targets = targets.size();
+ const auto n_sources = sources.size();
+
+ // 3) allocate memory
+ recv_buffers.resize(n_targets);
+ send_buffers.resize(n_targets);
+ send_and_recv_buffers.resize(2 * n_targets);
+
+ requests_answers.resize(n_sources);
+ requests_buffers.resize(n_sources);
+
+ // 4) send and receive
+ for (unsigned int i = 0; i < n_targets; i++)
+ {
+ const unsigned int rank = targets[i];
+
+ // pack data which should be sent
+ auto &send_buffer = send_buffers[i];
+ this->process.pack_recv_buffer(rank, send_buffer);
+
+ // start to send data
+ auto ierr = MPI_Isend(send_buffer.data(),
+ send_buffer.size(),
+ internal::mpi_type_id(send_buffer.data()),
+ rank,
+ tag_request,
+ this->comm,
+ &send_and_recv_buffers[n_targets + i]);
+ AssertThrowMPI(ierr);
+
+ // start to receive data
+ auto &recv_buffer = recv_buffers[i];
+ this->process.prepare_recv_buffer(rank, recv_buffer);
+ ierr = MPI_Irecv(recv_buffer.data(),
+ recv_buffer.size(),
+ internal::mpi_type_id(recv_buffer.data()),
+ rank,
+ tag_delivery,
+ this->comm,
+ &send_and_recv_buffers[i]);
+ AssertThrowMPI(ierr);
+ }
+
+ return sources.size();
+#else
+ return 0;
+#endif
+ }
+
+
+
+ template <typename T1, typename T2>
+ void
+ ConsensusAlgorithm_PEX<T1, T2>::clean_up_and_end_communication()
+ {
+#ifdef DEAL_II_WITH_MPI
+ // finalize all MPI_Requests
+ MPI_Waitall(send_and_recv_buffers.size(),
+ send_and_recv_buffers.data(),
+ MPI_STATUSES_IGNORE);
+ MPI_Waitall(requests_answers.size(),
+ requests_answers.data(),
+ MPI_STATUSES_IGNORE);
+
+ // unpack received data
+ for (unsigned int i = 0; i < targets.size(); i++)
+ this->process.unpack_recv_buffer(targets[i], recv_buffers[i]);
+#endif
+ }
+
+
+
+ template <typename T1, typename T2>
+ ConsensusAlgorithmSelector<T1, T2>::ConsensusAlgorithmSelector(
+ ConsensusAlgorithmProcess<T1, T2> &process,
+ const MPI_Comm & comm)
+ : ConsensusAlgorithm<T1, T2>(process, comm)
+ {
+ // Depending on the number of processes we switch between implementations.
+ // We reduce the threshold for debug mode to be able to test also the
+ // non-blocking implementation. This feature is tested by:
+ // tests/multigrid/transfer_matrix_free_06.with_mpi=true.with_p4est=true.with_trilinos=true.mpirun=15.output
+#ifdef DEAL_II_WITH_MPI
+# if DEAL_II_MPI_VERSION_GTE(3, 0)
+# ifdef DEBUG
+ if (Utilities::MPI::n_mpi_processes(comm) > 14)
+# else
+ if (Utilities::MPI::n_mpi_processes(comm) > 99)
+# endif
+ consensus_algo.reset(new ConsensusAlgorithm_NBX<T1, T2>(process, comm));
+ else
+# endif
+#endif
+ consensus_algo.reset(new ConsensusAlgorithm_PEX<T1, T2>(process, comm));
+ }
+
+
+
+ template <typename T1, typename T2>
+ void
+ ConsensusAlgorithmSelector<T1, T2>::run()
+ {
+ consensus_algo->run();
+ }
+
+
+
+ namespace ComputeIndexOwner
+ {
+ struct Dictionary
+ {
+ static const unsigned int tag_setup = 11;
+
+ std::vector<unsigned int> actually_owning_ranks;
+
+ types::global_dof_index dofs_per_process;
+ std::pair<types::global_dof_index, types::global_dof_index> local_range;
+ types::global_dof_index local_size;
+ types::global_dof_index size;
+
+ void
+ reinit(const IndexSet &owned_indices, const MPI_Comm &comm)
+ {
+ // 1) set up the static partition of the index space (the dictionary)
+ this->partition(owned_indices, comm);
+
+#ifdef DEAL_II_WITH_MPI
+ unsigned int my_rank = this_mpi_process(comm);
+
+ types::global_dof_index dic_local_received = 0;
+ std::map<unsigned int, unsigned int> relevant_procs_map;
+
+ // 2) collect relevant processes and process local dict entries
+ {
+ std::vector<unsigned int> relevant_procs;
+ for (auto i : owned_indices)
+ {
+ unsigned int other_rank = this->dof_to_dict_rank(i);
+ if (other_rank == my_rank)
+ {
+ this->actually_owning_ranks[i - this->local_range.first] =
+ my_rank;
+ dic_local_received++;
+ }
+ else if (relevant_procs.empty() ||
+ relevant_procs.back() != other_rank)
+ relevant_procs.push_back(other_rank);
+ }
+
+ {
+ unsigned int c = 0;
+ for (auto i : relevant_procs)
+ relevant_procs_map[i] = c++;
+ }
+ }
+
+ const unsigned int n_relevant_procs = relevant_procs_map.size();
+ std::vector<std::vector<
+ std::pair<types::global_dof_index, types::global_dof_index>>>
+ buffers(n_relevant_procs);
+ std::vector<MPI_Request> request(n_relevant_procs);
+
+ // 3) send messages with local dofs to the right dict process
+ {
+ std::vector<std::vector<types::global_dof_index>> temp(
+ n_relevant_procs);
+
+ // collect dofs of each dict process
+ for (auto i : owned_indices)
+ {
+ unsigned int other_rank = this->dof_to_dict_rank(i);
+ if (other_rank != my_rank)
+ temp[relevant_procs_map[other_rank]].push_back(i);
+ }
+
+ // send dofs to each process
+ for (auto rank_pair : relevant_procs_map)
+ {
+ const int rank = rank_pair.first;
+ const int index = rank_pair.second;
+
+ // create index set and compress data to be sent
+ auto & indices_i = temp[index];
+ IndexSet is(this->size);
+ is.add_indices(indices_i.begin(), indices_i.end());
+ is.compress();
+
+ // translate index set to a list of pairs
+ auto &buffer = buffers[index];
+ for (auto interval = is.begin_intervals();
+ interval != is.end_intervals();
+ interval++)
+ buffer.emplace_back(*interval->begin(), interval->last() + 1);
+
+ // send data
+ const auto ierr = MPI_Isend(buffer.data(),
+ buffer.size() * 2,
+ DEAL_II_DOF_INDEX_MPI_TYPE,
+ rank,
+ tag_setup,
+ comm,
+ &request[index]);
+ AssertThrowMPI(ierr);
+ }
+ }
+
+
+ // 4) receive messages until all dofs in dict are processed
+ while (this->local_size != dic_local_received)
+ {
+ // wait for an incoming message
+ MPI_Status status;
+ auto ierr = MPI_Probe(MPI_ANY_SOURCE, tag_setup, comm, &status);
+ AssertThrowMPI(ierr);
+
+ // retrieve size of incoming message
+ int number_amount;
+ ierr = MPI_Get_count(&status,
+ DEAL_II_DOF_INDEX_MPI_TYPE,
+ &number_amount);
+ AssertThrowMPI(ierr);
+
+ const auto other_rank = status.MPI_SOURCE;
+
+ // receive message
+ Assert(number_amount % 2 == 0, ExcInternalError());
+ std::vector<
+ std::pair<types::global_dof_index, types::global_dof_index>>
+ buffer(number_amount / 2);
+ ierr = MPI_Recv(buffer.data(),
+ number_amount,
+ DEAL_II_DOF_INDEX_MPI_TYPE,
+ other_rank,
+ tag_setup,
+ comm,
+ &status);
+ AssertThrowMPI(ierr);
+
+ // process message: loop over all intervals
+ for (auto interval : buffer)
+ for (types::global_dof_index i = interval.first;
+ i < interval.second;
+ i++)
+ {
+ this->actually_owning_ranks[i - this->local_range.first] =
+ other_rank;
+ dic_local_received++;
+ }
+ }
+
+ // 5) make sure that all messages have been sent
+ const auto ierr =
+ MPI_Waitall(n_relevant_procs, request.data(), MPI_STATUSES_IGNORE);
+ AssertThrowMPI(ierr);
+#else
+ (void)owned_indices;
+ (void)comm;
+#endif
+ }
+
+ unsigned int
+ dof_to_dict_rank(const types::global_dof_index i)
+ {
+ return i / dofs_per_process;
+ }
+
+ private:
+ void
+ partition(const IndexSet &owned_indices, const MPI_Comm &comm)
+ {
+#ifdef DEAL_II_WITH_MPI
+ const unsigned int n_procs = n_mpi_processes(comm);
+ const unsigned int my_rank = this_mpi_process(comm);
+
+ size = owned_indices.size();
+ dofs_per_process = (size + n_procs - 1) / n_procs;
+ local_range.first = std::min(dofs_per_process * my_rank, size);
+ local_range.second = std::min(dofs_per_process * (my_rank + 1), size);
+ local_size = local_range.second - local_range.first;
+
+ actually_owning_ranks.resize(local_size);
+#else
+ (void)owned_indices;
+ (void)comm;
+#endif
+ }
+ };
+
+ class ConsensusAlgorithmProcess
+ : public dealii::Utilities::MPI::
+ ConsensusAlgorithmProcess<types::global_dof_index, unsigned int>
+ {
+ public:
+ ConsensusAlgorithmProcess(const IndexSet & owned_indices,
+ const IndexSet & indices_to_look_up,
+ const MPI_Comm & comm,
+ std::vector<unsigned int> &owning_ranks)
+ : owned_indices(owned_indices)
+ , indices_to_look_up(indices_to_look_up)
+ , comm(comm)
+ , my_rank(this_mpi_process(comm))
+ , n_procs(n_mpi_processes(comm))
+ , owning_ranks(owning_ranks)
+ {
+ this->dict.reinit(owned_indices, comm);
+ }
+
+ const IndexSet & owned_indices;
+ const IndexSet & indices_to_look_up;
+ const MPI_Comm & comm;
+ const unsigned int my_rank;
+ const unsigned int n_procs;
+ std::vector<unsigned int> &owning_ranks;
+
+ Dictionary dict;
+
+ std::map<unsigned int, std::vector<types::global_dof_index>> temp;
+ std::map<unsigned int, std::vector<unsigned int>> recv_indices;
+
+ virtual void
+ process_request(const unsigned int other_rank,
+ const std::vector<types::global_dof_index> &buffer_recv,
+ std::vector<unsigned int> &request_buffer) override
+ {
+ (void)other_rank;
+ Assert(buffer_recv.size() % 2 == 0, ExcInternalError());
+ for (unsigned int j = 0; j < buffer_recv.size(); j += 2)
+ for (auto i = buffer_recv[j]; i < buffer_recv[j + 1]; i++)
+ request_buffer.push_back(
+ dict.actually_owning_ranks[i - dict.local_range.first]);
+ }
+
+ virtual std::vector<unsigned int>
+ compute_targets() override
+ {
+ std::vector<unsigned int> targets;
+
+ // 1) collect relevant processes and process local dict entries
+ {
+ unsigned int index = 0;
+ for (auto i : indices_to_look_up)
+ {
+ unsigned int other_rank = dict.dof_to_dict_rank(i);
+ if (other_rank == my_rank)
+ owning_ranks[index] =
+ dict.actually_owning_ranks[i - dict.local_range.first];
+ else if (targets.empty() || targets.back() != other_rank)
+ targets.push_back(other_rank);
+ index++;
+ }
+ }
+
+
+ // 2) initialize the maps used for communication with each target
+ for (auto i : targets)
+ {
+ recv_indices[i] = {};
+ temp[i] = {};
+ }
+
+ // 3) collect indices for each process
+ {
+ unsigned int index = 0;
+ for (auto i : indices_to_look_up)
+ {
+ unsigned int other_rank = dict.dof_to_dict_rank(i);
+ if (other_rank != my_rank)
+ {
+ recv_indices[other_rank].push_back(index);
+ temp[other_rank].push_back(i);
+ }
+ index++;
+ }
+ }
+
+ Assert(targets.size() == recv_indices.size() &&
+ targets.size() == temp.size(),
+ ExcMessage("Size does not match!"));
+
+ return targets;
+ }
+
+ virtual void
+ pack_recv_buffer(
+ const int other_rank,
+ std::vector<types::global_dof_index> &send_buffer) override
+ {
+ // create index set and compress data to be sent
+ auto & indices_i = temp[other_rank];
+ IndexSet is(dict.size);
+ is.add_indices(indices_i.begin(), indices_i.end());
+ is.compress();
+
+ for (auto interval = is.begin_intervals();
+ interval != is.end_intervals();
+ interval++)
+ {
+ send_buffer.push_back(*interval->begin());
+ send_buffer.push_back(interval->last() + 1);
+ }
+ }
+
+ virtual void
+ prepare_recv_buffer(const int other_rank,
+ std::vector<unsigned int> &recv_buffer) override
+ {
+ recv_buffer.resize(recv_indices[other_rank].size());
+ }
+
+ virtual void
+ unpack_recv_buffer(
+ const int other_rank,
+ const std::vector<unsigned int> &recv_buffer) override
+ {
+ Assert(recv_indices[other_rank].size() == recv_buffer.size(),
+ ExcMessage("Sizes do not match!"));
+
+ for (unsigned int j = 0; j < recv_indices[other_rank].size(); j++)
+ owning_ranks[recv_indices[other_rank][j]] = recv_buffer[j];
+ }
+ };
+
+ } // namespace ComputeIndexOwner
+
+
+
+ std::vector<unsigned int>
+ compute_index_owner(const IndexSet &owned_indices,
+ const IndexSet &indices_to_look_up,
+ const MPI_Comm &comm)
+ {
+ Assert(owned_indices.size() == indices_to_look_up.size(),
+ ExcMessage("IndexSets have to have the same sizes."));
+
+ std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());
+
+ // Step 1: setup dictionary
+ // The input owned_indices can be partitioned arbitrarily. In the
+ // dictionary, the index set is statically repartitioned among the
+ // processes again and extended with information about the actual owner
+ // of each index.
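+ // As an example of this static repartitioning: with 10 global indices
+ // and 3 MPI processes, each dictionary rank is responsible for
+ // ceil(10/3) = 4 indices, i.e. rank 0 for the range [0,4), rank 1 for
+ // [4,8), and rank 2 for [8,10); the dictionary rank of index i is then
+ // simply i/4.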
+ ComputeIndexOwner::ConsensusAlgorithmProcess process(owned_indices,
+ indices_to_look_up,
+ comm,
+ owning_ranks);
+
+ // Step 2: read dictionary
+ // Communicate with the process that owns the index in the static
+ // partition (i.e., the dictionary). This process returns the actual
+ // owner of the index.
+ ConsensusAlgorithmSelector<types::global_dof_index, unsigned int>
+ consensus_algorithm(process, comm);
+ consensus_algorithm.run();
+
+ return owning_ranks;
+ }
#include "mpi.inst"
} // end of namespace MPI
}
}
- // Allocate memory for data that will be exported
- std::vector<types::global_dof_index> expanded_ghost_indices(
- n_ghost_indices_data);
unsigned int n_ghost_targets = 0;
- if (n_ghost_indices_data > 0)
- {
- // Create first a vector of ghost_targets from the list of ghost
- // indices and then push back new values. When we are done, copy the
- // data to that field of the partitioner. This way, the variable
- // ghost_targets will have exactly the size we need, whereas the
- // vector filled with emplace_back might actually be too long.
- unsigned int current_proc = 0;
- ghost_indices_data.fill_index_vector(expanded_ghost_indices);
- types::global_dof_index current_index = expanded_ghost_indices[0];
- while (current_index >= first_index[current_proc + 1])
- current_proc++;
- AssertIndexRange(current_proc, n_procs);
-
- // since DoFs are contiguous, populate a vector which stores
- // a process rank and the number of ghosts
- std::vector<std::pair<unsigned int, unsigned int>> ghost_targets_temp(
- 1, std::pair<unsigned int, unsigned int>(current_proc, 0));
- n_ghost_targets++;
-
- // find which process is the owner of other indices:
- for (unsigned int iterator = 1; iterator < n_ghost_indices_data;
- ++iterator)
- {
- current_index = expanded_ghost_indices[iterator];
- while (current_index >= first_index[current_proc + 1])
- current_proc++;
- AssertIndexRange(current_proc, n_procs);
- // if we found a new target (i.e. higher rank) then adjust the
- // pair.second in the last element so that it stores the total
- // number of ghosts owned by pair.first
- if (ghost_targets_temp[n_ghost_targets - 1].first < current_proc)
- {
- ghost_targets_temp[n_ghost_targets - 1].second =
- iterator - ghost_targets_temp[n_ghost_targets - 1].second;
- ghost_targets_temp.emplace_back(current_proc, iterator);
- n_ghost_targets++;
- }
- }
- // adjust the last element so that pair.second stores the number of
- // elements
- ghost_targets_temp[n_ghost_targets - 1].second =
- n_ghost_indices_data -
- ghost_targets_temp[n_ghost_targets - 1].second;
- ghost_targets_data = ghost_targets_temp;
- }
+ {
+ const auto index_owner =
+ Utilities::MPI::compute_index_owner(this->locally_owned_range_data,
+ this->ghost_indices_data,
+ this->communicator);
+
+ ghost_targets_data.clear();
+
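+ // compress the (sorted) list of owning ranks into pairs of
+ // (rank, number of ghost indices owned by that rank); for example,
+ // index_owner = {1, 1, 3} becomes {(1, 2), (3, 1)}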
+ if (index_owner.size() > 0)
+ {
+ ghost_targets_data.emplace_back(index_owner[0], 0);
+ for (auto i : index_owner)
+ {
+ Assert(i >= ghost_targets_data.back().first,
+ ExcInternalError(
+ "Expect result of compute_index_owner to be sorted"));
+ if (i == ghost_targets_data.back().first)
+ ghost_targets_data.back().second++;
+ else
+ ghost_targets_data.emplace_back(i, 1);
+ }
+ }
+
+ n_ghost_targets = ghost_targets_data.size();
+ }
// find the processes that want to import to me
{
std::vector<int> send_buffer(n_procs, 0);
// now that we know how many indices each process will receive from
// ghosts, send and receive indices for import data. non-blocking receives
// and blocking sends
- std::vector<types::global_dof_index> expanded_import_indices(
- n_import_indices_data);
{
+ std::vector<types::global_dof_index> expanded_import_indices(
+ n_import_indices_data);
unsigned int current_index_start = 0;
std::vector<MPI_Request> import_requests(import_targets_data.size() +
n_ghost_targets);
// use non-blocking send for ghost indices stored in
// expanded_ghost_indices
+ std::vector<types::global_dof_index> expanded_ghost_indices;
+ if (n_ghost_indices_data > 0)
+ ghost_indices_data.fill_index_vector(expanded_ghost_indices);
+
current_index_start = 0;
for (unsigned int i = 0; i < n_ghost_targets; i++)
{
{}
};
-
-
template <int dim, int spacedim>
void
fill_copy_indices(
tria->level_ghost_owners();
std::map<int, std::vector<DoFPair>> send_data;
- // * find owners of the level dofs and insert into send_data
- // accordingly
- for (const auto &dofpair : send_data_temp)
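+ // sort the collected (level, global index, level index) triples and
+ // remove duplicates, so that each ghost level DoF enters the per-level
+ // index sets built below exactly once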
+ std::sort(send_data_temp.begin(),
+ send_data_temp.end(),
+ [](const DoFPair &lhs, const DoFPair &rhs) {
+ if (lhs.level < rhs.level)
+ return true;
+ if (lhs.level > rhs.level)
+ return false;
+
+ if (lhs.level_dof_index < rhs.level_dof_index)
+ return true;
+ if (lhs.level_dof_index > rhs.level_dof_index)
+ return false;
+
+ return lhs.global_dof_index < rhs.global_dof_index;
+ });
+ send_data_temp.erase(
+ std::unique(send_data_temp.begin(),
+ send_data_temp.end(),
+ [](const DoFPair &lhs, const DoFPair &rhs) {
+ return (lhs.level == rhs.level) &&
+ (lhs.level_dof_index == rhs.level_dof_index) &&
+ (lhs.global_dof_index == rhs.global_dof_index);
+ }),
+ send_data_temp.end());
+
+ for (unsigned int level = 0; level < n_levels; ++level)
{
- std::set<types::subdomain_id>::iterator it;
- for (it = neighbors.begin(); it != neighbors.end(); ++it)
- {
- if (locally_owned_mg_dofs_per_processor[dofpair.level][*it]
- .is_element(dofpair.level_dof_index))
- {
- send_data[*it].push_back(dofpair);
- break;
- }
- }
- // Is this level dof not owned by any of our neighbors? That would
- // certainly be a bug!
- Assert(it != neighbors.end(),
- ExcMessage("could not find DoF owner."));
+ const IndexSet &is_local = mg_dof.locally_owned_mg_dofs(level);
+
+ std::vector<unsigned int> level_dof_indices;
+ std::vector<unsigned int> global_dof_indices;
+ for (const auto &dofpair : send_data_temp)
+ if (dofpair.level == level)
+ {
+ level_dof_indices.push_back(dofpair.level_dof_index);
+ global_dof_indices.push_back(dofpair.global_dof_index);
+ }
+
+ IndexSet is_ghost(is_local.size());
+ is_ghost.add_indices(level_dof_indices.begin(),
+ level_dof_indices.end());
+
+ AssertThrow(level_dof_indices.size() == is_ghost.n_elements(),
+ ExcMessage("Size does not match!"));
+
+ const auto index_owner =
+ Utilities::MPI::compute_index_owner(is_local,
+ is_ghost,
+ tria->get_communicator());
+
+ AssertThrow(level_dof_indices.size() == index_owner.size(),
+ ExcMessage("Size does not match!"));
+
+ for (unsigned int i = 0; i < index_owner.size(); i++)
+ send_data[index_owner[i]].emplace_back(level,
+ global_dof_indices[i],
+ level_dof_indices[i]);
}
// * send
--- /dev/null
+
+DEAL::FE: FESystem<2>[FE_Q<2>(1)^2]
+DEAL::no. cells: 250
+DEAL::Diff prolongate l1: 0.00000
+DEAL::Diff prolongate l2: 0.00000
+DEAL::Diff prolongate l3: 9.61481e-17
+DEAL::Diff prolongate l4: 2.54384e-16
+DEAL::Diff prolongate l5: 2.97645e-16
+DEAL::Diff prolongate l6: 3.06884e-16
+DEAL::Diff restrict l1: 0.00000
+DEAL::Diff restrict add l1: 0.00000
+DEAL::Diff restrict l2: 0.00000
+DEAL::Diff restrict add l2: 0.00000
+DEAL::Diff restrict l3: 1.04148e-15
+DEAL::Diff restrict add l3: 8.88178e-16
+DEAL::Diff restrict l4: 9.55050e-16
+DEAL::Diff restrict add l4: 9.42055e-16
+DEAL::Diff restrict l5: 1.42178e-15
+DEAL::Diff restrict add l5: 1.60119e-15
+DEAL::Diff restrict l6: 1.33227e-15
+DEAL::Diff restrict add l6: 1.50598e-15
+DEAL::
+DEAL::no. cells: 1887
+DEAL::Diff prolongate l1: 5.55112e-17
+DEAL::Diff prolongate l2: 2.71948e-16
+DEAL::Diff prolongate l3: 5.75552e-16
+DEAL::Diff prolongate l4: 6.31096e-16
+DEAL::Diff prolongate l5: 7.90669e-16
+DEAL::Diff prolongate l6: 1.02785e-15
+DEAL::Diff restrict l1: 8.30815e-16
+DEAL::Diff restrict add l1: 8.88178e-16
+DEAL::Diff restrict l2: 1.42178e-15
+DEAL::Diff restrict add l2: 1.77636e-15
+DEAL::Diff restrict l3: 3.87307e-15
+DEAL::Diff restrict add l3: 4.02752e-15
+DEAL::Diff restrict l4: 3.21773e-15
+DEAL::Diff restrict add l4: 3.74196e-15
+DEAL::Diff restrict l5: 3.55098e-15
+DEAL::Diff restrict add l5: 4.13027e-15
+DEAL::Diff restrict l6: 4.89947e-15
+DEAL::Diff restrict add l6: 5.45706e-15
+DEAL::
+DEAL::FE: FESystem<2>[FE_Q<2>(3)^2]
+DEAL::no. cells: 88
+DEAL::Diff prolongate l1: 4.25485e-16
+DEAL::Diff prolongate l2: 1.29017e-15
+DEAL::Diff prolongate l3: 2.00231e-15
+DEAL::Diff prolongate l4: 2.31946e-15
+DEAL::Diff prolongate l5: 1.85168e-15
+DEAL::Diff restrict l1: 7.69185e-16
+DEAL::Diff restrict add l1: 7.69185e-16
+DEAL::Diff restrict l2: 2.67435e-15
+DEAL::Diff restrict add l2: 2.85221e-15
+DEAL::Diff restrict l3: 4.72265e-15
+DEAL::Diff restrict add l3: 5.11185e-15
+DEAL::Diff restrict l4: 5.29601e-15
+DEAL::Diff restrict add l4: 6.06877e-15
+DEAL::Diff restrict l5: 4.22786e-15
+DEAL::Diff restrict add l5: 4.66822e-15
+DEAL::
+DEAL::no. cells: 510
+DEAL::Diff prolongate l1: 1.95148e-15
+DEAL::Diff prolongate l2: 4.10380e-15
+DEAL::Diff prolongate l3: 3.44413e-15
+DEAL::Diff prolongate l4: 4.64940e-15
+DEAL::Diff prolongate l5: 5.43550e-15
+DEAL::Diff restrict l1: 5.04518e-15
+DEAL::Diff restrict add l1: 5.79447e-15
+DEAL::Diff restrict l2: 1.14867e-14
+DEAL::Diff restrict add l2: 1.25214e-14
+DEAL::Diff restrict l3: 1.02271e-14
+DEAL::Diff restrict add l3: 1.09412e-14
+DEAL::Diff restrict l4: 1.17679e-14
+DEAL::Diff restrict add l4: 1.32526e-14
+DEAL::Diff restrict l5: 1.38716e-14
+DEAL::Diff restrict add l5: 1.55288e-14
+DEAL::
+DEAL::FE: FESystem<3>[FE_Q<3>(1)^2]
+DEAL::no. cells: 15
+DEAL::Diff prolongate l1: 0.00000
+DEAL::Diff prolongate l2: 0.00000
+DEAL::Diff restrict l1: 0.00000
+DEAL::Diff restrict add l1: 0.00000
+DEAL::Diff restrict l2: 0.00000
+DEAL::Diff restrict add l2: 0.00000
+DEAL::
+DEAL::no. cells: 1791
+DEAL::Diff prolongate l1: 2.41669e-16
+DEAL::Diff prolongate l2: 3.91794e-16
+DEAL::Diff prolongate l3: 1.01512e-15
+DEAL::Diff prolongate l4: 1.55516e-15
+DEAL::Diff restrict l1: 2.51215e-15
+DEAL::Diff restrict add l1: 2.17558e-15
+DEAL::Diff restrict l2: 2.94575e-15
+DEAL::Diff restrict add l2: 2.94575e-15
+DEAL::Diff restrict l3: 6.51541e-15
+DEAL::Diff restrict add l3: 7.00761e-15
+DEAL::Diff restrict l4: 9.59492e-15
+DEAL::Diff restrict add l4: 1.07043e-14
+DEAL::
+DEAL::FE: FESystem<3>[FE_Q<3>(3)^2]
+DEAL::no. cells: 1
+DEAL::
+DEAL::no. cells: 223
+DEAL::Diff prolongate l1: 6.16822e-15
+DEAL::Diff prolongate l2: 9.02624e-15
+DEAL::Diff prolongate l3: 6.05143e-15
+DEAL::Diff restrict l1: 2.49384e-14
+DEAL::Diff restrict add l1: 2.64056e-14
+DEAL::Diff restrict l2: 3.99229e-14
+DEAL::Diff restrict add l2: 4.12483e-14
+DEAL::Diff restrict l3: 3.05119e-14
+DEAL::Diff restrict add l3: 3.08762e-14
+DEAL::
+DEAL::FE: FESystem<2>[FE_Q<2>(2)^2]
+DEAL::no. cells: 250
+DEAL::Diff prolongate l1: 4.16500e-08
+DEAL::Diff prolongate l2: 1.14933e-07
+DEAL::Diff prolongate l3: 4.36323e-07
+DEAL::Diff prolongate l4: 4.56555e-07
+DEAL::Diff prolongate l5: 6.44943e-07
+DEAL::Diff prolongate l6: 6.88182e-07
+DEAL::Diff restrict l1: 1.30930e-07
+DEAL::Diff restrict add l1: 1.61321e-07
+DEAL::Diff restrict l2: 3.31534e-07
+DEAL::Diff restrict add l2: 3.70787e-07
+DEAL::Diff restrict l3: 9.06752e-07
+DEAL::Diff restrict add l3: 1.06748e-06
+DEAL::Diff restrict l4: 1.12259e-06
+DEAL::Diff restrict add l4: 1.27800e-06
+DEAL::Diff restrict l5: 1.32543e-06
+DEAL::Diff restrict add l5: 1.66341e-06
+DEAL::Diff restrict l6: 1.39826e-06
+DEAL::Diff restrict add l6: 1.79262e-06
+DEAL::
+DEAL::no. cells: 1887
+DEAL::Diff prolongate l1: 3.33987e-07
+DEAL::Diff prolongate l2: 7.05824e-07
+DEAL::Diff prolongate l3: 1.48105e-06
+DEAL::Diff prolongate l4: 1.34450e-06
+DEAL::Diff prolongate l5: 1.51185e-06
+DEAL::Diff prolongate l6: 1.87422e-06
+DEAL::Diff restrict l1: 6.89860e-07
+DEAL::Diff restrict add l1: 9.08311e-07
+DEAL::Diff restrict l2: 1.47143e-06
+DEAL::Diff restrict add l2: 1.75614e-06
+DEAL::Diff restrict l3: 3.26568e-06
+DEAL::Diff restrict add l3: 3.98218e-06
+DEAL::Diff restrict l4: 2.87348e-06
+DEAL::Diff restrict add l4: 3.39202e-06
+DEAL::Diff restrict l5: 3.24632e-06
+DEAL::Diff restrict add l5: 3.95530e-06
+DEAL::Diff restrict l6: 3.95620e-06
+DEAL::Diff restrict add l6: 4.98782e-06
+DEAL::
+DEAL::FE: FESystem<3>[FE_Q<3>(2)^2]
+DEAL::no. cells: 15
+DEAL::Diff prolongate l1: 1.41616e-07
+DEAL::Diff prolongate l2: 2.80670e-07
+DEAL::Diff restrict l1: 6.42811e-07
+DEAL::Diff restrict add l1: 6.42811e-07
+DEAL::Diff restrict l2: 1.11616e-06
+DEAL::Diff restrict add l2: 1.15446e-06
+DEAL::
+DEAL::no. cells: 1791
+DEAL::Diff prolongate l1: 1.19894e-06
+DEAL::Diff prolongate l2: 1.53705e-06
+DEAL::Diff prolongate l3: 2.59694e-06
+DEAL::Diff prolongate l4: 3.66785e-06
+DEAL::Diff restrict l1: 3.54901e-06
+DEAL::Diff restrict add l1: 3.80668e-06
+DEAL::Diff restrict l2: 4.41015e-06
+DEAL::Diff restrict add l2: 4.66617e-06
+DEAL::Diff restrict l3: 7.02679e-06
+DEAL::Diff restrict add l3: 7.83739e-06
+DEAL::Diff restrict l4: 9.56069e-06
+DEAL::Diff restrict add l4: 1.05378e-05
+DEAL::