From 21b0ed43097da0a35521db496545e53ea1037786 Mon Sep 17 00:00:00 2001
From: Wolfgang Bangerth
Date: Wed, 25 May 2022 12:28:24 -0600
Subject: [PATCH] Consensus Algorithms: Move template functions into .h file.

---
 .../deal.II/base/mpi_consensus_algorithms.h   | 894 +++++++++++++++++
 .../base/mpi_consensus_algorithms.templates.h | 904 +-----------------
 .../fe/fe_tools_extrapolate.templates.h       |   1 -
 source/base/CMakeLists.txt                    |   1 -
 source/base/mpi.cc                            |   1 -
 .../base/mpi_compute_index_owner_internal.cc  |   2 +
 source/base/mpi_consensus_algorithms.cc       | 106 --
 source/base/mpi_noncontiguous_partitioner.cc  |   3 +
 source/base/mpi_remote_point_evaluation.cc    |   1 -
 source/base/partitioner.cc                    |   3 +
 source/grid/grid_tools.cc                     |   1 -
 source/grid/tria_description.cc               |   1 -
 source/matrix_free/vector_data_exchange.cc    |   6 +-
 source/multigrid/mg_tools.cc                  |   1 -
 14 files changed, 905 insertions(+), 1020 deletions(-)
 delete mode 100644 source/base/mpi_consensus_algorithms.cc

diff --git a/include/deal.II/base/mpi_consensus_algorithms.h b/include/deal.II/base/mpi_consensus_algorithms.h
index 4e98650285..411317afdc 100644
--- a/include/deal.II/base/mpi_consensus_algorithms.h
+++ b/include/deal.II/base/mpi_consensus_algorithms.h
@@ -1046,6 +1046,900 @@ namespace Utilities
 } // end of namespace Utilities
 
+
+#ifndef DOXYGEN
+
+// ----------------- Implementation of template functions
+
+namespace Utilities
+{
+  namespace MPI
+  {
+    namespace ConsensusAlgorithms
+    {
+      namespace
+      {
+        /**
+         * Return whether a vector of targets (MPI ranks) has only unique
+         * elements.
+         *
+         * This function is only used within assertions, which causes GCC
+         * to issue a warning in release mode that -Werror then turns into
+         * an error. We suppress this by using the [[gnu::unused]] attribute
+         * (because the [[maybe_unused]] attribute is only supported from
+         * C++17 forward).
+         *
+         * Unfortunately, in contrast to what the standard says, the Microsoft
+         * compiler does not ignore the gnu::unused attribute as it should,
+         * and then produces an error of its own. So we disable the attribute
+         * for that compiler.
+         */
+# ifndef DEAL_II_MSVC
+        [[gnu::unused]]
+# endif
+        inline bool
+        has_unique_elements(const std::vector<unsigned int> &targets)
+        {
+          std::vector<unsigned int> my_destinations = targets;
+          std::sort(my_destinations.begin(), my_destinations.end());
+          return (std::adjacent_find(my_destinations.begin(),
+                                     my_destinations.end()) ==
+                  my_destinations.end());
+        }
+      } // namespace
+
+
+
+      template <typename RequestType, typename AnswerType>
+      void
+      Process<RequestType, AnswerType>::answer_request(const unsigned int,
+                                                       const RequestType &,
+                                                       AnswerType &)
+      {
+        // nothing to do
+      }
+
+
+
+      template <typename RequestType, typename AnswerType>
+      void
+      Process<RequestType, AnswerType>::create_request(const unsigned int,
+                                                       RequestType &)
+      {
+        // nothing to do
+      }
+
+
+
+      template <typename RequestType, typename AnswerType>
+      void
+      Process<RequestType, AnswerType>::read_answer(const unsigned int,
+                                                    const AnswerType &)
+      {
+        // nothing to do
+      }
+
+
+
+      template <typename RequestType, typename AnswerType>
+      Interface<RequestType, AnswerType>::Interface(
+        Process<RequestType, AnswerType> &process,
+        const MPI_Comm &                  comm)
+        : process(&process)
+        , comm(comm)
+      {}
+
+
+
+      template <typename RequestType, typename AnswerType>
+      Interface<RequestType, AnswerType>::Interface()
+        : process(nullptr)
+        , comm(MPI_COMM_NULL)
+      {}
+
+
+
+      template <typename RequestType, typename AnswerType>
+      std::vector<unsigned int>
+      Interface<RequestType, AnswerType>::run()
+      {
+        Assert(process != nullptr,
+               ExcMessage("This function can only be called if the "
+                          "deprecated non-default constructor of this class "
+                          "has previously been called to set the Process "
+                          "object and a communicator."));
+        return run(*process, comm);
+      }
+
+
+
+      template <typename RequestType, typename AnswerType>
+      std::vector<unsigned int>
+      Interface<RequestType, AnswerType>::run(
+        Process<RequestType, AnswerType> &process,
+        const MPI_Comm &                  comm)
+      {
+        // Unpack the 'process' object and call the function that takes
+        // function objects for all operations.
+        return run(
+          process.compute_targets(),
+          /* create_request: */
+          [&process](const unsigned int target) {
+            RequestType request;
+            process.create_request(target, request);
+            return request;
+          },
+          /* answer_request: */
+          [&process](const unsigned int source, const RequestType &request) {
+            AnswerType answer;
+            process.answer_request(source, request, answer);
+            return answer;
+          },
+          /* process_answer: */
+          [&process](const unsigned int target, const AnswerType &answer) {
+            process.read_answer(target, answer);
+          },
+          comm);
+      }
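
The adapter above reduces the object-oriented Process interface to four
function objects. For illustration, this is the call style it forwards to --
a sketch only, not part of the patch: the payload types, the neighbor
pattern, and the communicator 'comm' are assumptions made up for this
example, which assumes deal.II headers and an initialized MPI context.

    // Each rank asks its right neighbor to double a number.
    const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
    const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);

    const std::vector<unsigned int> targets = {(my_rank + 1) % n_ranks};

    Utilities::MPI::ConsensusAlgorithms::NBX<int, double>().run(
      targets,
      /* create_request: */
      [&](const unsigned int) { return static_cast<int>(my_rank); },
      /* answer_request: */
      [](const unsigned int, const int &request) { return 2.0 * request; },
      /* process_answer: */
      [](const unsigned int source, const double &answer) {
        (void)source;
        (void)answer; // e.g. store the doubled value received back
      },
      comm);

The request and answer types only need to be serializable by
Utilities::pack(); built-in types like the int/double pair here work out of
the box.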
+
+
+
+      template <typename RequestType, typename AnswerType>
+      NBX<RequestType, AnswerType>::NBX(
+        Process<RequestType, AnswerType> &process,
+        const MPI_Comm &                  comm)
+        : Interface<RequestType, AnswerType>(process, comm)
+      {}
+
+
+
+      template <typename RequestType, typename AnswerType>
+      std::vector<unsigned int>
+      NBX<RequestType, AnswerType>::run(
+        const std::vector<unsigned int> &                     targets,
+        const std::function<RequestType(const unsigned int)> &create_request,
+        const std::function<AnswerType(const unsigned int,
+                                       const RequestType &)> &answer_request,
+        const std::function<void(const unsigned int, const AnswerType &)>
+          &             process_answer,
+        const MPI_Comm &comm)
+      {
+        Assert(has_unique_elements(targets),
+               ExcMessage("The consensus algorithms expect that each process "
+                          "only sends a single message to another process, "
+                          "but the targets provided include duplicates."));
+
+        static CollectiveMutex      mutex;
+        CollectiveMutex::ScopedLock lock(mutex, comm);
+
+        // 1) Send data to identified targets and start receiving
+        //    the answers from these very same processes.
+        start_communication(targets, create_request, comm);
+
+        // 2) Until all posted receive operations are known to have completed,
+        //    answer requests and keep checking whether all requests of
+        //    this process have been answered.
+        //
+        //    The requests that we catch in the answer_requests() function
+        //    originate elsewhere, that is, they are not in response
+        //    to our own messages.
+        //
+        //    Note also that we may not catch all incoming requests in
+        //    the following two lines: our own requests may have been
+        //    satisfied before we've dealt with all incoming requests.
+        //    That's ok: We will get around to dealing with all remaining
+        //    messages later. We just want to move on to the next step
+        //    as early as possible.
+        while (all_locally_originated_receives_are_completed(process_answer,
+                                                             comm) == false)
+          maybe_answer_one_request(answer_request, comm);
+
+        // 3) Signal to all other processes that all requests of this process
+        //    have been answered.
+        signal_finish(comm);
+
+        // 4) Nevertheless, this process has to keep on answering (potential)
+        //    incoming requests until all processes have received the
+        //    answer to all requests.
+        while (all_remotely_originated_receives_are_completed() == false)
+          maybe_answer_one_request(answer_request, comm);
+
+        // 5) Process the answers to all requests and clean up.
+        clean_up_and_end_communication(comm);
+
+        return std::vector<unsigned int>(requesting_processes.begin(),
+                                         requesting_processes.end());
+      }
+
+
+
+      template <typename RequestType, typename AnswerType>
+      void
+      NBX<RequestType, AnswerType>::start_communication(
+        const std::vector<unsigned int> &                     targets,
+        const std::function<RequestType(const unsigned int)> &create_request,
+        const MPI_Comm &                                      comm)
+      {
+# ifdef DEAL_II_WITH_MPI
+        const auto n_targets = targets.size();
+
+        const int tag_request = Utilities::MPI::internal::Tags::
+          consensus_algorithm_nbx_answer_request;
+
+        // 1) allocate memory
+        send_requests.resize(n_targets);
+        send_buffers.resize(n_targets);
+
+        {
+          // 2) send requests
+          for (unsigned int index = 0; index < n_targets; ++index)
+            {
+              const unsigned int rank = targets[index];
+              AssertIndexRange(rank, Utilities::MPI::n_mpi_processes(comm));
+
+              auto &send_buffer = send_buffers[index];
+              send_buffer =
+                (create_request ? Utilities::pack(create_request(rank), false) :
+                                  std::vector<char>());
+
+              // Post a request to send data
+              auto ierr = MPI_Isend(send_buffer.data(),
+                                    send_buffer.size(),
+                                    MPI_CHAR,
+                                    rank,
+                                    tag_request,
+                                    comm,
+                                    &send_requests[index]);
+              AssertThrowMPI(ierr);
+            }
+
+          // Also record that we expect an answer from each target we sent
+          // a request to:
+          n_outstanding_answers = n_targets;
+        }
+# else
+        (void)targets;
+        (void)create_request;
+        (void)comm;
+# endif
+      }
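
All payloads travel as plain MPI_CHAR buffers; the translation to and from
user types happens via Utilities::pack()/Utilities::unpack(), as in
start_communication() above. A minimal round trip of that serialization step
(a sketch; the concrete payload type is an arbitrary choice):

    const std::pair<unsigned int, double> original{42u, 3.14};

    // 'false' disables compression, matching the calls in this file.
    const std::vector<char> buffer = Utilities::pack(original, false);

    const auto restored =
      Utilities::unpack<std::pair<unsigned int, double>>(buffer, false);
    Assert(restored == original, ExcInternalError());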
+
+
+
+      template <typename RequestType, typename AnswerType>
+      bool
+      NBX<RequestType, AnswerType>::
+        all_locally_originated_receives_are_completed(
+          const std::function<void(const unsigned int, const AnswerType &)>
+            &             process_answer,
+          const MPI_Comm &comm)
+      {
+# ifdef DEAL_II_WITH_MPI
+        // We know that all requests have come in when we have pending
+        // messages from all targets with the right tag (some of which we may
+        // have already taken care of below, after discovering their
+        // existence). We can check for pending messages with MPI_Iprobe,
+        // which returns immediately with a return code that indicates whether
+        // it has found a message from any process with a given tag.
+        if (n_outstanding_answers == 0)
+          return true;
+        else
+          {
+            const int tag_deliver = Utilities::MPI::internal::Tags::
+              consensus_algorithm_nbx_process_deliver;
+
+            int        request_is_pending;
+            MPI_Status status;
+            const auto ierr = MPI_Iprobe(
+              MPI_ANY_SOURCE, tag_deliver, comm, &request_is_pending, &status);
+            AssertThrowMPI(ierr);
+
+            // If there is no pending message with this tag,
+            // then we are clearly not done receiving everything
+            // yet -- so return false.
+            if (request_is_pending == 0)
+              return false;
+            else
+              {
+                // OK, so we have gotten an answer to one of our requests
+                // from one rank. Let us process it:
+                const auto target = status.MPI_SOURCE;
+
+                // Query the size of the message, allocate enough memory,
+                // receive the data, and process it.
+                int message_size;
+                {
+                  const int ierr =
+                    MPI_Get_count(&status, MPI_CHAR, &message_size);
+                  AssertThrowMPI(ierr);
+                }
+                std::vector<char> recv_buffer(message_size);
+
+                {
+                  const int ierr = MPI_Recv(recv_buffer.data(),
+                                            recv_buffer.size(),
+                                            MPI_CHAR,
+                                            target,
+                                            tag_deliver,
+                                            comm,
+                                            MPI_STATUS_IGNORE);
+                  AssertThrowMPI(ierr);
+                }
+
+                if (process_answer)
+                  process_answer(target,
+                                 Utilities::unpack<AnswerType>(recv_buffer,
+                                                               false));
+
+                // Finally, remove this rank from the list of outstanding
+                // targets:
+                --n_outstanding_answers;
+
+                // We could do another go-around from the top of this
+                // else-branch to see whether there are actually other messages
+                // that are currently pending. But that would mean spending
+                // substantial time in receiving answers while we should also
+                // be sending answers to requests we have received from other
+                // places. So let it be enough for now. If there are
+                // outstanding answers, we will get back to this function
+                // before long and can take care of them then.
+                return (n_outstanding_answers == 0);
+              }
+          }
+
+# else
+        (void)process_answer;
+        (void)comm;
+
+        return true;
+# endif
+      }
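
The function above combines three MPI primitives: probe for a message, query
its size, then receive exactly that many bytes. The same sequence in
isolation, as a self-contained toy program (plain MPI, no deal.II; every
name here is local to the sketch):

    #include <mpi.h>

    #include <vector>

    int main(int argc, char **argv)
    {
      MPI_Init(&argc, &argv);

      // A rank sends one message to itself ...
      const int   tag       = 7;
      const char  payload[] = "hello";
      MPI_Request request;
      MPI_Isend(
        payload, sizeof(payload), MPI_CHAR, 0, tag, MPI_COMM_SELF, &request);

      // ... and drains it with the Iprobe/Get_count/Recv sequence.
      int        flag = 0;
      MPI_Status status;
      while (flag == 0) // poll until the message becomes visible
        MPI_Iprobe(MPI_ANY_SOURCE, tag, MPI_COMM_SELF, &flag, &status);

      int size = 0;
      MPI_Get_count(&status, MPI_CHAR, &size);
      std::vector<char> buffer(size);
      MPI_Recv(buffer.data(),
               size,
               MPI_CHAR,
               status.MPI_SOURCE,
               tag,
               MPI_COMM_SELF,
               MPI_STATUS_IGNORE);

      MPI_Wait(&request, MPI_STATUS_IGNORE);
      MPI_Finalize();
      return 0;
    }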
+
+
+
+      template <typename RequestType, typename AnswerType>
+      void
+      NBX<RequestType, AnswerType>::maybe_answer_one_request(
+        const std::function<AnswerType(const unsigned int,
+                                       const RequestType &)> &answer_request,
+        const MPI_Comm &                                      comm)
+      {
+# ifdef DEAL_II_WITH_MPI
+
+        const int tag_request = Utilities::MPI::internal::Tags::
+          consensus_algorithm_nbx_answer_request;
+        const int tag_deliver = Utilities::MPI::internal::Tags::
+          consensus_algorithm_nbx_process_deliver;
+
+        // Check if there is a request pending. By selecting the
+        // tag_request tag, these are other processes asking for
+        // our own replies, not these other processes' replies
+        // to our own requests.
+        //
+        // There may be multiple such pending messages. We
+        // only answer one.
+        MPI_Status status;
+        int        request_is_pending;
+        const auto ierr = MPI_Iprobe(
+          MPI_ANY_SOURCE, tag_request, comm, &request_is_pending, &status);
+        AssertThrowMPI(ierr);
+
+        if (request_is_pending != 0)
+          {
+            // Get the rank of the requesting process and add it to the
+            // set of requesting processes (which we assert does not
+            // already contain it).
+            const auto other_rank = status.MPI_SOURCE;
+
+            Assert(requesting_processes.find(other_rank) ==
+                     requesting_processes.end(),
+                   ExcMessage("Process is requesting a second time!"));
+            requesting_processes.insert(other_rank);
+
+            // get size of incoming message
+            int  number_amount;
+            auto ierr = MPI_Get_count(&status, MPI_CHAR, &number_amount);
+            AssertThrowMPI(ierr);
+
+            // allocate memory for incoming message
+            std::vector<char> buffer_recv(number_amount);
+            ierr = MPI_Recv(buffer_recv.data(),
+                            number_amount,
+                            MPI_CHAR,
+                            other_rank,
+                            tag_request,
+                            comm,
+                            MPI_STATUS_IGNORE);
+            AssertThrowMPI(ierr);
+
+            // Allocate memory for an answer message to the current request,
+            // and ask the 'process' object to produce an answer:
+            request_buffers.emplace_back(
+              std::make_unique<std::vector<char>>());
+            auto &request_buffer = *request_buffers.back();
+            if (answer_request)
+              request_buffer =
+                Utilities::pack(answer_request(other_rank,
+                                               Utilities::unpack<RequestType>(
+                                                 buffer_recv, false)),
+                                false);
+
+            // Then initiate sending the answer back to the requester.
+            request_requests.emplace_back(std::make_unique<MPI_Request>());
+            ierr = MPI_Isend(request_buffer.data(),
+                             request_buffer.size(),
+                             MPI_CHAR,
+                             other_rank,
+                             tag_deliver,
+                             comm,
+                             request_requests.back().get());
+            AssertThrowMPI(ierr);
+          }
+# else
+        (void)answer_request;
+        (void)comm;
+# endif
+      }
+
+
+
+      template <typename RequestType, typename AnswerType>
+      void
+      NBX<RequestType, AnswerType>::signal_finish(const MPI_Comm &comm)
+      {
+# ifdef DEAL_II_WITH_MPI
+        const auto ierr = MPI_Ibarrier(comm, &barrier_request);
+        AssertThrowMPI(ierr);
+# else
+        (void)comm;
+# endif
+      }
+
+
+
+      template <typename RequestType, typename AnswerType>
+      bool
+      NBX<RequestType,
+          AnswerType>::all_remotely_originated_receives_are_completed()
+      {
+# ifdef DEAL_II_WITH_MPI
+        int        all_ranks_reached_barrier;
+        const auto ierr = MPI_Test(&barrier_request,
+                                   &all_ranks_reached_barrier,
+                                   MPI_STATUSES_IGNORE);
+        AssertThrowMPI(ierr);
+        return all_ranks_reached_barrier != 0;
+# else
+        return true;
+# endif
+      }
+
+
+
+      template <typename RequestType, typename AnswerType>
+      void
+      NBX<RequestType, AnswerType>::clean_up_and_end_communication(
+        const MPI_Comm &comm)
+      {
+        (void)comm;
+# ifdef DEAL_II_WITH_MPI
+        // clean up
+        {
+          if (send_requests.size() > 0)
+            {
+              const int ierr = MPI_Waitall(send_requests.size(),
+                                           send_requests.data(),
+                                           MPI_STATUSES_IGNORE);
+              AssertThrowMPI(ierr);
+            }
+
+          int ierr = MPI_Wait(&barrier_request, MPI_STATUS_IGNORE);
+          AssertThrowMPI(ierr);
+
+          for (auto &i : request_requests)
+            {
+              ierr = MPI_Wait(i.get(), MPI_STATUS_IGNORE);
+              AssertThrowMPI(ierr);
+            }
+
+#  ifdef DEBUG
+          // note: MPI_Ibarrier seems to cause problems during testing; this
+          // additional barrier seems to help
+          ierr = MPI_Barrier(comm);
+          AssertThrowMPI(ierr);
+#  endif
+        }
+# endif
+      }
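
Steps 2-4 of NBX::run() implement the termination protocol of the NBX
algorithm (Hoefler, Siebert, Lumsdaine: "Scalable communication protocols
for dynamic sparse data exchange", 2010): keep serving foreign requests,
enter a non-blocking barrier once one's own answers have arrived, and keep
serving until the barrier completes. Stripped of the deal.II machinery, the
control flow looks roughly like the sketch below; the two callables are
hypothetical stand-ins for maybe_answer_one_request() and
all_locally_originated_receives_are_completed() above.

    #include <mpi.h>

    template <typename ServeFn, typename DoneFn>
    void nbx_terminate(const MPI_Comm comm, ServeFn serve_one, DoneFn locally_done)
    {
      // Keep serving foreign requests until our own answers are in ...
      while (!locally_done())
        serve_one();

      // ... then announce completion without blocking ...
      MPI_Request barrier;
      MPI_Ibarrier(comm, &barrier);

      // ... and keep serving until every process has announced completion.
      int all_done = 0;
      while (all_done == 0)
        {
          serve_one();
          MPI_Test(&barrier, &all_done, MPI_STATUS_IGNORE);
        }
    }

The non-blocking barrier is what makes the algorithm "consensus-free": no
process needs to know in advance how many requests it will receive.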
+
+
+
+      template <typename RequestType, typename AnswerType>
+      PEX<RequestType, AnswerType>::PEX(
+        Process<RequestType, AnswerType> &process,
+        const MPI_Comm &                  comm)
+        : Interface<RequestType, AnswerType>(process, comm)
+      {}
+
+
+
+      template <typename RequestType, typename AnswerType>
+      std::vector<unsigned int>
+      PEX<RequestType, AnswerType>::run(
+        const std::vector<unsigned int> &                     targets,
+        const std::function<RequestType(const unsigned int)> &create_request,
+        const std::function<AnswerType(const unsigned int,
+                                       const RequestType &)> &answer_request,
+        const std::function<void(const unsigned int, const AnswerType &)>
+          &             process_answer,
+        const MPI_Comm &comm)
+      {
+        Assert(has_unique_elements(targets),
+               ExcMessage("The consensus algorithms expect that each process "
+                          "only sends a single message to another process, "
+                          "but the targets provided include duplicates."));
+
+        static CollectiveMutex      mutex;
+        CollectiveMutex::ScopedLock lock(mutex, comm);
+
+        // 1) Send requests and start receiving the answers.
+        //    In particular, determine how many requests we should expect
+        //    on the current process.
+        const unsigned int n_requests =
+          start_communication(targets, create_request, comm);
+
+        // 2) Answer requests:
+        for (unsigned int request = 0; request < n_requests; ++request)
+          answer_one_request(request, answer_request, comm);
+
+        // 3) Process answers:
+        process_incoming_answers(targets.size(), process_answer, comm);
+
+        // 4) Make sure all sends have successfully terminated:
+        clean_up_and_end_communication();
+
+        return std::vector<unsigned int>(requesting_processes.begin(),
+                                         requesting_processes.end());
+      }
+
+
+
+      template <typename RequestType, typename AnswerType>
+      unsigned int
+      PEX<RequestType, AnswerType>::start_communication(
+        const std::vector<unsigned int> &                     targets,
+        const std::function<RequestType(const unsigned int)> &create_request,
+        const MPI_Comm &                                      comm)
+      {
+# ifdef DEAL_II_WITH_MPI
+        const int tag_request = Utilities::MPI::internal::Tags::
+          consensus_algorithm_pex_answer_request;
+
+        // 1) determine with which processes this process wants to communicate
+        const unsigned int n_targets = targets.size();
+
+        // 2) determine who wants to communicate with this process
+        const unsigned int n_sources =
+          compute_n_point_to_point_communications(comm, targets);
+
+        // 3) allocate memory
+        recv_buffers.resize(n_targets);
+        send_buffers.resize(n_targets);
+        send_request_requests.resize(n_targets);
+
+        send_answer_requests.resize(n_sources);
+        requests_buffers.resize(n_sources);
+
+        // 4) send and receive
+        for (unsigned int i = 0; i < n_targets; ++i)
+          {
+            const unsigned int rank = targets[i];
+            AssertIndexRange(rank, Utilities::MPI::n_mpi_processes(comm));
+
+            // pack data which should be sent
+            auto &send_buffer = send_buffers[i];
+            if (create_request)
+              send_buffer = Utilities::pack(create_request(rank), false);
+
+            // start to send data
+            auto ierr = MPI_Isend(send_buffer.data(),
+                                  send_buffer.size(),
+                                  MPI_CHAR,
+                                  rank,
+                                  tag_request,
+                                  comm,
+                                  &send_request_requests[i]);
+            AssertThrowMPI(ierr);
+          }
+
+        return n_sources;
+# else
+        (void)targets;
+        (void)create_request;
+        (void)comm;
+        return 0;
+# endif
+      }
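
PEX works because, unlike NBX, each process learns up front how many
requests it will receive; that number is what
compute_n_point_to_point_communications() provides. One standard way to
compute such a count, shown as an illustrative sketch (deal.II's actual
implementation may differ): every rank contributes a 0/1 flag per
destination, and a reduce-scatter sums each rank's column. 'targets' and
'comm' are assumed as in start_communication() above.

    const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);

    // One slot per potential destination; 1 where we will send a request.
    std::vector<int> send_to(n_ranks, 0);
    for (const unsigned int target : targets)
      send_to[target] = 1;

    // Sum slot r over all ranks and deliver the result to rank r.
    int       n_sources = 0;
    const int ierr      = MPI_Reduce_scatter_block(
      send_to.data(), &n_sources, 1, MPI_INT, MPI_SUM, comm);
    AssertThrowMPI(ierr);
    // n_sources == number of ranks that listed this process as a target.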
+
+
+
+      template <typename RequestType, typename AnswerType>
+      void
+      PEX<RequestType, AnswerType>::answer_one_request(
+        const unsigned int index,
+        const std::function<AnswerType(const unsigned int,
+                                       const RequestType &)> &answer_request,
+        const MPI_Comm &                                      comm)
+      {
+# ifdef DEAL_II_WITH_MPI
+        const int tag_request = Utilities::MPI::internal::Tags::
+          consensus_algorithm_pex_answer_request;
+        const int tag_deliver = Utilities::MPI::internal::Tags::
+          consensus_algorithm_pex_process_deliver;
+
+        // Wait until we have a message ready for retrieval, though we don't
+        // care which process it is from.
+        MPI_Status status;
+        int ierr = MPI_Probe(MPI_ANY_SOURCE, tag_request, comm, &status);
+        AssertThrowMPI(ierr);
+
+        // Get the rank of the incoming message and verify that it makes sense
+        const unsigned int other_rank = status.MPI_SOURCE;
+
+        Assert(requesting_processes.find(other_rank) ==
+                 requesting_processes.end(),
+               ExcMessage("A process is sending a request after a request "
+                          "from the same process has previously already been "
+                          "received. This algorithm does not expect this to "
+                          "happen."));
+        requesting_processes.insert(other_rank);
+
+        // Actually get the incoming message:
+        int number_amount;
+        ierr = MPI_Get_count(&status, MPI_CHAR, &number_amount);
+        AssertThrowMPI(ierr);
+
+        std::vector<char> buffer_recv(number_amount);
+        ierr = MPI_Recv(buffer_recv.data(),
+                        number_amount,
+                        MPI_CHAR,
+                        other_rank,
+                        tag_request,
+                        comm,
+                        &status);
+        AssertThrowMPI(ierr);
+
+        // Process the request by asking the user-provided function for
+        // the answer, then post a send for it.
+        auto &request_buffer = requests_buffers[index];
+        request_buffer =
+          (answer_request ?
+             Utilities::pack(answer_request(other_rank,
+                                            Utilities::unpack<RequestType>(
+                                              buffer_recv, false)),
+                             false) :
+             std::vector<char>());
+
+        ierr = MPI_Isend(request_buffer.data(),
+                         request_buffer.size(),
+                         MPI_CHAR,
+                         other_rank,
+                         tag_deliver,
+                         comm,
+                         &send_answer_requests[index]);
+        AssertThrowMPI(ierr);
+# else
+        (void)answer_request;
+        (void)comm;
+        (void)index;
+# endif
+      }
+
+
+
+      template <typename RequestType, typename AnswerType>
+      void
+      PEX<RequestType, AnswerType>::process_incoming_answers(
+        const unsigned int n_targets,
+        const std::function<void(const unsigned int, const AnswerType &)>
+          &             process_answer,
+        const MPI_Comm &comm)
+      {
+# ifdef DEAL_II_WITH_MPI
+        const int tag_deliver = Utilities::MPI::internal::Tags::
+          consensus_algorithm_pex_process_deliver;
+
+        // We know how many targets we have sent requests to. These
+        // targets will all eventually send us their answers, but
+        // we need not process them in order -- rather, just see what
+        // comes in and then look at message originators' ranks and
+        // message sizes.
+        for (unsigned int i = 0; i < n_targets; ++i)
+          {
+            MPI_Status status;
+            {
+              const int ierr =
+                MPI_Probe(MPI_ANY_SOURCE, tag_deliver, comm, &status);
+              AssertThrowMPI(ierr);
+            }
+
+            const auto other_rank = status.MPI_SOURCE;
+            int        message_size;
+            {
+              const int ierr = MPI_Get_count(&status, MPI_CHAR, &message_size);
+              AssertThrowMPI(ierr);
+            }
+            std::vector<char> recv_buffer(message_size);
+
+            // Now actually receive the answer. Because the MPI_Probe
+            // above blocks until we have a message, we know that the
+            // following MPI_Recv call will immediately succeed.
+            {
+              const int ierr = MPI_Recv(recv_buffer.data(),
+                                        recv_buffer.size(),
+                                        MPI_CHAR,
+                                        other_rank,
+                                        tag_deliver,
+                                        comm,
+                                        MPI_STATUS_IGNORE);
+              AssertThrowMPI(ierr);
+            }
+
+            if (process_answer)
+              process_answer(other_rank,
+                             Utilities::unpack<AnswerType>(recv_buffer,
+                                                           false));
+          }
+# else
+        (void)n_targets;
+        (void)process_answer;
+        (void)comm;
+# endif
+      }
+
+
+
+      template <typename RequestType, typename AnswerType>
+      void
+      PEX<RequestType, AnswerType>::clean_up_and_end_communication()
+      {
+# ifdef DEAL_II_WITH_MPI
+        // Finalize all MPI_Request objects for both the
+        // send-request and receive-answer operations.
+        if (send_request_requests.size() > 0)
+          {
+            const int ierr = MPI_Waitall(send_request_requests.size(),
+                                         send_request_requests.data(),
+                                         MPI_STATUSES_IGNORE);
+            AssertThrowMPI(ierr);
+          }
+
+        // Then also check the send-answer requests.
+        if (send_answer_requests.size() > 0)
+          {
+            const int ierr = MPI_Waitall(send_answer_requests.size(),
+                                         send_answer_requests.data(),
+                                         MPI_STATUSES_IGNORE);
+            AssertThrowMPI(ierr);
+          }
+# endif
+      }
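
Because every create_request/answer_request/process_answer callback is
tested for emptiness before use, a caller can pass empty std::function
objects and run a consensus algorithm purely to discover who wants to talk
to it -- the return value of run(). A sketch of that idiom, with dummy char
payloads and 'targets'/'comm' assumed as before:

    const std::vector<unsigned int> sources =
      Utilities::MPI::ConsensusAlgorithms::PEX<char, char>().run(
        targets,
        {}, // no request payload
        {}, // no answer payload
        {}, // nothing to do with the (empty) answers
        comm);
    // 'sources' lists every rank that named this process in its own
    // 'targets', i.e., the inverse of the communication pattern.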
+
+
+
+      template <typename RequestType, typename AnswerType>
+      Serial<RequestType, AnswerType>::Serial(
+        Process<RequestType, AnswerType> &process,
+        const MPI_Comm &                  comm)
+        : Interface<RequestType, AnswerType>(process, comm)
+      {}
+
+
+
+      template <typename RequestType, typename AnswerType>
+      std::vector<unsigned int>
+      Serial<RequestType, AnswerType>::run(
+        const std::vector<unsigned int> &                     targets,
+        const std::function<RequestType(const unsigned int)> &create_request,
+        const std::function<AnswerType(const unsigned int,
+                                       const RequestType &)> &answer_request,
+        const std::function<void(const unsigned int, const AnswerType &)>
+          &             process_answer,
+        const MPI_Comm &comm)
+      {
+        (void)comm;
+        Assert((Utilities::MPI::job_supports_mpi() == false) ||
+                 (Utilities::MPI::n_mpi_processes(comm) == 1),
+               ExcMessage("You shouldn't use the 'Serial' class on "
+                          "communicators that have more than one process "
+                          "associated with them."));
+
+        // The only valid target for a serial program is itself.
+        if (targets.size() != 0)
+          {
+            Assert(targets.size() == 1,
+                   ExcMessage(
+                     "On a single process, the only valid target "
+                     "is process zero (the process itself), which can only be "
+                     "listed once."));
+            AssertDimension(targets[0], 0);
+
+            // Since the caller indicates that there is a target, and since we
+            // know that it is the current process, let the process send
+            // something to itself.
+            const RequestType request =
+              (create_request ? create_request(0) : RequestType());
+            const AnswerType answer =
+              (answer_request ? answer_request(0, request) : AnswerType());
+
+            if (process_answer)
+              process_answer(0, answer);
+          }
+
+        return targets;
+      }
+
+
+
+      template <typename RequestType, typename AnswerType>
+      Selector<RequestType, AnswerType>::Selector(
+        Process<RequestType, AnswerType> &process,
+        const MPI_Comm &                  comm)
+        : Interface<RequestType, AnswerType>(process, comm)
+      {}
+
+
+
+      template <typename RequestType, typename AnswerType>
+      std::vector<unsigned int>
+      Selector<RequestType, AnswerType>::run(
+        const std::vector<unsigned int> &                     targets,
+        const std::function<RequestType(const unsigned int)> &create_request,
+        const std::function<AnswerType(const unsigned int,
+                                       const RequestType &)> &answer_request,
+        const std::function<void(const unsigned int, const AnswerType &)>
+          &             process_answer,
+        const MPI_Comm &comm)
+      {
+        // Depending on the number of processes we switch between
+        // implementations. We reduce the threshold for debug mode to be
+        // able to test also the non-blocking implementation. This feature
+        // is tested by:
+        // tests/multigrid/transfer_matrix_free_06.with_mpi=true.with_p4est=true.with_trilinos=true.mpirun=10.output
+
+        const unsigned int n_procs = (Utilities::MPI::job_supports_mpi() ?
+                                        Utilities::MPI::n_mpi_processes(comm) :
+                                        1);
+# ifdef DEAL_II_WITH_MPI
+#  ifdef DEBUG
+        if (n_procs > 10)
+#  else
+        if (n_procs > 99)
+#  endif
+          consensus_algo.reset(new NBX<RequestType, AnswerType>());
+        else
+# endif
+          if (n_procs > 1)
+          consensus_algo.reset(new PEX<RequestType, AnswerType>());
+        else
+          consensus_algo.reset(new Serial<RequestType, AnswerType>());
+
+        return consensus_algo->run(
+          targets, create_request, answer_request, process_answer, comm);
+      }
+
+
+    } // namespace ConsensusAlgorithms
+  }   // end of namespace MPI
+}     // end of namespace Utilities
+
+#endif // DOXYGEN
+
+
 DEAL_II_NAMESPACE_CLOSE
 
 #endif
diff --git a/include/deal.II/base/mpi_consensus_algorithms.templates.h b/include/deal.II/base/mpi_consensus_algorithms.templates.h
index 004dc90791..bdd817cff5 100644
--- a/include/deal.II/base/mpi_consensus_algorithms.templates.h
+++ b/include/deal.II/base/mpi_consensus_algorithms.templates.h
@@ -18,908 +18,8 @@
 
 #include 
 
-#include 
-#include 
-#include 
-#include 
-#include 
+DEAL_II_WARNING(
+  "This file is deprecated. Simply use <deal.II/base/mpi_consensus_algorithms.h>.")
 
-#include 
-#include 
-#include 
-#include 
-
-#include 
-
-DEAL_II_NAMESPACE_OPEN
-
-namespace Utilities
-{
-  namespace MPI
-  {
-    namespace ConsensusAlgorithms
-    {
-      namespace
-      {
-        /**
-         * Return whether a vector of targets (MPI ranks) has only unique
-         * elements.
- * - * This function is only used within assertions, which causes GCC - * to issue a warning in release mode that due to -Werror then causes an - * error. We suppress this by using the [[gnu::unused]] error (because - * the - * [[maybe_unused]] attribute is only supported from C++17 forward). - * - * Unfortunately, in contrast to what the standard says, the Microsoft - * compiler does not ignore the gnu::unused attribute as it should, - * and then produces an error of its own. So we disable the attribute - * for that compiler. - */ -#ifndef DEAL_II_MSVC - [[gnu::unused]] -#endif - bool - has_unique_elements(const std::vector &targets) - { - std::vector my_destinations = targets; - std::sort(my_destinations.begin(), my_destinations.end()); - return (std::adjacent_find(my_destinations.begin(), - my_destinations.end()) == - my_destinations.end()); - } - } // namespace - - - - template - void - Process::answer_request(const unsigned int, - const RequestType &, - AnswerType &) - { - // nothing to do - } - - - - template - void - Process::create_request(const unsigned int, - RequestType &) - { - // nothing to do - } - - - - template - void - Process::read_answer(const unsigned int, - const AnswerType &) - { - // nothing to do - } - - - - template - Interface::Interface( - Process &process, - const MPI_Comm & comm) - : process(&process) - , comm(comm) - {} - - - - template - Interface::Interface() - : process(nullptr) - , comm(MPI_COMM_NULL) - {} - - - - template - std::vector - Interface::run() - { - Assert(process != nullptr, - ExcMessage("This function can only be called if the " - "deprecated non-default constructor of this class " - "has previously been called to set the Process " - "object and a communicator.")); - return run(*process, comm); - } - - - - template - std::vector - Interface::run( - Process &process, - const MPI_Comm & comm) - { - // Unpack the 'process' object and call the function that takes - // function objects for all operations. - return run( - process.compute_targets(), - /* create_request: */ - [&process](const unsigned int target) { - RequestType request; - process.create_request(target, request); - return request; - }, - /* answer_request: */ - [&process](const unsigned int source, const RequestType &request) { - AnswerType answer; - process.answer_request(source, request, answer); - return answer; - }, - /* process_answer: */ - [&process](const unsigned int target, const AnswerType &answer) { - process.read_answer(target, answer); - }, - comm); - } - - - - template - NBX::NBX( - Process &process, - const MPI_Comm & comm) - : Interface(process, comm) - {} - - - - template - std::vector - NBX::run( - const std::vector & targets, - const std::function &create_request, - const std::function - &answer_request, - const std::function - & process_answer, - const MPI_Comm &comm) - { - Assert(has_unique_elements(targets), - ExcMessage("The consensus algorithms expect that each process " - "only sends a single message to another process, " - "but the targets provided include duplicates.")); - - static CollectiveMutex mutex; - CollectiveMutex::ScopedLock lock(mutex, comm); - - // 1) Send data to identified targets and start receiving - // the answers from these very same processes. - start_communication(targets, create_request, comm); - - // 2) Until all posted receive operations are known to have completed, - // answer requests and keep checking whether all requests of - // this process have been answered. 
- // - // The requests that we catch in the answer_requests() function - // originate elsewhere, that is, they are not in response - // to our own messages - // - // Note also that we may not catch all incoming requests in - // the following two lines: our own requests may have been - // satisfied before we've dealt with all incoming requests. - // That's ok: We will get around to dealing with all remaining - // message later. We just want to move on to the next step - // as early as possible. - while (all_locally_originated_receives_are_completed(process_answer, - comm) == false) - maybe_answer_one_request(answer_request, comm); - - // 3) Signal to all other processes that all requests of this process - // have been answered - signal_finish(comm); - - // 4) Nevertheless, this process has to keep on answering (potential) - // incoming requests until all processes have received the - // answer to all requests - while (all_remotely_originated_receives_are_completed() == false) - maybe_answer_one_request(answer_request, comm); - - // 5) process the answer to all requests - clean_up_and_end_communication(comm); - - return std::vector(requesting_processes.begin(), - requesting_processes.end()); - } - - - - template - void - NBX::start_communication( - const std::vector & targets, - const std::function &create_request, - const MPI_Comm & comm) - { -#ifdef DEAL_II_WITH_MPI - // 1) - const auto n_targets = targets.size(); - - const int tag_request = Utilities::MPI::internal::Tags:: - consensus_algorithm_nbx_answer_request; - - // 2) allocate memory - send_requests.resize(n_targets); - send_buffers.resize(n_targets); - - { - // 4) send and receive - for (unsigned int index = 0; index < n_targets; ++index) - { - const unsigned int rank = targets[index]; - AssertIndexRange(rank, Utilities::MPI::n_mpi_processes(comm)); - - auto &send_buffer = send_buffers[index]; - send_buffer = - (create_request ? Utilities::pack(create_request(rank), false) : - std::vector()); - - // Post a request to send data - auto ierr = MPI_Isend(send_buffer.data(), - send_buffer.size(), - MPI_CHAR, - rank, - tag_request, - comm, - &send_requests[index]); - AssertThrowMPI(ierr); - } - - // Also record that we expect an answer from each target we sent - // a request to: - n_outstanding_answers = n_targets; - } -#else - (void)targets; - (void)create_request; - (void)comm; -#endif - } - - - - template - bool - NBX:: - all_locally_originated_receives_are_completed( - const std::function - & process_answer, - const MPI_Comm &comm) - { -#ifdef DEAL_II_WITH_MPI - // We know that all requests have come in when we have pending - // messages from all targets with the right tag (some of which we may - // have already taken care of below, after discovering their existence). - // We can check for pending messages with MPI_IProbe, which returns - // immediately with a return code that indicates whether - // it has found a message from any process with a given - // tag. - if (n_outstanding_answers == 0) - return true; - else - { - const int tag_deliver = Utilities::MPI::internal::Tags:: - consensus_algorithm_nbx_process_deliver; - - int request_is_pending; - MPI_Status status; - const auto ierr = MPI_Iprobe( - MPI_ANY_SOURCE, tag_deliver, comm, &request_is_pending, &status); - AssertThrowMPI(ierr); - - // If there is no pending message with this tag, - // then we are clearly not done receiving everything - // yet -- so return false. 
- if (request_is_pending == 0) - return false; - else - { - // OK, so we have gotten a reply to our answer from - // one rank. Let us process it, after double checking - // that it is indeed one we were still expecting: - const auto target = status.MPI_SOURCE; - - // Then query the size of the message, allocate enough memory, - // receive the data, and process it. - int message_size; - { - const int ierr = - MPI_Get_count(&status, MPI_CHAR, &message_size); - AssertThrowMPI(ierr); - } - std::vector recv_buffer(message_size); - - { - const int tag_deliver = Utilities::MPI::internal::Tags:: - consensus_algorithm_nbx_process_deliver; - - const int ierr = MPI_Recv(recv_buffer.data(), - recv_buffer.size(), - MPI_CHAR, - target, - tag_deliver, - comm, - MPI_STATUS_IGNORE); - AssertThrowMPI(ierr); - } - - if (process_answer) - process_answer(target, - Utilities::unpack(recv_buffer, - false)); - - // Finally, remove this rank from the list of outstanding - // targets: - --n_outstanding_answers; - - // We could do another go-around from the top of this - // else-branch to see whether there are actually other messages - // that are currently pending. But that would mean spending - // substantial time in receiving answers while we should also be - // sending answers to requests we have received from other - // places. So let it be enough for now. If there are outstanding - // answers, we will get back to this function before long and - // can take care of them then. - return (n_outstanding_answers == 0); - } - } - -#else - (void)process_answer; - (void)comm; - - return true; -#endif - } - - - - template - void - NBX::maybe_answer_one_request( - const std::function - & answer_request, - const MPI_Comm &comm) - { -#ifdef DEAL_II_WITH_MPI - - const int tag_request = Utilities::MPI::internal::Tags:: - consensus_algorithm_nbx_answer_request; - const int tag_deliver = Utilities::MPI::internal::Tags:: - consensus_algorithm_nbx_process_deliver; - - // Check if there is a request pending. By selecting the - // tag_request tag, these are other processes asking for - // our own replies, not these other processes' replies - // to our own requests. - // - // There may be multiple such pending messages. We - // only answer one. - MPI_Status status; - int request_is_pending; - const auto ierr = MPI_Iprobe( - MPI_ANY_SOURCE, tag_request, comm, &request_is_pending, &status); - AssertThrowMPI(ierr); - - if (request_is_pending != 0) - { - // Get the rank of the requesting process and add it to the - // list of requesting processes (which may contain duplicates). 
- const auto other_rank = status.MPI_SOURCE; - - Assert(requesting_processes.find(other_rank) == - requesting_processes.end(), - ExcMessage("Process is requesting a second time!")); - requesting_processes.insert(other_rank); - - // get size of incoming message - int number_amount; - auto ierr = MPI_Get_count(&status, MPI_CHAR, &number_amount); - AssertThrowMPI(ierr); - - // allocate memory for incoming message - std::vector buffer_recv(number_amount); - ierr = MPI_Recv(buffer_recv.data(), - number_amount, - MPI_CHAR, - other_rank, - tag_request, - comm, - MPI_STATUS_IGNORE); - AssertThrowMPI(ierr); - - // Allocate memory for an answer message to the current request, - // and ask the 'process' object to produce an answer: - request_buffers.emplace_back(std::make_unique>()); - auto &request_buffer = *request_buffers.back(); - if (answer_request) - request_buffer = - Utilities::pack(answer_request(other_rank, - Utilities::unpack( - buffer_recv, false)), - false); - - // Then initiate sending the answer back to the requester. - request_requests.emplace_back(std::make_unique()); - ierr = MPI_Isend(request_buffer.data(), - request_buffer.size(), - MPI_CHAR, - other_rank, - tag_deliver, - comm, - request_requests.back().get()); - AssertThrowMPI(ierr); - } -#else - (void)answer_request; - (void)comm; -#endif - } - - - - template - void - NBX::signal_finish(const MPI_Comm &comm) - { -#ifdef DEAL_II_WITH_MPI - const auto ierr = MPI_Ibarrier(comm, &barrier_request); - AssertThrowMPI(ierr); -#else - (void)comm; -#endif - } - - - - template - bool - NBX::all_remotely_originated_receives_are_completed() - { -#ifdef DEAL_II_WITH_MPI - int all_ranks_reached_barrier; - const auto ierr = MPI_Test(&barrier_request, - &all_ranks_reached_barrier, - MPI_STATUSES_IGNORE); - AssertThrowMPI(ierr); - return all_ranks_reached_barrier != 0; -#else - return true; -#endif - } - - - - template - void - NBX::clean_up_and_end_communication( - const MPI_Comm &comm) - { - (void)comm; -#ifdef DEAL_II_WITH_MPI - // clean up - { - if (send_requests.size() > 0) - { - const int ierr = MPI_Waitall(send_requests.size(), - send_requests.data(), - MPI_STATUSES_IGNORE); - AssertThrowMPI(ierr); - } - - int ierr = MPI_Wait(&barrier_request, MPI_STATUS_IGNORE); - AssertThrowMPI(ierr); - - for (auto &i : request_requests) - { - ierr = MPI_Wait(i.get(), MPI_STATUS_IGNORE); - AssertThrowMPI(ierr); - } - -# ifdef DEBUG - // note: IBarrier seems to make problem during testing, this - // additional Barrier seems to help - ierr = MPI_Barrier(comm); - AssertThrowMPI(ierr); -# endif - } -#endif - } - - - - template - PEX::PEX( - Process &process, - const MPI_Comm & comm) - : Interface(process, comm) - {} - - - - template - std::vector - PEX::run( - const std::vector & targets, - const std::function &create_request, - const std::function - &answer_request, - const std::function - & process_answer, - const MPI_Comm &comm) - { - Assert(has_unique_elements(targets), - ExcMessage("The consensus algorithms expect that each process " - "only sends a single message to another process, " - "but the targets provided include duplicates.")); - - static CollectiveMutex mutex; - CollectiveMutex::ScopedLock lock(mutex, comm); - - // 1) Send requests and start receiving the answers. - // In particular, determine how many requests we should expect - // on the current process. 
- const unsigned int n_requests = - start_communication(targets, create_request, comm); - - // 2) Answer requests: - for (unsigned int request = 0; request < n_requests; ++request) - answer_one_request(request, answer_request, comm); - - // 3) Process answers: - process_incoming_answers(targets.size(), process_answer, comm); - - // 4) Make sure all sends have successfully terminated: - clean_up_and_end_communication(); - - return std::vector(requesting_processes.begin(), - requesting_processes.end()); - } - - - - template - unsigned int - PEX::start_communication( - const std::vector & targets, - const std::function &create_request, - const MPI_Comm & comm) - { -#ifdef DEAL_II_WITH_MPI - const int tag_request = Utilities::MPI::internal::Tags:: - consensus_algorithm_pex_answer_request; - - // 1) determine with which processes this process wants to communicate - // with - const unsigned int n_targets = targets.size(); - - // 2) determine who wants to communicate with this process - const unsigned int n_sources = - compute_n_point_to_point_communications(comm, targets); - - // 2) allocate memory - recv_buffers.resize(n_targets); - send_buffers.resize(n_targets); - send_request_requests.resize(n_targets); - - send_answer_requests.resize(n_sources); - requests_buffers.resize(n_sources); - - // 4) send and receive - for (unsigned int i = 0; i < n_targets; ++i) - { - const unsigned int rank = targets[i]; - AssertIndexRange(rank, Utilities::MPI::n_mpi_processes(comm)); - - // pack data which should be sent - auto &send_buffer = send_buffers[i]; - if (create_request) - send_buffer = Utilities::pack(create_request(rank), false); - - // start to send data - auto ierr = MPI_Isend(send_buffer.data(), - send_buffer.size(), - MPI_CHAR, - rank, - tag_request, - comm, - &send_request_requests[i]); - AssertThrowMPI(ierr); - } - - return n_sources; -#else - (void)targets; - (void)create_request; - (void)comm; - return 0; -#endif - } - - - - template - void - PEX::answer_one_request( - const unsigned int index, - const std::function - & answer_request, - const MPI_Comm &comm) - { -#ifdef DEAL_II_WITH_MPI - const int tag_request = Utilities::MPI::internal::Tags:: - consensus_algorithm_pex_answer_request; - const int tag_deliver = Utilities::MPI::internal::Tags:: - consensus_algorithm_pex_process_deliver; - - // Wait until we have a message ready for retrieval, though we don't - // care which process it is from. - MPI_Status status; - int ierr = MPI_Probe(MPI_ANY_SOURCE, tag_request, comm, &status); - AssertThrowMPI(ierr); - - // Get rank of incoming message and verify that it makes sense - const unsigned int other_rank = status.MPI_SOURCE; - - Assert(requesting_processes.find(other_rank) == - requesting_processes.end(), - ExcMessage( - "A process is sending a request after a request from " - "the same process has previously already been " - "received. This algorithm does not expect this to happen.")); - requesting_processes.insert(other_rank); - - // Actually get the incoming message: - int number_amount; - ierr = MPI_Get_count(&status, MPI_CHAR, &number_amount); - AssertThrowMPI(ierr); - - std::vector buffer_recv(number_amount); - ierr = MPI_Recv(buffer_recv.data(), - number_amount, - MPI_CHAR, - other_rank, - tag_request, - comm, - &status); - AssertThrowMPI(ierr); - - // Process request by asking the user-provided function for - // the answer and post a send for it. - auto &request_buffer = requests_buffers[index]; - request_buffer = - (answer_request ? 
- Utilities::pack(answer_request(other_rank, - Utilities::unpack( - buffer_recv, false)), - false) : - std::vector()); - - ierr = MPI_Isend(request_buffer.data(), - request_buffer.size(), - MPI_CHAR, - other_rank, - tag_deliver, - comm, - &send_answer_requests[index]); - AssertThrowMPI(ierr); -#else - (void)answer_request; - (void)comm; - (void)index; -#endif - } - - - - template - void - PEX::process_incoming_answers( - const unsigned int n_targets, - const std::function - & process_answer, - const MPI_Comm &comm) - { -#ifdef DEAL_II_WITH_MPI - const int tag_deliver = Utilities::MPI::internal::Tags:: - consensus_algorithm_pex_process_deliver; - - // We know how many targets we have sent requests to. These - // targets will all eventually send us their responses, but - // we need not process them in order -- rather, just see what - // comes in and then look at message originators' ranks and - // message sizes - for (unsigned int i = 0; i < n_targets; ++i) - { - MPI_Status status; - { - const int ierr = - MPI_Probe(MPI_ANY_SOURCE, tag_deliver, comm, &status); - AssertThrowMPI(ierr); - } - - const auto other_rank = status.MPI_SOURCE; - int message_size; - { - const int ierr = MPI_Get_count(&status, MPI_CHAR, &message_size); - AssertThrowMPI(ierr); - } - std::vector recv_buffer(message_size); - - // Now actually receive the answer. Because the MPI_Probe - // above blocks until we have a message, we know that the - // following MPI_Recv call will immediately succeed. - { - const int ierr = MPI_Recv(recv_buffer.data(), - recv_buffer.size(), - MPI_CHAR, - other_rank, - tag_deliver, - comm, - MPI_STATUS_IGNORE); - AssertThrowMPI(ierr); - } - - if (process_answer) - process_answer(other_rank, - Utilities::unpack(recv_buffer, false)); - } -#else - (void)n_targets; - (void)process_answer; - (void)comm; -#endif - } - - - - template - void - PEX::clean_up_and_end_communication() - { -#ifdef DEAL_II_WITH_MPI - // Finalize all MPI_Request objects for both the - // send-request and receive-answer operations. - if (send_request_requests.size() > 0) - { - const int ierr = MPI_Waitall(send_request_requests.size(), - send_request_requests.data(), - MPI_STATUSES_IGNORE); - AssertThrowMPI(ierr); - } - - // Then also check the send-answer requests. - if (send_answer_requests.size() > 0) - { - const int ierr = MPI_Waitall(send_answer_requests.size(), - send_answer_requests.data(), - MPI_STATUSES_IGNORE); - AssertThrowMPI(ierr); - } -#endif - } - - - - template - Serial::Serial( - Process &process, - const MPI_Comm & comm) - : Interface(process, comm) - {} - - - - template - std::vector - Serial::run( - const std::vector & targets, - const std::function &create_request, - const std::function - &answer_request, - const std::function - & process_answer, - const MPI_Comm &comm) - { - (void)comm; - Assert((Utilities::MPI::job_supports_mpi() == false) || - (Utilities::MPI::n_mpi_processes(comm) == 1), - ExcMessage("You shouldn't use the 'Serial' class on " - "communicators that have more than one process " - "associated with it.")); - - // The only valid target for a serial program is itself. - if (targets.size() != 0) - { - Assert(targets.size() == 1, - ExcMessage( - "On a single process, the only valid target " - "is process zero (the process itself), which can only be " - "listed once.")); - AssertDimension(targets[0], 0); - - // Since the caller indicates that there is a target, and since we - // know that it is the current process, let the process send - // something to itself. 
- const RequestType request = - (create_request ? create_request(0) : RequestType()); - const AnswerType answer = - (answer_request ? answer_request(0, request) : AnswerType()); - - if (process_answer) - process_answer(0, answer); - } - - return targets; // nothing to do - } - - - - template - Selector::Selector( - Process &process, - const MPI_Comm & comm) - : Interface(process, comm) - {} - - - - template - std::vector - Selector::run( - const std::vector & targets, - const std::function &create_request, - const std::function - &answer_request, - const std::function - & process_answer, - const MPI_Comm &comm) - { - // Depending on the number of processes we switch between - // implementations. We reduce the threshold for debug mode to be - // able to test also the non-blocking implementation. This feature - // is tested by: - // tests/multigrid/transfer_matrix_free_06.with_mpi=true.with_p4est=true.with_trilinos=true.mpirun=10.output - - const unsigned int n_procs = (Utilities::MPI::job_supports_mpi() ? - Utilities::MPI::n_mpi_processes(comm) : - 1); -#ifdef DEAL_II_WITH_MPI -# ifdef DEBUG - if (n_procs > 10) -# else - if (n_procs > 99) -# endif - consensus_algo.reset(new NBX()); - else -#endif - if (n_procs > 1) - consensus_algo.reset(new PEX()); - else - consensus_algo.reset(new Serial()); - - return consensus_algo->run( - targets, create_request, answer_request, process_answer, comm); - } - - - } // namespace ConsensusAlgorithms - } // end of namespace MPI -} // end of namespace Utilities - - -DEAL_II_NAMESPACE_CLOSE #endif diff --git a/include/deal.II/fe/fe_tools_extrapolate.templates.h b/include/deal.II/fe/fe_tools_extrapolate.templates.h index 7106bb5b1e..3688b3a694 100644 --- a/include/deal.II/fe/fe_tools_extrapolate.templates.h +++ b/include/deal.II/fe/fe_tools_extrapolate.templates.h @@ -20,7 +20,6 @@ #include #include -#include #include #include diff --git a/source/base/CMakeLists.txt b/source/base/CMakeLists.txt index 07e6d9c009..6ce9596c4a 100644 --- a/source/base/CMakeLists.txt +++ b/source/base/CMakeLists.txt @@ -50,7 +50,6 @@ SET(_unity_include_src hdf5.cc mpi.cc mpi_compute_index_owner_internal.cc - mpi_consensus_algorithms.cc mpi_noncontiguous_partitioner.cc mpi_remote_point_evaluation.cc mu_parser_internal.cc diff --git a/source/base/mpi.cc b/source/base/mpi.cc index fc4c154000..6973cacf6b 100644 --- a/source/base/mpi.cc +++ b/source/base/mpi.cc @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include diff --git a/source/base/mpi_compute_index_owner_internal.cc b/source/base/mpi_compute_index_owner_internal.cc index 92b82a3dd8..40efac7c79 100644 --- a/source/base/mpi_compute_index_owner_internal.cc +++ b/source/base/mpi_compute_index_owner_internal.cc @@ -18,6 +18,8 @@ #include #include +#include + DEAL_II_NAMESPACE_OPEN namespace Utilities diff --git a/source/base/mpi_consensus_algorithms.cc b/source/base/mpi_consensus_algorithms.cc deleted file mode 100644 index 86acf360f0..0000000000 --- a/source/base/mpi_consensus_algorithms.cc +++ /dev/null @@ -1,106 +0,0 @@ -// --------------------------------------------------------------------- -// -// Copyright (C) 2005 - 2022 by the deal.II authors -// -// This file is part of the deal.II library. -// -// The deal.II library is free software; you can use it, redistribute -// it, and/or modify it under the terms of the GNU Lesser General -// Public License as published by the Free Software Foundation; either -// version 2.1 of the License, or (at your option) any later version. 
-// The full text of the license can be found in the file LICENSE.md at -// the top level directory of deal.II. -// -// --------------------------------------------------------------------- - -#include - -#include - - -DEAL_II_NAMESPACE_OPEN - -namespace Utilities -{ - namespace MPI - { - namespace ConsensusAlgorithms - { - template class Process, - std::vector>; - - template class Interface, - std::vector>; - - template class NBX, std::vector>; - - template class PEX, std::vector>; - - template class Serial, - std::vector>; - - template class Selector, - std::vector>; - - - template class Process>, - std::vector>; - - template class Interface>, - std::vector>; - - template class Selector>, - std::vector>; - - template class NBX>, - std::vector>; - - template class Serial>, - std::vector>; - - template class PEX>, - std::vector>; - -#ifdef DEAL_II_WITH_64BIT_INDICES - template class Process, - std::vector>; - - template class Interface, - std::vector>; - - template class NBX, - std::vector>; - - template class Serial, - std::vector>; - - template class PEX, - std::vector>; - - template class Selector, - std::vector>; -#endif - - template class Process, std::vector>; - - template class Interface, std::vector>; - - template class NBX, std::vector>; - - template class PEX, std::vector>; - - template class Serial, std::vector>; - - template class Selector, std::vector>; - - } // namespace ConsensusAlgorithms - } // end of namespace MPI -} // end of namespace Utilities - -DEAL_II_NAMESPACE_CLOSE diff --git a/source/base/mpi_noncontiguous_partitioner.cc b/source/base/mpi_noncontiguous_partitioner.cc index 73f7e67bc9..3ae4c9c51f 100644 --- a/source/base/mpi_noncontiguous_partitioner.cc +++ b/source/base/mpi_noncontiguous_partitioner.cc @@ -20,6 +20,9 @@ #include #include +#include + + DEAL_II_NAMESPACE_OPEN namespace Utilities diff --git a/source/base/mpi_remote_point_evaluation.cc b/source/base/mpi_remote_point_evaluation.cc index 125777ebb4..c5c7a5b759 100644 --- a/source/base/mpi_remote_point_evaluation.cc +++ b/source/base/mpi_remote_point_evaluation.cc @@ -17,7 +17,6 @@ #include #include -#include #include #include diff --git a/source/base/partitioner.cc b/source/base/partitioner.cc index c79b6de336..d6c12c07df 100644 --- a/source/base/partitioner.cc +++ b/source/base/partitioner.cc @@ -17,6 +17,9 @@ #include #include +#include + + DEAL_II_NAMESPACE_OPEN namespace Utilities diff --git a/source/grid/grid_tools.cc b/source/grid/grid_tools.cc index d15f5b4c11..d8c3279452 100644 --- a/source/grid/grid_tools.cc +++ b/source/grid/grid_tools.cc @@ -16,7 +16,6 @@ #include #include #include -#include #include #include diff --git a/source/grid/tria_description.cc b/source/grid/tria_description.cc index c1ae201b2a..bdeaec046b 100644 --- a/source/grid/tria_description.cc +++ b/source/grid/tria_description.cc @@ -16,7 +16,6 @@ #include #include #include -#include #include #include diff --git a/source/matrix_free/vector_data_exchange.cc b/source/matrix_free/vector_data_exchange.cc index 7a761991f0..4c9745516d 100644 --- a/source/matrix_free/vector_data_exchange.cc +++ b/source/matrix_free/vector_data_exchange.cc @@ -22,11 +22,7 @@ #include -#ifdef DEAL_II_WITH_64BIT_INDICES -# include - -# include -#endif +#include #include #include diff --git a/source/multigrid/mg_tools.cc b/source/multigrid/mg_tools.cc index b89c744e3b..9737241408 100644 --- a/source/multigrid/mg_tools.cc +++ b/source/multigrid/mg_tools.cc @@ -16,7 +16,6 @@ #include #include #include -#include #include #include -- 2.39.5
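
For downstream code, the net effect of this patch is that the template
definitions are now reachable through the ordinary header, so an include of
the form

    #include <deal.II/base/mpi_consensus_algorithms.templates.h>

can simply become

    #include <deal.II/base/mpi_consensus_algorithms.h>

while the old header remains in place as a shim that forwards to the new one
and emits a compile-time deprecation warning.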