From: Peter Munch Date: Mon, 1 May 2023 13:07:58 +0000 (+0200) Subject: RPE::evaluate_and_process(): pack only if needed X-Git-Tag: v9.6.0-rc1~188^2 X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=3915bc38424757308f23dc5652914da2aa523367;p=dealii.git RPE::evaluate_and_process(): pack only if needed --- diff --git a/include/deal.II/base/mpi_remote_point_evaluation.h b/include/deal.II/base/mpi_remote_point_evaluation.h index 9691d4349d..f1b3c7da48 100644 --- a/include/deal.II/base/mpi_remote_point_evaluation.h +++ b/include/deal.II/base/mpi_remote_point_evaluation.h @@ -272,24 +272,32 @@ namespace Utilities * * @warning This is a collective call that needs to be executed by all * processors in the communicator. + * + * @note This function can handle arbitrary types, including scalar and + * Tensor objects. In the case that scalar types are used, one can + * specify the number of components @p n_components. This allows one to + * provide unrolled tensors, which is useful, e.g., if their dimension + * and rank are not known at compile time. */ - template <typename T> + template <typename T, unsigned int n_components = 1> void evaluate_and_process( std::vector<T> &output, std::vector<T> &buffer, const std::function<void(const ArrayView<T> &, const CellData &)> - &evaluation_function) const; + &evaluation_function, + const bool sort_data = true) const; /** * Same as above but with the result provided as return value and * without external allocation of a user-provided buffer. */ - template <typename T> + template <typename T, unsigned int n_components = 1> std::vector<T> evaluate_and_process( const std::function<void(const ArrayView<T> &, const CellData &)> - &evaluation_function) const; + &evaluation_function, + const bool sort_data = true) const; /** * This method is the inverse of the method evaluate_and_process(). It * @@ -298,25 +306,33 @@ * * @warning This is a collective call that needs to be executed by all * processors in the communicator. + * + * @note This function can handle arbitrary types, including scalar and + * Tensor objects. In the case that scalar types are used, one can + * specify the number of components @p n_components. This allows one to + * provide unrolled tensors, which is useful, e.g., if their dimension + * and rank are not known at compile time. */ - template <typename T> + template <typename T, unsigned int n_components = 1> void process_and_evaluate( const std::vector<T> &input, std::vector<T> &buffer, const std::function<void(const ArrayView<T> &, const CellData &)> - &evaluation_function) const; + &evaluation_function, + const bool sort_data = true) const; /** * Same as above but without external allocation of a user-provided * buffer. */ - template <typename T> + template <typename T, unsigned int n_components = 1> void process_and_evaluate( const std::vector<T> &input, const std::function<void(const ArrayView<T> &, const CellData &)> - &evaluation_function) const; + &evaluation_function, + const bool sort_data = true) const; /** * Return a CRS-like data structure to determine the position of the * @@ -366,6 +382,24 @@ namespace Utilities bool is_ready() const; + /** + * Return the permutation needed for sending. If it is applied, the + * evaluation results do not have to be sorted according to the + * receiving ranks during evaluate_and_process(). + */ + const std::vector<unsigned int> & + get_send_permutation() const; + + /** + * Data is contiguous rank by rank directly after MPI communication. + * If no sorting is requested during evaluate_and_process(), this + * permutation allows one to access all values corresponding to an + * evaluation point (sorted into different cells). + */ + const std::vector<unsigned int> & + get_inverse_recv_permutation() const; + + private: /** * Additional data with basic settings.
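The @p n_components mechanism documented in the hunks above can be used to transport a vector field as unrolled scalars. A minimal sketch of such a call, not part of the patch itself: it assumes an already set-up RemotePointEvaluation object `rpe`, and the values written in the callback are placeholders.

  #include <deal.II/base/mpi_remote_point_evaluation.h>

  #include <vector>

  using namespace dealii;

  template <int dim>
  std::vector<double>
  evaluate_unrolled(const Utilities::MPI::RemotePointEvaluation<dim> &rpe)
  {
    // n_components = dim: every evaluation point contributes dim consecutive
    // scalars, as if a Tensor<1, dim> had been unrolled; the returned vector
    // then holds component c of point p at index p * dim + c
    return rpe.template evaluate_and_process<double, dim>(
      [](const ArrayView<double> &values, const auto &cell_data) {
        (void)cell_data; // cell and reference-point info, unused in this sketch
        for (auto &v : values)
          v = 1.0; // placeholder for a real per-component result
      });
  }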
@@ -416,6 +450,11 @@ namespace Utilities */ std::vector<unsigned int> recv_permutation; + /** + * Inverse of permutation index within a recv buffer. + */ + std::vector<unsigned int> recv_permutation_inv; + /** * Pointers of ranges within a receive buffer that are filled by ranks * specified by recv_ranks. @@ -438,6 +477,11 @@ */ std::vector<unsigned int> send_permutation; + /** + * Inverse of permutation index within a send buffer. + */ + std::vector<unsigned int> send_permutation_inv; + /** * Ranks to send to. */ @@ -448,8 +492,184 @@ * specified by send_ranks. */ std::vector<unsigned int> send_ptrs; + + /** + * Buffer size (if internal sorting is requested) determined as max of: + * + * needed during evaluate_and_process() + * - point data (sorted according to cells) -> input from cell loop + * - point data (sorted according to ranks) -> to be sent + * - memory for receiving data (by one process at a time) + * + * needed during process_and_evaluate() + * - point data (sorted according to cells) -> output to cell loop + * - point data (sorted according to ranks) -> to be sent + * - memory for receiving data (by one process at a time) + */ + unsigned int buffer_size_with_sorting; + + /** + * Buffer size (if sorting is not requested); corresponds to the + * number of evaluation points. + */ + unsigned int buffer_size_without_sorting; }; + namespace internal + { +#ifdef DEAL_II_WITH_MPI + /** + * Pack @p data and send it via MPI_Isend. + */ + template <typename T> + std::enable_if_t<Utilities::MPI::is_mpi_type<T> == false, void> + pack_and_isend(const ArrayView<T> &data, + const unsigned int rank, + const unsigned int tag, + const MPI_Comm comm, + std::vector<std::vector<char>> &buffers, + std::vector<MPI_Request> &requests) + { + requests.emplace_back(MPI_Request()); + + buffers.emplace_back(Utilities::pack( + std::vector<T>(data.data(), data.data() + data.size()), false)); + + const int ierr = MPI_Isend(buffers.back().data(), + buffers.back().size(), + MPI_CHAR, + rank, + tag, + comm, + &requests.back()); + AssertThrowMPI(ierr); + } + + + + /** + * Above function specialized for data types supported by MPI + * so that one can skip packing. + */ + template <typename T> + std::enable_if_t<Utilities::MPI::is_mpi_type<T> == true, void> + pack_and_isend(const ArrayView<T> &data, + const unsigned int rank, + const unsigned int tag, + const MPI_Comm comm, + std::vector<std::vector<char>> & /*buffers*/, + std::vector<MPI_Request> &requests) + { + requests.emplace_back(MPI_Request()); + + const int ierr = MPI_Isend(data.data(), + data.size(), + Utilities::MPI::mpi_type_id_for_type<T>, + rank, + tag, + comm, + &requests.back()); + AssertThrowMPI(ierr); + } + + + + /** + * Above function specialized for Tensor objects. The underlying data type + * might be supported by MPI so that one can skip packing. + */ + template <int rank_, int dim, typename T> + std::enable_if_t<Utilities::MPI::is_mpi_type<T> == true, void> + pack_and_isend(const ArrayView<Tensor<rank_, dim, T>> &data, + const unsigned int rank, + const unsigned int tag, + const MPI_Comm comm, + std::vector<std::vector<char>> &buffers, + std::vector<MPI_Request> &requests) + { + ArrayView<T> data_(reinterpret_cast<T *>(data.data()), + data.size() * Utilities::pow(dim, rank_)); + + pack_and_isend(data_, rank, tag, comm, buffers, requests); + } + + + /** + * Receive message, unpack it, and store the result in @p data.
+ */ + template + std::enable_if_t == false, void> + recv_and_unpack(const ArrayView &data, + const MPI_Comm comm, + const MPI_Status &status, + std::vector &buffer) + { + int message_length; + int ierr = MPI_Get_count(&status, MPI_CHAR, &message_length); + AssertThrowMPI(ierr); + + buffer.resize(message_length); + + ierr = MPI_Recv(buffer.data(), + buffer.size(), + MPI_CHAR, + status.MPI_SOURCE, + internal::Tags::remote_point_evaluation, + comm, + MPI_STATUS_IGNORE); + AssertThrowMPI(ierr); + + // unpack data + const auto temp = Utilities::unpack>(buffer, false); + + for (unsigned int i = 0; i < data.size(); ++i) + data[i] = temp[i]; + } + + + + /** + * Above function specialized for data types supported by MPI + * so that one can skip unpacking. + */ + template + std::enable_if_t == true, void> + recv_and_unpack(const ArrayView &data, + const MPI_Comm comm, + const MPI_Status &status, + std::vector & /*buffer*/) + { + const auto ierr = MPI_Recv(data.data(), + data.size(), + Utilities::MPI::mpi_type_id_for_type, + status.MPI_SOURCE, + internal::Tags::remote_point_evaluation, + comm, + MPI_STATUS_IGNORE); + AssertThrowMPI(ierr); + } + + + + /** + * Above function specialized for Tensor objects. The underlying data type + * might be supported by MPI so that one can skip unpacking. + */ + template + std::enable_if_t == true, void> + recv_and_unpack(const ArrayView> &data, + const MPI_Comm comm, + const MPI_Status &status, + std::vector &buffer) + { + const ArrayView data_(reinterpret_cast(data.data()), + data.size() * Utilities::pow(dim, rank_)); + + recv_and_unpack(data_, comm, status, buffer); + } +#endif + } // namespace internal + template @@ -467,19 +687,21 @@ namespace Utilities template - template + template void RemotePointEvaluation::evaluate_and_process( std::vector &output, std::vector &buffer, const std::function &, const CellData &)> - &evaluation_function) const + &evaluation_function, + const bool sort_data) const { #ifndef DEAL_II_WITH_MPI Assert(false, ExcNeedsMPI()); (void)output; (void)buffer; (void)evaluation_function; + (void)sort_data; #else static CollectiveMutex mutex; CollectiveMutex::ScopedLock lock(mutex, tria->get_communicator()); @@ -488,57 +710,75 @@ namespace Utilities Utilities::MPI::this_mpi_process(tria->get_communicator()); // allocate memory for output and buffer - output.resize(point_ptrs.back()); - buffer.resize(std::max(send_permutation.size() * 2, - point_ptrs.back() + send_permutation.size())); + output.resize(point_ptrs.back() * n_components); + + buffer.resize( + (sort_data ? buffer_size_with_sorting : buffer_size_without_sorting) * + n_components); // ... for evaluation - ArrayView buffer_eval(buffer.data(), send_permutation.size()); + ArrayView buffer_eval(buffer.data(), + send_permutation.size() * n_components); - // ... for communication - ArrayView buffer_comm(buffer.data() + send_permutation.size(), - send_permutation.size()); + // ... for communication (send) + ArrayView buffer_send(sort_data ? 
+ (buffer.data() + + send_permutation.size() * n_components) : + buffer.data(), + send_permutation.size() * n_components); + + // more arrays + std::vector send_requests; + std::vector> send_buffers_packed; + std::vector recv_buffer_packed; // evaluate functions at points evaluation_function(buffer_eval, *cell_data); - // sort for communication - const auto my_rank_local_recv_ptr = - std::find(recv_ranks.begin(), recv_ranks.end(), my_rank); - - if (my_rank_local_recv_ptr != recv_ranks.end()) + // sort for communication (optional) + if (sort_data) { - const unsigned int my_rank_local_recv = - std::distance(recv_ranks.begin(), my_rank_local_recv_ptr); - const unsigned int my_rank_local_send = std::distance( - send_ranks.begin(), - std::find(send_ranks.begin(), send_ranks.end(), my_rank)); - const unsigned int start = send_ptrs[my_rank_local_send]; - const unsigned int end = send_ptrs[my_rank_local_send + 1]; - const unsigned int *recv_ptr = - recv_permutation.data() + recv_ptrs[my_rank_local_recv]; - for (unsigned int i = 0; i < send_permutation.size(); ++i) - { - const unsigned int send_index = send_permutation[i]; + const auto my_rank_local_recv_ptr = + std::find(recv_ranks.begin(), recv_ranks.end(), my_rank); - // local data -> can be copied to output directly - if (start <= send_index && send_index < end) - output[recv_ptr[send_index - start]] = buffer_eval[i]; - else // data to be sent - buffer_comm[send_index] = buffer_eval[i]; + if (my_rank_local_recv_ptr != recv_ranks.end()) + { + const unsigned int my_rank_local_recv = + std::distance(recv_ranks.begin(), my_rank_local_recv_ptr); + const unsigned int my_rank_local_send = std::distance( + send_ranks.begin(), + std::find(send_ranks.begin(), send_ranks.end(), my_rank)); + const unsigned int start = send_ptrs[my_rank_local_send]; + const unsigned int end = send_ptrs[my_rank_local_send + 1]; + const unsigned int *recv_ptr = + recv_permutation.data() + recv_ptrs[my_rank_local_recv]; + for (unsigned int i = 0; i < send_permutation.size(); ++i) + { + const unsigned int send_index = send_permutation[i]; + + if (start <= send_index && send_index < end) + // local data -> can be copied to output directly + for (unsigned int c = 0; c < n_components; ++c) + output[recv_ptr[send_index - start] * n_components + c] = + buffer_eval[i * n_components + c]; + else + // data to be sent + for (unsigned int c = 0; c < n_components; ++c) + buffer_send[send_index * n_components + c] = + buffer_eval[i * n_components + c]; + } + } + else + { + for (unsigned int i = 0; i < send_permutation.size(); ++i) + for (unsigned int c = 0; c < n_components; ++c) + buffer_send[send_permutation[i] * n_components + c] = + buffer_eval[i * n_components + c]; } - } - else - { - for (unsigned int i = 0; i < send_permutation.size(); ++i) - buffer_comm[send_permutation[i]] = buffer_eval[i]; } // send data - std::vector> send_buffer; - send_buffer.reserve(send_ranks.size()); - - std::vector send_requests; + send_buffers_packed.reserve(send_ranks.size()); send_requests.reserve(send_ranks.size()); for (unsigned int i = 0; i < send_ranks.size(); ++i) @@ -546,26 +786,42 @@ namespace Utilities if (send_ranks[i] == my_rank) continue; - send_requests.emplace_back(MPI_Request()); + internal::pack_and_isend( + ArrayView( + buffer_send.begin() + send_ptrs[i] * n_components, + (send_ptrs[i + 1] - send_ptrs[i]) * n_components), + send_ranks[i], + internal::Tags::remote_point_evaluation, + tria->get_communicator(), + send_buffers_packed, + send_requests); + } - 
send_buffer.emplace_back(Utilities::pack( - std::vector(buffer_comm.begin() + send_ptrs[i], - buffer_comm.begin() + send_ptrs[i + 1]), - false)); + // copy local data directly to output if no sorting is requested + if (!sort_data) + { + const auto my_rank_local_recv_ptr = + std::find(recv_ranks.begin(), recv_ranks.end(), my_rank); - const int ierr = MPI_Isend(send_buffer.back().data(), - send_buffer.back().size(), - MPI_CHAR, - send_ranks[i], - internal::Tags::remote_point_evaluation, - tria->get_communicator(), - &send_requests.back()); - AssertThrowMPI(ierr); + if (my_rank_local_recv_ptr != recv_ranks.end()) + { + const unsigned int my_rank_local_recv = + std::distance(recv_ranks.begin(), my_rank_local_recv_ptr); + const unsigned int my_rank_local_send = std::distance( + send_ranks.begin(), + std::find(send_ranks.begin(), send_ranks.end(), my_rank)); + + for (unsigned int j = recv_ptrs[my_rank_local_recv], + k = send_ptrs[my_rank_local_send]; + j < recv_ptrs[my_rank_local_recv + 1]; + ++j, ++k) + for (unsigned int c = 0; c < n_components; ++c) + output[j * n_components + c] = + buffer_eval[k * n_components + c]; + } } // receive data - std::vector buffer_char; - for (unsigned int i = 0; i < recv_ranks.size(); ++i) { if (recv_ranks[i] == my_rank) @@ -579,26 +835,6 @@ namespace Utilities &status); AssertThrowMPI(ierr); - int message_length; - ierr = MPI_Get_count(&status, MPI_CHAR, &message_length); - AssertThrowMPI(ierr); - - buffer_char.resize(message_length); - - ierr = MPI_Recv(buffer_char.data(), - buffer_char.size(), - MPI_CHAR, - status.MPI_SOURCE, - internal::Tags::remote_point_evaluation, - tria->get_communicator(), - MPI_STATUS_IGNORE); - AssertThrowMPI(ierr); - - // unpack data - const auto buffer = - Utilities::unpack>(buffer_char, false); - - // write data into output vector const auto ptr = std::find(recv_ranks.begin(), recv_ranks.end(), status.MPI_SOURCE); @@ -606,11 +842,27 @@ namespace Utilities const unsigned int j = std::distance(recv_ranks.begin(), ptr); - AssertDimension(buffer.size(), recv_ptrs[j + 1] - recv_ptrs[j]); + // ... for communication (recv) + ArrayView recv_buffer( + sort_data ? 
+ (buffer.data() + send_permutation.size() * 2 * n_components) : + (output.data() + recv_ptrs[j] * n_components), + (recv_ptrs[j + 1] - recv_ptrs[j]) * n_components); - for (unsigned int i = recv_ptrs[j], c = 0; i < recv_ptrs[j + 1]; - ++i, ++c) - output[recv_permutation[i]] = buffer[c]; + internal::recv_and_unpack(recv_buffer, + tria->get_communicator(), + status, + recv_buffer_packed); + + if (sort_data) + { + // sort data into output vector (optional) + for (unsigned int i = recv_ptrs[j], k = 0; i < recv_ptrs[j + 1]; + ++i, ++k) + for (unsigned int c = 0; c < n_components; ++c) + output[recv_permutation[i] * n_components + c] = + recv_buffer[k * n_components + c]; + } } // make sure all messages have been sent @@ -626,16 +878,20 @@ namespace Utilities template - template + template std::vector RemotePointEvaluation::evaluate_and_process( const std::function &, const CellData &)> - &evaluation_function) const + &evaluation_function, + const bool sort_data) const { std::vector output; std::vector buffer; - this->evaluate_and_process(output, buffer, evaluation_function); + this->evaluate_and_process(output, + buffer, + evaluation_function, + sort_data); return output; } @@ -643,19 +899,21 @@ namespace Utilities template - template + template void RemotePointEvaluation::process_and_evaluate( const std::vector &input, std::vector &buffer, const std::function &, const CellData &)> - &evaluation_function) const + &evaluation_function, + const bool sort_data) const { #ifndef DEAL_II_WITH_MPI Assert(false, ExcNeedsMPI()); (void)input; (void)buffer; (void)evaluation_function; + (void)sort_data; #else static CollectiveMutex mutex; CollectiveMutex::ScopedLock lock(mutex, tria->get_communicator()); @@ -663,74 +921,86 @@ namespace Utilities const unsigned int my_rank = Utilities::MPI::this_mpi_process(tria->get_communicator()); - // invert permutation matrices (TODO: precompute) - std::vector recv_permutation_inv(recv_permutation.size()); - for (unsigned int c = 0; c < recv_permutation.size(); ++c) - recv_permutation_inv[recv_permutation[c]] = c; - - std::vector send_permutation_inv(send_permutation.size()); - for (unsigned int c = 0; c < send_permutation.size(); ++c) - send_permutation_inv[send_permutation[c]] = c; - // allocate memory for buffer const auto &point_ptrs = this->get_point_ptrs(); - AssertDimension(input.size(), point_ptrs.size() - 1); - buffer.resize(std::max(send_permutation.size() * 2, - point_ptrs.back() + send_permutation.size())); + + AssertDimension(input.size(), + sort_data ? ((point_ptrs.size() - 1) * n_components) : + (point_ptrs.back() * n_components)); + buffer.resize( + (sort_data ? buffer_size_with_sorting : buffer_size_without_sorting) * + n_components); // ... for evaluation - ArrayView buffer_eval(buffer.data(), send_permutation.size()); + ArrayView buffer_eval(buffer.data(), + send_permutation.size() * n_components); - // ... for communication - ArrayView buffer_comm(buffer.data() + send_permutation.size(), - point_ptrs.back()); + // ... for communication (send) + ArrayView buffer_send(sort_data ? 
+ (buffer.data() + + send_permutation.size() * n_components) : + const_cast(input.data()), + point_ptrs.back() * n_components); - // sort for communication (and duplicate data if necessary) + // more arrays + std::vector send_requests; + std::vector> send_buffers_packed; + std::vector recv_buffer_packed; - const auto my_rank_local_recv_ptr = - std::find(recv_ranks.begin(), recv_ranks.end(), my_rank); + // sort for communication (and duplicate data if necessary) - if (my_rank_local_recv_ptr != recv_ranks.end()) + if (sort_data) { - // optimize the case if we have our own rank among the possible list - const unsigned int my_rank_local_recv = - std::distance(recv_ranks.begin(), my_rank_local_recv_ptr); - const unsigned int my_rank_local_send = std::distance( - send_ranks.begin(), - std::find(send_ranks.begin(), send_ranks.end(), my_rank)); - - const unsigned int start = recv_ptrs[my_rank_local_recv]; - const unsigned int end = recv_ptrs[my_rank_local_recv + 1]; - const unsigned int *send_ptr = - send_permutation_inv.data() + send_ptrs[my_rank_local_send]; - for (unsigned int i = 0, c = 0; i < point_ptrs.size() - 1; ++i) + const auto my_rank_local_recv_ptr = + std::find(recv_ranks.begin(), recv_ranks.end(), my_rank); + + if (my_rank_local_recv_ptr != recv_ranks.end()) { - const unsigned int next = point_ptrs[i + 1]; - for (unsigned int j = point_ptrs[i]; j < next; ++j, ++c) + // optimize the case if we have our own rank among the possible + // list + const unsigned int my_rank_local_recv = + std::distance(recv_ranks.begin(), my_rank_local_recv_ptr); + const unsigned int my_rank_local_send = std::distance( + send_ranks.begin(), + std::find(send_ranks.begin(), send_ranks.end(), my_rank)); + + const unsigned int start = recv_ptrs[my_rank_local_recv]; + const unsigned int end = recv_ptrs[my_rank_local_recv + 1]; + const unsigned int *send_ptr = + send_permutation_inv.data() + send_ptrs[my_rank_local_send]; + for (unsigned int i = 0, k = 0; i < point_ptrs.size() - 1; ++i) { - const unsigned int recv_index = recv_permutation_inv[c]; - - // local data -> can be copied to final buffer directly - if (start <= recv_index && recv_index < end) - buffer_eval[send_ptr[recv_index - start]] = input[i]; - else // data to be sent - buffer_comm[recv_index] = input[i]; + const unsigned int next = point_ptrs[i + 1]; + for (unsigned int j = point_ptrs[i]; j < next; ++j, ++k) + { + const unsigned int recv_index = recv_permutation_inv[k]; + + // local data -> can be copied to final buffer directly + if (start <= recv_index && recv_index < end) + for (unsigned int c = 0; c < n_components; ++c) + buffer_eval[send_ptr[recv_index - start] * + n_components + + c] = input[i * n_components + c]; + else // data to be sent + for (unsigned int c = 0; c < n_components; ++c) + buffer_send[recv_index * n_components + c] = + input[i * n_components + c]; + } } } - } - else - { - for (unsigned int i = 0, c = 0; i < point_ptrs.size() - 1; ++i) - for (unsigned int j = point_ptrs[i]; j < point_ptrs[i + 1]; - ++j, ++c) - buffer_comm[recv_permutation_inv[c]] = input[i]; + else + { + for (unsigned int i = 0, k = 0; i < point_ptrs.size() - 1; ++i) + for (unsigned int j = point_ptrs[i]; j < point_ptrs[i + 1]; + ++j, ++k) + for (unsigned int c = 0; c < n_components; ++c) + buffer_send[recv_permutation_inv[k] * n_components + c] = + input[i * n_components + c]; + } } // send data - std::vector> send_buffer; - send_buffer.reserve(recv_ranks.size()); - - std::vector send_requests; + send_buffers_packed.reserve(recv_ranks.size()); 
send_requests.reserve(recv_ranks.size()); for (unsigned int i = 0; i < recv_ranks.size(); ++i) @@ -738,26 +1008,41 @@ namespace Utilities if (recv_ranks[i] == my_rank) continue; - send_requests.push_back(MPI_Request()); + internal::pack_and_isend( + ArrayView( + buffer_send.begin() + recv_ptrs[i] * n_components, + (recv_ptrs[i + 1] - recv_ptrs[i]) * n_components), + recv_ranks[i], + internal::Tags::remote_point_evaluation, + tria->get_communicator(), + send_buffers_packed, + send_requests); + } - send_buffer.emplace_back(Utilities::pack( - std::vector(buffer_comm.begin() + recv_ptrs[i], - buffer_comm.begin() + recv_ptrs[i + 1]), - false)); + // copy local data directly to output if no sorting is requested + if (!sort_data) + { + const auto my_rank_local_recv_ptr = + std::find(recv_ranks.begin(), recv_ranks.end(), my_rank); - const int ierr = MPI_Isend(send_buffer.back().data(), - send_buffer.back().size(), - MPI_CHAR, - recv_ranks[i], - internal::Tags::remote_point_evaluation, - tria->get_communicator(), - &send_requests.back()); - AssertThrowMPI(ierr); + if (my_rank_local_recv_ptr != recv_ranks.end()) + { + const unsigned int my_rank_local_recv = + std::distance(recv_ranks.begin(), my_rank_local_recv_ptr); + const unsigned int my_rank_local_send = std::distance( + send_ranks.begin(), + std::find(send_ranks.begin(), send_ranks.end(), my_rank)); + + for (unsigned int j = recv_ptrs[my_rank_local_recv], + k = send_ptrs[my_rank_local_send]; + j < recv_ptrs[my_rank_local_recv + 1]; + ++j, ++k) + for (unsigned int c = 0; c < n_components; ++c) + buffer_eval[k * n_components + c] = + input[j * n_components + c]; + } } - // receive data - std::vector recv_buffer; - for (unsigned int i = 0; i < send_ranks.size(); ++i) { if (send_ranks[i] == my_rank) @@ -771,25 +1056,6 @@ namespace Utilities &status); AssertThrowMPI(ierr); - int message_length; - ierr = MPI_Get_count(&status, MPI_CHAR, &message_length); - AssertThrowMPI(ierr); - - recv_buffer.resize(message_length); - - ierr = MPI_Recv(recv_buffer.data(), - recv_buffer.size(), - MPI_CHAR, - status.MPI_SOURCE, - internal::Tags::remote_point_evaluation, - tria->get_communicator(), - MPI_STATUS_IGNORE); - AssertThrowMPI(ierr); - - // unpack data - const auto recv_buffer_unpacked = - Utilities::unpack>(recv_buffer, false); - // write data into buffer vector const auto ptr = std::find(send_ranks.begin(), send_ranks.end(), status.MPI_SOURCE); @@ -798,12 +1064,26 @@ namespace Utilities const unsigned int j = std::distance(send_ranks.begin(), ptr); - AssertDimension(recv_buffer_unpacked.size(), - send_ptrs[j + 1] - send_ptrs[j]); + ArrayView recv_buffer( + sort_data ? 
+ (buffer.data() + + (point_ptrs.back() + send_permutation.size()) * n_components) : + (buffer.data() + send_ptrs[j] * n_components), + (send_ptrs[j + 1] - send_ptrs[j]) * n_components); - for (unsigned int i = send_ptrs[j], c = 0; i < send_ptrs[j + 1]; - ++i, ++c) - buffer_eval[send_permutation_inv[i]] = recv_buffer_unpacked[c]; + internal::recv_and_unpack(recv_buffer, + tria->get_communicator(), + status, + recv_buffer_packed); + + if (sort_data) + { + for (unsigned int i = send_ptrs[j], k = 0; i < send_ptrs[j + 1]; + ++i, ++k) + for (unsigned int c = 0; c < n_components; ++c) + buffer_eval[send_permutation_inv[i] * n_components + c] = + recv_buffer[k * n_components + c]; + } } if (!send_requests.empty()) @@ -822,15 +1102,19 @@ namespace Utilities template - template + template void RemotePointEvaluation::process_and_evaluate( const std::vector &input, const std::function &, const CellData &)> - &evaluation_function) const + &evaluation_function, + const bool sort_data) const { std::vector buffer; - this->process_and_evaluate(input, buffer, evaluation_function); + this->process_and_evaluate(input, + buffer, + evaluation_function, + sort_data); } } // end of namespace MPI diff --git a/include/deal.II/multigrid/mg_transfer_global_coarsening.h b/include/deal.II/multigrid/mg_transfer_global_coarsening.h index 6f6e637272..8d00a58819 100644 --- a/include/deal.II/multigrid/mg_transfer_global_coarsening.h +++ b/include/deal.II/multigrid/mg_transfer_global_coarsening.h @@ -259,9 +259,10 @@ public: /** * Enable inplace vector operations if external and internal vectors - * are compatible. + * are compatible. The returned pair indicates if the operation + * was successful on the coarse and the fine level. */ - virtual void + virtual std::pair enable_inplace_operations_if_possible( const std::shared_ptr &partitioner_coarse, @@ -320,7 +321,7 @@ protected: * are compatible. */ template - void + std::pair internal_enable_inplace_operations_if_possible( const std::shared_ptr &partitioner_coarse, @@ -505,7 +506,7 @@ public: * Enable inplace vector operations if external and internal vectors * are compatible. */ - void + std::pair enable_inplace_operations_if_possible( const std::shared_ptr &partitioner_coarse, @@ -750,7 +751,7 @@ public: * Enable inplace vector operations if external and internal vectors * are compatible. */ - void + std::pair enable_inplace_operations_if_possible( const std::shared_ptr &partitioner_coarse, @@ -835,6 +836,16 @@ private: */ Utilities::MPI::RemotePointEvaluation rpe; + /** + * Vectors for input/output for rpe. + */ + mutable std::vector rpe_input_output; + + /** + * Buffers to be reused by rpe. + */ + mutable std::vector rpe_buffer; + /** * MappingInfo object needed as Mapping argument by FEPointEvaluation. 
*/ diff --git a/include/deal.II/multigrid/mg_transfer_global_coarsening.templates.h b/include/deal.II/multigrid/mg_transfer_global_coarsening.templates.h index a407cb3245..3d0becb8c3 100644 --- a/include/deal.II/multigrid/mg_transfer_global_coarsening.templates.h +++ b/include/deal.II/multigrid/mg_transfer_global_coarsening.templates.h @@ -3408,7 +3408,7 @@ namespace internal template template -void +std::pair MGTwoLevelTransferBase:: internal_enable_inplace_operations_if_possible( const std::shared_ptr @@ -3421,11 +3421,14 @@ MGTwoLevelTransferBase:: &constraint_info_coarse, std::vector &dof_indices_fine) { + std::pair success_flags = {false, false}; + if (this->partitioner_coarse->is_globally_compatible( *external_partitioner_coarse)) { this->vec_coarse.reinit(0); this->partitioner_coarse = external_partitioner_coarse; + success_flags.first = true; } else if (internal::is_partitioner_contained(this->partitioner_coarse, external_partitioner_coarse)) @@ -3445,6 +3448,7 @@ MGTwoLevelTransferBase:: external_partitioner_coarse); this->partitioner_coarse = external_partitioner_coarse; + success_flags.first = true; } vec_fine_needs_ghost_update = @@ -3456,6 +3460,7 @@ MGTwoLevelTransferBase:: { this->vec_fine.reinit(0); this->partitioner_fine = external_partitioner_fine; + success_flags.second = true; } else if (internal::is_partitioner_contained(this->partitioner_fine, external_partitioner_fine)) @@ -3471,20 +3476,23 @@ MGTwoLevelTransferBase:: external_partitioner_fine); this->partitioner_fine = external_partitioner_fine; + success_flags.second = true; } + + return success_flags; } template -void +std::pair MGTwoLevelTransfer::enable_inplace_operations_if_possible( const std::shared_ptr &external_partitioner_coarse, const std::shared_ptr &external_partitioner_fine) { - this->internal_enable_inplace_operations_if_possible( + return this->internal_enable_inplace_operations_if_possible( external_partitioner_coarse, external_partitioner_fine, this->vec_fine_needs_ghost_update, @@ -4908,10 +4916,6 @@ MGTwoLevelTransferNonNested::prolongate_and_add_internal_comp( VectorType &dst, const VectorType &src) const { - using Traits = internal::FEPointEvaluation:: - EvaluatorTypeTraits; - using value_type = typename Traits::value_type; - const auto evaluation_function = [&](auto &values, const auto &cell_data) { this->signals_non_nested.prolongation_cell_loop(true); std::vector solution_values; @@ -4919,6 +4923,8 @@ MGTwoLevelTransferNonNested::prolongate_and_add_internal_comp( FEPointEvaluation evaluator(*mapping_info, *fe_coarse); + const auto &send_permutation = rpe.get_send_permutation(); + for (unsigned int cell = 0; cell < cell_data.cells.size(); ++cell) { solution_values.resize(fe_coarse->n_dofs_per_cell()); @@ -4940,19 +4946,25 @@ MGTwoLevelTransferNonNested::prolongate_and_add_internal_comp( evaluator.evaluate(solution_values, dealii::EvaluationFlags::values); for (const auto q : evaluator.quadrature_point_indices()) - values[q + cell_data.reference_point_ptrs[cell]] = - evaluator.get_value(q); + { + const unsigned int index = + send_permutation[q + cell_data.reference_point_ptrs[cell]]; + + for (unsigned int c = 0; c < n_components; ++c) + values[index * n_components + c] = + internal::access(evaluator.get_value(q), c); + } } this->signals_non_nested.prolongation_cell_loop(false); }; this->signals_non_nested.prolongation(true); - std::vector evaluation_point_results; - std::vector buffer; - rpe.template evaluate_and_process(evaluation_point_results, - buffer, - evaluation_function); + std::vector 
&evaluation_point_results = this->rpe_input_output; + std::vector &buffer = this->rpe_buffer; + + rpe.template evaluate_and_process( + evaluation_point_results, buffer, evaluation_function, false); this->signals_non_nested.prolongation(false); // Keep a vector of typical inverse touch counts that avoid divisions, all @@ -4961,11 +4973,12 @@ MGTwoLevelTransferNonNested::prolongate_and_add_internal_comp( for (unsigned int i = 0; i < typical_weights.size(); ++i) typical_weights[i] = Number(1) / Number(i + 1); - const bool must_interpolate = (rpe.is_map_unique() == false); - const auto &ptr = rpe.get_point_ptrs(); + const bool must_interpolate = (rpe.is_map_unique() == false); + const auto &ptr = rpe.get_point_ptrs(); + const auto &recv_permutation_inv = rpe.get_inverse_recv_permutation(); for (unsigned int j = 0; j < ptr.size() - 1; ++j) { - value_type result; + std::array result; // Weight operator in case some points are owned by multiple cells. if (must_interpolate) { @@ -4975,19 +4988,33 @@ MGTwoLevelTransferNonNested::prolongate_and_add_internal_comp( { result = {}; for (unsigned int k = 0; k < n_entries; ++k) - result += evaluation_point_results[ptr[j] + k]; + { + const unsigned int index = recv_permutation_inv[ptr[j] + k]; + + for (unsigned int c = 0; c < n_components; ++c) + result[c] += + evaluation_point_results[index * n_components + c]; + } if (n_entries <= typical_weights.size()) - result *= typical_weights[n_entries - 1]; + for (unsigned int c = 0; c < n_components; ++c) + result[c] *= typical_weights[n_entries - 1]; else - result /= Number(n_entries); + for (unsigned int c = 0; c < n_components; ++c) + result[c] *= Number(1) / Number(n_entries); } else if (n_entries == 1) - result = evaluation_point_results[ptr[j]]; + { + const unsigned int index = recv_permutation_inv[ptr[j]]; + + for (unsigned int c = 0; c < n_components; ++c) + result[c] = evaluation_point_results[index * n_components + c]; + } else result = {}; } else - result = evaluation_point_results[j]; + for (unsigned int c = 0; c < n_components; ++c) + result[c] = evaluation_point_results[j * n_components + c]; if (level_dof_indices_fine_ptrs.empty()) { @@ -4997,7 +5024,7 @@ MGTwoLevelTransferNonNested::prolongate_and_add_internal_comp( this->level_dof_indices_fine.size()); dst.local_element( this->level_dof_indices_fine[n_components * j + c]) += - internal::access(result, c); + result[c]; } } else @@ -5011,7 +5038,7 @@ MGTwoLevelTransferNonNested::prolongate_and_add_internal_comp( this->level_dof_indices_fine.size()); dst.local_element( this->level_dof_indices_fine[n_components * i + c]) += - internal::access(result, c); + result[c]; } } } @@ -5046,20 +5073,21 @@ MGTwoLevelTransferNonNested::restrict_and_add_internal_comp( EvaluatorTypeTraits; using value_type = typename Traits::value_type; - std::vector evaluation_point_results; - std::vector buffer; + std::vector &evaluation_point_results = this->rpe_input_output; + std::vector &buffer = this->rpe_buffer; std::array typical_weights; for (unsigned int i = 0; i < typical_weights.size(); ++i) typical_weights[i] = Number(1) / Number(i + 1); - const bool must_interpolate = (rpe.is_map_unique() == false); - const auto &ptr = rpe.get_point_ptrs(); - evaluation_point_results.resize(ptr.size() - 1); + const bool must_interpolate = (rpe.is_map_unique() == false); + const auto &ptr = rpe.get_point_ptrs(); + const auto &recv_permutation_inv = rpe.get_inverse_recv_permutation(); + evaluation_point_results.resize(ptr.back() * n_components); - for (unsigned int j = 0; j < 
evaluation_point_results.size(); ++j) + for (unsigned int j = 0; j < ptr.size() - 1; ++j) { - value_type result; + std::array result; if (level_dof_indices_fine_ptrs.empty()) { for (unsigned int c = 0; c < n_components; ++c) @@ -5067,24 +5095,26 @@ MGTwoLevelTransferNonNested::restrict_and_add_internal_comp( AssertIndexRange(n_components * j + c, this->level_dof_indices_fine.size()); - internal::access(result, c) = src.local_element( + result[c] = src.local_element( this->level_dof_indices_fine[n_components * j + c]); } } else { - result = value_type(); + result = {}; for (unsigned int i = this->level_dof_indices_fine_ptrs[j]; i < this->level_dof_indices_fine_ptrs[j + 1]; ++i) - for (unsigned int c = 0; c < n_components; ++c) - { - AssertIndexRange(n_components * i + c, - this->level_dof_indices_fine.size()); - internal::access(result, c) += src.local_element( - this->level_dof_indices_fine[n_components * i + c]); - } + { + for (unsigned int c = 0; c < n_components; ++c) + { + AssertIndexRange(n_components * i + c, + this->level_dof_indices_fine.size()); + result[c] += src.local_element( + this->level_dof_indices_fine[n_components * i + c]); + } + } } if (must_interpolate) { @@ -5092,12 +5122,21 @@ MGTwoLevelTransferNonNested::restrict_and_add_internal_comp( if (n_entries > 1) { if (n_entries <= typical_weights.size()) - result *= typical_weights[n_entries - 1]; + for (unsigned int c = 0; c < n_components; ++c) + result[c] *= typical_weights[n_entries - 1]; else - result /= Number(n_entries); + for (unsigned int c = 0; c < n_components; ++c) + result[c] /= Number(n_entries); } } - evaluation_point_results[j] = result; + + for (unsigned int i = ptr[j]; i < ptr[j + 1]; ++i) + { + const unsigned int index = recv_permutation_inv[i]; + + for (unsigned int c = 0; c < n_components; ++c) + evaluation_point_results[index * n_components + c] = result[c]; + } } const auto evaluation_function = [&](const auto &values, @@ -5107,6 +5146,8 @@ MGTwoLevelTransferNonNested::restrict_and_add_internal_comp( FEPointEvaluation evaluator(*mapping_info, *fe_coarse); + const auto &send_permutation = rpe.get_send_permutation(); + for (unsigned int cell = 0; cell < cell_data.cells.size(); ++cell) { solution_values.resize(fe_coarse->n_dofs_per_cell()); @@ -5115,8 +5156,19 @@ MGTwoLevelTransferNonNested::restrict_and_add_internal_comp( evaluator.reinit(cell); for (const auto q : evaluator.quadrature_point_indices()) - evaluator.submit_value( - values[q + cell_data.reference_point_ptrs[cell]], q); + { + typename internal::FEPointEvaluation:: + EvaluatorTypeTraits::value_type + value; + + const unsigned int index = + send_permutation[q + cell_data.reference_point_ptrs[cell]]; + + for (unsigned int c = 0; c < n_components; ++c) + internal::access(value, c) = values[index * n_components + c]; + + evaluator.submit_value(value, q); + } evaluator.test_and_sum(solution_values, EvaluationFlags::values); @@ -5136,9 +5188,8 @@ MGTwoLevelTransferNonNested::restrict_and_add_internal_comp( }; this->signals_non_nested.restriction(true); - rpe.template process_and_evaluate(evaluation_point_results, - buffer, - evaluation_function); + rpe.template process_and_evaluate( + evaluation_point_results, buffer, evaluation_function, false); this->signals_non_nested.restriction(false); } @@ -5174,7 +5225,7 @@ MGTwoLevelTransferNonNested::interpolate( template -void +std::pair MGTwoLevelTransferNonNested:: enable_inplace_operations_if_possible( const std::shared_ptr @@ -5182,7 +5233,7 @@ MGTwoLevelTransferNonNested:: const std::shared_ptr 
&external_partitioner_fine) { - this->internal_enable_inplace_operations_if_possible( + return this->internal_enable_inplace_operations_if_possible( external_partitioner_coarse, external_partitioner_fine, this->vec_fine_needs_ghost_update, diff --git a/source/base/mpi_remote_point_evaluation.cc b/source/base/mpi_remote_point_evaluation.cc index 76f6580d58..147dbf08c7 100644 --- a/source/base/mpi_remote_point_evaluation.cc +++ b/source/base/mpi_remote_point_evaluation.cc @@ -233,6 +233,31 @@ namespace Utilities cell_data->reference_point_ptrs.emplace_back( cell_data->reference_point_values.size()); + unsigned int max_size_recv = 0; + for (unsigned int i = 0; i < recv_ranks.size(); ++i) + max_size_recv = + std::max(max_size_recv, recv_ptrs[i + 1] - recv_ptrs[i]); + + unsigned int max_size_send = 0; + for (unsigned int i = 0; i < send_ranks.size(); ++i) + max_size_send = + std::max(max_size_send, send_ptrs[i + 1] - send_ptrs[i]); + + this->buffer_size_with_sorting = + std::max(send_permutation.size() * 2 + max_size_recv, + point_ptrs.back() + send_permutation.size() + max_size_send); + + this->buffer_size_without_sorting = send_permutation.size(); + + // invert permutation matrices + recv_permutation_inv.resize(recv_permutation.size()); + for (unsigned int c = 0; c < recv_permutation.size(); ++c) + recv_permutation_inv[recv_permutation[c]] = c; + + send_permutation_inv.resize(send_permutation.size()); + for (unsigned int c = 0; c < send_permutation.size(); ++c) + send_permutation_inv[send_permutation[c]] = c; + this->ready_flag = true; } @@ -354,6 +379,24 @@ namespace Utilities return ready_flag; } + + + template + const std::vector & + RemotePointEvaluation::get_send_permutation() const + { + return send_permutation; + } + + + + template + const std::vector & + RemotePointEvaluation::get_inverse_recv_permutation() const + { + return recv_permutation_inv; + } + } // end of namespace MPI } // end of namespace Utilities diff --git a/tests/remote_point_evaluation/remote_point_evaluation_01.cc b/tests/remote_point_evaluation/remote_point_evaluation_01.cc index 84d1e4f966..d2bc960037 100644 --- a/tests/remote_point_evaluation/remote_point_evaluation_01.cc +++ b/tests/remote_point_evaluation/remote_point_evaluation_01.cc @@ -264,7 +264,7 @@ compute_force_vector_sharp_interface( const VectorType &curvature_vector, VectorType &force_vector) { - using T = Tensor<1, spacedim, double>; + using T = double; const auto integration_points = [&]() { std::vector> integration_points; @@ -330,12 +330,10 @@ compute_force_vector_sharp_interface( for (const auto q : fe_eval_dim.quadrature_point_indices()) { - T result; for (unsigned int i = 0; i < spacedim; ++i) - result[i] = -curvature_values[q] * normal_values[q][i] * - fe_eval.JxW(q) * surface_tension; - - integration_values.push_back(result); + integration_values.push_back(-curvature_values[q] * + normal_values[q][i] * + fe_eval.JxW(q) * surface_tension); } } @@ -371,8 +369,9 @@ compute_force_vector_sharp_interface( cell_data.reference_point_ptrs[i + 1] - cell_data.reference_point_ptrs[i]); - const ArrayView force_JxW( - values.data() + cell_data.reference_point_ptrs[i], + const ArrayView> force_JxW( + reinterpret_cast *>(values.data()) + + cell_data.reference_point_ptrs[i], cell_data.reference_point_ptrs[i + 1] - cell_data.reference_point_ptrs[i]); @@ -391,7 +390,9 @@ compute_force_vector_sharp_interface( std::vector buffer; - eval.template process_and_evaluate(integration_values, buffer, fu); + eval.template process_and_evaluate(integration_values, + buffer, 
+ fu); }
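For reference, the unsorted fast path added by this patch can also be driven directly, mirroring how MGTwoLevelTransferNonNested uses it above. The following sketch is not part of the patch: it assumes an already set-up RemotePointEvaluation object `rpe`, evaluates a placeholder constant in the cell loop, and simply sums duplicate results per evaluation point.

  #include <deal.II/base/mpi_remote_point_evaluation.h>

  #include <vector>

  using namespace dealii;

  template <int dim>
  std::vector<double>
  evaluate_without_sorting(
    const Utilities::MPI::RemotePointEvaluation<dim> &rpe)
  {
    const auto &send_permutation = rpe.get_send_permutation();

    const auto evaluation_function =
      [&](const ArrayView<double> &values, const auto &cell_data) {
        // with sort_data = false, results are written at their send
        // positions, so no sorting pass is needed before communication
        for (unsigned int cell = 0; cell < cell_data.cells.size(); ++cell)
          for (unsigned int q = cell_data.reference_point_ptrs[cell];
               q < cell_data.reference_point_ptrs[cell + 1];
               ++q)
            values[send_permutation[q]] = 1.0; // placeholder result
      };

    std::vector<double> output, buffer;

    // sort_data = false: doubles are sent as native MPI types and the
    // pack/unpack via Utilities::pack() is skipped
    rpe.template evaluate_and_process<double>(output,
                                              buffer,
                                              evaluation_function,
                                              /*sort_data=*/false);

    // received data is contiguous rank by rank; the inverse receive
    // permutation gathers all values belonging to evaluation point j
    const auto &ptrs                     = rpe.get_point_ptrs();
    const auto &inverse_recv_permutation = rpe.get_inverse_recv_permutation();

    std::vector<double> result(ptrs.size() - 1, 0.0);
    for (unsigned int j = 0; j < ptrs.size() - 1; ++j)
      for (unsigned int k = ptrs[j]; k < ptrs[j + 1]; ++k)
        result[j] += output[inverse_recv_permutation[k]]; // sum duplicates
    return result;
  }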