From 9363cab6b03d1bac6d139a3d828a4ebff92fa055 Mon Sep 17 00:00:00 2001
From: Denis Davydov
Date: Tue, 19 Mar 2019 17:55:41 +0100
Subject: [PATCH] use Utilities::MPI::some_to_some() in
 SparsityTools::distribute_sparsity_pattern(), improve const-correctness

---
 source/lac/dynamic_sparsity_pattern.cc |   5 ++
 source/lac/sparsity_tools.cc           | 116 ++++++------------------
 2 files changed, 30 insertions(+), 91 deletions(-)

diff --git a/source/lac/dynamic_sparsity_pattern.cc b/source/lac/dynamic_sparsity_pattern.cc
index dcf53e1bbc..95ca12cdc4 100644
--- a/source/lac/dynamic_sparsity_pattern.cc
+++ b/source/lac/dynamic_sparsity_pattern.cc
@@ -612,6 +612,11 @@ template void
 DynamicSparsityPattern::Line::add_entries(std::vector<size_type>::iterator,
                                           std::vector<size_type>::iterator,
                                           const bool);
+template void
+DynamicSparsityPattern::Line::add_entries(
+  std::vector<size_type>::const_iterator,
+  std::vector<size_type>::const_iterator,
+  const bool);
 #endif
 
 template void
diff --git a/source/lac/sparsity_tools.cc b/source/lac/sparsity_tools.cc
index e04c9e54f4..eeba2592de 100644
--- a/source/lac/sparsity_tools.cc
+++ b/source/lac/sparsity_tools.cc
@@ -925,21 +925,19 @@ namespace SparsityTools
     for (DynamicSparsityPattern::size_type i = 0; i < rows_per_cpu.size(); ++i)
       start_index[i + 1] = start_index[i] + rows_per_cpu[i];
 
-    using map_vec_t = std::map<DynamicSparsityPattern::size_type,
-                               std::vector<DynamicSparsityPattern::size_type>>;
+    using map_vec_t =
+      std::map<unsigned int, std::vector<DynamicSparsityPattern::size_type>>;
 
     map_vec_t send_data;
 
     {
-      unsigned int dest_cpu = 0;
-
-      DynamicSparsityPattern::size_type n_local_rel_rows = myrange.n_elements();
+      unsigned int dest_cpu = 0;
+      const auto n_local_rel_rows = myrange.n_elements();
       for (DynamicSparsityPattern::size_type row_idx = 0;
            row_idx < n_local_rel_rows;
           ++row_idx)
        {
-          DynamicSparsityPattern::size_type row =
-            myrange.nth_index_in_set(row_idx);
+          const auto row = myrange.nth_index_in_set(row_idx);
 
           // calculate destination CPU
           while (row >= start_index[dest_cpu + 1])
@@ -952,109 +950,45 @@ namespace SparsityTools
               continue;
             }
 
-          DynamicSparsityPattern::size_type rlen = dsp.row_length(row);
+          const auto rlen = dsp.row_length(row);
 
           // skip empty lines
           if (!rlen)
             continue;
 
           // save entries
-          std::vector<DynamicSparsityPattern::size_type> &dst =
-            send_data[dest_cpu];
+          auto &dst = send_data[dest_cpu];
 
           dst.push_back(rlen); // number of entries
           dst.push_back(row); // row index
           for (DynamicSparsityPattern::size_type c = 0; c < rlen; ++c)
             {
               // columns
-              DynamicSparsityPattern::size_type column =
-                dsp.column_number(row, c);
+              const auto column = dsp.column_number(row, c);
               dst.push_back(column);
             }
         }
     }
 
-    unsigned int num_receive = 0;
-    {
-      std::vector<unsigned int> send_to;
-      send_to.reserve(send_data.size());
-      for (const auto &sparsity_line : send_data)
-        send_to.push_back(sparsity_line.first);
-
-      num_receive =
-        Utilities::MPI::compute_n_point_to_point_communications(mpi_comm,
-                                                                send_to);
-    }
-
-    std::vector<MPI_Request> requests(send_data.size());
+    const auto receive_data = Utilities::MPI::some_to_some(mpi_comm, send_data);
 
-
-    // send data
-    {
-      unsigned int idx = 0;
-      for (const auto &sparsity_line : send_data)
-        {
-          const int ierr =
-            MPI_Isend(DEAL_II_MPI_CONST_CAST(sparsity_line.second.data()),
-                      sparsity_line.second.size(),
-                      DEAL_II_DOF_INDEX_MPI_TYPE,
-                      sparsity_line.first,
-                      124,
-                      mpi_comm,
-                      &requests[idx++]);
-          AssertThrowMPI(ierr);
-        }
-    }
-
-    {
-      // receive
-      std::vector<DynamicSparsityPattern::size_type> recv_buf;
-      for (unsigned int index = 0; index < num_receive; ++index)
-        {
-          MPI_Status status;
-          int len;
-          int ierr = MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, mpi_comm, &status);
-          AssertThrowMPI(ierr);
-          Assert(status.MPI_TAG == 124, ExcInternalError());
-
-          ierr = MPI_Get_count(&status, DEAL_II_DOF_INDEX_MPI_TYPE, &len);
-          AssertThrowMPI(ierr);
-          recv_buf.resize(len);
-          ierr = MPI_Recv(recv_buf.data(),
-                          len,
-                          DEAL_II_DOF_INDEX_MPI_TYPE,
-                          status.MPI_SOURCE,
-                          status.MPI_TAG,
-                          mpi_comm,
-                          &status);
-          AssertThrowMPI(ierr);
-
-          std::vector<DynamicSparsityPattern::size_type>::const_iterator ptr =
-            recv_buf.begin();
-          std::vector<DynamicSparsityPattern::size_type>::const_iterator end =
-            recv_buf.end();
-          while (ptr != end)
-            {
-              DynamicSparsityPattern::size_type num = *(ptr++);
-              Assert(ptr != end, ExcInternalError());
-              DynamicSparsityPattern::size_type row = *(ptr++);
-              for (unsigned int c = 0; c < num; ++c)
-                {
-                  Assert(ptr != end, ExcInternalError());
-                  dsp.add(row, *ptr);
-                  ++ptr;
-                }
-            }
-          Assert(ptr == end, ExcInternalError());
-        }
-    }
-
-    // complete all sends, so that we can safely destroy the buffers.
-    if (requests.size())
+    // add what we received
+    for (const auto &data : receive_data)
       {
-        const int ierr =
-          MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
-        AssertThrowMPI(ierr);
+        const auto &recv_buf = data.second;
+        auto ptr = recv_buf.begin();
+        const auto end = recv_buf.end();
+        while (ptr != end)
+          {
+            const DynamicSparsityPattern::size_type num = *(ptr++);
+            Assert(ptr != end, ExcInternalError());
+            const DynamicSparsityPattern::size_type row = *(ptr++);
+
+            Assert(ptr + (num - 1) != end, ExcInternalError());
+            dsp.add_entries(row, ptr, ptr + num, true);
+            ptr += num;
+          }
+        Assert(ptr == end, ExcInternalError());
      }
   }
-- 
2.39.5
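
Reviewer note (illustration only, not part of the commit): below is a minimal
sketch of the Utilities::MPI::some_to_some() exchange pattern this patch
switches to, using the same {row_length, row_index, columns...} payload layout
that distribute_sparsity_pattern() packs into send_data. The function name
example_exchange and the concrete ranks and entries are made up for the
example; only some_to_some() and types::global_dof_index are existing deal.II
API, and an initialized MPI communicator is assumed.

    #include <deal.II/base/mpi.h>
    #include <deal.II/base/types.h>

    #include <map>
    #include <vector>

    using size_type = dealii::types::global_dof_index;

    void example_exchange(const MPI_Comm mpi_comm)
    {
      // Payload destined for rank 1: one row of length 3 with row index 42
      // and column indices {7, 11, 15}, i.e. {rlen, row, columns...}.
      std::map<unsigned int, std::vector<size_type>> send_data;
      send_data[1] = {3, 42, 7, 11, 15};

      // Every process passes its rank->payload map; the returned map is keyed
      // by the ranks that actually sent something to this process, so the
      // manual MPI_Isend/MPI_Probe/MPI_Recv bookkeeping above disappears.
      const auto receive_data =
        dealii::Utilities::MPI::some_to_some(mpi_comm, send_data);

      for (const auto &data : receive_data)
        {
          // data.first is the sending rank, data.second the packed rows,
          // which can be unpacked exactly as in the patched loop above.
          (void)data;
        }
    }

The final 'true' passed to dsp.add_entries() in the new loop states that the
incoming column indices are already unique and sorted, which holds here
because DynamicSparsityPattern::column_number() walks each row in ascending
column order.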