From: Martin Kronbichler
Date: Sun, 22 Mar 2020 06:27:43 +0000 (+0100)
Subject: distribute_sparsity_pattern with locally owned dofs only
X-Git-Tag: v9.2.0-rc1~239^2~4
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=e3ce622b11a87039b5ee38412524b47240d1d5b2;p=dealii.git

distribute_sparsity_pattern with locally owned dofs only
---

diff --git a/include/deal.II/lac/sparsity_tools.h b/include/deal.II/lac/sparsity_tools.h
index 312d304105..2c24ff59f5 100644
--- a/include/deal.II/lac/sparsity_tools.h
+++ b/include/deal.II/lac/sparsity_tools.h
@@ -246,26 +246,41 @@ namespace SparsityTools
   /**
    * Communicate rows in a dynamic sparsity pattern over MPI.
    *
-   * @param dsp A dynamic sparsity pattern that has been built locally and for
-   * which we need to exchange entries with other processors to make sure that
-   * each processor knows all the elements of the rows of a matrix it stores
-   * and that may eventually be written to. This sparsity pattern will be
-   * changed as a result of this function: All entries in rows that belong to
-   * a different processor are sent to them and added there.
+   * @param[in,out] dsp A dynamic sparsity pattern that has been built locally
+   * and for which we need to exchange entries with other processors to make
+   * sure that each processor knows all the elements of the rows of a matrix
+   * it stores and that may eventually be written to. This sparsity pattern
+   * will be changed as a result of this function: All entries in rows that
+   * belong to a different processor are sent to them and added there.
    *
-   * @param rows_per_cpu A vector containing the number of of rows per CPU for
-   * determining ownership. This is typically the value returned by
-   * DoFHandler::n_locally_owned_dofs_per_processor.
+   * @param locally_owned_rows An IndexSet describing the rows owned by the
+   * calling MPI process. The index set shall be one-to-one among the
+   * processors in the communicator.
    *
    * @param mpi_comm The MPI communicator shared between the processors that
    * participate in this operation.
    *
-   * @param myrange The range of elements stored locally. This should be the
-   * one used in the constructor of the DynamicSparsityPattern, and should
-   * also be the locally relevant set. Only rows contained in myrange are
-   * checked in dsp for transfer. This function needs to be used with
-   * PETScWrappers::MPI::SparseMatrix for it to work correctly in a parallel
-   * computation.
+   * @param locally_relevant_rows The range of elements stored on the local
+   * MPI process. This should be the one used in the constructor of the
+   * DynamicSparsityPattern, and should also be the locally relevant set. Only
+   * rows contained in this set are checked in dsp for transfer. This function
+   * needs to be used with PETScWrappers::MPI::SparseMatrix for it to work
+   * correctly in a parallel computation.
+   */
+  void
+  distribute_sparsity_pattern(DynamicSparsityPattern &dsp,
+                              const IndexSet &        locally_owned_rows,
+                              const MPI_Comm &        mpi_comm,
+                              const IndexSet &        locally_relevant_rows);
+
+  /**
+   * Communicate rows in a dynamic sparsity pattern over MPI, similar to the
+   * one above but using a vector `rows_per_cpu` containing the number of
+   * rows per CPU for determining ownership. This is typically the value
+   * returned by DoFHandler::n_locally_owned_dofs_per_processor -- given that
+   * the construction of the input to this function involves all-to-all
+   * communication, it is typically slower than the function above for more
+   * than a thousand processes (and quick enough for small process counts).
    */
   void
   distribute_sparsity_pattern(
@@ -279,12 +294,24 @@ namespace SparsityTools
    * instead.
    *
    * @param[in,out] dsp The locally built sparsity pattern to be modified.
-   * @param owned_set_per_cpu Typically the value given by
-   * DoFHandler::locally_owned_dofs_per_processor.
+   *
+   * @param locally_owned_rows An IndexSet describing the rows owned by the
+   * calling MPI process. The index set shall be one-to-one among the
+   * processors in the communicator.
    *
    * @param mpi_comm The MPI communicator to use.
    *
-   * @param myrange Typically the locally relevant DoFs.
+   * @param locally_relevant_rows Typically the locally relevant DoFs.
+   */
+  void
+  distribute_sparsity_pattern(BlockDynamicSparsityPattern &dsp,
+                              const IndexSet &             locally_owned_rows,
+                              const MPI_Comm &             mpi_comm,
+                              const IndexSet &locally_relevant_rows);
+
+  /**
+   * @deprecated Use distribute_sparsity_pattern() with a single index set
+   * for the present MPI process only.
    */
   void
   distribute_sparsity_pattern(BlockDynamicSparsityPattern &dsp,
@@ -304,17 +331,28 @@ namespace SparsityTools
    * we need to extend according to the sparsity of rows stored on other MPI
    * processes.
    *
-   * @param owned_rows_per_processor A vector containing owned rows for each
-   * process in the MPI communicator. This input should be the same on all MPI
-   * processes.
+   * @param locally_owned_rows An IndexSet describing the rows owned by the
+   * calling MPI process. The index set shall be one-to-one among the
+   * processors in the communicator.
    *
    * @param mpi_comm The MPI communicator shared between the processors that
    * participate in this operation.
    *
-   * @param ghost_range The range of rows this MPI process needs to gather.
-   * Only a part which is not included in the locally owned rows will be used.
+   * @param locally_relevant_rows The range of rows this MPI process needs to
+   * gather. Only the part which is not included in the locally owned rows will
+   * be used.
    */
   void
+  gather_sparsity_pattern(DynamicSparsityPattern &dsp,
+                          const IndexSet &        locally_owned_rows,
+                          const MPI_Comm &        mpi_comm,
+                          const IndexSet &        locally_relevant_rows);
+
+  /**
+   * @deprecated Use the gather_sparsity_pattern() method with the index set
+   * for the present processor only.
+   */
+  DEAL_II_DEPRECATED void
   gather_sparsity_pattern(DynamicSparsityPattern &     dsp,
                           const std::vector<IndexSet> &owned_rows_per_processor,
                           const MPI_Comm &             mpi_comm,
                           const IndexSet &             ghost_range);
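
A minimal usage sketch of the new IndexSet-based interface declared above (this example is
not part of the patch): it assumes a deal.II build with MPI and p4est, and the mesh, element,
and variable names (hyper_cube, FE_Q(1), mpi_communicator) are illustrative assumptions only.
Only the two index sets of the calling process are passed; no per-process ownership
information needs to be gathered first.

#include <deal.II/base/mpi.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/sparsity_tools.h>

int main(int argc, char *argv[])
{
  using namespace dealii;

  Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);
  const MPI_Comm mpi_communicator = MPI_COMM_WORLD;

  // Illustrative mesh and element; any distributed DoFHandler works.
  parallel::distributed::Triangulation<2> triangulation(mpi_communicator);
  GridGenerator::hyper_cube(triangulation);
  triangulation.refine_global(4);

  FE_Q<2>       fe(1);
  DoFHandler<2> dof_handler(triangulation);
  dof_handler.distribute_dofs(fe);

  const IndexSet &locally_owned_dofs = dof_handler.locally_owned_dofs();
  IndexSet        locally_relevant_dofs;
  DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs);

  // Build the pattern on the locally relevant rows, then send rows that
  // belong to other processes to their owners. Only the index sets of the
  // calling process are needed -- no per-process vectors are exchanged.
  DynamicSparsityPattern dsp(locally_relevant_dofs);
  DoFTools::make_sparsity_pattern(dof_handler, dsp);
  SparsityTools::distribute_sparsity_pattern(dsp,
                                             locally_owned_dofs,
                                             mpi_communicator,
                                             locally_relevant_dofs);
}

The resulting dsp can then be used to initialize, for example, a
PETScWrappers::MPI::SparseMatrix, as noted in the documentation above.
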
diff --git a/source/lac/sparsity_tools.cc b/source/lac/sparsity_tools.cc
index e0e1e8d35d..1b2c83187d 100644
--- a/source/lac/sparsity_tools.cc
+++ b/source/lac/sparsity_tools.cc
@@ -915,52 +915,49 @@ namespace SparsityTools
 
   void
   gather_sparsity_pattern(DynamicSparsityPattern &     dsp,
-                          const std::vector<IndexSet> &owned,
+                          const std::vector<IndexSet> &owned_rows_per_processor,
                           const MPI_Comm &             mpi_comm,
                           const IndexSet &             ghost_range)
   {
     const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
+    gather_sparsity_pattern(dsp,
+                            owned_rows_per_processor[myid],
+                            mpi_comm,
+                            ghost_range);
+  }
 
-    AssertDimension(owned.size(), Utilities::MPI::n_mpi_processes(mpi_comm));
-#  ifdef DEBUG
-    for (const auto &set : owned)
-      Assert(set.is_contiguous(), ExcNotImplemented());
-#  endif
-
-    Assert(owned[myid].is_ascending_and_one_to_one(mpi_comm),
-           ExcNotImplemented());
-    std::vector<DynamicSparsityPattern::size_type> start_index(owned.size() +
-                                                               1);
-    start_index[0] = 0;
-    for (DynamicSparsityPattern::size_type i = 0; i < owned.size(); ++i)
-      start_index[i + 1] = start_index[i] + owned[i].n_elements();
+  void
+  gather_sparsity_pattern(DynamicSparsityPattern &dsp,
+                          const IndexSet &        locally_owned_rows,
+                          const MPI_Comm &        mpi_comm,
+                          const IndexSet &        locally_relevant_rows)
+  {
     using map_vec_t =
       std::map<unsigned int, std::vector<DynamicSparsityPattern::size_type>>;
 
     // 1. limit rows to non owned:
-    IndexSet requested_rows(ghost_range);
-    requested_rows.subtract_set(owned[myid]);
+    IndexSet requested_rows(locally_relevant_rows);
+    requested_rows.subtract_set(locally_owned_rows);
+
+    std::vector<unsigned int> index_owner =
+      Utilities::MPI::compute_index_owner(locally_owned_rows,
+                                          requested_rows,
+                                          mpi_comm);
 
     // 2. go through requested_rows, figure out the owner and add the row to
     // request
     map_vec_t rows_data;
-    {
-      unsigned int cpu_rank = 0;
-      for (const auto &row : requested_rows)
-        {
-          // calculate destination CPU
-          while (row >= start_index[cpu_rank + 1])
-            ++cpu_rank;
-
-          // since we removed owned, we should not end up with
-          // our rows
-          Assert(cpu_rank != myid, ExcInternalError());
+    for (DynamicSparsityPattern::size_type i = 0;
+         i < requested_rows.n_elements();
+         ++i)
+      {
+        const DynamicSparsityPattern::size_type row =
+          requested_rows.nth_index_in_set(i);
 
-          rows_data[cpu_rank].push_back(row);
-        }
-    }
+        rows_data[index_owner[i]].push_back(row);
+      }
 
     // 3. get what others ask us to send
     const auto rows_data_received =
@@ -1040,42 +1037,53 @@ namespace SparsityTools
     IndexSet owned(start_index.back());
     owned.add_range(start_index[myid], start_index[myid] + rows_per_cpu[myid]);
-    IndexSet myrange_non_owned(myrange);
-    myrange_non_owned.subtract_set(owned);
+    distribute_sparsity_pattern(dsp, owned, mpi_comm, myrange);
+  }
+
+
+
+  void
+  distribute_sparsity_pattern(DynamicSparsityPattern &dsp,
+                              const IndexSet &        locally_owned_rows,
+                              const MPI_Comm &        mpi_comm,
+                              const IndexSet &        locally_relevant_rows)
+  {
+    IndexSet requested_rows(locally_relevant_rows);
+    requested_rows.subtract_set(locally_owned_rows);
+
+    std::vector<unsigned int> index_owner =
+      Utilities::MPI::compute_index_owner(locally_owned_rows,
+                                          requested_rows,
+                                          mpi_comm);
 
     using map_vec_t =
       std::map<unsigned int, std::vector<DynamicSparsityPattern::size_type>>;
 
     map_vec_t send_data;
 
-    {
-      unsigned int dest_cpu = 0;
-      for (const auto &row : myrange_non_owned)
-        {
-          // calculate destination CPU
-          while (row >= start_index[dest_cpu + 1])
-            ++dest_cpu;
-
-          // we removed owned, thus shall not hit ourselves
-          Assert(dest_cpu != myid, ExcInternalError());
+    for (DynamicSparsityPattern::size_type i = 0;
+         i < requested_rows.n_elements();
+         ++i)
+      {
+        const DynamicSparsityPattern::size_type row =
+          requested_rows.nth_index_in_set(i);
 
-          const auto rlen = dsp.row_length(row);
+        const auto rlen = dsp.row_length(row);
 
-          // skip empty lines
-          if (!rlen)
-            continue;
+        // skip empty lines
+        if (!rlen)
+          continue;
 
-          // save entries
-          send_data[dest_cpu].push_back(row);  // row index
-          send_data[dest_cpu].push_back(rlen); // number of entries
-          for (DynamicSparsityPattern::size_type c = 0; c < rlen; ++c)
-            {
-              // columns
-              const auto column = dsp.column_number(row, c);
-              send_data[dest_cpu].push_back(column);
-            }
-        }
-    }
+        // save entries
+        send_data[index_owner[i]].push_back(row);  // row index
+        send_data[index_owner[i]].push_back(rlen); // number of entries
+        for (DynamicSparsityPattern::size_type c = 0; c < rlen; ++c)
+          {
+            // columns
+            const auto column = dsp.column_number(row, c);
+            send_data[index_owner[i]].push_back(column);
+          }
+      }
 
     const auto receive_data =
       Utilities::MPI::some_to_some(mpi_comm, send_data);
@@ -1099,6 +1107,8 @@ namespace SparsityTools
         }
     }
 
+
+
   void
   distribute_sparsity_pattern(BlockDynamicSparsityPattern &dsp,
                               const std::vector<IndexSet> &owned_set_per_cpu,
@@ -1106,59 +1116,60 @@ namespace SparsityTools
                               const IndexSet &             myrange)
   {
     const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
+    distribute_sparsity_pattern(dsp,
+                                owned_set_per_cpu[myid],
+                                mpi_comm,
+                                myrange);
+  }
+
+
+  void
+  distribute_sparsity_pattern(BlockDynamicSparsityPattern &dsp,
+                              const IndexSet &             locally_owned_rows,
+                              const MPI_Comm &             mpi_comm,
+                              const IndexSet &locally_relevant_rows)
+  {
     using map_vec_t =
       std::map<unsigned int,
               std::vector<BlockDynamicSparsityPattern::size_type>>;
 
     map_vec_t send_data;
 
-    {
-      unsigned int dest_cpu = 0;
+    IndexSet requested_rows(locally_relevant_rows);
+    requested_rows.subtract_set(locally_owned_rows);
 
-      BlockDynamicSparsityPattern::size_type n_local_rel_rows =
-        myrange.n_elements();
-      for (BlockDynamicSparsityPattern::size_type row_idx = 0;
-           row_idx < n_local_rel_rows;
-           ++row_idx)
-        {
-          BlockDynamicSparsityPattern::size_type row =
-            myrange.nth_index_in_set(row_idx);
+    std::vector<unsigned int> index_owner =
+      Utilities::MPI::compute_index_owner(locally_owned_rows,
+                                          requested_rows,
+                                          mpi_comm);
 
-          // calculate destination CPU, note that we start the search
-          // at last destination cpu, because even if the owned ranges
-          // are not contiguous, they hopefully consist of large blocks
-          while (!owned_set_per_cpu[dest_cpu].is_element(row))
-            {
-              ++dest_cpu;
-              if (dest_cpu == owned_set_per_cpu.size()) // wrap around
-                dest_cpu = 0;
-            }
-
-          // skip myself
-          if (dest_cpu == myid)
-            continue;
+    for (DynamicSparsityPattern::size_type i = 0;
+         i < requested_rows.n_elements();
+         ++i)
+      {
+        const DynamicSparsityPattern::size_type row =
+          requested_rows.nth_index_in_set(i);
 
-          BlockDynamicSparsityPattern::size_type rlen = dsp.row_length(row);
+        BlockDynamicSparsityPattern::size_type rlen = dsp.row_length(row);
 
-          // skip empty lines
-          if (!rlen)
-            continue;
+        // skip empty lines
+        if (!rlen)
+          continue;
 
-          // save entries
-          std::vector<BlockDynamicSparsityPattern::size_type> &dst =
-            send_data[dest_cpu];
+        // save entries
+        std::vector<BlockDynamicSparsityPattern::size_type> &dst =
+          send_data[index_owner[i]];
 
-          dst.push_back(rlen); // number of entries
-          dst.push_back(row);  // row index
-          for (BlockDynamicSparsityPattern::size_type c = 0; c < rlen; ++c)
-            {
-              // columns
-              BlockDynamicSparsityPattern::size_type column =
-                dsp.column_number(row, c);
-              dst.push_back(column);
-            }
-        }
-    }
+        dst.push_back(rlen); // number of entries
+        dst.push_back(row);  // row index
+        for (BlockDynamicSparsityPattern::size_type c = 0; c < rlen; ++c)
+          {
+            // columns
+            BlockDynamicSparsityPattern::size_type column =
+              dsp.column_number(row, c);
+            dst.push_back(column);
+          }
+      }
 
     unsigned int num_receive = 0;
     {
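
The refactored bodies above all follow the same pattern: instead of searching through
per-process ownership ranges, they ask Utilities::MPI::compute_index_owner() for the owning
rank of every requested row and bucket the rows by rank before exchanging them with
Utilities::MPI::some_to_some(). The standalone sketch below is not part of the patch; the
contiguous ten-index ownership blocks are made-up example data, and compute_index_owner()
does not require contiguous ownership.

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>

#include <map>
#include <vector>

int main(int argc, char *argv[])
{
  using namespace dealii;

  Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);
  const MPI_Comm     comm    = MPI_COMM_WORLD;
  const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
  const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);

  // Made-up ownership: each rank owns a block of 10 global indices.
  IndexSet locally_owned(10 * n_ranks);
  locally_owned.add_range(10 * my_rank, 10 * (my_rank + 1));

  // Pretend this rank also needs the first index of the next rank's block
  // (in the library code this is the non-owned part of the relevant rows).
  IndexSet requested(10 * n_ranks);
  if (my_rank + 1 < n_ranks)
    requested.add_index(10 * (my_rank + 1));

  // One owning rank per requested index, determined without building the
  // global list of ownership ranges on every process.
  const std::vector<unsigned int> owners =
    Utilities::MPI::compute_index_owner(locally_owned, requested, comm);

  // Bucket the requests by owner, ready for Utilities::MPI::some_to_some().
  std::map<unsigned int, std::vector<IndexSet::size_type>> buckets;
  for (IndexSet::size_type i = 0; i < requested.n_elements(); ++i)
    buckets[owners[i]].push_back(requested.nth_index_in_set(i));
}

This owner lookup is what allows the new overloads to take only the locally owned IndexSet
of the calling process instead of a replicated vector of index sets for all processes.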